forked from cerc-io/plugeth

Merge pull request #110 from openrelayxyz/merge/geth-v1.13.14

Merge/geth v1.13.14

commit 1a89183720

.github/workflows/go.yml (vendored): 2 changes
@@ -17,7 +17,7 @@ jobs:
        with:
          go-version: 1.21.4
      - name: Run tests
-       run: go test ./...
+       run: go test -short ./...
        env:
          GOOS: linux
          GOARCH: 386
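For context on the -short flag added above: Go's testing package exposes testing.Short(), and long-running tests conventionally guard themselves with it. The sketch below is illustrative only; the test name and skip message are hypothetical and not part of this diff.

    package example

    import "testing"

    // TestExpensiveSync is a hypothetical long-running test. Under the
    // workflow's new `go test -short ./...` invocation, testing.Short()
    // reports true and the expensive body is skipped.
    func TestExpensiveSync(t *testing.T) {
        if testing.Short() {
            t.Skip("skipping in -short mode")
        }
        // ... expensive assertions would run here in full test mode ...
    }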
Makefile: 12 changes
@@ -8,20 +8,25 @@ GOBIN = ./build/bin
 GO ?= latest
 GORUN = go run
 
+#? geth: Build geth
 geth:
 	$(GORUN) build/ci.go install ./cmd/geth
 	@echo "Done building."
 	@echo "Run \"$(GOBIN)/geth\" to launch geth."
 
+#? all: Build all packages and executables
 all:
 	$(GORUN) build/ci.go install
 
+#? test: Run the tests
 test: all
 	$(GORUN) build/ci.go test
 
+#? lint: Run certain pre-selected linters
 lint: ## Run linters.
 	$(GORUN) build/ci.go lint
 
+#? clean: Clean go cache, built executables, and the auto generated folder
 clean:
 	go clean -cache
 	rm -fr build/_workspace/pkg/ $(GOBIN)/*
@@ -29,6 +34,7 @@ clean:
 # The devtools target installs tools required for 'go generate'.
 # You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'.
 
+#? devtools: Install recommended developer tools
 devtools:
 	env GOBIN= go install golang.org/x/tools/cmd/stringer@latest
 	env GOBIN= go install github.com/fjl/gencodec@latest
@@ -36,3 +42,9 @@ devtools:
 	env GOBIN= go install ./cmd/abigen
 	@type "solc" 2> /dev/null || echo 'Please install solc'
 	@type "protoc" 2> /dev/null || echo 'Please install protoc'
+
+#? help: Get more info on make commands.
+help: Makefile
+	@echo " Choose a command run in go-ethereum:"
+	@sed -n 's/^#?//p' $< | column -t -s ':' | sort | sed -e 's/^/ /'
+.PHONY: help
@@ -29,7 +29,7 @@ import (
 )
 
 // The ABI holds information about a contract's context and available
-// invokable methods. It will allow you to type check function calls and
+// invocable methods. It will allow you to type check function calls and
 // packs data accordingly.
 type ABI struct {
 	Constructor Method
@@ -20,7 +20,7 @@ import (
 	"context"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethclient/simulated"
 )
 
@@ -43,8 +43,8 @@ func (b *SimulatedBackend) Fork(ctx context.Context, parentHash common.Hash) err
 //
 // Deprecated: please use simulated.Backend from package
 // github.com/ethereum/go-ethereum/ethclient/simulated instead.
-func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
-	b := simulated.New(alloc, gasLimit)
+func NewSimulatedBackend(alloc types.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
+	b := simulated.NewBackend(alloc, simulated.WithBlockGasLimit(gasLimit))
 	return &SimulatedBackend{
 		Backend: b,
 		Client:  b.Client(),
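The hunk above moves the deprecated wrapper onto the new ethclient/simulated API: the genesis allocation type now comes from core/types and the backend is built with simulated.NewBackend plus functional options. A minimal sketch of calling the new API directly, using only names that appear in this diff; the balance value and variable names are illustrative:

    package main

    import (
        "math/big"

        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/crypto"
        "github.com/ethereum/go-ethereum/ethclient/simulated"
    )

    func main() {
        // Fund one illustrative account in the genesis allocation.
        key, _ := crypto.GenerateKey()
        addr := crypto.PubkeyToAddress(key.PublicKey)
        alloc := types.GenesisAlloc{addr: {Balance: big.NewInt(1000000000000000000)}}

        // NewBackend takes the allocation plus functional options such as the
        // block gas limit, as in the replacement line above.
        backend := simulated.NewBackend(alloc, simulated.WithBlockGasLimit(10_000_000))
        defer backend.Close()

        // RPC-style access goes through the embedded client.
        client := backend.Client()
        _ = client
    }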
@@ -289,7 +289,7 @@ var bindTests = []struct {
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 `
@@ -297,7 +297,7 @@ var bindTests = []struct {
 	key, _ := crypto.GenerateKey()
 	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	// Deploy an interaction tester contract and call a transaction on it
@@ -345,7 +345,7 @@ var bindTests = []struct {
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 `
@@ -353,7 +353,7 @@ var bindTests = []struct {
 	key, _ := crypto.GenerateKey()
 	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	// Deploy a tuple tester contract and execute a structured call on it
@@ -391,7 +391,7 @@ var bindTests = []struct {
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 `
@@ -399,7 +399,7 @@ var bindTests = []struct {
 	key, _ := crypto.GenerateKey()
 	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	// Deploy a tuple tester contract and execute a structured call on it
@@ -449,7 +449,7 @@ var bindTests = []struct {
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 `
@@ -457,7 +457,7 @@ var bindTests = []struct {
 	key, _ := crypto.GenerateKey()
 	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	// Deploy a slice tester contract and execute a n array call on it
@@ -497,7 +497,7 @@ var bindTests = []struct {
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 `
@@ -505,7 +505,7 @@ var bindTests = []struct {
 	key, _ := crypto.GenerateKey()
 	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	// Deploy a default method invoker contract and execute its default method
@@ -564,7 +564,7 @@ var bindTests = []struct {
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 `
@@ -572,7 +572,7 @@ var bindTests = []struct {
 	key, _ := crypto.GenerateKey()
 	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	// Deploy a structs method invoker contract and execute its default method
@@ -610,12 +610,12 @@ var bindTests = []struct {
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 `,
 `
 	// Create a simulator and wrap a non-deployed contract
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, uint64(10000000000))
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{}, uint64(10000000000))
 	defer sim.Close()
 
 	nonexistent, err := NewNonExistent(common.Address{}, sim)
@@ -649,12 +649,12 @@ var bindTests = []struct {
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 `,
 `
 	// Create a simulator and wrap a non-deployed contract
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, uint64(10000000000))
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{}, uint64(10000000000))
 	defer sim.Close()
 
 	nonexistent, err := NewNonExistentStruct(common.Address{}, sim)
@@ -696,7 +696,7 @@ var bindTests = []struct {
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 `
@@ -704,7 +704,7 @@ var bindTests = []struct {
 	key, _ := crypto.GenerateKey()
 	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	// Deploy a funky gas pattern contract
@@ -746,7 +746,7 @@ var bindTests = []struct {
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 `
@@ -754,7 +754,7 @@ var bindTests = []struct {
 	key, _ := crypto.GenerateKey()
 	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	// Deploy a sender tester contract and execute a structured call on it
@@ -821,7 +821,7 @@ var bindTests = []struct {
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 `
@@ -829,7 +829,7 @@ var bindTests = []struct {
 	key, _ := crypto.GenerateKey()
 	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	// Deploy a underscorer tester contract and execute a structured call on it
@@ -915,7 +915,7 @@ var bindTests = []struct {
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 `
@@ -923,7 +923,7 @@ var bindTests = []struct {
 	key, _ := crypto.GenerateKey()
 	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	// Deploy an eventer contract
@@ -1105,7 +1105,7 @@ var bindTests = []struct {
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 `
@@ -1113,7 +1113,7 @@ var bindTests = []struct {
 	key, _ := crypto.GenerateKey()
 	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	//deploy the test contract
@@ -1240,7 +1240,7 @@ var bindTests = []struct {
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 
@@ -1248,7 +1248,7 @@ var bindTests = []struct {
 	key, _ := crypto.GenerateKey()
 	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	_, _, contract, err := DeployTuple(auth, sim)
@@ -1382,7 +1382,7 @@ var bindTests = []struct {
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 `
@@ -1390,7 +1390,7 @@ var bindTests = []struct {
 	key, _ := crypto.GenerateKey()
 	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	//deploy the test contract
@@ -1448,14 +1448,14 @@ var bindTests = []struct {
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 `
 	// Initialize test accounts
 	key, _ := crypto.GenerateKey()
 	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	// deploy the test contract
@@ -1537,7 +1537,7 @@ var bindTests = []struct {
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 `,
 `
 	// Initialize test accounts
@@ -1545,7 +1545,7 @@ var bindTests = []struct {
 	addr := crypto.PubkeyToAddress(key.PublicKey)
 
 	// Deploy registrar contract
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
@@ -1600,14 +1600,14 @@ var bindTests = []struct {
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 `,
 `
 	key, _ := crypto.GenerateKey()
 	addr := crypto.PubkeyToAddress(key.PublicKey)
 
 	// Deploy registrar contract
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
@@ -1661,7 +1661,7 @@ var bindTests = []struct {
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 `
@@ -1669,7 +1669,7 @@ var bindTests = []struct {
 	key, _ := crypto.GenerateKey()
 	auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
 	defer sim.Close()
 
 	// Deploy a tester contract and execute a structured call on it
@@ -1722,14 +1722,14 @@ var bindTests = []struct {
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 `,
 `
 	key, _ := crypto.GenerateKey()
 	addr := crypto.PubkeyToAddress(key.PublicKey)
 
-	sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 1000000)
+	sim := backends.NewSimulatedBackend(types.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 1000000)
 	defer sim.Close()
 
 	opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||||
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
|
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||||
`,
|
`,
|
||||||
@ -1818,7 +1818,7 @@ var bindTests = []struct {
|
|||||||
var (
|
var (
|
||||||
key, _ = crypto.GenerateKey()
|
key, _ = crypto.GenerateKey()
|
||||||
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
|
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
|
||||||
sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
|
sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
|
||||||
)
|
)
|
||||||
defer sim.Close()
|
defer sim.Close()
|
||||||
|
|
||||||
@ -1881,7 +1881,7 @@ var bindTests = []struct {
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||||
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
|
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||||
`,
|
`,
|
||||||
@ -1889,7 +1889,7 @@ var bindTests = []struct {
|
|||||||
var (
|
var (
|
||||||
key, _ = crypto.GenerateKey()
|
key, _ = crypto.GenerateKey()
|
||||||
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
|
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
|
||||||
sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
|
sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
|
||||||
)
|
)
|
||||||
defer sim.Close()
|
defer sim.Close()
|
||||||
|
|
||||||
@ -1934,7 +1934,7 @@ var bindTests = []struct {
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||||
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
|
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||||
`,
|
`,
|
||||||
@ -1942,7 +1942,7 @@ var bindTests = []struct {
|
|||||||
var (
|
var (
|
||||||
key, _ = crypto.GenerateKey()
|
key, _ = crypto.GenerateKey()
|
||||||
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
|
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
|
||||||
sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
|
sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
|
||||||
)
|
)
|
||||||
defer sim.Close()
|
defer sim.Close()
|
||||||
|
|
||||||
@@ -1983,7 +1983,7 @@ var bindTests = []struct {
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
 `,
@@ -1991,7 +1991,7 @@ var bindTests = []struct {
 	var (
 		key, _  = crypto.GenerateKey()
 		user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
-		sim     = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
+		sim     = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
 	)
 	defer sim.Close()
 
@@ -2024,7 +2024,7 @@ var bindTests = []struct {
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
 `,
@@ -2032,7 +2032,7 @@ var bindTests = []struct {
 	var (
 		key, _  = crypto.GenerateKey()
 		user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
-		sim     = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
+		sim     = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
 	)
 	_, tx, _, err := DeployRangeKeyword(user, sim)
 	if err != nil {
@@ -25,7 +25,6 @@ import (
 
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethclient/simulated"
@@ -56,17 +55,16 @@ var waitDeployedTests = map[string]struct {
 func TestWaitDeployed(t *testing.T) {
 	t.Parallel()
 	for name, test := range waitDeployedTests {
-		backend := simulated.New(
-			core.GenesisAlloc{
+		backend := simulated.NewBackend(
+			types.GenesisAlloc{
 				crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
 			},
-			10000000,
 		)
 		defer backend.Close()
 
 		// Create the transaction
 		head, _ := backend.Client().HeaderByNumber(context.Background(), nil) // Should be child's, good enough
-		gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))
+		gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei))
 
 		tx := types.NewContractCreation(0, big.NewInt(0), test.gas, gasPrice, common.FromHex(test.code))
 		tx, _ = types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(1337)), testKey)
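The gas-price line above switches the tip over the base fee from a single wei to a full gwei via params.GWei (1e9 wei). A small hedged sketch of the same arithmetic in isolation; the base-fee value below is illustrative:

    package main

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/params"
    )

    func main() {
        // params.GWei is 1e9 wei, so the transaction now pays a 1 gwei tip
        // on top of the base fee instead of 1 wei.
        baseFee := big.NewInt(1_000_000_000) // illustrative base fee of 1 gwei
        gasPrice := new(big.Int).Add(baseFee, big.NewInt(params.GWei))
        fmt.Println(gasPrice) // 2000000000
    }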
@@ -102,11 +100,10 @@ func TestWaitDeployed(t *testing.T) {
 }
 
 func TestWaitDeployedCornerCases(t *testing.T) {
-	backend := simulated.New(
-		core.GenesisAlloc{
+	backend := simulated.NewBackend(
+		types.GenesisAlloc{
 			crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
 		},
-		10000000,
 	)
 	defer backend.Close()
 
@@ -241,7 +241,7 @@ func (hub *Hub) refreshWallets() {
 			card.Disconnect(pcsc.LeaveCard)
 			continue
 		}
-		// Card connected, start tracking in amongs the wallets
+		// Card connected, start tracking among the wallets
 		hub.wallets[reader] = wallet
 		events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
 	}
@@ -279,7 +279,7 @@ func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, er
 	}
 	hexstr := reply[1 : 1+int(reply[0])]
 
-	// Decode the hex sting into an Ethereum address and return
+	// Decode the hex string into an Ethereum address and return
 	var address common.Address
 	if _, err = hex.Decode(address[:], hexstr); err != nil {
 		return common.Address{}, err
@@ -16,7 +16,7 @@
 
 // This file contains the implementation for interacting with the Trezor hardware
 // wallets. The wire protocol spec can be found on the SatoshiLabs website:
-// https://wiki.trezor.io/Developers_guide-Message_Workflows
+// https://docs.trezor.io/trezor-firmware/common/message-workflows.html
 
 // !!! STAHP !!!
 //
@@ -26,6 +26,16 @@ import (
 	"github.com/ethereum/go-ethereum/trie"
 )
 
+// PayloadVersion denotes the version of PayloadAttributes used to request the
+// building of the payload to commence.
+type PayloadVersion byte
+
+var (
+	PayloadV1 PayloadVersion = 0x1
+	PayloadV2 PayloadVersion = 0x2
+	PayloadV3 PayloadVersion = 0x3
+)
+
 //go:generate go run github.com/fjl/gencodec -type PayloadAttributes -field-override payloadAttributesMarshaling -out gen_blockparams.go
 
 // PayloadAttributes describes the environment context in which a block should
@@ -115,6 +125,21 @@ type TransitionConfigurationV1 struct {
 // PayloadID is an identifier of the payload build process
 type PayloadID [8]byte
 
+// Version returns the payload version associated with the identifier.
+func (b PayloadID) Version() PayloadVersion {
+	return PayloadVersion(b[0])
+}
+
+// Is returns whether the identifier matches any of provided payload versions.
+func (b PayloadID) Is(versions ...PayloadVersion) bool {
+	for _, v := range versions {
+		if v == b.Version() {
+			return true
+		}
+	}
+	return false
+}
+
 func (b PayloadID) String() string {
 	return hexutil.Encode(b[:])
 }
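The new helpers read the payload version out of the first byte of a PayloadID. A minimal sketch of how they behave, assuming this hunk lives in go-ethereum's beacon/engine package; the literal ID bytes below are made up:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/beacon/engine"
    )

    func main() {
        // An 8-byte payload ID whose first byte carries the version, which is
        // what Version() decodes. The remaining bytes are arbitrary here.
        id := engine.PayloadID{byte(engine.PayloadV3), 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07}

        fmt.Println(id.Version() == engine.PayloadV3)          // true
        fmt.Println(id.Is(engine.PayloadV1, engine.PayloadV2)) // false
        fmt.Println(id.Is(engine.PayloadV2, engine.PayloadV3)) // true
    }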
@@ -278,3 +303,21 @@ type ExecutionPayloadBodyV1 struct {
 	TransactionData []hexutil.Bytes     `json:"transactions"`
 	Withdrawals     []*types.Withdrawal `json:"withdrawals"`
 }
+
+// Client identifiers to support ClientVersionV1.
+const (
+	ClientCode = "GE"
+	ClientName = "go-ethereum"
+)
+
+// ClientVersionV1 contains information which identifies a client implementation.
+type ClientVersionV1 struct {
+	Code    string `json:"code"`
+	Name    string `json:"clientName"`
+	Version string `json:"version"`
+	Commit  string `json:"commit"`
+}
+
+func (v *ClientVersionV1) String() string {
+	return fmt.Sprintf("%s-%s-%s-%s", v.Code, v.Name, v.Version, v.Commit)
+}
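ClientVersionV1 packages the client's identity as code, name, version and commit. A short sketch of constructing and printing one, again assuming the beacon/engine package; the Version and Commit values are illustrative (taken from this PR's title and commit hash), not produced by the code in the diff:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/beacon/engine"
    )

    func main() {
        // ClientCode and ClientName are the constants added in the hunk above.
        v := &engine.ClientVersionV1{
            Code:    engine.ClientCode,
            Name:    engine.ClientName,
            Version: "1.13.14",
            Commit:  "1a891837",
        }
        fmt.Println(v) // GE-go-ethereum-1.13.14-1a891837
    }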
@@ -1,26 +1,26 @@
 # This file contains sha256 checksums of optional build dependencies.
 
-# version:spec-tests 1.0.6
+# version:spec-tests 2.1.0
 # https://github.com/ethereum/execution-spec-tests/releases
-# https://github.com/ethereum/execution-spec-tests/releases/download/v1.0.6/
-485af7b66cf41eb3a8c1bd46632913b8eb95995df867cf665617bbc9b4beedd1 fixtures_develop.tar.gz
+# https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/
+ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz
 
-# version:golang 1.21.5
+# version:golang 1.21.6
 # https://go.dev/dl/
-285cbbdf4b6e6e62ed58f370f3f6d8c30825d6e56c5853c66d3c23bcdb09db19 go1.21.5.src.tar.gz
-a2e1d5743e896e5fe1e7d96479c0a769254aed18cf216cf8f4c3a2300a9b3923 go1.21.5.darwin-amd64.tar.gz
-d0f8ac0c4fb3efc223a833010901d02954e3923cfe2c9a2ff0e4254a777cc9cc go1.21.5.darwin-arm64.tar.gz
-2c05bbe0dc62456b90b7ddd354a54f373b7c377a98f8b22f52ab694b4f6cca58 go1.21.5.freebsd-386.tar.gz
-30b6c64e9a77129605bc12f836422bf09eec577a8c899ee46130aeff81567003 go1.21.5.freebsd-amd64.tar.gz
-8f4dba9cf5c61757bbd7e9ebdb93b6a30a1b03f4a636a1ba0cc2f27b907ab8e1 go1.21.5.linux-386.tar.gz
-e2bc0b3e4b64111ec117295c088bde5f00eeed1567999ff77bc859d7df70078e go1.21.5.linux-amd64.tar.gz
-841cced7ecda9b2014f139f5bab5ae31785f35399f236b8b3e75dff2a2978d96 go1.21.5.linux-arm64.tar.gz
-837f4bf4e22fcdf920ffeaa4abf3d02d1314e03725431065f4d44c46a01b42fe go1.21.5.linux-armv6l.tar.gz
-907b8c6ec4be9b184952e5d3493be66b1746442394a8bc78556c56834cd7c38b go1.21.5.linux-ppc64le.tar.gz
-9c4a81b72ebe44368813cd03684e1080a818bf915d84163abae2ed325a1b2dc0 go1.21.5.linux-s390x.tar.gz
-6da2418889dfb37763d0eb149c4a8d728c029e12f0cd54fbca0a31ae547e2d34 go1.21.5.windows-386.zip
-bbe603cde7c9dee658f45164b4d06de1eff6e6e6b800100824e7c00d56a9a92f go1.21.5.windows-amd64.zip
-9b7acca50e674294e43202df4fbc26d5af4d8bc3170a3342a1514f09a2dab5e9 go1.21.5.windows-arm64.zip
+124926a62e45f78daabbaedb9c011d97633186a33c238ffc1e25320c02046248 go1.21.6.src.tar.gz
+31d6ecca09010ab351e51343a5af81d678902061fee871f912bdd5ef4d778850 go1.21.6.darwin-amd64.tar.gz
+0ff541fb37c38e5e5c5bcecc8f4f43c5ffd5e3a6c33a5d3e4003ded66fcfb331 go1.21.6.darwin-arm64.tar.gz
+a1d1a149b34bf0f53965a237682c6da1140acabb131bf0e597240e4a140b0e5e go1.21.6.freebsd-386.tar.gz
+de59e1217e4398b1522eed8dddabab2fa1b97aecbdca3af08e34832b4f0e3f81 go1.21.6.freebsd-amd64.tar.gz
+05d09041b5a1193c14e4b2db3f7fcc649b236c567f5eb93305c537851b72dd95 go1.21.6.linux-386.tar.gz
+3f934f40ac360b9c01f616a9aa1796d227d8b0328bf64cb045c7b8c4ee9caea4 go1.21.6.linux-amd64.tar.gz
+e2e8aa88e1b5170a0d495d7d9c766af2b2b6c6925a8f8956d834ad6b4cacbd9a go1.21.6.linux-arm64.tar.gz
+6a8eda6cc6a799ff25e74ce0c13fdc1a76c0983a0bb07c789a2a3454bf6ec9b2 go1.21.6.linux-armv6l.tar.gz
+e872b1e9a3f2f08fd4554615a32ca9123a4ba877ab6d19d36abc3424f86bc07f go1.21.6.linux-ppc64le.tar.gz
+92894d0f732d3379bc414ffdd617eaadad47e1d72610e10d69a1156db03fc052 go1.21.6.linux-s390x.tar.gz
+65b38857135cf45c80e1d267e0ce4f80fe149326c68835217da4f2da9b7943fe go1.21.6.windows-386.zip
+27ac9dd6e66fb3fd0acfa6792ff053c86e7d2c055b022f4b5d53bfddec9e3301 go1.21.6.windows-amd64.zip
+b93aff8f3c882c764c66a39b7a1483b0460e051e9992bf3435479129e5051bcd go1.21.6.windows-arm64.zip
 
 # version:golangci 1.55.2
 # https://github.com/golangci/golangci-lint/releases/
@@ -121,14 +121,13 @@ var (
 	// Note: vivid is unsupported because there is no golang-1.6 package for it.
 	// Note: the following Ubuntu releases have been officially deprecated on Launchpad:
 	//   wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsuite, impish,
-	//   kinetic
+	//   kinetic, lunar
 	debDistroGoBoots = map[string]string{
 		"trusty": "golang-1.11", // 14.04, EOL: 04/2024
 		"xenial": "golang-go",   // 16.04, EOL: 04/2026
 		"bionic": "golang-go",   // 18.04, EOL: 04/2028
 		"focal":  "golang-go",   // 20.04, EOL: 04/2030
 		"jammy":  "golang-go",   // 22.04, EOL: 04/2032
-		"lunar":  "golang-go",   // 23.04, EOL: 01/2024
 		"mantic": "golang-go",   // 23.10, EOL: 07/2024
 	}
 
@@ -20,7 +20,7 @@
 # - NSIS Large Strings build, http://nsis.sourceforge.net/Special_Builds
 # - SFP, http://nsis.sourceforge.net/NSIS_Simple_Firewall_Plugin (put dll in NSIS\Plugins\x86-ansi)
 #
-# After intalling NSIS extra the NSIS Large Strings build zip and replace the makensis.exe and the
+# After installing NSIS extra the NSIS Large Strings build zip and replace the makensis.exe and the
 # files found in Stub.
 #
 # based on: http://nsis.sourceforge.net/A_simple_installer_with_start_menu_shortcut_and_uninstaller
@@ -166,7 +166,7 @@ func (c *Conn) ReadEth() (any, error) {
 	case eth.TransactionsMsg:
 		msg = new(eth.TransactionsPacket)
 	case eth.NewPooledTransactionHashesMsg:
-		msg = new(eth.NewPooledTransactionHashesPacket68)
+		msg = new(eth.NewPooledTransactionHashesPacket)
 	case eth.GetPooledTransactionsMsg:
 		msg = new(eth.GetPooledTransactionsPacket)
 	case eth.PooledTransactionsMsg:
@@ -64,23 +64,23 @@ func NewSuite(dest *enode.Node, chainDir, engineURL, jwt string) (*Suite, error)
 func (s *Suite) EthTests() []utesting.Test {
 	return []utesting.Test{
 		// status
-		{Name: "TestStatus", Fn: s.TestStatus},
+		{Name: "Status", Fn: s.TestStatus},
 		// get block headers
-		{Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders},
-		{Name: "TestSimultaneousRequests", Fn: s.TestSimultaneousRequests},
-		{Name: "TestSameRequestID", Fn: s.TestSameRequestID},
-		{Name: "TestZeroRequestID", Fn: s.TestZeroRequestID},
+		{Name: "GetBlockHeaders", Fn: s.TestGetBlockHeaders},
+		{Name: "SimultaneousRequests", Fn: s.TestSimultaneousRequests},
+		{Name: "SameRequestID", Fn: s.TestSameRequestID},
+		{Name: "ZeroRequestID", Fn: s.TestZeroRequestID},
 		// get block bodies
-		{Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies},
+		{Name: "GetBlockBodies", Fn: s.TestGetBlockBodies},
 		// // malicious handshakes + status
-		{Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake},
-		{Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus},
+		{Name: "MaliciousHandshake", Fn: s.TestMaliciousHandshake},
+		{Name: "MaliciousStatus", Fn: s.TestMaliciousStatus},
 		// test transactions
-		{Name: "TestTransaction", Fn: s.TestTransaction},
-		{Name: "TestInvalidTxs", Fn: s.TestInvalidTxs},
-		{Name: "TestLargeTxRequest", Fn: s.TestLargeTxRequest},
-		{Name: "TestNewPooledTxs", Fn: s.TestNewPooledTxs},
-		{Name: "TestBlobViolations", Fn: s.TestBlobViolations},
+		{Name: "LargeTxRequest", Fn: s.TestLargeTxRequest, Slow: true},
+		{Name: "Transaction", Fn: s.TestTransaction},
+		{Name: "InvalidTxs", Fn: s.TestInvalidTxs},
+		{Name: "NewPooledTxs", Fn: s.TestNewPooledTxs},
+		{Name: "BlobViolations", Fn: s.TestBlobViolations},
 	}
 }
 
@@ -94,9 +94,9 @@ func (s *Suite) SnapTests() []utesting.Test {
 	}
 }
 
-// TestStatus attempts to connect to the given node and exchange a status
-// message with it on the eth protocol.
 func (s *Suite) TestStatus(t *utesting.T) {
+	t.Log(`This test is just a sanity check. It performs an eth protocol handshake.`)
+
 	conn, err := s.dial()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
@@ -112,9 +112,9 @@ func headersMatch(expected []*types.Header, headers []*types.Header) bool {
 	return reflect.DeepEqual(expected, headers)
 }
 
-// TestGetBlockHeaders tests whether the given node can respond to an eth
-// `GetBlockHeaders` request and that the response is accurate.
 func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
+	t.Log(`This test requests block headers from the node.`)
+
 	conn, err := s.dial()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
@@ -154,10 +154,10 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
 	}
 }
 
-// TestSimultaneousRequests sends two simultaneous `GetBlockHeader` requests
-// from the same connection with different request IDs and checks to make sure
-// the node responds with the correct headers per request.
 func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
+	t.Log(`This test requests blocks headers from the node, performing two requests
+concurrently, with different request IDs.`)
+
 	conn, err := s.dial()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestSameRequestID sends two requests with the same request ID to a single
|
|
||||||
// node.
|
|
||||||
func (s *Suite) TestSameRequestID(t *utesting.T) {
|
func (s *Suite) TestSameRequestID(t *utesting.T) {
|
||||||
|
t.Log(`This test requests block headers, performing two concurrent requests with the
|
||||||
|
same request ID. The node should handle the request by responding to both requests.`)
|
||||||
|
|
||||||
conn, err := s.dial()
|
conn, err := s.dial()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("dial failed: %v", err)
|
t.Fatalf("dial failed: %v", err)
|
||||||
@ -298,9 +299,10 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestZeroRequestID checks that a message with a request ID of zero is still handled
|
|
||||||
// by the node.
|
|
||||||
func (s *Suite) TestZeroRequestID(t *utesting.T) {
|
func (s *Suite) TestZeroRequestID(t *utesting.T) {
|
||||||
|
t.Log(`This test sends a GetBlockHeaders message with a request-id of zero,
|
||||||
|
and expects a response.`)
|
||||||
|
|
||||||
conn, err := s.dial()
|
conn, err := s.dial()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("dial failed: %v", err)
|
t.Fatalf("dial failed: %v", err)
|
||||||
@ -333,9 +335,9 @@ func (s *Suite) TestZeroRequestID(t *utesting.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestGetBlockBodies tests whether the given node can respond to a
|
|
||||||
// `GetBlockBodies` request and that the response is accurate.
|
|
||||||
func (s *Suite) TestGetBlockBodies(t *utesting.T) {
|
func (s *Suite) TestGetBlockBodies(t *utesting.T) {
|
||||||
|
t.Log(`This test sends GetBlockBodies requests to the node for known blocks in the test chain.`)
|
||||||
|
|
||||||
conn, err := s.dial()
|
conn, err := s.dial()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("dial failed: %v", err)
|
t.Fatalf("dial failed: %v", err)
|
||||||
@ -376,12 +378,12 @@ func randBuf(size int) []byte {
|
|||||||
return buf
|
return buf
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestMaliciousHandshake tries to send malicious data during the handshake.
|
|
||||||
func (s *Suite) TestMaliciousHandshake(t *utesting.T) {
|
func (s *Suite) TestMaliciousHandshake(t *utesting.T) {
|
||||||
key, _ := crypto.GenerateKey()
|
t.Log(`This test tries to send malicious data during the devp2p handshake, in various ways.`)
|
||||||
|
|
||||||
// Write hello to client.
|
// Write hello to client.
|
||||||
var (
|
var (
|
||||||
|
key, _ = crypto.GenerateKey()
|
||||||
pub0 = crypto.FromECDSAPub(&key.PublicKey)[1:]
|
pub0 = crypto.FromECDSAPub(&key.PublicKey)[1:]
|
||||||
version = eth.ProtocolVersions[0]
|
version = eth.ProtocolVersions[0]
|
||||||
)
|
)
|
||||||
@ -451,8 +453,9 @@ func (s *Suite) TestMaliciousHandshake(t *utesting.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestMaliciousStatus sends a status package with a large total difficulty.
|
|
||||||
func (s *Suite) TestMaliciousStatus(t *utesting.T) {
|
func (s *Suite) TestMaliciousStatus(t *utesting.T) {
|
||||||
|
t.Log(`This test sends a malicious eth Status message to the node and expects a disconnect.`)
|
||||||
|
|
||||||
conn, err := s.dial()
|
conn, err := s.dial()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("dial failed: %v", err)
|
t.Fatalf("dial failed: %v", err)
|
||||||
@ -486,9 +489,10 @@ func (s *Suite) TestMaliciousStatus(t *utesting.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestTransaction sends a valid transaction to the node and checks if the
|
|
||||||
// transaction gets propagated.
|
|
||||||
func (s *Suite) TestTransaction(t *utesting.T) {
|
func (s *Suite) TestTransaction(t *utesting.T) {
|
||||||
|
t.Log(`This test sends a valid transaction to the node and checks if the
|
||||||
|
transaction gets propagated.`)
|
||||||
|
|
||||||
// Nudge client out of syncing mode to accept pending txs.
|
// Nudge client out of syncing mode to accept pending txs.
|
||||||
if err := s.engine.sendForkchoiceUpdated(); err != nil {
|
if err := s.engine.sendForkchoiceUpdated(); err != nil {
|
||||||
t.Fatalf("failed to send next block: %v", err)
|
t.Fatalf("failed to send next block: %v", err)
|
||||||
@ -507,15 +511,16 @@ func (s *Suite) TestTransaction(t *utesting.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to sign tx: %v", err)
|
t.Fatalf("failed to sign tx: %v", err)
|
||||||
}
|
}
|
||||||
if err := s.sendTxs([]*types.Transaction{tx}); err != nil {
|
if err := s.sendTxs(t, []*types.Transaction{tx}); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
s.chain.IncNonce(from, 1)
|
s.chain.IncNonce(from, 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestInvalidTxs sends several invalid transactions and tests whether
|
|
||||||
// the node will propagate them.
|
|
||||||
func (s *Suite) TestInvalidTxs(t *utesting.T) {
|
func (s *Suite) TestInvalidTxs(t *utesting.T) {
|
||||||
|
t.Log(`This test sends several kinds of invalid transactions and checks that the node
|
||||||
|
does not propagate them.`)
|
||||||
|
|
||||||
// Nudge client out of syncing mode to accept pending txs.
|
// Nudge client out of syncing mode to accept pending txs.
|
||||||
if err := s.engine.sendForkchoiceUpdated(); err != nil {
|
if err := s.engine.sendForkchoiceUpdated(); err != nil {
|
||||||
t.Fatalf("failed to send next block: %v", err)
|
t.Fatalf("failed to send next block: %v", err)
|
||||||
@ -534,7 +539,7 @@ func (s *Suite) TestInvalidTxs(t *utesting.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to sign tx: %v", err)
|
t.Fatalf("failed to sign tx: %v", err)
|
||||||
}
|
}
|
||||||
if err := s.sendTxs([]*types.Transaction{tx}); err != nil {
|
if err := s.sendTxs(t, []*types.Transaction{tx}); err != nil {
|
||||||
t.Fatalf("failed to send txs: %v", err)
|
t.Fatalf("failed to send txs: %v", err)
|
||||||
}
|
}
|
||||||
s.chain.IncNonce(from, 1)
|
s.chain.IncNonce(from, 1)
|
||||||
@ -590,14 +595,15 @@ func (s *Suite) TestInvalidTxs(t *utesting.T) {
|
|||||||
}
|
}
|
||||||
txs = append(txs, tx)
|
txs = append(txs, tx)
|
||||||
}
|
}
|
||||||
if err := s.sendInvalidTxs(txs); err != nil {
|
if err := s.sendInvalidTxs(t, txs); err != nil {
|
||||||
t.Fatalf("failed to send invalid txs: %v", err)
|
t.Fatalf("failed to send invalid txs: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestLargeTxRequest tests whether a node can fulfill a large GetPooledTransactions
|
|
||||||
// request.
|
|
||||||
func (s *Suite) TestLargeTxRequest(t *utesting.T) {
|
func (s *Suite) TestLargeTxRequest(t *utesting.T) {
|
||||||
|
t.Log(`This test first send ~2000 transactions to the node, then requests them
|
||||||
|
on another peer connection using GetPooledTransactions.`)
|
||||||
|
|
||||||
// Nudge client out of syncing mode to accept pending txs.
|
// Nudge client out of syncing mode to accept pending txs.
|
||||||
if err := s.engine.sendForkchoiceUpdated(); err != nil {
|
if err := s.engine.sendForkchoiceUpdated(); err != nil {
|
||||||
t.Fatalf("failed to send next block: %v", err)
|
t.Fatalf("failed to send next block: %v", err)
|
||||||
@ -630,7 +636,7 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
|
|||||||
s.chain.IncNonce(from, uint64(count))
|
s.chain.IncNonce(from, uint64(count))
|
||||||
|
|
||||||
// Send txs.
|
// Send txs.
|
||||||
if err := s.sendTxs(txs); err != nil {
|
if err := s.sendTxs(t, txs); err != nil {
|
||||||
t.Fatalf("failed to send txs: %v", err)
|
t.Fatalf("failed to send txs: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -667,13 +673,15 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestNewPooledTxs tests whether a node will do a GetPooledTransactions request
|
|
||||||
// upon receiving a NewPooledTransactionHashes announcement.
|
|
||||||
func (s *Suite) TestNewPooledTxs(t *utesting.T) {
|
func (s *Suite) TestNewPooledTxs(t *utesting.T) {
|
||||||
|
t.Log(`This test announces transaction hashes to the node and expects it to fetch
|
||||||
|
the transactions using a GetPooledTransactions request.`)
|
||||||
|
|
||||||
// Nudge client out of syncing mode to accept pending txs.
|
// Nudge client out of syncing mode to accept pending txs.
|
||||||
if err := s.engine.sendForkchoiceUpdated(); err != nil {
|
if err := s.engine.sendForkchoiceUpdated(); err != nil {
|
||||||
t.Fatalf("failed to send next block: %v", err)
|
t.Fatalf("failed to send next block: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
count = 50
|
count = 50
|
||||||
from, nonce = s.chain.GetSender(1)
|
from, nonce = s.chain.GetSender(1)
|
||||||
@ -710,7 +718,7 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Send announcement.
|
// Send announcement.
|
||||||
ann := eth.NewPooledTransactionHashesPacket68{Types: txTypes, Sizes: sizes, Hashes: hashes}
|
ann := eth.NewPooledTransactionHashesPacket{Types: txTypes, Sizes: sizes, Hashes: hashes}
|
||||||
err = conn.Write(ethProto, eth.NewPooledTransactionHashesMsg, ann)
|
err = conn.Write(ethProto, eth.NewPooledTransactionHashesMsg, ann)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to write to connection: %v", err)
|
t.Fatalf("failed to write to connection: %v", err)
|
||||||
@ -728,7 +736,7 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
|
|||||||
t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest))
|
t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest))
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
case *eth.NewPooledTransactionHashesPacket68:
|
case *eth.NewPooledTransactionHashesPacket:
|
||||||
continue
|
continue
|
||||||
case *eth.TransactionsPacket:
|
case *eth.TransactionsPacket:
|
||||||
continue
|
continue
|
||||||
@ -762,7 +770,7 @@ func (s *Suite) makeBlobTxs(count, blobs int, discriminator byte) (txs types.Tra
|
|||||||
from, nonce := s.chain.GetSender(5)
|
from, nonce := s.chain.GetSender(5)
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
// Make blob data, max of 2 blobs per tx.
|
// Make blob data, max of 2 blobs per tx.
|
||||||
blobdata := make([]byte, blobs%2)
|
blobdata := make([]byte, blobs%3)
|
||||||
for i := range blobdata {
|
for i := range blobdata {
|
||||||
blobdata[i] = discriminator
|
blobdata[i] = discriminator
|
||||||
blobs -= 1
|
blobs -= 1
|
||||||
@ -787,6 +795,8 @@ func (s *Suite) makeBlobTxs(count, blobs int, discriminator byte) (txs types.Tra
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *Suite) TestBlobViolations(t *utesting.T) {
|
func (s *Suite) TestBlobViolations(t *utesting.T) {
|
||||||
|
t.Log(`This test sends some invalid blob tx announcements and expects the node to disconnect.`)
|
||||||
|
|
||||||
if err := s.engine.sendForkchoiceUpdated(); err != nil {
|
if err := s.engine.sendForkchoiceUpdated(); err != nil {
|
||||||
t.Fatalf("send fcu failed: %v", err)
|
t.Fatalf("send fcu failed: %v", err)
|
||||||
}
|
}
|
||||||
@ -796,12 +806,12 @@ func (s *Suite) TestBlobViolations(t *utesting.T) {
|
|||||||
t2 = s.makeBlobTxs(2, 3, 0x2)
|
t2 = s.makeBlobTxs(2, 3, 0x2)
|
||||||
)
|
)
|
||||||
for _, test := range []struct {
|
for _, test := range []struct {
|
||||||
ann eth.NewPooledTransactionHashesPacket68
|
ann eth.NewPooledTransactionHashesPacket
|
||||||
resp eth.PooledTransactionsResponse
|
resp eth.PooledTransactionsResponse
|
||||||
}{
|
}{
|
||||||
// Invalid tx size.
|
// Invalid tx size.
|
||||||
{
|
{
|
||||||
ann: eth.NewPooledTransactionHashesPacket68{
|
ann: eth.NewPooledTransactionHashesPacket{
|
||||||
Types: []byte{types.BlobTxType, types.BlobTxType},
|
Types: []byte{types.BlobTxType, types.BlobTxType},
|
||||||
Sizes: []uint32{uint32(t1[0].Size()), uint32(t1[1].Size() + 10)},
|
Sizes: []uint32{uint32(t1[0].Size()), uint32(t1[1].Size() + 10)},
|
||||||
Hashes: []common.Hash{t1[0].Hash(), t1[1].Hash()},
|
Hashes: []common.Hash{t1[0].Hash(), t1[1].Hash()},
|
||||||
@ -810,7 +820,7 @@ func (s *Suite) TestBlobViolations(t *utesting.T) {
|
|||||||
},
|
},
|
||||||
// Wrong tx type.
|
// Wrong tx type.
|
||||||
{
|
{
|
||||||
ann: eth.NewPooledTransactionHashesPacket68{
|
ann: eth.NewPooledTransactionHashesPacket{
|
||||||
Types: []byte{types.DynamicFeeTxType, types.BlobTxType},
|
Types: []byte{types.DynamicFeeTxType, types.BlobTxType},
|
||||||
Sizes: []uint32{uint32(t2[0].Size()), uint32(t2[1].Size())},
|
Sizes: []uint32{uint32(t2[0].Size()), uint32(t2[1].Size())},
|
||||||
Hashes: []common.Hash{t2[0].Hash(), t2[1].Hash()},
|
Hashes: []common.Hash{t2[0].Hash(), t2[1].Hash()},
|
||||||
|
@ -63,6 +63,9 @@ func TestEthSuite(t *testing.T) {
|
|||||||
}
|
}
|
||||||
for _, test := range suite.EthTests() {
|
for _, test := range suite.EthTests() {
|
||||||
t.Run(test.Name, func(t *testing.T) {
|
t.Run(test.Name, func(t *testing.T) {
|
||||||
|
if test.Slow && testing.Short() {
|
||||||
|
t.Skipf("%s: skipping in -short mode", test.Name)
|
||||||
|
}
|
||||||
result := utesting.RunTests([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
|
result := utesting.RunTests([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
|
||||||
if result[0].Failed {
|
if result[0].Failed {
|
||||||
t.Fatal()
|
t.Fatal()
|
||||||
|
@ -25,11 +25,12 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/internal/utesting"
)

// sendTxs sends the given transactions to the node and
// expects the node to accept and propagate them.
func (s *Suite) sendTxs(txs []*types.Transaction) error {
func (s *Suite) sendTxs(t *utesting.T, txs []*types.Transaction) error {
// Open sending conn.
sendConn, err := s.dial()
if err != nil {
@ -70,10 +71,19 @@ func (s *Suite) sendTxs(txs []*types.Transaction) error {
for _, tx := range *msg {
got[tx.Hash()] = true
}
case *eth.NewPooledTransactionHashesPacket68:
case *eth.NewPooledTransactionHashesPacket:
for _, hash := range msg.Hashes {
got[hash] = true
}
case *eth.GetBlockHeadersPacket:
headers, err := s.chain.GetHeaders(msg)
if err != nil {
t.Logf("invalid GetBlockHeaders request: %v", err)
}
recvConn.Write(ethProto, eth.BlockHeadersMsg, &eth.BlockHeadersPacket{
RequestId: msg.RequestId,
BlockHeadersRequest: headers,
})
default:
return fmt.Errorf("unexpected eth wire msg: %s", pretty.Sdump(msg))
}
@ -95,7 +105,7 @@ func (s *Suite) sendTxs(txs []*types.Transaction) error {
return fmt.Errorf("timed out waiting for txs")
}

func (s *Suite) sendInvalidTxs(txs []*types.Transaction) error {
func (s *Suite) sendInvalidTxs(t *utesting.T, txs []*types.Transaction) error {
// Open sending conn.
sendConn, err := s.dial()
if err != nil {
@ -146,12 +156,21 @@ func (s *Suite) sendInvalidTxs(txs []*types.Transaction) error {
return fmt.Errorf("received bad tx: %s", tx.Hash())
}
}
case *eth.NewPooledTransactionHashesPacket68:
case *eth.NewPooledTransactionHashesPacket:
for _, hash := range msg.Hashes {
if _, ok := invalids[hash]; ok {
return fmt.Errorf("received bad tx: %s", hash)
}
}
case *eth.GetBlockHeadersPacket:
headers, err := s.chain.GetHeaders(msg)
if err != nil {
t.Logf("invalid GetBlockHeaders request: %v", err)
}
recvConn.Write(ethProto, eth.BlockHeadersMsg, &eth.BlockHeadersPacket{
RequestId: msg.RequestId,
BlockHeadersRequest: headers,
})
default:
return fmt.Errorf("unexpected eth message: %v", pretty.Sdump(msg))
}
@ -497,7 +497,7 @@ func FindnodeAmplificationWrongIP(t *utesting.T) {
// If we receive a NEIGHBORS response, the attack worked and the test fails.
reply, _, _ := te.read(te.l2)
if reply != nil {
t.Error("Got NEIGHORS response for FINDNODE from wrong IP")
t.Error("Got NEIGHBORS response for FINDNODE from wrong IP")
}
}
324
cmd/era/main.go
Normal file
@ -0,0 +1,324 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
"encoding/json"
"fmt"
"math/big"
"os"
"path"
"strconv"
"strings"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/urfave/cli/v2"
)

var app = flags.NewApp("go-ethereum era tool")

var (
dirFlag = &cli.StringFlag{
Name: "dir",
Usage: "directory storing all relevant era1 files",
Value: "eras",
}
networkFlag = &cli.StringFlag{
Name: "network",
Usage: "network name associated with era1 files",
Value: "mainnet",
}
eraSizeFlag = &cli.IntFlag{
Name: "size",
Usage: "number of blocks per era",
Value: era.MaxEra1Size,
}
txsFlag = &cli.BoolFlag{
Name: "txs",
Usage: "print full transaction values",
}
)

var (
blockCommand = &cli.Command{
Name: "block",
Usage: "get block data",
ArgsUsage: "<number>",
Action: block,
Flags: []cli.Flag{
txsFlag,
},
}
infoCommand = &cli.Command{
Name: "info",
ArgsUsage: "<epoch>",
Usage: "get epoch information",
Action: info,
}
verifyCommand = &cli.Command{
Name: "verify",
ArgsUsage: "<expected>",
Usage: "verifies each era1 against expected accumulator root",
Action: verify,
}
)

func init() {
app.Commands = []*cli.Command{
blockCommand,
infoCommand,
verifyCommand,
}
app.Flags = []cli.Flag{
dirFlag,
networkFlag,
eraSizeFlag,
}
}

func main() {
if err := app.Run(os.Args); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}

// block prints the specified block from an era1 store.
func block(ctx *cli.Context) error {
num, err := strconv.ParseUint(ctx.Args().First(), 10, 64)
if err != nil {
return fmt.Errorf("invalid block number: %w", err)
}
e, err := open(ctx, num/uint64(ctx.Int(eraSizeFlag.Name)))
if err != nil {
return fmt.Errorf("error opening era1: %w", err)
}
defer e.Close()
// Read block with number.
block, err := e.GetBlockByNumber(num)
if err != nil {
return fmt.Errorf("error reading block %d: %w", num, err)
}
// Convert block to JSON and print.
val := ethapi.RPCMarshalBlock(block, ctx.Bool(txsFlag.Name), ctx.Bool(txsFlag.Name), params.MainnetChainConfig)
b, err := json.MarshalIndent(val, "", " ")
if err != nil {
return fmt.Errorf("error marshaling json: %w", err)
}
fmt.Println(string(b))
return nil
}

// info prints some high-level information about the era1 file.
func info(ctx *cli.Context) error {
epoch, err := strconv.ParseUint(ctx.Args().First(), 10, 64)
if err != nil {
return fmt.Errorf("invalid epoch number: %w", err)
}
e, err := open(ctx, epoch)
if err != nil {
return err
}
defer e.Close()
acc, err := e.Accumulator()
if err != nil {
return fmt.Errorf("error reading accumulator: %w", err)
}
td, err := e.InitialTD()
if err != nil {
return fmt.Errorf("error reading total difficulty: %w", err)
}
info := struct {
Accumulator common.Hash `json:"accumulator"`
TotalDifficulty *big.Int `json:"totalDifficulty"`
StartBlock uint64 `json:"startBlock"`
Count uint64 `json:"count"`
}{
acc, td, e.Start(), e.Count(),
}
b, _ := json.MarshalIndent(info, "", " ")
fmt.Println(string(b))
return nil
}

// open opens an era1 file at a certain epoch.
func open(ctx *cli.Context, epoch uint64) (*era.Era, error) {
var (
dir = ctx.String(dirFlag.Name)
network = ctx.String(networkFlag.Name)
)
entries, err := era.ReadDir(dir, network)
if err != nil {
return nil, fmt.Errorf("error reading era dir: %w", err)
}
if epoch >= uint64(len(entries)) {
return nil, fmt.Errorf("epoch out-of-bounds: last %d, want %d", len(entries)-1, epoch)
}
return era.Open(path.Join(dir, entries[epoch]))
}

// verify checks each era1 file in a directory to ensure it is well-formed and
// that the accumulator matches the expected value.
func verify(ctx *cli.Context) error {
if ctx.Args().Len() != 1 {
return fmt.Errorf("missing accumulators file")
}

roots, err := readHashes(ctx.Args().First())
if err != nil {
return fmt.Errorf("unable to read expected roots file: %w", err)
}

var (
dir = ctx.String(dirFlag.Name)
network = ctx.String(networkFlag.Name)
start = time.Now()
reported = time.Now()
)

entries, err := era.ReadDir(dir, network)
if err != nil {
return fmt.Errorf("error reading %s: %w", dir, err)
}

if len(entries) != len(roots) {
return fmt.Errorf("number of era1 files should match the number of accumulator hashes")
}

// Verify each epoch matches the expected root.
for i, want := range roots {
// Wrap in function so defers don't stack.
err := func() error {
name := entries[i]
e, err := era.Open(path.Join(dir, name))
if err != nil {
return fmt.Errorf("error opening era1 file %s: %w", name, err)
}
defer e.Close()
// Read accumulator and check against expected.
if got, err := e.Accumulator(); err != nil {
return fmt.Errorf("error retrieving accumulator for %s: %w", name, err)
} else if got != want {
return fmt.Errorf("invalid root %s: got %s, want %s", name, got, want)
}
// Recompute accumulator.
if err := checkAccumulator(e); err != nil {
return fmt.Errorf("error verify era1 file %s: %w", name, err)
}
// Give the user some feedback that something is happening.
if time.Since(reported) >= 8*time.Second {
fmt.Printf("Verifying Era1 files \t\t verified=%d,\t elapsed=%s\n", i, common.PrettyDuration(time.Since(start)))
reported = time.Now()
}
return nil
}()
if err != nil {
return err
}
}

return nil
}

// checkAccumulator verifies the accumulator matches the data in the Era.
func checkAccumulator(e *era.Era) error {
var (
err error
want common.Hash
td *big.Int
tds = make([]*big.Int, 0)
hashes = make([]common.Hash, 0)
)
if want, err = e.Accumulator(); err != nil {
return fmt.Errorf("error reading accumulator: %w", err)
}
if td, err = e.InitialTD(); err != nil {
return fmt.Errorf("error reading total difficulty: %w", err)
}
it, err := era.NewIterator(e)
if err != nil {
return fmt.Errorf("error making era iterator: %w", err)
}
// To fully verify an era the following attributes must be checked:
// 1) the block index is constructed correctly
// 2) the tx root matches the value in the block
// 3) the receipts root matches the value in the block
// 4) the starting total difficulty value is correct
// 5) the accumulator is correct by recomputing it locally, which verifies
// the blocks are all correct (via hash)
//
// The attributes 1), 2), and 3) are checked for each block. 4) and 5) require
// accumulation across the entire set and are verified at the end.
for it.Next() {
// 1) next() walks the block index, so we're able to implicitly verify it.
if it.Error() != nil {
return fmt.Errorf("error reading block %d: %w", it.Number(), err)
}
block, receipts, err := it.BlockAndReceipts()
if it.Error() != nil {
return fmt.Errorf("error reading block %d: %w", it.Number(), err)
}
// 2) recompute tx root and verify against header.
tr := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil))
if tr != block.TxHash() {
return fmt.Errorf("tx root in block %d mismatch: want %s, got %s", block.NumberU64(), block.TxHash(), tr)
}
// 3) recompute receipt root and check value against block.
rr := types.DeriveSha(receipts, trie.NewStackTrie(nil))
if rr != block.ReceiptHash() {
return fmt.Errorf("receipt root in block %d mismatch: want %s, got %s", block.NumberU64(), block.ReceiptHash(), rr)
}
hashes = append(hashes, block.Hash())
td.Add(td, block.Difficulty())
tds = append(tds, new(big.Int).Set(td))
}
// 4+5) Verify accumulator and total difficulty.
got, err := era.ComputeAccumulator(hashes, tds)
if err != nil {
return fmt.Errorf("error computing accumulator: %w", err)
}
if got != want {
return fmt.Errorf("expected accumulator root does not match calculated: got %s, want %s", got, want)
}
return nil
}

// readHashes reads a file of newline-delimited hashes.
func readHashes(f string) ([]common.Hash, error) {
b, err := os.ReadFile(f)
if err != nil {
return nil, fmt.Errorf("unable to open accumulators file")
}
s := strings.Split(string(b), "\n")
// Remove empty last element, if present.
if s[len(s)-1] == "" {
s = s[:len(s)-1]
}
// Convert to hashes.
r := make([]common.Hash, len(s))
for i := range s {
r[i] = common.HexToHash(s[i])
}
return r, nil
}
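The `block` subcommand above maps a block number onto an Era1 epoch by dividing by the configured era size. The following is a minimal, standalone sketch of that arithmetic, not part of the commit; it assumes the default step of 8192 blocks per Era1 file (the figure quoted by the export-history description further below), and the block numbers are arbitrary.

package main

import "fmt"

func main() {
	// Assumed default era size; the tool reads it from the -size flag
	// (era.MaxEra1Size by default).
	const eraSize = uint64(8192)
	for _, n := range []uint64{0, 8191, 8192, 1_000_000} {
		// Integer division picks the Era1 file that holds block n.
		fmt.Printf("block %d is read from epoch %d\n", n, n/eraSize)
	}
}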
@ -36,12 +36,14 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/triedb"
"github.com/holiman/uint256"
"golang.org/x/crypto/sha3"
)

type Prestate struct {
Env stEnv `json:"env"`
Pre core.GenesisAlloc `json:"pre"`
Pre types.GenesisAlloc `json:"pre"`
}

// ExecutionResult contains the execution status after running a state test, any
@ -308,15 +310,15 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
reward.Sub(reward, new(big.Int).SetUint64(ommer.Delta))
reward.Mul(reward, blockReward)
reward.Div(reward, big.NewInt(8))
statedb.AddBalance(ommer.Address, reward)
statedb.AddBalance(ommer.Address, uint256.MustFromBig(reward))
}
statedb.AddBalance(pre.Env.Coinbase, minerReward)
statedb.AddBalance(pre.Env.Coinbase, uint256.MustFromBig(minerReward))
}
// Apply withdrawals
for _, w := range pre.Env.Withdrawals {
// Amount is in gwei, turn into wei
amount := new(big.Int).Mul(new(big.Int).SetUint64(w.Amount), big.NewInt(params.GWei))
statedb.AddBalance(w.Address, amount)
statedb.AddBalance(w.Address, uint256.MustFromBig(amount))
}
// Commit block
root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber))
@ -353,13 +355,13 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
return statedb, execRs, body, nil
}

func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB {
func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB {
sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
sdb := state.NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
statedb, _ := state.New(types.EmptyRootHash, sdb, nil)
for addr, a := range accounts {
statedb.SetCode(addr, a.Code)
statedb.SetNonce(addr, a.Nonce)
statedb.SetBalance(addr, a.Balance)
statedb.SetBalance(addr, uint256.MustFromBig(a.Balance))
for k, v := range a.Storage {
statedb.SetState(addr, k, v)
}
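The AddBalance and SetBalance call sites above now take *uint256.Int instead of *big.Int. The snippet below is a minimal sketch of the conversion pattern, not part of the commit, using the holiman/uint256 package that the new import pulls in; the amount is made up.

package main

import (
	"fmt"
	"math/big"

	"github.com/holiman/uint256"
)

func main() {
	// A wei amount that arrives as *big.Int, e.g. from a JSON genesis alloc.
	amount := new(big.Int).Mul(big.NewInt(5), big.NewInt(1e18))

	// MustFromBig panics if the value is negative or wider than 256 bits,
	// which is acceptable for well-formed balances.
	fmt.Println(uint256.MustFromBig(amount))

	// FromBig is the non-panicking variant for untrusted input.
	if v, overflow := uint256.FromBig(amount); !overflow {
		fmt.Println(v)
	}
}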
@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@ -74,7 +73,7 @@ var (
)

type input struct {
Alloc core.GenesisAlloc `json:"alloc,omitempty"`
Alloc types.GenesisAlloc `json:"alloc,omitempty"`
Env *stEnv `json:"env,omitempty"`
Txs []*txWithKey `json:"txs,omitempty"`
TxRlp string `json:"txsRlp,omitempty"`
@ -188,7 +187,7 @@ func Transition(ctx *cli.Context) error {
if err != nil {
return err
}
// Dump the excution result
// Dump the execution result
collector := make(Alloc)
s.DumpToCollector(collector, nil)
return dispatchOutput(ctx, baseDir, result, collector, body)
@ -272,7 +271,7 @@ func applyCancunChecks(env *stEnv, chainConfig *params.ChainConfig) error {
return nil
}

type Alloc map[common.Address]core.GenesisAccount
type Alloc map[common.Address]types.Account

func (g Alloc) OnRoot(common.Hash) {}

@ -280,7 +279,7 @@ func (g Alloc) OnAccount(addr *common.Address, dumpAccount state.DumpAccount) {
if addr == nil {
return
}
balance, _ := new(big.Int).SetString(dumpAccount.Balance, 10)
balance, _ := new(big.Int).SetString(dumpAccount.Balance, 0)
var storage map[common.Hash]common.Hash
if dumpAccount.Storage != nil {
storage = make(map[common.Hash]common.Hash)
@ -288,7 +287,7 @@ func (g Alloc) OnAccount(addr *common.Address, dumpAccount state.DumpAccount) {
storage[k] = common.HexToHash(v)
}
}
genesisAccount := core.GenesisAccount{
genesisAccount := types.Account{
Code: dumpAccount.Code,
Storage: storage,
Balance: balance,
@ -38,8 +38,8 @@ import (
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
"github.com/ethereum/go-ethereum/triedb/hashdb"
"github.com/urfave/cli/v2"
)

@ -148,7 +148,7 @@ func runCmd(ctx *cli.Context) error {
}

db := rawdb.NewMemoryDatabase()
triedb := trie.NewDatabase(db, &trie.Config{
triedb := triedb.NewDatabase(db, &triedb.Config{
Preimages: preimages,
HashDB: hashdb.Defaults,
})
@ -25,7 +25,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/tests"
@ -90,26 +89,27 @@ func runStateTest(fname string, cfg vm.Config, jsonOut, dump bool) error {
if err != nil {
return err
}
var tests map[string]tests.StateTest
var testsByName map[string]tests.StateTest
if err := json.Unmarshal(src, &tests); err != nil {
if err := json.Unmarshal(src, &testsByName); err != nil {
return err
}

// Iterate over all the tests, run them and aggregate the results
results := make([]StatetestResult, 0, len(tests))
results := make([]StatetestResult, 0, len(testsByName))
for key, test := range tests {
for key, test := range testsByName {
for _, st := range test.Subtests() {
// Run the test and aggregate the result
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
test.Run(st, cfg, false, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, statedb *state.StateDB) {
test.Run(st, cfg, false, rawdb.HashScheme, func(err error, tstate *tests.StateTestState) {
var root common.Hash
if statedb != nil {
if tstate.StateDB != nil {
root = statedb.IntermediateRoot(false)
root = tstate.StateDB.IntermediateRoot(false)
result.Root = &root
if jsonOut {
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root)
}
if dump { // Dump any state to aid debugging
cpy, _ := state.New(root, statedb.Database(), nil)
cpy, _ := state.New(root, tstate.StateDB.Database(), nil)
dump := cpy.RawDump(nil)
result.State = &dump
}
@ -103,7 +103,7 @@ type Env struct {
CurrentTimestamp uint64 `json:"currentTimestamp"`
Withdrawals []*Withdrawal `json:"withdrawals"`
// optional
CurrentDifficulty *big.Int `json:"currentDifficuly"`
CurrentDifficulty *big.Int `json:"currentDifficulty"`
CurrentRandom *big.Int `json:"currentRandom"`
CurrentBaseFee *big.Int `json:"currentBaseFee"`
ParentDifficulty *big.Int `json:"parentDifficulty"`
@ -35,10 +35,12 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2"
)

@ -122,6 +124,33 @@ Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
}
importHistoryCommand = &cli.Command{
Action: importHistory,
Name: "import-history",
Usage: "Import an Era archive",
ArgsUsage: "<dir>",
Flags: flags.Merge([]cli.Flag{
utils.TxLookupLimitFlag,
},
utils.DatabaseFlags,
utils.NetworkFlags,
),
Description: `
The import-history command will import blocks and their corresponding receipts
from Era archives.
`,
}
exportHistoryCommand = &cli.Command{
Action: exportHistory,
Name: "export-history",
Usage: "Export blockchain history to Era archives",
ArgsUsage: "<dir> <first> <last>",
Flags: flags.Merge(utils.DatabaseFlags),
Description: `
The export-history command will export blocks and their corresponding receipts
into Era archives. Eras are typically packaged in steps of 8192 blocks.
`,
}
importPreimagesCommand = &cli.Command{
Action: importPreimages,
@ -364,7 +393,97 @@ func exportChain(ctx *cli.Context) error {
}
err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
}
if err != nil {
utils.Fatalf("Export error: %v\n", err)
}
fmt.Printf("Export done in %v\n", time.Since(start))
return nil
}

func importHistory(ctx *cli.Context) error {
if ctx.Args().Len() != 1 {
utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
}

stack, _ := makeConfigNode(ctx)
defer stack.Close()

chain, db := utils.MakeChain(ctx, stack, false)
defer db.Close()

var (
start = time.Now()
dir = ctx.Args().Get(0)
network string
)

// Determine network.
if utils.IsNetworkPreset(ctx) {
switch {
case ctx.Bool(utils.MainnetFlag.Name):
network = "mainnet"
case ctx.Bool(utils.SepoliaFlag.Name):
network = "sepolia"
case ctx.Bool(utils.GoerliFlag.Name):
network = "goerli"
}
} else {
// No network flag set, try to determine network based on files
// present in directory.
var networks []string
for _, n := range params.NetworkNames {
entries, err := era.ReadDir(dir, n)
if err != nil {
return fmt.Errorf("error reading %s: %w", dir, err)
}
if len(entries) > 0 {
networks = append(networks, n)
}
}
if len(networks) == 0 {
return fmt.Errorf("no era1 files found in %s", dir)
}
if len(networks) > 1 {
return fmt.Errorf("multiple networks found, use a network flag to specify desired network")
}
network = networks[0]
}

if err := utils.ImportHistory(chain, db, dir, network); err != nil {
return err
}
fmt.Printf("Import done in %v\n", time.Since(start))
return nil
}

// exportHistory exports chain history in Era archives at a specified
// directory.
func exportHistory(ctx *cli.Context) error {
if ctx.Args().Len() != 3 {
utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
}

stack, _ := makeConfigNode(ctx)
defer stack.Close()

chain, _ := utils.MakeChain(ctx, stack, true)
start := time.Now()

var (
dir = ctx.Args().Get(0)
first, ferr = strconv.ParseInt(ctx.Args().Get(1), 10, 64)
last, lerr = strconv.ParseInt(ctx.Args().Get(2), 10, 64)
)
if ferr != nil || lerr != nil {
utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
}
if first < 0 || last < 0 {
utils.Fatalf("Export error: block number must be greater than 0\n")
}
if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
}
err := utils.ExportHistory(chain, dir, uint64(first), uint64(last), uint64(era.MaxEra1Size))
if err != nil {
utils.Fatalf("Export error: %v\n", err)
}
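The export-history description above notes that Eras are typically packaged in steps of 8192 blocks, so the number of Era1 files an export writes follows directly from the requested range. The snippet below is a rough sketch of that count, not part of the commit; it assumes the 8192-block step, and the block range is arbitrary.

package main

import "fmt"

func main() {
	const eraSize = uint64(8192) // assumed to match era.MaxEra1Size
	first, last := uint64(0), uint64(100_000)
	// Every epoch between the first and last block's epoch gets one file.
	files := last/eraSize - first/eraSize + 1
	fmt.Printf("exporting blocks %d-%d touches %d era1 files\n", first, last, files)
}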
|
@ -26,7 +26,6 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/internal/debug"
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/holiman/uint256"
|
"github.com/holiman/uint256"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
@ -51,9 +50,6 @@ func (c customQuotedStringer) String() string {
|
|||||||
// logTest is an entry point which spits out some logs. This is used by testing
|
// logTest is an entry point which spits out some logs. This is used by testing
|
||||||
// to verify expected outputs
|
// to verify expected outputs
|
||||||
func logTest(ctx *cli.Context) error {
|
func logTest(ctx *cli.Context) error {
|
||||||
// clear field padding map
|
|
||||||
debug.ResetLogging()
|
|
||||||
|
|
||||||
{ // big.Int
|
{ // big.Int
|
||||||
ba, _ := new(big.Int).SetString("111222333444555678999", 10) // "111,222,333,444,555,678,999"
|
ba, _ := new(big.Int).SetString("111222333444555678999", 10) // "111,222,333,444,555,678,999"
|
||||||
bb, _ := new(big.Int).SetString("-111222333444555678999", 10) // "-111,222,333,444,555,678,999"
|
bb, _ := new(big.Int).SetString("-111222333444555678999", 10) // "-111,222,333,444,555,678,999"
|
||||||
|
@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// geth is the official command-line client for Ethereum.
// geth is a command-line client for Ethereum.
package main

import (
@ -215,6 +215,8 @@ func init() {
initCommand,
importCommand,
exportCommand,
importHistoryCommand,
exportHistoryCommand,
importPreimagesCommand,
removedbCommand,
dumpCommand,
@ -356,8 +358,7 @@ func geth(ctx *cli.Context) error {
}

stack, backend := makeFullNode(ctx)
trieCfg := plugethCaptureTrieConfig(ctx, stack, backend)
wrapperBackend := backendwrapper.NewBackend(backend)
wrapperBackend := backendwrapper.NewBackend(backend, trieCfg)

pluginsInitializeNode(stack, wrapperBackend)
if ok, err := plugins.RunSubcommand(ctx); ok {
@ -1,25 +1,15 @@
package main

import (
"github.com/ethereum/go-ethereum/cmd/utils"
gcore "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"

"github.com/openrelayxyz/plugeth-utils/core"
"github.com/openrelayxyz/plugeth-utils/restricted"

"github.com/urfave/cli/v2"
)

func apiTranslate(apis []core.API) []rpc.API {
@ -132,69 +122,3 @@ func pluginBlockChain() {
}
BlockChain(plugins.DefaultPluginLoader)
}

func plugethCaptureTrieConfig(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) *trie.Config {

ethCfg := new(ethconfig.Config)

if ctx.IsSet(utils.CacheFlag.Name) || ctx.IsSet(utils.CacheTrieFlag.Name) {
ethCfg.TrieCleanCache = ctx.Int(utils.CacheFlag.Name) * ctx.Int(utils.CacheTrieFlag.Name) / 100
}
if ctx.IsSet(utils.CacheNoPrefetchFlag.Name) {
ethCfg.NoPrefetch = ctx.Bool(utils.CacheNoPrefetchFlag.Name)
}
if ctx.IsSet(utils.CacheFlag.Name) || ctx.IsSet(utils.CacheGCFlag.Name) {
ethCfg.TrieDirtyCache = ctx.Int(utils.CacheFlag.Name) * ctx.Int(utils.CacheGCFlag.Name) / 100
}
if ctx.IsSet(utils.GCModeFlag.Name) {
ethCfg.NoPruning = ctx.String(utils.GCModeFlag.Name) == "archive"
}
if ctx.IsSet(utils.CacheFlag.Name) || ctx.IsSet(utils.CacheSnapshotFlag.Name) {
ethCfg.SnapshotCache = ctx.Int(utils.CacheFlag.Name) * ctx.Int(utils.CacheSnapshotFlag.Name) / 100
}
ethCfg.Preimages = ctx.Bool(utils.CachePreimagesFlag.Name)
if ethCfg.NoPruning && !ethCfg.Preimages {
ethCfg.Preimages = true
log.Info("Enabling recording of key preimages since archive mode is used")
}
if ctx.IsSet(utils.StateHistoryFlag.Name) {
ethCfg.StateHistory = ctx.Uint64(utils.StateHistoryFlag.Name)
}

chaindb := backend.ChainDb()

scheme, err := rawdb.ParseStateScheme(ctx.String(utils.StateSchemeFlag.Name), chaindb)
if err != nil {
utils.Fatalf("%v", err)
}

ethCfg.StateScheme = scheme

cacheCfg := &gcore.CacheConfig{
TrieCleanLimit: ethCfg.TrieCleanCache,
TrieCleanNoPrefetch: ethCfg.NoPrefetch,
TrieDirtyLimit: ethCfg.TrieDirtyCache,
TrieDirtyDisabled: ethCfg.NoPruning,
TrieTimeLimit: ethconfig.Defaults.TrieTimeout,
SnapshotLimit: ethCfg.SnapshotCache,
Preimages: ethCfg.Preimages,
StateHistory: ethCfg.StateHistory,
StateScheme: ethCfg.StateScheme,
}

config := &trie.Config{Preimages: cacheCfg.Preimages}
if cacheCfg.StateScheme == rawdb.HashScheme {
config.HashDB = &hashdb.Config{
CleanCacheSize: cacheCfg.TrieCleanLimit * 1024 * 1024,
}
}
if cacheCfg.StateScheme == rawdb.PathScheme {
config.PathDB = &pathdb.Config{
StateHistory: cacheCfg.StateHistory,
CleanCacheSize: cacheCfg.TrieCleanLimit * 1024 * 1024,
DirtyCacheSize: cacheCfg.TrieDirtyLimit * 1024 * 1024,
}
}

return config
}
@ -25,7 +25,9 @@ import (
"flag"
"fmt"
"io"
"math"
"os"
"strconv"
"strings"

"github.com/ethereum/go-ethereum/common"
@ -37,6 +39,7 @@ var (
reverseMode = flag.Bool("reverse", false, "convert ASCII to rlp")
noASCII = flag.Bool("noascii", false, "don't print ASCII strings readably")
single = flag.Bool("single", false, "print only the first element, discard the rest")
showpos = flag.Bool("pos", false, "display element byte posititions")
)

func init() {
@ -52,17 +55,17 @@ If the filename is omitted, data is read from stdin.`)
func main() {
flag.Parse()

var r io.Reader
var r *inStream
switch {
case *hexMode != "":
data, err := hex.DecodeString(strings.TrimPrefix(*hexMode, "0x"))
if err != nil {
die(err)
}
r = bytes.NewReader(data)
r = newInStream(bytes.NewReader(data), int64(len(data)))

case flag.NArg() == 0:
r = os.Stdin
r = newInStream(bufio.NewReader(os.Stdin), 0)

case flag.NArg() == 1:
fd, err := os.Open(flag.Arg(0))
@ -70,13 +73,19 @@ func main() {
die(err)
}
defer fd.Close()
r = fd
var size int64
finfo, err := fd.Stat()
if err == nil {
size = finfo.Size()
}
r = newInStream(bufio.NewReader(fd), size)

default:
fmt.Fprintln(os.Stderr, "Error: too many arguments")
flag.Usage()
os.Exit(2)
}

out := os.Stdout
if *reverseMode {
data, err := textToRlp(r)
@ -93,10 +102,10 @@ func main() {
}
}

func rlpToText(r io.Reader, out io.Writer) error {
func rlpToText(in *inStream, out io.Writer) error {
s := rlp.NewStream(r, 0)
stream := rlp.NewStream(in, 0)
for {
if err := dump(s, 0, out); err != nil {
if err := dump(in, stream, 0, out); err != nil {
if err != io.EOF {
return err
}
@ -110,7 +119,10 @@ func rlpToText(r io.Reader, out io.Writer) error {
return nil
}

func dump(s *rlp.Stream, depth int, out io.Writer) error {
func dump(in *inStream, s *rlp.Stream, depth int, out io.Writer) error {
if *showpos {
fmt.Fprintf(out, "%s: ", in.posLabel())
}
kind, size, err := s.Kind()
if err != nil {
return err
@ -137,7 +149,7 @@ func dump(s *rlp.Stream, depth int, out io.Writer) error {
if i > 0 {
fmt.Fprint(out, ",\n")
}
if err := dump(s, depth+1, out); err == rlp.EOL {
if err := dump(in, s, depth+1, out); err == rlp.EOL {
break
} else if err != nil {
return err
@ -208,3 +220,36 @@ func textToRlp(r io.Reader) ([]byte, error) {
data, err := rlp.EncodeToBytes(obj[0])
return data, err
}

type inStream struct {
br rlp.ByteReader
pos int
columns int
}

func newInStream(br rlp.ByteReader, totalSize int64) *inStream {
col := int(math.Ceil(math.Log10(float64(totalSize))))
return &inStream{br: br, columns: col}
}

func (rc *inStream) Read(b []byte) (n int, err error) {
n, err = rc.br.Read(b)
rc.pos += n
return n, err
}

func (rc *inStream) ReadByte() (byte, error) {
b, err := rc.br.ReadByte()
if err == nil {
rc.pos++
}
return b, err
}

func (rc *inStream) posLabel() string {
l := strconv.FormatInt(int64(rc.pos), 10)
if len(l) < rc.columns {
l = strings.Repeat(" ", rc.columns-len(l)) + l
}
return l
}
@ -34,7 +34,8 @@ func TestRoundtrip(t *testing.T) {
|
|||||||
"0xc780c0c1c0825208",
|
"0xc780c0c1c0825208",
|
||||||
} {
|
} {
|
||||||
var out strings.Builder
|
var out strings.Builder
|
||||||
err := rlpToText(bytes.NewReader(common.FromHex(want)), &out)
|
in := newInStream(bytes.NewReader(common.FromHex(want)), 0)
|
||||||
|
err := rlpToText(in, &out)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
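Note (editorial, not part of the diff): the new -pos flag works by wrapping the input in a byte-counting reader, which is exactly what the inStream type above does. The following is a minimal, self-contained sketch of the same pattern using only the standard library; the CountingReader name is hypothetical.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// CountingReader mirrors what inStream does: it forwards reads and
// remembers how many bytes have been consumed so far. (Hypothetical name.)
type CountingReader struct {
	r   io.Reader
	pos int
}

func (c *CountingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	c.pos += n // byte offset where the next element starts
	return n, err
}

func main() {
	cr := &CountingReader{r: bytes.NewReader([]byte{0x82, 0x52, 0x08})}
	buf := make([]byte, 1)
	if _, err := cr.Read(buf); err != nil {
		panic(err)
	}
	fmt.Println("next element starts at byte", cr.pos) // prints 1
}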
191 cmd/utils/cmd.go
@ -19,12 +19,15 @@ package utils

 import (
 	"bufio"
+	"bytes"
 	"compress/gzip"
+	"crypto/sha256"
 	"errors"
 	"fmt"
 	"io"
 	"os"
 	"os/signal"
+	"path"
 	"runtime"
 	"strings"
 	"syscall"
@ -39,8 +42,10 @@ import (
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/internal/debug"
+	"github.com/ethereum/go-ethereum/internal/era"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
+	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/urfave/cli/v2"
 )
@ -228,6 +233,105 @@ func ImportChain(chain *core.BlockChain, fn string) error {
 	return nil
 }

+func readList(filename string) ([]string, error) {
+	b, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	return strings.Split(string(b), "\n"), nil
+}
+
+// ImportHistory imports Era1 files containing historical block information,
+// starting from genesis.
+func ImportHistory(chain *core.BlockChain, db ethdb.Database, dir string, network string) error {
+	if chain.CurrentSnapBlock().Number.BitLen() != 0 {
+		return fmt.Errorf("history import only supported when starting from genesis")
+	}
+	entries, err := era.ReadDir(dir, network)
+	if err != nil {
+		return fmt.Errorf("error reading %s: %w", dir, err)
+	}
+	checksums, err := readList(path.Join(dir, "checksums.txt"))
+	if err != nil {
+		return fmt.Errorf("unable to read checksums.txt: %w", err)
+	}
+	if len(checksums) != len(entries) {
+		return fmt.Errorf("expected equal number of checksums and entries, have: %d checksums, %d entries", len(checksums), len(entries))
+	}
+	var (
+		start    = time.Now()
+		reported = time.Now()
+		imported = 0
+		forker   = core.NewForkChoice(chain, nil)
+		h        = sha256.New()
+		buf      = bytes.NewBuffer(nil)
+	)
+	for i, filename := range entries {
+		err := func() error {
+			f, err := os.Open(path.Join(dir, filename))
+			if err != nil {
+				return fmt.Errorf("unable to open era: %w", err)
+			}
+			defer f.Close()
+
+			// Validate checksum.
+			if _, err := io.Copy(h, f); err != nil {
+				return fmt.Errorf("unable to recalculate checksum: %w", err)
+			}
+			if have, want := common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex(), checksums[i]; have != want {
+				return fmt.Errorf("checksum mismatch: have %s, want %s", have, want)
+			}
+			h.Reset()
+			buf.Reset()
+
+			// Import all block data from Era1.
+			e, err := era.From(f)
+			if err != nil {
+				return fmt.Errorf("error opening era: %w", err)
+			}
+			it, err := era.NewIterator(e)
+			if err != nil {
+				return fmt.Errorf("error making era reader: %w", err)
+			}
+			for it.Next() {
+				block, err := it.Block()
+				if err != nil {
+					return fmt.Errorf("error reading block %d: %w", it.Number(), err)
+				}
+				if block.Number().BitLen() == 0 {
+					continue // skip genesis
+				}
+				receipts, err := it.Receipts()
+				if err != nil {
+					return fmt.Errorf("error reading receipts %d: %w", it.Number(), err)
+				}
+				if status, err := chain.HeaderChain().InsertHeaderChain([]*types.Header{block.Header()}, start, forker); err != nil {
+					return fmt.Errorf("error inserting header %d: %w", it.Number(), err)
+				} else if status != core.CanonStatTy {
+					return fmt.Errorf("error inserting header %d, not canon: %v", it.Number(), status)
+				}
+				if _, err := chain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{receipts}, 2^64-1); err != nil {
+					return fmt.Errorf("error inserting body %d: %w", it.Number(), err)
+				}
+				imported += 1
+
+				// Give the user some feedback that something is happening.
+				if time.Since(reported) >= 8*time.Second {
+					log.Info("Importing Era files", "head", it.Number(), "imported", imported, "elapsed", common.PrettyDuration(time.Since(start)))
+					imported = 0
+					reported = time.Now()
+				}
+			}
+			return nil
+		}()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
 	head := chain.CurrentBlock()
 	for i, block := range blocks {
@ -297,6 +401,93 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
 	return nil
 }

+// ExportHistory exports blockchain history into the specified directory,
+// following the Era format.
+func ExportHistory(bc *core.BlockChain, dir string, first, last, step uint64) error {
+	log.Info("Exporting blockchain history", "dir", dir)
+	if head := bc.CurrentBlock().Number.Uint64(); head < last {
+		log.Warn("Last block beyond head, setting last = head", "head", head, "last", last)
+		last = head
+	}
+	network := "unknown"
+	if name, ok := params.NetworkNames[bc.Config().ChainID.String()]; ok {
+		network = name
+	}
+	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
+		return fmt.Errorf("error creating output directory: %w", err)
+	}
+	var (
+		start     = time.Now()
+		reported  = time.Now()
+		h         = sha256.New()
+		buf       = bytes.NewBuffer(nil)
+		checksums []string
+	)
+	for i := first; i <= last; i += step {
+		err := func() error {
+			filename := path.Join(dir, era.Filename(network, int(i/step), common.Hash{}))
+			f, err := os.Create(filename)
+			if err != nil {
+				return fmt.Errorf("could not create era file: %w", err)
+			}
+			defer f.Close()
+
+			w := era.NewBuilder(f)
+			for j := uint64(0); j < step && j <= last-i; j++ {
+				var (
+					n     = i + j
+					block = bc.GetBlockByNumber(n)
+				)
+				if block == nil {
+					return fmt.Errorf("export failed on #%d: not found", n)
+				}
+				receipts := bc.GetReceiptsByHash(block.Hash())
+				if receipts == nil {
+					return fmt.Errorf("export failed on #%d: receipts not found", n)
+				}
+				td := bc.GetTd(block.Hash(), block.NumberU64())
+				if td == nil {
+					return fmt.Errorf("export failed on #%d: total difficulty not found", n)
+				}
+				if err := w.Add(block, receipts, td); err != nil {
+					return err
+				}
+			}
+			root, err := w.Finalize()
+			if err != nil {
+				return fmt.Errorf("export failed to finalize %d: %w", step/i, err)
+			}
+			// Set correct filename with root.
+			os.Rename(filename, path.Join(dir, era.Filename(network, int(i/step), root)))
+
+			// Compute checksum of entire Era1.
+			if _, err := f.Seek(0, io.SeekStart); err != nil {
+				return err
+			}
+			if _, err := io.Copy(h, f); err != nil {
+				return fmt.Errorf("unable to calculate checksum: %w", err)
+			}
+			checksums = append(checksums, common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex())
+			h.Reset()
+			buf.Reset()
+			return nil
+		}()
+		if err != nil {
+			return err
+		}
+		if time.Since(reported) >= 8*time.Second {
+			log.Info("Exporting blocks", "exported", i, "elapsed", common.PrettyDuration(time.Since(start)))
+			reported = time.Now()
+		}
+	}
+
+	os.WriteFile(path.Join(dir, "checksums.txt"), []byte(strings.Join(checksums, "\n")), os.ModePerm)
+
+	log.Info("Exported blockchain to", "dir", dir)
+
+	return nil
+}
+
 // ImportPreimages imports a batch of exported hash preimages into the database.
 // It's a part of the deprecated functionality, should be removed in the future.
 func ImportPreimages(db ethdb.Database, fn string) error {
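Note (editorial, not part of the diff): ImportHistory validates each Era1 file by hashing it with SHA-256 and comparing the digest against the matching line of checksums.txt. Below is a minimal sketch of that check; the function name and paths are illustrative, and the "0x%x" formatting stands in for the diff's common.BytesToHash(...).Hex() output.

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"path"
	"strings"
)

// verifyEra1Checksums hashes each file and compares it to the corresponding
// line of checksums.txt, mirroring the check ImportHistory performs.
func verifyEra1Checksums(dir string, files []string) error {
	b, err := os.ReadFile(path.Join(dir, "checksums.txt"))
	if err != nil {
		return err
	}
	checksums := strings.Split(string(b), "\n")
	if len(checksums) != len(files) {
		return fmt.Errorf("have %d checksums, %d files", len(checksums), len(files))
	}
	for i, name := range files {
		f, err := os.Open(path.Join(dir, name))
		if err != nil {
			return err
		}
		h := sha256.New()
		_, err = io.Copy(h, f)
		f.Close()
		if err != nil {
			return err
		}
		// "0x%x" is an assumption for the on-disk digest format.
		if have := fmt.Sprintf("0x%x", h.Sum(nil)); have != checksums[i] {
			return fmt.Errorf("%s: checksum mismatch: have %s, want %s", name, have, checksums[i])
		}
	}
	return nil
}

func main() {
	// Example invocation with made-up paths.
	if err := verifyEra1Checksums("era", []string{"mainnet-00000-00000000.era1"}); err != nil {
		fmt.Println(err)
	}
}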
@ -69,9 +69,9 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/netutil"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rpc"
-	"github.com/ethereum/go-ethereum/trie"
-	"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
-	"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+	"github.com/ethereum/go-ethereum/triedb"
+	"github.com/ethereum/go-ethereum/triedb/hashdb"
+	"github.com/ethereum/go-ethereum/triedb/pathdb"
 	pcsclite "github.com/gballet/go-libpcsclite"
 	gopsutil "github.com/shirou/gopsutil/mem"
 	"github.com/urfave/cli/v2"
@ -2194,8 +2194,8 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
 }

 // MakeTrieDatabase constructs a trie database based on the configured scheme.
-func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool, isVerkle bool) *trie.Database {
-	config := &trie.Config{
+func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool, isVerkle bool) *triedb.Database {
+	config := &triedb.Config{
 		Preimages: preimage,
 		IsVerkle:  isVerkle,
 	}
@ -2208,12 +2208,12 @@ func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, read
 		// ignore the parameter silently. TODO(rjl493456442)
 		// please config it if read mode is implemented.
 		config.HashDB = hashdb.Defaults
-		return trie.NewDatabase(disk, config)
+		return triedb.NewDatabase(disk, config)
 	}
 	if readOnly {
 		config.PathDB = pathdb.ReadOnly
 	} else {
 		config.PathDB = pathdb.Defaults
 	}
-	return trie.NewDatabase(disk, config)
+	return triedb.NewDatabase(disk, config)
 }
185 cmd/utils/history_test.go Normal file
@ -0,0 +1,185 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"io"
+	"math/big"
+	"os"
+	"path"
+	"strings"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/internal/era"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/trie"
+	"github.com/ethereum/go-ethereum/triedb"
+)
+
+var (
+	count uint64 = 128
+	step  uint64 = 16
+)
+
+func TestHistoryImportAndExport(t *testing.T) {
+	var (
+		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		address = crypto.PubkeyToAddress(key.PublicKey)
+		genesis = &core.Genesis{
+			Config: params.TestChainConfig,
+			Alloc:  types.GenesisAlloc{address: {Balance: big.NewInt(1000000000000000000)}},
+		}
+		signer = types.LatestSigner(genesis.Config)
+	)
+
+	// Generate chain.
+	db, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), int(count), func(i int, g *core.BlockGen) {
+		if i == 0 {
+			return
+		}
+		tx, err := types.SignNewTx(key, signer, &types.DynamicFeeTx{
+			ChainID:    genesis.Config.ChainID,
+			Nonce:      uint64(i - 1),
+			GasTipCap:  common.Big0,
+			GasFeeCap:  g.PrevBlock(0).BaseFee(),
+			Gas:        50000,
+			To:         &common.Address{0xaa},
+			Value:      big.NewInt(int64(i)),
+			Data:       nil,
+			AccessList: nil,
+		})
+		if err != nil {
+			t.Fatalf("error creating tx: %v", err)
+		}
+		g.AddTx(tx)
+	})
+
+	// Initialize BlockChain.
+	chain, err := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+	if err != nil {
+		t.Fatalf("unable to initialize chain: %v", err)
+	}
+	if _, err := chain.InsertChain(blocks); err != nil {
+		t.Fatalf("error insterting chain: %v", err)
+	}
+
+	// Make temp directory for era files.
+	dir, err := os.MkdirTemp("", "history-export-test")
+	if err != nil {
+		t.Fatalf("error creating temp test directory: %v", err)
+	}
+	defer os.RemoveAll(dir)
+
+	// Export history to temp directory.
+	if err := ExportHistory(chain, dir, 0, count, step); err != nil {
+		t.Fatalf("error exporting history: %v", err)
+	}
+
+	// Read checksums.
+	b, err := os.ReadFile(path.Join(dir, "checksums.txt"))
+	if err != nil {
+		t.Fatalf("failed to read checksums: %v", err)
+	}
+	checksums := strings.Split(string(b), "\n")
+
+	// Verify each Era.
+	entries, _ := era.ReadDir(dir, "mainnet")
+	for i, filename := range entries {
+		func() {
+			f, err := os.Open(path.Join(dir, filename))
+			if err != nil {
+				t.Fatalf("error opening era file: %v", err)
+			}
+			var (
+				h   = sha256.New()
+				buf = bytes.NewBuffer(nil)
+			)
+			if _, err := io.Copy(h, f); err != nil {
+				t.Fatalf("unable to recalculate checksum: %v", err)
+			}
+			if got, want := common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex(), checksums[i]; got != want {
+				t.Fatalf("checksum %d does not match: got %s, want %s", i, got, want)
+			}
+			e, err := era.From(f)
+			if err != nil {
+				t.Fatalf("error opening era: %v", err)
+			}
+			defer e.Close()
+			it, err := era.NewIterator(e)
+			if err != nil {
+				t.Fatalf("error making era reader: %v", err)
+			}
+			for j := 0; it.Next(); j++ {
+				n := i*int(step) + j
+				if it.Error() != nil {
+					t.Fatalf("error reading block entry %d: %v", n, it.Error())
+				}
+				block, receipts, err := it.BlockAndReceipts()
+				if err != nil {
+					t.Fatalf("error reading block entry %d: %v", n, err)
+				}
+				want := chain.GetBlockByNumber(uint64(n))
+				if want, got := uint64(n), block.NumberU64(); want != got {
+					t.Fatalf("blocks out of order: want %d, got %d", want, got)
+				}
+				if want.Hash() != block.Hash() {
+					t.Fatalf("block hash mismatch %d: want %s, got %s", n, want.Hash().Hex(), block.Hash().Hex())
+				}
+				if got := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); got != want.TxHash() {
+					t.Fatalf("tx hash %d mismatch: want %s, got %s", n, want.TxHash(), got)
+				}
+				if got := types.CalcUncleHash(block.Uncles()); got != want.UncleHash() {
+					t.Fatalf("uncle hash %d mismatch: want %s, got %s", n, want.UncleHash(), got)
+				}
+				if got := types.DeriveSha(receipts, trie.NewStackTrie(nil)); got != want.ReceiptHash() {
+					t.Fatalf("receipt root %d mismatch: want %s, got %s", n, want.ReceiptHash(), got)
+				}
+			}
+		}()
+	}
+
+	// Now import Era.
+	freezer := t.TempDir()
+	db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false)
+	if err != nil {
+		panic(err)
+	}
+	t.Cleanup(func() {
+		db2.Close()
+	})
+
+	genesis.MustCommit(db2, triedb.NewDatabase(db, triedb.HashDefaults))
+	imported, err := core.NewBlockChain(db2, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+	if err != nil {
+		t.Fatalf("unable to initialize chain: %v", err)
+	}
+	if err := ImportHistory(imported, db2, dir, "mainnet"); err != nil {
+		t.Fatalf("failed to import chain: %v", err)
+	}
+	if have, want := imported.CurrentHeader(), chain.CurrentHeader(); have.Hash() != want.Hash() {
+		t.Fatalf("imported chain does not match expected, have (%d, %s) want (%d, %s)", have.Number, have.Hash(), want.Number, want.Hash())
+	}
+}
@ -16,7 +16,11 @@

 package common

-import "math/big"
+import (
+	"math/big"
+
+	"github.com/holiman/uint256"
+)

 // Common big integers often used
 var (
@ -27,4 +31,6 @@ var (
 	Big32  = big.NewInt(32)
 	Big256 = big.NewInt(256)
 	Big257 = big.NewInt(257)
+
+	U2560 = uint256.NewInt(0)
 )
@ -30,6 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rpc"
 	"github.com/ethereum/go-ethereum/trie"
+	"github.com/holiman/uint256"
 )

 // Proof-of-stake protocol constants.
@ -355,8 +356,8 @@ func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.
 	// Withdrawals processing.
 	for _, w := range withdrawals {
 		// Convert amount from gwei to wei.
-		amount := new(big.Int).SetUint64(w.Amount)
-		amount = amount.Mul(amount, big.NewInt(params.GWei))
+		amount := new(uint256.Int).SetUint64(w.Amount)
+		amount = amount.Mul(amount, uint256.NewInt(params.GWei))
 		state.AddBalance(w.Address, amount)
 	}
 	// No block reward which is issued by consensus layer instead.
@ -47,7 +47,7 @@ func TestReimportMirroredState(t *testing.T) {
 	genspec := &core.Genesis{
 		Config:    params.AllCliqueProtocolChanges,
 		ExtraData: make([]byte, extraVanity+common.AddressLength+extraSeal),
-		Alloc: map[common.Address]core.GenesisAccount{
+		Alloc: map[common.Address]types.Account{
 			addr: {Balance: big.NewInt(10000000000000000)},
 		},
 		BaseFee: big.NewInt(params.InitialBaseFee),
@ -33,14 +33,15 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
+	"github.com/holiman/uint256"
 	"golang.org/x/crypto/sha3"
 )

 // Ethash proof-of-work protocol constants.
 var (
-	FrontierBlockReward       = big.NewInt(5e+18)     // Block reward in wei for successfully mining a block
-	ByzantiumBlockReward      = big.NewInt(3e+18)     // Block reward in wei for successfully mining a block upward from Byzantium
-	ConstantinopleBlockReward = big.NewInt(2e+18)     // Block reward in wei for successfully mining a block upward from Constantinople
+	FrontierBlockReward       = uint256.NewInt(5e+18) // Block reward in wei for successfully mining a block
+	ByzantiumBlockReward      = uint256.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium
+	ConstantinopleBlockReward = uint256.NewInt(2e+18) // Block reward in wei for successfully mining a block upward from Constantinople
 	maxUncles                     = 2                 // Maximum number of uncles allowed in a single block
 	allowedFutureBlockTimeSeconds = int64(15)         // Max seconds from current time allowed for blocks, before they're considered future blocks

@ -562,8 +563,8 @@ func (ethash *Ethash) SealHash(header *types.Header) (hash common.Hash) {

 // Some weird constants to avoid constant memory allocs for them.
 var (
-	big8  = big.NewInt(8)
-	big32 = big.NewInt(32)
+	u256_8  = uint256.NewInt(8)
+	u256_32 = uint256.NewInt(32)
 )

 // AccumulateRewards credits the coinbase of the given block with the mining
@ -579,16 +580,18 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header
 		blockReward = ConstantinopleBlockReward
 	}
 	// Accumulate the rewards for the miner and any included uncles
-	reward := new(big.Int).Set(blockReward)
-	r := new(big.Int)
+	reward := new(uint256.Int).Set(blockReward)
+	r := new(uint256.Int)
+	hNum, _ := uint256.FromBig(header.Number)
 	for _, uncle := range uncles {
-		r.Add(uncle.Number, big8)
-		r.Sub(r, header.Number)
+		uNum, _ := uint256.FromBig(uncle.Number)
+		r.AddUint64(uNum, 8)
+		r.Sub(r, hNum)
 		r.Mul(r, blockReward)
-		r.Div(r, big8)
+		r.Div(r, u256_8)
 		state.AddBalance(uncle.Coinbase, r)

-		r.Div(blockReward, big32)
+		r.Div(blockReward, u256_32)
 		reward.Add(reward, r)
 	}
 	state.AddBalance(header.Coinbase, reward)
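Note (editorial, not part of the diff): the ethash change keeps the reward formula, uncle reward = (uncleNumber + 8 - headerNumber) * blockReward / 8, and only swaps math/big for uint256 arithmetic. A standalone sketch of that arithmetic, with made-up numbers:

package main

import (
	"fmt"
	"math/big"

	"github.com/holiman/uint256"
)

// uncleReward evaluates (uncleNumber + 8 - headerNumber) * blockReward / 8
// with uint256, as accumulateRewards now does.
func uncleReward(blockReward *uint256.Int, headerNumber, uncleNumber *big.Int) *uint256.Int {
	hNum, _ := uint256.FromBig(headerNumber)
	uNum, _ := uint256.FromBig(uncleNumber)
	r := new(uint256.Int)
	r.AddUint64(uNum, 8)          // uncleNumber + 8
	r.Sub(r, hNum)                // - headerNumber
	r.Mul(r, blockReward)         // * blockReward
	r.Div(r, uint256.NewInt(8))   // / 8
	return r
}

func main() {
	r := uncleReward(uint256.NewInt(2e18), big.NewInt(100), big.NewInt(99))
	fmt.Println(r.ToBig()) // (99+8-100) * 2e18 / 8 = 1750000000000000000
}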
@ -24,6 +24,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/params"
+	"github.com/holiman/uint256"
 )

 var (
@ -81,6 +82,6 @@ func ApplyDAOHardFork(statedb *state.StateDB) {
 	// Move every DAO account and extra-balance account funds into the refund contract
 	for _, addr := range params.DAODrainList() {
 		statedb.AddBalance(params.DAORefundContract, statedb.GetBalance(addr))
-		statedb.SetBalance(addr, new(big.Int))
+		statedb.SetBalance(addr, new(uint256.Int))
 	}
 }
@ -189,7 +189,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
 	// generator function.
 	gspec := &Genesis{
 		Config: params.TestChainConfig,
-		Alloc:  GenesisAlloc{benchRootAddr: {Balance: benchRootFunds}},
+		Alloc:  types.GenesisAlloc{benchRootAddr: {Balance: benchRootFunds}},
 	}
 	_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), b.N, gen)

@ -243,7 +243,7 @@ func BenchmarkChainWrite_full_500k(b *testing.B) {

 // makeChainForBench writes a given number of headers or empty blocks/receipts
 // into a database.
-func makeChainForBench(db ethdb.Database, full bool, count uint64) {
+func makeChainForBench(db ethdb.Database, genesis *Genesis, full bool, count uint64) {
 	var hash common.Hash
 	for n := uint64(0); n < count; n++ {
 		header := &types.Header{
@ -255,6 +255,9 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) {
 			TxHash:      types.EmptyTxsHash,
 			ReceiptHash: types.EmptyReceiptsHash,
 		}
+		if n == 0 {
+			header = genesis.ToBlock().Header()
+		}
 		hash = header.Hash()

 		rawdb.WriteHeader(db, header)
@ -262,7 +265,7 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) {
 		rawdb.WriteTd(db, hash, n, big.NewInt(int64(n+1)))

 		if n == 0 {
-			rawdb.WriteChainConfig(db, hash, params.AllEthashProtocolChanges)
+			rawdb.WriteChainConfig(db, hash, genesis.Config)
 		}
 		rawdb.WriteHeadHeaderHash(db, hash)

@ -276,13 +279,14 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) {
 }

 func benchWriteChain(b *testing.B, full bool, count uint64) {
+	genesis := &Genesis{Config: params.AllEthashProtocolChanges}
 	for i := 0; i < b.N; i++ {
 		dir := b.TempDir()
 		db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
 		if err != nil {
 			b.Fatalf("error opening database at %v: %v", dir, err)
 		}
-		makeChainForBench(db, full, count)
+		makeChainForBench(db, genesis, full, count)
 		db.Close()
 	}
 }
@ -294,7 +298,8 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
 	if err != nil {
 		b.Fatalf("error opening database at %v: %v", dir, err)
 	}
-	makeChainForBench(db, full, count)
+	genesis := &Genesis{Config: params.AllEthashProtocolChanges}
+	makeChainForBench(db, genesis, full, count)
 	db.Close()
 	cacheConfig := *defaultCacheConfig
 	cacheConfig.TrieDirtyDisabled = true
@ -307,7 +312,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
 	if err != nil {
 		b.Fatalf("error opening database at %v: %v", dir, err)
 	}
-	chain, err := NewBlockChain(db, &cacheConfig, nil, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+	chain, err := NewBlockChain(db, &cacheConfig, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
 	if err != nil {
 		b.Fatalf("error creating chain: %v", err)
 	}
@ -106,7 +106,7 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
 		gspec = &Genesis{
 			Config:    &config,
 			ExtraData: make([]byte, 32+common.AddressLength+crypto.SignatureLength),
-			Alloc: map[common.Address]GenesisAccount{
+			Alloc: map[common.Address]types.Account{
 				addr: {Balance: big.NewInt(1)},
 			},
 			BaseFee: big.NewInt(params.InitialBaseFee),
@ -47,9 +47,9 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/triedb"
|
||||||
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
|
"github.com/ethereum/go-ethereum/triedb/hashdb"
|
||||||
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
|
"github.com/ethereum/go-ethereum/triedb/pathdb"
|
||||||
"golang.org/x/exp/slices"
|
"golang.org/x/exp/slices"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -149,8 +149,8 @@ type CacheConfig struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// triedbConfig derives the configures for trie database.
|
// triedbConfig derives the configures for trie database.
|
||||||
func (c *CacheConfig) triedbConfig() *trie.Config {
|
func (c *CacheConfig) triedbConfig() *triedb.Config {
|
||||||
config := &trie.Config{Preimages: c.Preimages}
|
config := &triedb.Config{Preimages: c.Preimages}
|
||||||
if c.StateScheme == rawdb.HashScheme {
|
if c.StateScheme == rawdb.HashScheme {
|
||||||
config.HashDB = &hashdb.Config{
|
config.HashDB = &hashdb.Config{
|
||||||
CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
|
CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
|
||||||
@ -185,6 +185,13 @@ func DefaultCacheConfigWithScheme(scheme string) *CacheConfig {
|
|||||||
return &config
|
return &config
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// txLookup is wrapper over transaction lookup along with the corresponding
|
||||||
|
// transaction object.
|
||||||
|
type txLookup struct {
|
||||||
|
lookup *rawdb.LegacyTxLookupEntry
|
||||||
|
transaction *types.Transaction
|
||||||
|
}
|
||||||
|
|
||||||
// BlockChain represents the canonical chain given a database with a genesis
|
// BlockChain represents the canonical chain given a database with a genesis
|
||||||
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
|
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
|
||||||
//
|
//
|
||||||
@ -209,15 +216,9 @@ type BlockChain struct {
|
|||||||
gcproc time.Duration // Accumulates canonical block processing for trie dumping
|
gcproc time.Duration // Accumulates canonical block processing for trie dumping
|
||||||
lastWrite uint64 // Last block when the state was flushed
|
lastWrite uint64 // Last block when the state was flushed
|
||||||
flushInterval atomic.Int64 // Time interval (processing time) after which to flush a state
|
flushInterval atomic.Int64 // Time interval (processing time) after which to flush a state
|
||||||
triedb *trie.Database // The database handler for maintaining trie nodes.
|
triedb *triedb.Database // The database handler for maintaining trie nodes.
|
||||||
stateCache state.Database // State database to reuse between imports (contains state cache)
|
stateCache state.Database // State database to reuse between imports (contains state cache)
|
||||||
|
txIndexer *txIndexer // Transaction indexer, might be nil if not enabled
|
||||||
// txLookupLimit is the maximum number of blocks from head whose tx indices
|
|
||||||
// are reserved:
|
|
||||||
// * 0: means no limit and regenerate any missing indexes
|
|
||||||
// * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes
|
|
||||||
// * nil: disable tx reindexer/deleter, but still index new blocks
|
|
||||||
txLookupLimit uint64
|
|
||||||
|
|
||||||
hc *HeaderChain
|
hc *HeaderChain
|
||||||
rmLogsFeed event.Feed
|
rmLogsFeed event.Feed
|
||||||
@ -242,12 +243,12 @@ type BlockChain struct {
|
|||||||
bodyRLPCache *lru.Cache[common.Hash, rlp.RawValue]
|
bodyRLPCache *lru.Cache[common.Hash, rlp.RawValue]
|
||||||
receiptsCache *lru.Cache[common.Hash, []*types.Receipt]
|
receiptsCache *lru.Cache[common.Hash, []*types.Receipt]
|
||||||
blockCache *lru.Cache[common.Hash, *types.Block]
|
blockCache *lru.Cache[common.Hash, *types.Block]
|
||||||
txLookupCache *lru.Cache[common.Hash, *rawdb.LegacyTxLookupEntry]
|
txLookupCache *lru.Cache[common.Hash, txLookup]
|
||||||
|
|
||||||
// future blocks are blocks added for later processing
|
// future blocks are blocks added for later processing
|
||||||
futureBlocks *lru.Cache[common.Hash, *types.Block]
|
futureBlocks *lru.Cache[common.Hash, *types.Block]
|
||||||
|
|
||||||
wg sync.WaitGroup //
|
wg sync.WaitGroup
|
||||||
quit chan struct{} // shutdown signal, closed in Stop.
|
quit chan struct{} // shutdown signal, closed in Stop.
|
||||||
stopping atomic.Bool // false if chain is running, true when stopped
|
stopping atomic.Bool // false if chain is running, true when stopped
|
||||||
procInterrupt atomic.Bool // interrupt signaler for block processing
|
procInterrupt atomic.Bool // interrupt signaler for block processing
|
||||||
@ -268,7 +269,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
|
|||||||
cacheConfig = defaultCacheConfig
|
cacheConfig = defaultCacheConfig
|
||||||
}
|
}
|
||||||
// Open trie database with provided config
|
// Open trie database with provided config
|
||||||
triedb := trie.NewDatabase(db, cacheConfig.triedbConfig())
|
triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig())
|
||||||
|
|
||||||
// Setup the genesis block, commit the provided genesis specification
|
// Setup the genesis block, commit the provided genesis specification
|
||||||
// to database if the genesis block is not present yet, or load the
|
// to database if the genesis block is not present yet, or load the
|
||||||
@ -297,7 +298,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
|
|||||||
bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit),
|
bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit),
|
||||||
receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit),
|
receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit),
|
||||||
blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
|
blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
|
||||||
txLookupCache: lru.NewCache[common.Hash, *rawdb.LegacyTxLookupEntry](txLookupCacheLimit),
|
txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit),
|
||||||
futureBlocks: lru.NewCache[common.Hash, *types.Block](maxFutureBlocks),
|
futureBlocks: lru.NewCache[common.Hash, *types.Block](maxFutureBlocks),
|
||||||
engine: engine,
|
engine: engine,
|
||||||
vmConfig: vmConfig,
|
vmConfig: vmConfig,
|
||||||
@ -463,12 +464,9 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
|
|||||||
}
|
}
|
||||||
rawdb.WriteChainConfig(db, genesisHash, chainConfig)
|
rawdb.WriteChainConfig(db, genesisHash, chainConfig)
|
||||||
}
|
}
|
||||||
// Start tx indexer/unindexer if required.
|
// Start tx indexer if it's enabled.
|
||||||
if txLookupLimit != nil {
|
if txLookupLimit != nil {
|
||||||
bc.txLookupLimit = *txLookupLimit
|
bc.txIndexer = newTxIndexer(*txLookupLimit, bc)
|
||||||
|
|
||||||
bc.wg.Add(1)
|
|
||||||
go bc.maintainTxIndex()
|
|
||||||
}
|
}
|
||||||
return bc, nil
|
return bc, nil
|
||||||
}
|
}
|
||||||
@ -958,7 +956,10 @@ func (bc *BlockChain) stopWithoutSaving() {
|
|||||||
if !bc.stopping.CompareAndSwap(false, true) {
|
if !bc.stopping.CompareAndSwap(false, true) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
// Signal shutdown tx indexer.
|
||||||
|
if bc.txIndexer != nil {
|
||||||
|
bc.txIndexer.close()
|
||||||
|
}
|
||||||
// Unsubscribe all subscriptions registered from blockchain.
|
// Unsubscribe all subscriptions registered from blockchain.
|
||||||
bc.scope.Close()
|
bc.scope.Close()
|
||||||
|
|
||||||
@ -1155,14 +1156,13 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
|
|||||||
// Ensure genesis is in ancients.
|
// Ensure genesis is in ancients.
|
||||||
if first.NumberU64() == 1 {
|
if first.NumberU64() == 1 {
|
||||||
if frozen, _ := bc.db.Ancients(); frozen == 0 {
|
if frozen, _ := bc.db.Ancients(); frozen == 0 {
|
||||||
b := bc.genesisBlock
|
|
||||||
td := bc.genesisBlock.Difficulty()
|
td := bc.genesisBlock.Difficulty()
|
||||||
writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{b}, []types.Receipts{nil}, td)
|
writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []types.Receipts{nil}, td)
|
||||||
size += writeSize
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Error writing genesis to ancients", "err", err)
|
log.Error("Error writing genesis to ancients", "err", err)
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
size += writeSize
|
||||||
log.Info("Wrote genesis to ancients")
|
log.Info("Wrote genesis to ancients")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1176,44 +1176,11 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
|
|||||||
// Write all chain data to ancients.
|
// Write all chain data to ancients.
|
||||||
td := bc.GetTd(first.Hash(), first.NumberU64())
|
td := bc.GetTd(first.Hash(), first.NumberU64())
|
||||||
writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain, td)
|
writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain, td)
|
||||||
size += writeSize
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Error importing chain data to ancients", "err", err)
|
log.Error("Error importing chain data to ancients", "err", err)
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
size += writeSize
|
||||||
// Write tx indices if any condition is satisfied:
|
|
||||||
// * If user requires to reserve all tx indices(txlookuplimit=0)
|
|
||||||
// * If all ancient tx indices are required to be reserved(txlookuplimit is even higher than ancientlimit)
|
|
||||||
// * If block number is large enough to be regarded as a recent block
|
|
||||||
// It means blocks below the ancientLimit-txlookupLimit won't be indexed.
|
|
||||||
//
|
|
||||||
// But if the `TxIndexTail` is not nil, e.g. Geth is initialized with
|
|
||||||
// an external ancient database, during the setup, blockchain will start
|
|
||||||
// a background routine to re-indexed all indices in [ancients - txlookupLimit, ancients)
|
|
||||||
// range. In this case, all tx indices of newly imported blocks should be
|
|
||||||
// generated.
|
|
||||||
batch := bc.db.NewBatch()
|
|
||||||
for i, block := range blockChain {
|
|
||||||
if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
|
|
||||||
rawdb.WriteTxLookupEntriesByBlock(batch, block)
|
|
||||||
} else if rawdb.ReadTxIndexTail(bc.db) != nil {
|
|
||||||
rawdb.WriteTxLookupEntriesByBlock(batch, block)
|
|
||||||
}
|
|
||||||
stats.processed++
|
|
||||||
|
|
||||||
if batch.ValueSize() > ethdb.IdealBatchSize || i == len(blockChain)-1 {
|
|
||||||
size += int64(batch.ValueSize())
|
|
||||||
if err = batch.Write(); err != nil {
|
|
||||||
snapBlock := bc.CurrentSnapBlock().Number.Uint64()
|
|
||||||
if _, err := bc.db.TruncateHead(snapBlock + 1); err != nil {
|
|
||||||
log.Error("Can't truncate ancient store after failed insert", "err", err)
|
|
||||||
}
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
batch.Reset()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sync the ancient store explicitly to ensure all data has been flushed to disk.
|
// Sync the ancient store explicitly to ensure all data has been flushed to disk.
|
||||||
if err := bc.db.Sync(); err != nil {
|
if err := bc.db.Sync(); err != nil {
|
||||||
@ -1231,8 +1198,10 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Delete block data from the main database.
|
// Delete block data from the main database.
|
||||||
batch.Reset()
|
var (
|
||||||
canonHashes := make(map[common.Hash]struct{})
|
batch = bc.db.NewBatch()
|
||||||
|
canonHashes = make(map[common.Hash]struct{})
|
||||||
|
)
|
||||||
for _, block := range blockChain {
|
for _, block := range blockChain {
|
||||||
canonHashes[block.Hash()] = struct{}{}
|
canonHashes[block.Hash()] = struct{}{}
|
||||||
if block.NumberU64() == 0 {
|
if block.NumberU64() == 0 {
|
||||||
@ -1250,13 +1219,16 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
|
|||||||
if err := batch.Write(); err != nil {
|
if err := batch.Write(); err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
stats.processed += int32(len(blockChain))
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeLive writes blockchain and corresponding receipt chain into active store.
|
// writeLive writes blockchain and corresponding receipt chain into active store.
|
||||||
writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
|
writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
|
||||||
skipPresenceCheck := false
|
var (
|
||||||
batch := bc.db.NewBatch()
|
skipPresenceCheck = false
|
||||||
|
batch = bc.db.NewBatch()
|
||||||
|
)
|
||||||
for i, block := range blockChain {
|
for i, block := range blockChain {
|
||||||
// Short circuit insertion if shutting down or processing failed
|
// Short circuit insertion if shutting down or processing failed
|
||||||
if bc.insertStopped() {
|
if bc.insertStopped() {
|
||||||
@ -1281,11 +1253,10 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
|
|||||||
// Write all the data out into the database
|
// Write all the data out into the database
|
||||||
rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
|
rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
|
||||||
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
|
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
|
||||||
rawdb.WriteTxLookupEntriesByBlock(batch, block) // Always write tx indices for live blocks, we assume they are needed
|
|
||||||
|
|
||||||
// Write everything belongs to the blocks into the database. So that
|
// Write everything belongs to the blocks into the database. So that
|
||||||
// we can ensure all components of body is completed(body, receipts,
|
// we can ensure all components of body is completed(body, receipts)
|
||||||
// tx indexes)
|
// except transaction indexes(will be created once sync is finished).
|
||||||
if batch.ValueSize() >= ethdb.IdealBatchSize {
|
if batch.ValueSize() >= ethdb.IdealBatchSize {
|
||||||
if err := batch.Write(); err != nil {
|
if err := batch.Write(); err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
@ -1317,19 +1288,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
|
|||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Write the tx index tail (block number from where we index) before write any live blocks
|
|
||||||
if len(liveBlocks) > 0 && liveBlocks[0].NumberU64() == ancientLimit+1 {
|
|
||||||
// The tx index tail can only be one of the following two options:
|
|
||||||
// * 0: all ancient blocks have been indexed
|
|
||||||
// * ancient-limit: the indices of blocks before ancient-limit are ignored
|
|
||||||
if tail := rawdb.ReadTxIndexTail(bc.db); tail == nil {
|
|
||||||
if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit {
|
|
||||||
rawdb.WriteTxIndexTail(bc.db, 0)
|
|
||||||
} else {
|
|
||||||
rawdb.WriteTxIndexTail(bc.db, ancientLimit-bc.txLookupLimit)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(liveBlocks) > 0 {
|
if len(liveBlocks) > 0 {
|
||||||
if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
|
if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
|
||||||
if err == errInsertionInterrupted {
|
if err == errInsertionInterrupted {
|
||||||
@ -1338,13 +1296,14 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
|
|||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
var (
|
||||||
head := blockChain[len(blockChain)-1]
|
head = blockChain[len(blockChain)-1]
|
||||||
context := []interface{}{
|
context = []interface{}{
|
||||||
"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
|
"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
|
||||||
"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
|
"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
|
||||||
"size", common.StorageSize(size),
|
"size", common.StorageSize(size),
|
||||||
}
|
}
|
||||||
|
)
|
||||||
if stats.ignored > 0 {
|
if stats.ignored > 0 {
|
||||||
context = append(context, []interface{}{"ignored", stats.ignored}...)
|
context = append(context, []interface{}{"ignored", stats.ignored}...)
|
||||||
}
|
}
|
||||||
@ -1360,7 +1319,6 @@ func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (e
|
|||||||
if bc.insertStopped() {
|
if bc.insertStopped() {
|
||||||
return errInsertionInterrupted
|
return errInsertionInterrupted
|
||||||
}
|
}
|
||||||
|
|
||||||
batch := bc.db.NewBatch()
|
batch := bc.db.NewBatch()
|
||||||
rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td)
|
rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td)
|
||||||
rawdb.WriteBlock(batch, block)
|
rawdb.WriteBlock(batch, block)
|
||||||
@ -1737,7 +1695,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
|
|||||||
// The chain importer is starting and stopping trie prefetchers. If a bad
|
// The chain importer is starting and stopping trie prefetchers. If a bad
|
||||||
// block or other error is hit however, an early return may not properly
|
// block or other error is hit however, an early return may not properly
|
||||||
// terminate the background threads. This defer ensures that we clean up
|
// terminate the background threads. This defer ensures that we clean up
|
||||||
// and dangling prefetcher, without defering each and holding on live refs.
|
// and dangling prefetcher, without deferring each and holding on live refs.
|
||||||
if activeState != nil {
|
if activeState != nil {
|
||||||
activeState.StopPrefetcher()
|
activeState.StopPrefetcher()
|
||||||
}
|
}
|
||||||
@ -2255,6 +2213,12 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
|
|||||||
// rewind the canonical chain to a lower point.
|
// rewind the canonical chain to a lower point.
|
||||||
log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain))
|
log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain))
|
||||||
}
|
}
|
||||||
|
// Reset the tx lookup cache in case to clear stale txlookups.
|
||||||
|
// This is done before writing any new chain data to avoid the
|
||||||
|
// weird scenario that canonical chain is changed while the
|
||||||
|
// stale lookups are still cached.
|
||||||
|
bc.txLookupCache.Purge()
|
||||||
|
|
||||||
// Insert the new chain(except the head block(reverse order)),
|
// Insert the new chain(except the head block(reverse order)),
|
||||||
// taking care of the proper incremental order.
|
// taking care of the proper incremental order.
|
||||||
for i := len(newChain) - 1; i >= 1; i-- {
|
for i := len(newChain) - 1; i >= 1; i-- {
|
||||||
@ -2269,11 +2233,13 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
|
|||||||
|
|
||||||
// Delete useless indexes right now which includes the non-canonical
|
// Delete useless indexes right now which includes the non-canonical
|
||||||
// transaction indexes, canonical chain indexes which above the head.
|
// transaction indexes, canonical chain indexes which above the head.
|
||||||
indexesBatch := bc.db.NewBatch()
|
var (
|
||||||
for _, tx := range types.HashDifference(deletedTxs, addedTxs) {
|
indexesBatch = bc.db.NewBatch()
|
||||||
|
diffs = types.HashDifference(deletedTxs, addedTxs)
|
||||||
|
)
|
||||||
|
for _, tx := range diffs {
|
||||||
rawdb.DeleteTxLookupEntry(indexesBatch, tx)
|
rawdb.DeleteTxLookupEntry(indexesBatch, tx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete all hash markers that are not part of the new canonical chain.
|
// Delete all hash markers that are not part of the new canonical chain.
|
||||||
// Because the reorg function does not handle new chain head, all hash
|
// Because the reorg function does not handle new chain head, all hash
|
||||||
// markers greater than or equal to new chain head should be deleted.
|
// markers greater than or equal to new chain head should be deleted.
|
||||||
@@ -2456,102 +2422,6 @@ func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
    return false
}

-// indexBlocks reindexes or unindexes transactions depending on user configuration
-func (bc *BlockChain) indexBlocks(tail *uint64, head uint64, done chan struct{}) {
-    defer func() { close(done) }()
-
-    // If head is 0, it means the chain is just initialized and no blocks are inserted,
-    // so don't need to indexing anything.
-    if head == 0 {
-        return
-    }
-
-    // The tail flag is not existent, it means the node is just initialized
-    // and all blocks(may from ancient store) are not indexed yet.
-    if tail == nil {
-        from := uint64(0)
-        if bc.txLookupLimit != 0 && head >= bc.txLookupLimit {
-            from = head - bc.txLookupLimit + 1
-        }
-        rawdb.IndexTransactions(bc.db, from, head+1, bc.quit)
-        return
-    }
-    // The tail flag is existent, but the whole chain is required to be indexed.
-    if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
-        if *tail > 0 {
-            // It can happen when chain is rewound to a historical point which
-            // is even lower than the indexes tail, recap the indexing target
-            // to new head to avoid reading non-existent block bodies.
-            end := *tail
-            if end > head+1 {
-                end = head + 1
-            }
-            rawdb.IndexTransactions(bc.db, 0, end, bc.quit)
-        }
-        return
-    }
-    // Update the transaction index to the new chain state
-    if head-bc.txLookupLimit+1 < *tail {
-        // Reindex a part of missing indices and rewind index tail to HEAD-limit
-        rawdb.IndexTransactions(bc.db, head-bc.txLookupLimit+1, *tail, bc.quit)
-    } else {
-        // Unindex a part of stale indices and forward index tail to HEAD-limit
-        rawdb.UnindexTransactions(bc.db, *tail, head-bc.txLookupLimit+1, bc.quit)
-    }
-}
-
-// maintainTxIndex is responsible for the construction and deletion of the
-// transaction index.
-//
-// User can use flag `txlookuplimit` to specify a "recentness" block, below
-// which ancient tx indices get deleted. If `txlookuplimit` is 0, it means
-// all tx indices will be reserved.
-//
-// The user can adjust the txlookuplimit value for each launch after sync,
-// Geth will automatically construct the missing indices or delete the extra
-// indices.
-func (bc *BlockChain) maintainTxIndex() {
-    defer bc.wg.Done()
-
-    // Listening to chain events and manipulate the transaction indexes.
-    var (
-        done   chan struct{}                  // Non-nil if background unindexing or reindexing routine is active.
-        headCh = make(chan ChainHeadEvent, 1) // Buffered to avoid locking up the event feed
-    )
-    sub := bc.SubscribeChainHeadEvent(headCh)
-    if sub == nil {
-        return
-    }
-    defer sub.Unsubscribe()
-    log.Info("Initialized transaction indexer", "limit", bc.TxLookupLimit())
-
-    // Launch the initial processing if chain is not empty. This step is
-    // useful in these scenarios that chain has no progress and indexer
-    // is never triggered.
-    if head := rawdb.ReadHeadBlock(bc.db); head != nil {
-        done = make(chan struct{})
-        go bc.indexBlocks(rawdb.ReadTxIndexTail(bc.db), head.NumberU64(), done)
-    }
-
-    for {
-        select {
-        case head := <-headCh:
-            if done == nil {
-                done = make(chan struct{})
-                go bc.indexBlocks(rawdb.ReadTxIndexTail(bc.db), head.Block.NumberU64(), done)
-            }
-        case <-done:
-            done = nil
-        case <-bc.quit:
-            if done != nil {
-                log.Info("Waiting background transaction indexer to exit")
-                <-done
-            }
-            return
-        }
-    }
-}
-
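The removed indexBlocks/maintainTxIndex pair above is superseded by the standalone txIndexer, but the retention arithmetic it encoded is unchanged: with a non-zero txlookuplimit only the most recent blocks keep their transaction indexes. A standalone sketch of that tail computation; the helper name and sample values are illustrative, not geth code.

package main

import "fmt"

// expectedTail returns the oldest block whose transactions stay indexed for a
// given chain head and lookup limit; limit 0 (or a chain shorter than the
// limit) means the whole chain remains indexed.
func expectedTail(head, limit uint64) uint64 {
    if limit == 0 || head < limit {
        return 0
    }
    return head - limit + 1
}

func main() {
    fmt.Println(expectedTail(128, 0))  // 0  - keep everything
    fmt.Println(expectedTail(128, 32)) // 97 - keep the newest 32 blocks
    fmt.Println(expectedTail(10, 64))  // 0  - chain shorter than the limit
}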
// reportBlock logs a bad block error.
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
    rawdb.WriteBadBlock(bc.db, block)
@@ -2618,7 +2488,7 @@ func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) {
    bc.flushInterval.Store(int64(interval))
}

-// GetTrieFlushInterval gets the in-memory tries flush interval
+// GetTrieFlushInterval gets the in-memory tries flushAlloc interval
func (bc *BlockChain) GetTrieFlushInterval() time.Duration {
    return time.Duration(bc.flushInterval.Load())
}
@@ -17,6 +17,7 @@
package core

import (
+    "errors"
    "math/big"

    "github.com/ethereum/go-ethereum/common"
@@ -29,7 +30,7 @@ import (
    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/rlp"
-    "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
)

// CurrentHeader retrieves the current head header of the canonical chain. The
@@ -254,20 +255,46 @@ func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, max
    return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}

-// GetTransactionLookup retrieves the lookup associate with the given transaction
-// hash from the cache or database.
-func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry {
+// GetTransactionLookup retrieves the lookup along with the transaction
+// itself associate with the given transaction hash.
+//
+// An error will be returned if the transaction is not found, and background
+// indexing for transactions is still in progress. The transaction might be
+// reachable shortly once it's indexed.
+//
+// A null will be returned in the transaction is not found and background
+// transaction indexing is already finished. The transaction is not existent
+// from the node's perspective.
+func (bc *BlockChain) GetTransactionLookup(hash common.Hash) (*rawdb.LegacyTxLookupEntry, *types.Transaction, error) {
    // Short circuit if the txlookup already in the cache, retrieve otherwise
-    if lookup, exist := bc.txLookupCache.Get(hash); exist {
-        return lookup
+    if item, exist := bc.txLookupCache.Get(hash); exist {
+        return item.lookup, item.transaction, nil
    }
    tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash)
    if tx == nil {
-        return nil
+        progress, err := bc.TxIndexProgress()
+        if err != nil {
+            return nil, nil, nil
        }
-    lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex}
-    bc.txLookupCache.Add(hash, lookup)
-    return lookup
+        // The transaction indexing is not finished yet, returning an
+        // error to explicitly indicate it.
+        if !progress.Done() {
+            return nil, nil, errors.New("transaction indexing still in progress")
+        }
+        // The transaction is already indexed, the transaction is either
+        // not existent or not in the range of index, returning null.
+        return nil, nil, nil
+    }
+    lookup := &rawdb.LegacyTxLookupEntry{
+        BlockHash:  blockHash,
+        BlockIndex: blockNumber,
+        Index:      txIndex,
+    }
+    bc.txLookupCache.Add(hash, txLookup{
+        lookup:      lookup,
+        transaction: tx,
+    })
+    return lookup, tx, nil
}

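GetTransactionLookup now distinguishes "still indexing" from "definitely unknown". A sketch of how a caller might branch on the new (lookup, tx, error) triple; the wrapper function is illustrative, only the signature and field names come from the hunk above.

package example

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core"
)

// describeTx branches on the three possible outcomes of the new signature:
// a non-nil error (indexing still running), nil results (unknown transaction),
// or a usable lookup entry plus the transaction itself.
func describeTx(bc *core.BlockChain, hash common.Hash) (string, error) {
    lookup, tx, err := bc.GetTransactionLookup(hash)
    switch {
    case err != nil:
        return "", err // indexer still catching up; worth retrying later
    case lookup == nil || tx == nil:
        return "not found", nil // indexing finished and the tx is unknown
    default:
        return fmt.Sprintf("tx %s in block %s at index %d",
            tx.Hash().Hex(), lookup.BlockHash.Hex(), lookup.Index), nil
    }
}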
// GetTd retrieves a block's total difficulty in the canonical chain from the
@@ -370,23 +397,24 @@ func (bc *BlockChain) GetVMConfig() *vm.Config {
    return &bc.vmConfig
}

-// SetTxLookupLimit is responsible for updating the txlookup limit to the
-// original one stored in db if the new mismatches with the old one.
-func (bc *BlockChain) SetTxLookupLimit(limit uint64) {
-    bc.txLookupLimit = limit
+// TxIndexProgress returns the transaction indexing progress.
+func (bc *BlockChain) TxIndexProgress() (TxIndexProgress, error) {
+    if bc.txIndexer == nil {
+        return TxIndexProgress{}, errors.New("tx indexer is not enabled")
    }
+    return bc.txIndexer.txIndexProgress()
-// TxLookupLimit retrieves the txlookup limit used by blockchain to prune
-// stale transaction indices.
-func (bc *BlockChain) TxLookupLimit() uint64 {
-    return bc.txLookupLimit
}

// TrieDB retrieves the low level trie database used for data storage.
-func (bc *BlockChain) TrieDB() *trie.Database {
+func (bc *BlockChain) TrieDB() *triedb.Database {
    return bc.triedb
}

+// HeaderChain returns the underlying header chain.
+func (bc *BlockChain) HeaderChain() *HeaderChain {
+    return bc.hc
+}
+
// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
    return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
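TxIndexProgress replaces the old SetTxLookupLimit/TxLookupLimit pair as the way to observe indexing state from outside. A minimal polling sketch; the helper name and the one-second backoff are illustrative.

package example

import (
    "time"

    "github.com/ethereum/go-ethereum/core"
)

// waitForTxIndex blocks until background transaction indexing has caught up,
// or returns the indexer's error (e.g. when indexing is disabled on the node).
func waitForTxIndex(bc *core.BlockChain) error {
    for {
        progress, err := bc.TxIndexProgress()
        if err != nil {
            return err
        }
        if progress.Done() {
            return nil
        }
        time.Sleep(time.Second)
    }
}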
@@ -34,9 +34,9 @@ import (
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/core/vm"
    "github.com/ethereum/go-ethereum/params"
-    "github.com/ethereum/go-ethereum/trie"
-    "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
-    "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+    "github.com/ethereum/go-ethereum/triedb"
+    "github.com/ethereum/go-ethereum/triedb/hashdb"
+    "github.com/ethereum/go-ethereum/triedb/pathdb"
)

// rewindTest is a test case for chain rollback upon user request.
@@ -2033,13 +2033,13 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
    }
    // Reopen the trie database without persisting in-memory dirty nodes.
    chain.triedb.Close()
-    dbconfig := &trie.Config{}
+    dbconfig := &triedb.Config{}
    if scheme == rawdb.PathScheme {
        dbconfig.PathDB = pathdb.Defaults
    } else {
        dbconfig.HashDB = hashdb.Defaults
    }
-    chain.triedb = trie.NewDatabase(chain.db, dbconfig)
+    chain.triedb = triedb.NewDatabase(chain.db, dbconfig)
    chain.stateCache = state.NewDatabaseWithNodeDB(chain.db, chain.triedb)

    // Force run a freeze cycle
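The trie database has moved from trie/... into its own triedb/... module, as the import and config hunks above show. A small sketch of opening it under either state scheme, assembled only from the calls that appear in those hunks; the helper name is illustrative.

package example

import (
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/triedb"
    "github.com/ethereum/go-ethereum/triedb/hashdb"
    "github.com/ethereum/go-ethereum/triedb/pathdb"
)

// openTrieDB picks the backend defaults for the requested state scheme and
// opens the relocated trie database on top of the given key-value store.
func openTrieDB(db ethdb.Database, scheme string) *triedb.Database {
    config := &triedb.Config{}
    if scheme == rawdb.PathScheme {
        config.PathDB = pathdb.Defaults
    } else {
        config.HashDB = hashdb.Defaults
    }
    return triedb.NewDatabase(db, config)
}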
@@ -40,6 +40,7 @@ import (
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/trie"
+    "github.com/holiman/uint256"
)

// So we can deterministically seed different blockchains
@@ -838,7 +839,7 @@ func testFastVsFullChains(t *testing.T, scheme string) {
        funds = big.NewInt(1000000000000000)
        gspec = &Genesis{
            Config:  params.TestChainConfig,
-            Alloc:   GenesisAlloc{address: {Balance: funds}},
+            Alloc:   types.GenesisAlloc{address: {Balance: funds}},
            BaseFee: big.NewInt(params.InitialBaseFee),
        }
        signer = types.LatestSigner(gspec.Config)
@@ -971,7 +972,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
        funds = big.NewInt(1000000000000000)
        gspec = &Genesis{
            Config:  params.TestChainConfig,
-            Alloc:   GenesisAlloc{address: {Balance: funds}},
+            Alloc:   types.GenesisAlloc{address: {Balance: funds}},
            BaseFee: big.NewInt(params.InitialBaseFee),
        }
    )
@@ -1091,7 +1092,7 @@ func testChainTxReorgs(t *testing.T, scheme string) {
        gspec = &Genesis{
            Config:   params.TestChainConfig,
            GasLimit: 3141592,
-            Alloc: GenesisAlloc{
+            Alloc: types.GenesisAlloc{
                addr1: {Balance: big.NewInt(1000000000000000)},
                addr2: {Balance: big.NewInt(1000000000000000)},
                addr3: {Balance: big.NewInt(1000000000000000)},
@@ -1206,7 +1207,7 @@ func testLogReorgs(t *testing.T, scheme string) {

        // this code generates a log
        code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
-        gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
+        gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
        signer = types.LatestSigner(gspec.Config)
    )

@@ -1263,7 +1264,7 @@ func testLogRebirth(t *testing.T, scheme string) {
    var (
        key1, _       = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
        addr1         = crypto.PubkeyToAddress(key1.PublicKey)
-        gspec         = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
+        gspec         = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
        signer        = types.LatestSigner(gspec.Config)
        engine        = ethash.NewFaker()
        blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
@@ -1345,7 +1346,7 @@ func testSideLogRebirth(t *testing.T, scheme string) {
    var (
        key1, _       = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
        addr1         = crypto.PubkeyToAddress(key1.PublicKey)
-        gspec         = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
+        gspec         = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
        signer        = types.LatestSigner(gspec.Config)
        blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
    )
@@ -1442,7 +1443,7 @@ func testReorgSideEvent(t *testing.T, scheme string) {
        addr1 = crypto.PubkeyToAddress(key1.PublicKey)
        gspec = &Genesis{
            Config: params.TestChainConfig,
-            Alloc:  GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}},
+            Alloc:  types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}},
        }
        signer = types.LatestSigner(gspec.Config)
    )
@@ -1585,7 +1586,7 @@ func testEIP155Transition(t *testing.T, scheme string) {
                EIP155Block:    big.NewInt(2),
                HomesteadBlock: new(big.Int),
            },
-            Alloc: GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
+            Alloc: types.GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
        }
    )
    genDb, blocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, block *BlockGen) {
@@ -1700,7 +1701,7 @@ func testEIP161AccountRemoval(t *testing.T, scheme string) {
                EIP150Block: new(big.Int),
                EIP158Block: big.NewInt(2),
            },
-            Alloc: GenesisAlloc{address: {Balance: funds}},
+            Alloc: types.GenesisAlloc{address: {Balance: funds}},
        }
    )
    _, blocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, block *BlockGen) {
@@ -1931,7 +1932,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) {
        key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
        address = crypto.PubkeyToAddress(key.PublicKey)
        funds   = big.NewInt(1000000000)
-        gspec   = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
+        gspec   = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{address: {Balance: funds}}}
    )
    height := uint64(1024)
    _, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)
@@ -2136,7 +2137,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon

        gspec = &Genesis{
            Config:  &chainConfig,
-            Alloc:   GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},
+            Alloc:   types.GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},
            BaseFee: big.NewInt(params.InitialBaseFee),
        }
        signer = types.LatestSigner(gspec.Config)
@@ -2722,191 +2723,6 @@ func testReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T, scheme strin
    }
}

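Across these test hunks the allocation types move out of package core: GenesisAlloc/GenesisAccount literals become types.GenesisAlloc/types.Account. A sketch of a genesis spec written against the new names; the helper and its arguments are illustrative.

package example

import (
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/params"
)

// newTestGenesis funds a single account using the relocated allocation types.
func newTestGenesis(addr common.Address, funds *big.Int) *core.Genesis {
    return &core.Genesis{
        Config:  params.TestChainConfig,
        BaseFee: big.NewInt(params.InitialBaseFee),
        Alloc: types.GenesisAlloc{
            addr: types.Account{Balance: funds},
        },
    }
}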
func TestTransactionIndices(t *testing.T) {
|
|
||||||
// Configure and generate a sample block chain
|
|
||||||
var (
|
|
||||||
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
||||||
address = crypto.PubkeyToAddress(key.PublicKey)
|
|
||||||
funds = big.NewInt(100000000000000000)
|
|
||||||
gspec = &Genesis{
|
|
||||||
Config: params.TestChainConfig,
|
|
||||||
Alloc: GenesisAlloc{address: {Balance: funds}},
|
|
||||||
BaseFee: big.NewInt(params.InitialBaseFee),
|
|
||||||
}
|
|
||||||
signer = types.LatestSigner(gspec.Config)
|
|
||||||
)
|
|
||||||
_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 128, func(i int, block *BlockGen) {
|
|
||||||
tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
block.AddTx(tx)
|
|
||||||
})
|
|
||||||
|
|
||||||
check := func(tail *uint64, chain *BlockChain) {
|
|
||||||
stored := rawdb.ReadTxIndexTail(chain.db)
|
|
||||||
if tail == nil && stored != nil {
|
|
||||||
t.Fatalf("Oldest indexded block mismatch, want nil, have %d", *stored)
|
|
||||||
}
|
|
||||||
if tail != nil && *stored != *tail {
|
|
||||||
t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored)
|
|
||||||
}
|
|
||||||
if tail != nil {
|
|
||||||
for i := *tail; i <= chain.CurrentBlock().Number.Uint64(); i++ {
|
|
||||||
block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
|
|
||||||
if block.Transactions().Len() == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, tx := range block.Transactions() {
|
|
||||||
if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil {
|
|
||||||
t.Fatalf("Miss transaction indice, number %d hash %s", i, tx.Hash().Hex())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for i := uint64(0); i < *tail; i++ {
|
|
||||||
block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
|
|
||||||
if block.Transactions().Len() == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, tx := range block.Transactions() {
|
|
||||||
if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil {
|
|
||||||
t.Fatalf("Transaction indice should be deleted, number %d hash %s", i, tx.Hash().Hex())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Init block chain with external ancients, check all needed indices has been indexed.
|
|
||||||
limit := []uint64{0, 32, 64, 128}
|
|
||||||
for _, l := range limit {
|
|
||||||
frdir := t.TempDir()
|
|
||||||
ancientDb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
|
|
||||||
rawdb.WriteAncientBlocks(ancientDb, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0))
|
|
||||||
|
|
||||||
l := l
|
|
||||||
chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to create tester chain: %v", err)
|
|
||||||
}
|
|
||||||
chain.indexBlocks(rawdb.ReadTxIndexTail(ancientDb), 128, make(chan struct{}))
|
|
||||||
|
|
||||||
var tail uint64
|
|
||||||
if l != 0 {
|
|
||||||
tail = uint64(128) - l + 1
|
|
||||||
}
|
|
||||||
check(&tail, chain)
|
|
||||||
chain.Stop()
|
|
||||||
ancientDb.Close()
|
|
||||||
os.RemoveAll(frdir)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reconstruct a block chain which only reserves HEAD-64 tx indices
|
|
||||||
ancientDb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
|
|
||||||
defer ancientDb.Close()
|
|
||||||
|
|
||||||
rawdb.WriteAncientBlocks(ancientDb, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0))
|
|
||||||
limit = []uint64{0, 64 /* drop stale */, 32 /* shorten history */, 64 /* extend history */, 0 /* restore all */}
|
|
||||||
for _, l := range limit {
|
|
||||||
l := l
|
|
||||||
chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to create tester chain: %v", err)
|
|
||||||
}
|
|
||||||
var tail uint64
|
|
||||||
if l != 0 {
|
|
||||||
tail = uint64(128) - l + 1
|
|
||||||
}
|
|
||||||
chain.indexBlocks(rawdb.ReadTxIndexTail(ancientDb), 128, make(chan struct{}))
|
|
||||||
check(&tail, chain)
|
|
||||||
chain.Stop()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSkipStaleTxIndicesInSnapSync(t *testing.T) {
|
|
||||||
testSkipStaleTxIndicesInSnapSync(t, rawdb.HashScheme)
|
|
||||||
testSkipStaleTxIndicesInSnapSync(t, rawdb.PathScheme)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testSkipStaleTxIndicesInSnapSync(t *testing.T, scheme string) {
|
|
||||||
// Configure and generate a sample block chain
|
|
||||||
var (
|
|
||||||
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
||||||
address = crypto.PubkeyToAddress(key.PublicKey)
|
|
||||||
funds = big.NewInt(100000000000000000)
|
|
||||||
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
|
|
||||||
signer = types.LatestSigner(gspec.Config)
|
|
||||||
)
|
|
||||||
_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 128, func(i int, block *BlockGen) {
|
|
||||||
tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
block.AddTx(tx)
|
|
||||||
})
|
|
||||||
|
|
||||||
check := func(tail *uint64, chain *BlockChain) {
|
|
||||||
stored := rawdb.ReadTxIndexTail(chain.db)
|
|
||||||
if tail == nil && stored != nil {
|
|
||||||
t.Fatalf("Oldest indexded block mismatch, want nil, have %d", *stored)
|
|
||||||
}
|
|
||||||
if tail != nil && *stored != *tail {
|
|
||||||
t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored)
|
|
||||||
}
|
|
||||||
if tail != nil {
|
|
||||||
for i := *tail; i <= chain.CurrentBlock().Number.Uint64(); i++ {
|
|
||||||
block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
|
|
||||||
if block.Transactions().Len() == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, tx := range block.Transactions() {
|
|
||||||
if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil {
|
|
||||||
t.Fatalf("Miss transaction indice, number %d hash %s", i, tx.Hash().Hex())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for i := uint64(0); i < *tail; i++ {
|
|
||||||
block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
|
|
||||||
if block.Transactions().Len() == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, tx := range block.Transactions() {
|
|
||||||
if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil {
|
|
||||||
t.Fatalf("Transaction indice should be deleted, number %d hash %s", i, tx.Hash().Hex())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to create temp freezer db: %v", err)
|
|
||||||
}
|
|
||||||
defer ancientDb.Close()
|
|
||||||
|
|
||||||
// Import all blocks into ancient db, only HEAD-32 indices are kept.
|
|
||||||
l := uint64(32)
|
|
||||||
chain, err := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to create tester chain: %v", err)
|
|
||||||
}
|
|
||||||
defer chain.Stop()
|
|
||||||
|
|
||||||
headers := make([]*types.Header, len(blocks))
|
|
||||||
for i, block := range blocks {
|
|
||||||
headers[i] = block.Header()
|
|
||||||
}
|
|
||||||
if n, err := chain.InsertHeaderChain(headers); err != nil {
|
|
||||||
t.Fatalf("failed to insert header %d: %v", n, err)
|
|
||||||
}
|
|
||||||
// The indices before ancient-N(32) should be ignored. After that all blocks should be indexed.
|
|
||||||
if n, err := chain.InsertReceiptChain(blocks, receipts, 64); err != nil {
|
|
||||||
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
|
|
||||||
}
|
|
||||||
tail := uint64(32)
|
|
||||||
check(&tail, chain)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Benchmarks large blocks with value transfers to non-existing accounts
func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks int, recipientFn func(uint64) common.Address, dataFn func(uint64) []byte) {
    var (
@@ -2916,7 +2732,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
        bankFunds = big.NewInt(100000000000000000)
        gspec     = &Genesis{
            Config: params.TestChainConfig,
-            Alloc: GenesisAlloc{
+            Alloc: types.GenesisAlloc{
                testBankAddress: {Balance: bankFunds},
                common.HexToAddress("0xc0de"): {
                    Code: []byte{0x60, 0x01, 0x50},
@@ -3094,7 +2910,7 @@ func testDeleteCreateRevert(t *testing.T, scheme string) {
        funds = big.NewInt(100000000000000000)
        gspec = &Genesis{
            Config: params.TestChainConfig,
-            Alloc: GenesisAlloc{
+            Alloc: types.GenesisAlloc{
                address: {Balance: funds},
                // The address 0xAAAAA selfdestructs if called
                aa: {
@@ -3218,7 +3034,7 @@ func testDeleteRecreateSlots(t *testing.T, scheme string) {

    gspec := &Genesis{
        Config: params.TestChainConfig,
-        Alloc: GenesisAlloc{
+        Alloc: types.GenesisAlloc{
            address: {Balance: funds},
            // The address 0xAAAAA selfdestructs if called
            aa: {
@@ -3304,7 +3120,7 @@ func testDeleteRecreateAccount(t *testing.T, scheme string) {

    gspec := &Genesis{
        Config: params.TestChainConfig,
-        Alloc: GenesisAlloc{
+        Alloc: types.GenesisAlloc{
            address: {Balance: funds},
            // The address 0xAAAAA selfdestructs if called
            aa: {
@@ -3425,7 +3241,7 @@ func testDeleteRecreateSlotsAcrossManyBlocks(t *testing.T, scheme string) {
    t.Logf("Destination address: %x\n", aa)
    gspec := &Genesis{
        Config: params.TestChainConfig,
-        Alloc: GenesisAlloc{
+        Alloc: types.GenesisAlloc{
            address: {Balance: funds},
            // The address 0xAAAAA selfdestructs if called
            aa: {
@@ -3620,7 +3436,7 @@ func testInitThenFailCreateContract(t *testing.T, scheme string) {

    gspec := &Genesis{
        Config: params.TestChainConfig,
-        Alloc: GenesisAlloc{
+        Alloc: types.GenesisAlloc{
            address: {Balance: funds},
            // The address aa has some funds
            aa: {Balance: big.NewInt(100000)},
@@ -3652,7 +3468,7 @@ func testInitThenFailCreateContract(t *testing.T, scheme string) {
    defer chain.Stop()

    statedb, _ := chain.State()
-    if got, exp := statedb.GetBalance(aa), big.NewInt(100000); got.Cmp(exp) != 0 {
+    if got, exp := statedb.GetBalance(aa), uint256.NewInt(100000); got.Cmp(exp) != 0 {
        t.Fatalf("Genesis err, got %v exp %v", got, exp)
    }
    // First block tries to create, but fails
@@ -3662,7 +3478,7 @@ func testInitThenFailCreateContract(t *testing.T, scheme string) {
            t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
        }
        statedb, _ = chain.State()
-        if got, exp := statedb.GetBalance(aa), big.NewInt(100000); got.Cmp(exp) != 0 {
+        if got, exp := statedb.GetBalance(aa), uint256.NewInt(100000); got.Cmp(exp) != 0 {
            t.Fatalf("block %d: got %v exp %v", block.NumberU64(), got, exp)
        }
    }
@@ -3695,7 +3511,7 @@ func testEIP2718Transition(t *testing.T, scheme string) {
        funds = big.NewInt(1000000000000000)
        gspec = &Genesis{
            Config: params.TestChainConfig,
-            Alloc: GenesisAlloc{
+            Alloc: types.GenesisAlloc{
                address: {Balance: funds},
                // The address 0xAAAA sloads 0x00 and 0x01
                aa: {
@@ -3780,7 +3596,7 @@ func testEIP1559Transition(t *testing.T, scheme string) {
        config = *params.AllEthashProtocolChanges
        gspec  = &Genesis{
            Config: &config,
-            Alloc: GenesisAlloc{
+            Alloc: types.GenesisAlloc{
                addr1: {Balance: funds},
                addr2: {Balance: funds},
                // The address 0xAAAA sloads 0x00 and 0x01
@@ -3848,17 +3664,17 @@ func testEIP1559Transition(t *testing.T, scheme string) {
    state, _ := chain.State()

    // 3: Ensure that miner received only the tx's tip.
-    actual := state.GetBalance(block.Coinbase())
+    actual := state.GetBalance(block.Coinbase()).ToBig()
    expected := new(big.Int).Add(
        new(big.Int).SetUint64(block.GasUsed()*block.Transactions()[0].GasTipCap().Uint64()),
-        ethash.ConstantinopleBlockReward,
+        ethash.ConstantinopleBlockReward.ToBig(),
    )
    if actual.Cmp(expected) != 0 {
        t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual)
    }

    // 4: Ensure the tx sender paid for the gasUsed * (tip + block baseFee).
-    actual = new(big.Int).Sub(funds, state.GetBalance(addr1))
+    actual = new(big.Int).Sub(funds, state.GetBalance(addr1).ToBig())
    expected = new(big.Int).SetUint64(block.GasUsed() * (block.Transactions()[0].GasTipCap().Uint64() + block.BaseFee().Uint64()))
    if actual.Cmp(expected) != 0 {
        t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
@@ -3888,17 +3704,17 @@ func testEIP1559Transition(t *testing.T, scheme string) {
    effectiveTip := block.Transactions()[0].GasTipCap().Uint64() - block.BaseFee().Uint64()

    // 6+5: Ensure that miner received only the tx's effective tip.
-    actual = state.GetBalance(block.Coinbase())
+    actual = state.GetBalance(block.Coinbase()).ToBig()
    expected = new(big.Int).Add(
        new(big.Int).SetUint64(block.GasUsed()*effectiveTip),
-        ethash.ConstantinopleBlockReward,
+        ethash.ConstantinopleBlockReward.ToBig(),
    )
    if actual.Cmp(expected) != 0 {
        t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual)
    }

    // 4: Ensure the tx sender paid for the gasUsed * (effectiveTip + block baseFee).
-    actual = new(big.Int).Sub(funds, state.GetBalance(addr2))
+    actual = new(big.Int).Sub(funds, state.GetBalance(addr2).ToBig())
    expected = new(big.Int).SetUint64(block.GasUsed() * (effectiveTip + block.BaseFee().Uint64()))
    if actual.Cmp(expected) != 0 {
        t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
@@ -3921,7 +3737,7 @@ func testSetCanonical(t *testing.T, scheme string) {
        funds = big.NewInt(100000000000000000)
        gspec = &Genesis{
            Config:  params.TestChainConfig,
-            Alloc:   GenesisAlloc{address: {Balance: funds}},
+            Alloc:   types.GenesisAlloc{address: {Balance: funds}},
            BaseFee: big.NewInt(params.InitialBaseFee),
        }
        signer = types.LatestSigner(gspec.Config)
@@ -4038,7 +3854,7 @@ func testCanonicalHashMarker(t *testing.T, scheme string) {
    var (
        gspec = &Genesis{
            Config:  params.TestChainConfig,
-            Alloc:   GenesisAlloc{},
+            Alloc:   types.GenesisAlloc{},
            BaseFee: big.NewInt(params.InitialBaseFee),
        }
        engine = ethash.NewFaker()
@@ -4103,212 +3919,6 @@ func testCanonicalHashMarker(t *testing.T, scheme string) {
    }
}
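The balance assertions above change because StateDB balances are now *uint256.Int rather than *big.Int: tests either compare against uint256.NewInt(...) directly or convert with ToBig(). A small sketch of both styles, assuming the holiman/uint256 package; the helper name is illustrative.

package example

import (
    "math/big"

    "github.com/holiman/uint256"
)

// balanceMatches compares a uint256 balance against a big.Int expectation,
// first by converting the balance down to big.Int, then by lifting the
// expectation up to uint256 (FromBig reports overflow via its second result).
func balanceMatches(got *uint256.Int, want *big.Int) bool {
    if got.ToBig().Cmp(want) != 0 {
        return false
    }
    wantU, overflow := uint256.FromBig(want)
    return !overflow && got.Cmp(wantU) == 0
}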
|
|
||||||
// TestTxIndexer tests the tx indexes are updated correctly.
|
|
||||||
func TestTxIndexer(t *testing.T) {
|
|
||||||
var (
|
|
||||||
testBankKey, _ = crypto.GenerateKey()
|
|
||||||
testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
|
|
||||||
testBankFunds = big.NewInt(1000000000000000000)
|
|
||||||
|
|
||||||
gspec = &Genesis{
|
|
||||||
Config: params.TestChainConfig,
|
|
||||||
Alloc: GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
|
|
||||||
BaseFee: big.NewInt(params.InitialBaseFee),
|
|
||||||
}
|
|
||||||
engine = ethash.NewFaker()
|
|
||||||
nonce = uint64(0)
|
|
||||||
)
|
|
||||||
_, blocks, receipts := GenerateChainWithGenesis(gspec, engine, 128, func(i int, gen *BlockGen) {
|
|
||||||
tx, _ := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("0xdeadbeef"), big.NewInt(1000), params.TxGas, big.NewInt(10*params.InitialBaseFee), nil), types.HomesteadSigner{}, testBankKey)
|
|
||||||
gen.AddTx(tx)
|
|
||||||
nonce += 1
|
|
||||||
})
|
|
||||||
|
|
||||||
// verifyIndexes checks if the transaction indexes are present or not
|
|
||||||
// of the specified block.
|
|
||||||
verifyIndexes := func(db ethdb.Database, number uint64, exist bool) {
|
|
||||||
if number == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
block := blocks[number-1]
|
|
||||||
for _, tx := range block.Transactions() {
|
|
||||||
lookup := rawdb.ReadTxLookupEntry(db, tx.Hash())
|
|
||||||
if exist && lookup == nil {
|
|
||||||
t.Fatalf("missing %d %x", number, tx.Hash().Hex())
|
|
||||||
}
|
|
||||||
if !exist && lookup != nil {
|
|
||||||
t.Fatalf("unexpected %d %x", number, tx.Hash().Hex())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// verifyRange runs verifyIndexes for a range of blocks, from and to are included.
|
|
||||||
verifyRange := func(db ethdb.Database, from, to uint64, exist bool) {
|
|
||||||
for number := from; number <= to; number += 1 {
|
|
||||||
verifyIndexes(db, number, exist)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
verify := func(db ethdb.Database, expTail uint64) {
|
|
||||||
tail := rawdb.ReadTxIndexTail(db)
|
|
||||||
if tail == nil {
|
|
||||||
t.Fatal("Failed to write tx index tail")
|
|
||||||
}
|
|
||||||
if *tail != expTail {
|
|
||||||
t.Fatalf("Unexpected tx index tail, want %v, got %d", expTail, *tail)
|
|
||||||
}
|
|
||||||
if *tail != 0 {
|
|
||||||
verifyRange(db, 0, *tail-1, false)
|
|
||||||
}
|
|
||||||
verifyRange(db, *tail, 128, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
var cases = []struct {
|
|
||||||
limitA uint64
|
|
||||||
tailA uint64
|
|
||||||
limitB uint64
|
|
||||||
tailB uint64
|
|
||||||
limitC uint64
|
|
||||||
tailC uint64
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
// LimitA: 0
|
|
||||||
// TailA: 0
|
|
||||||
//
|
|
||||||
// all blocks are indexed
|
|
||||||
limitA: 0,
|
|
||||||
tailA: 0,
|
|
||||||
|
|
||||||
// LimitB: 1
|
|
||||||
// TailB: 128
|
|
||||||
//
|
|
||||||
// block-128 is indexed
|
|
||||||
limitB: 1,
|
|
||||||
tailB: 128,
|
|
||||||
|
|
||||||
// LimitB: 64
|
|
||||||
// TailB: 65
|
|
||||||
//
|
|
||||||
// block [65, 128] are indexed
|
|
||||||
limitC: 64,
|
|
||||||
tailC: 65,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// LimitA: 64
|
|
||||||
// TailA: 65
|
|
||||||
//
|
|
||||||
// block [65, 128] are indexed
|
|
||||||
limitA: 64,
|
|
||||||
tailA: 65,
|
|
||||||
|
|
||||||
// LimitB: 1
|
|
||||||
// TailB: 128
|
|
||||||
//
|
|
||||||
// block-128 is indexed
|
|
||||||
limitB: 1,
|
|
||||||
tailB: 128,
|
|
||||||
|
|
||||||
// LimitB: 64
|
|
||||||
// TailB: 65
|
|
||||||
//
|
|
||||||
// block [65, 128] are indexed
|
|
||||||
limitC: 64,
|
|
||||||
tailC: 65,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// LimitA: 127
|
|
||||||
// TailA: 2
|
|
||||||
//
|
|
||||||
// block [2, 128] are indexed
|
|
||||||
limitA: 127,
|
|
||||||
tailA: 2,
|
|
||||||
|
|
||||||
// LimitB: 1
|
|
||||||
// TailB: 128
|
|
||||||
//
|
|
||||||
// block-128 is indexed
|
|
||||||
limitB: 1,
|
|
||||||
tailB: 128,
|
|
||||||
|
|
||||||
// LimitB: 64
|
|
||||||
// TailB: 65
|
|
||||||
//
|
|
||||||
// block [65, 128] are indexed
|
|
||||||
limitC: 64,
|
|
||||||
tailC: 65,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// LimitA: 128
|
|
||||||
// TailA: 1
|
|
||||||
//
|
|
||||||
// block [2, 128] are indexed
|
|
||||||
limitA: 128,
|
|
||||||
tailA: 1,
|
|
||||||
|
|
||||||
// LimitB: 1
|
|
||||||
// TailB: 128
|
|
||||||
//
|
|
||||||
// block-128 is indexed
|
|
||||||
limitB: 1,
|
|
||||||
tailB: 128,
|
|
||||||
|
|
||||||
// LimitB: 64
|
|
||||||
// TailB: 65
|
|
||||||
//
|
|
||||||
// block [65, 128] are indexed
|
|
||||||
limitC: 64,
|
|
||||||
tailC: 65,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// LimitA: 129
|
|
||||||
// TailA: 0
|
|
||||||
//
|
|
||||||
// block [0, 128] are indexed
|
|
||||||
limitA: 129,
|
|
||||||
tailA: 0,
|
|
||||||
|
|
||||||
// LimitB: 1
|
|
||||||
// TailB: 128
|
|
||||||
//
|
|
||||||
// block-128 is indexed
|
|
||||||
limitB: 1,
|
|
||||||
tailB: 128,
|
|
||||||
|
|
||||||
// LimitB: 64
|
|
||||||
// TailB: 65
|
|
||||||
//
|
|
||||||
// block [65, 128] are indexed
|
|
||||||
limitC: 64,
|
|
||||||
tailC: 65,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, c := range cases {
|
|
||||||
frdir := t.TempDir()
|
|
||||||
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
|
|
||||||
rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0))
|
|
||||||
|
|
||||||
// Index the initial blocks from ancient store
|
|
||||||
chain, _ := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, &c.limitA)
|
|
||||||
chain.indexBlocks(nil, 128, make(chan struct{}))
|
|
||||||
verify(db, c.tailA)
|
|
||||||
|
|
||||||
chain.SetTxLookupLimit(c.limitB)
|
|
||||||
chain.indexBlocks(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}))
|
|
||||||
verify(db, c.tailB)
|
|
||||||
|
|
||||||
chain.SetTxLookupLimit(c.limitC)
|
|
||||||
chain.indexBlocks(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}))
|
|
||||||
verify(db, c.tailC)
|
|
||||||
|
|
||||||
// Recover all indexes
|
|
||||||
chain.SetTxLookupLimit(0)
|
|
||||||
chain.indexBlocks(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}))
|
|
||||||
verify(db, 0)
|
|
||||||
|
|
||||||
chain.Stop()
|
|
||||||
db.Close()
|
|
||||||
os.RemoveAll(frdir)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreateThenDeletePreByzantium(t *testing.T) {
|
func TestCreateThenDeletePreByzantium(t *testing.T) {
|
||||||
// We use Ropsten chain config instead of Testchain config, this is
|
// We use Ropsten chain config instead of Testchain config, this is
|
||||||
// deliberate: we want to use pre-byz rules where we have intermediate state roots
|
// deliberate: we want to use pre-byz rules where we have intermediate state roots
|
||||||
@ -4357,7 +3967,7 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) {
|
|||||||
}...)
|
}...)
|
||||||
gspec := &Genesis{
|
gspec := &Genesis{
|
||||||
Config: config,
|
Config: config,
|
||||||
Alloc: GenesisAlloc{
|
Alloc: types.GenesisAlloc{
|
||||||
address: {Balance: funds},
|
address: {Balance: funds},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -4443,7 +4053,7 @@ func TestDeleteThenCreate(t *testing.T) {
|
|||||||
|
|
||||||
gspec := &Genesis{
|
gspec := &Genesis{
|
||||||
Config: params.TestChainConfig,
|
Config: params.TestChainConfig,
|
||||||
Alloc: GenesisAlloc{
|
Alloc: types.GenesisAlloc{
|
||||||
address: {Balance: funds},
|
address: {Balance: funds},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -4555,7 +4165,7 @@ func TestTransientStorageReset(t *testing.T) {
|
|||||||
}...)
|
}...)
|
||||||
gspec := &Genesis{
|
gspec := &Genesis{
|
||||||
Config: params.TestChainConfig,
|
Config: params.TestChainConfig,
|
||||||
Alloc: GenesisAlloc{
|
Alloc: types.GenesisAlloc{
|
||||||
address: {Balance: funds},
|
address: {Balance: funds},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -4623,7 +4233,7 @@ func TestEIP3651(t *testing.T) {
|
|||||||
config = *params.AllEthashProtocolChanges
|
config = *params.AllEthashProtocolChanges
|
||||||
gspec = &Genesis{
|
gspec = &Genesis{
|
||||||
Config: &config,
|
Config: &config,
|
||||||
Alloc: GenesisAlloc{
|
Alloc: types.GenesisAlloc{
|
||||||
addr1: {Balance: funds},
|
addr1: {Balance: funds},
|
||||||
addr2: {Balance: funds},
|
addr2: {Balance: funds},
|
||||||
// The address 0xAAAA sloads 0x00 and 0x01
|
// The address 0xAAAA sloads 0x00 and 0x01
|
||||||
@ -4703,14 +4313,14 @@ func TestEIP3651(t *testing.T) {
|
|||||||
state, _ := chain.State()
|
state, _ := chain.State()
|
||||||
|
|
||||||
// 3: Ensure that miner received only the tx's tip.
|
// 3: Ensure that miner received only the tx's tip.
|
||||||
actual := state.GetBalance(block.Coinbase())
|
actual := state.GetBalance(block.Coinbase()).ToBig()
|
||||||
expected := new(big.Int).SetUint64(block.GasUsed() * block.Transactions()[0].GasTipCap().Uint64())
|
expected := new(big.Int).SetUint64(block.GasUsed() * block.Transactions()[0].GasTipCap().Uint64())
|
||||||
if actual.Cmp(expected) != 0 {
|
if actual.Cmp(expected) != 0 {
|
||||||
t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual)
|
t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual)
|
||||||
}
|
}
|
||||||
|
|
||||||
// 4: Ensure the tx sender paid for the gasUsed * (tip + block baseFee).
|
// 4: Ensure the tx sender paid for the gasUsed * (tip + block baseFee).
|
||||||
actual = new(big.Int).Sub(funds, state.GetBalance(addr1))
|
actual = new(big.Int).Sub(funds, state.GetBalance(addr1).ToBig())
|
||||||
expected = new(big.Int).SetUint64(block.GasUsed() * (block.Transactions()[0].GasTipCap().Uint64() + block.BaseFee().Uint64()))
|
expected = new(big.Int).SetUint64(block.GasUsed() * (block.Transactions()[0].GasTipCap().Uint64() + block.BaseFee().Uint64()))
|
||||||
if actual.Cmp(expected) != 0 {
|
if actual.Cmp(expected) != 0 {
|
||||||
t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
|
t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
|
||||||
|
@ -31,7 +31,8 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/triedb"
|
||||||
|
"github.com/holiman/uint256"
|
||||||
)
|
)
|
||||||
|
|
||||||
// BlockGen creates blocks for testing.
|
// BlockGen creates blocks for testing.
|
||||||
@ -82,7 +83,7 @@ func (b *BlockGen) SetDifficulty(diff *big.Int) {
|
|||||||
b.header.Difficulty = diff
|
b.header.Difficulty = diff
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetPos makes the header a PoS-header (0 difficulty)
|
// SetPoS makes the header a PoS-header (0 difficulty)
|
||||||
func (b *BlockGen) SetPoS() {
|
func (b *BlockGen) SetPoS() {
|
||||||
b.header.Difficulty = new(big.Int)
|
b.header.Difficulty = new(big.Int)
|
||||||
}
|
}
|
||||||
@ -157,7 +158,7 @@ func (b *BlockGen) AddTxWithVMConfig(tx *types.Transaction, config vm.Config) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetBalance returns the balance of the given address at the generated block.
|
// GetBalance returns the balance of the given address at the generated block.
|
||||||
func (b *BlockGen) GetBalance(addr common.Address) *big.Int {
|
func (b *BlockGen) GetBalance(addr common.Address) *uint256.Int {
|
||||||
return b.statedb.GetBalance(addr)
|
return b.statedb.GetBalance(addr)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -311,7 +312,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
|
|||||||
}
|
}
|
||||||
cm := newChainMaker(parent, config, engine)
|
cm := newChainMaker(parent, config, engine)
|
||||||
|
|
||||||
genblock := func(i int, parent *types.Block, triedb *trie.Database, statedb *state.StateDB) (*types.Block, types.Receipts) {
|
genblock := func(i int, parent *types.Block, triedb *triedb.Database, statedb *state.StateDB) (*types.Block, types.Receipts) {
|
||||||
b := &BlockGen{i: i, cm: cm, parent: parent, statedb: statedb, engine: engine}
|
b := &BlockGen{i: i, cm: cm, parent: parent, statedb: statedb, engine: engine}
|
||||||
b.header = cm.makeHeader(parent, statedb, b.engine)
|
b.header = cm.makeHeader(parent, statedb, b.engine)
|
||||||
|
|
||||||
@ -361,7 +362,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Forcibly use hash-based state scheme for retaining all nodes in disk.
|
// Forcibly use hash-based state scheme for retaining all nodes in disk.
|
||||||
triedb := trie.NewDatabase(db, trie.HashDefaults)
|
triedb := triedb.NewDatabase(db, triedb.HashDefaults)
|
||||||
defer triedb.Close()
|
defer triedb.Close()
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
for i := 0; i < n; i++ {
|
||||||
@ -406,7 +407,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
|
|||||||
// then generate chain on top.
|
// then generate chain on top.
|
||||||
func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts) {
|
func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts) {
|
||||||
db := rawdb.NewMemoryDatabase()
|
db := rawdb.NewMemoryDatabase()
|
||||||
triedb := trie.NewDatabase(db, trie.HashDefaults)
|
triedb := triedb.NewDatabase(db, triedb.HashDefaults)
|
 defer triedb.Close()
 _, err := genesis.Commit(db, triedb)
 if err != nil {
@@ -31,7 +31,7 @@ import (
     "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/params"
-    "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
 )
 
 func TestGeneratePOSChain(t *testing.T) {
@@ -46,7 +46,7 @@ func TestGeneratePOSChain(t *testing.T) {
     asm4788 = common.Hex2Bytes("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500")
     gspec = &Genesis{
         Config: &config,
-        Alloc: GenesisAlloc{
+        Alloc: types.GenesisAlloc{
             address: {Balance: funds},
             params.BeaconRootsStorageAddress: {Balance: common.Big0, Code: asm4788},
         },
@@ -69,19 +69,19 @@ func TestGeneratePOSChain(t *testing.T) {
     storage[common.Hash{0x01}] = common.Hash{0x01}
     storage[common.Hash{0x02}] = common.Hash{0x02}
     storage[common.Hash{0x03}] = common.HexToHash("0303")
-    gspec.Alloc[aa] = GenesisAccount{
+    gspec.Alloc[aa] = types.Account{
         Balance: common.Big1,
         Nonce:   1,
         Storage: storage,
         Code:    common.Hex2Bytes("6042"),
     }
-    gspec.Alloc[bb] = GenesisAccount{
+    gspec.Alloc[bb] = types.Account{
         Balance: common.Big2,
         Nonce:   1,
         Storage: storage,
         Code:    common.Hex2Bytes("600154600354"),
     }
-    genesis := gspec.MustCommit(gendb, trie.NewDatabase(gendb, trie.HashDefaults))
+    genesis := gspec.MustCommit(gendb, triedb.NewDatabase(gendb, triedb.HashDefaults))
 
     genchain, genreceipts := GenerateChain(gspec.Config, genesis, beacon.NewFaker(), gendb, 4, func(i int, gen *BlockGen) {
         gen.SetParentBeaconRoot(common.Hash{byte(i + 1)})
@@ -202,9 +202,9 @@ func ExampleGenerateChain() {
     // Ensure that key1 has some funds in the genesis block.
     gspec := &Genesis{
         Config: &params.ChainConfig{HomesteadBlock: new(big.Int)},
-        Alloc:  GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}},
+        Alloc:  types.GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}},
     }
-    genesis := gspec.MustCommit(genDb, trie.NewDatabase(genDb, trie.HashDefaults))
+    genesis := gspec.MustCommit(genDb, triedb.NewDatabase(genDb, triedb.HashDefaults))
 
     // This call generates a chain of 5 blocks. The function runs for
     // each block and adds different features to gen based on the
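The test changes above boil down to two substitutions: allocations are declared as types.GenesisAlloc (with types.Account entries) and the genesis is committed through the relocated triedb package. A minimal, self-contained sketch of the new call pattern — the account address and balance are placeholders, not values from this diff:

    package main

    import (
        "math/big"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/params"
        "github.com/ethereum/go-ethereum/triedb"
    )

    func main() {
        // One pre-funded account, declared with the types that now live in core/types.
        gspec := &core.Genesis{
            Config: params.AllEthashProtocolChanges,
            Alloc: types.GenesisAlloc{
                common.HexToAddress("0x01"): {Balance: big.NewInt(1)},
            },
        }
        db := rawdb.NewMemoryDatabase()
        // trie.NewDatabase/trie.HashDefaults became triedb.NewDatabase/triedb.HashDefaults.
        block := gspec.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults))
        _ = block
    }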
@@ -104,4 +104,10 @@ var (
     // ErrBlobFeeCapTooLow is returned if the transaction fee cap is less than the
     // blob gas fee of the block.
     ErrBlobFeeCapTooLow = errors.New("max fee per blob gas less than block blob gas fee")
+
+    // ErrMissingBlobHashes is returned if a blob transaction has no blob hashes.
+    ErrMissingBlobHashes = errors.New("blob transaction missing blob hashes")
+
+    // ErrBlobTxCreate is returned if a blob transaction has no explicit to field.
+    ErrBlobTxCreate = errors.New("blob transaction of type create")
 )
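The two new sentinel errors back EIP-4844 validation: a blob transaction must carry at least one versioned hash and may not be a contract creation. A rough sketch of the kind of pre-check they support — checkBlobTx is a hypothetical helper written for illustration, not a function from this change:

    package blobcheck

    import (
        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/types"
    )

    // checkBlobTx illustrates where the new sentinel errors apply.
    func checkBlobTx(tx *types.Transaction) error {
        if tx.Type() != types.BlobTxType {
            return nil
        }
        if tx.To() == nil {
            return core.ErrBlobTxCreate // blob transactions cannot create contracts
        }
        if len(tx.BlobHashes()) == 0 {
            return core.ErrMissingBlobHashes // at least one versioned hash is required
        }
        return nil
    }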
@@ -24,6 +24,7 @@ import (
     "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/core/vm"
+    "github.com/holiman/uint256"
 )
 
 // ChainContext supports retrieving headers and consensus parameters from the
@@ -129,12 +130,12 @@ func GetHashFn(ref *types.Header, chain ChainContext) func(n uint64) common.Hash
 
 // CanTransfer checks whether there are enough funds in the address' account to make a transfer.
 // This does not take the necessary gas in to account to make the transfer valid.
-func CanTransfer(db vm.StateDB, addr common.Address, amount *big.Int) bool {
+func CanTransfer(db vm.StateDB, addr common.Address, amount *uint256.Int) bool {
     return db.GetBalance(addr).Cmp(amount) >= 0
 }
 
 // Transfer subtracts amount from sender and adds amount to recipient using the given Db
-func Transfer(db vm.StateDB, sender, recipient common.Address, amount *big.Int) {
+func Transfer(db vm.StateDB, sender, recipient common.Address, amount *uint256.Int) {
     db.SubBalance(sender, amount)
     db.AddBalance(recipient, amount)
 }
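With the EVM balance plumbing switched to the fixed-width uint256 type, callers that still hold *big.Int values convert at the boundary. A minimal sketch, assuming a vm.StateDB and a legacy big.Int amount; moveFunds is illustrative, not part of the change:

    package funds

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/vm"
        "github.com/holiman/uint256"
    )

    // moveFunds adapts a legacy *big.Int amount to the new *uint256.Int helpers.
    func moveFunds(statedb vm.StateDB, from, to common.Address, value *big.Int) error {
        amount, overflow := uint256.FromBig(value)
        if overflow {
            return fmt.Errorf("amount %v does not fit in 256 bits", value)
        }
        if !core.CanTransfer(statedb, from, amount) {
            return core.ErrInsufficientFunds
        }
        core.Transfer(statedb, from, to, amount)
        return nil
    }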
@@ -74,8 +74,10 @@ func TestCreation(t *testing.T) {
     {15049999, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}},            // Last Arrow Glacier block
     {15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}},          // First Gray Glacier block
     {20000000, 1681338454, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}}, // Last Gray Glacier block
-    {20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}},          // First Shanghai block
-    {30000000, 2000000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}},          // Future Shanghai block
+    {20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // First Shanghai block
+    {30000000, 1710338134, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // Last Shanghai block
+    {40000000, 1710338135, ID{Hash: checksumToBytes(0x9f3d2254), Next: 0}},          // First Cancun block
+    {50000000, 2000000000, ID{Hash: checksumToBytes(0x9f3d2254), Next: 0}},          // Future Cancun block
     },
 },
 // Goerli test cases
@@ -106,7 +108,10 @@ func TestCreation(t *testing.T) {
     {1735370, 0, ID{Hash: checksumToBytes(0xfe3366e7), Next: 1735371}},              // Last London block
     {1735371, 0, ID{Hash: checksumToBytes(0xb96cbd13), Next: 1677557088}},           // First MergeNetsplit block
     {1735372, 1677557087, ID{Hash: checksumToBytes(0xb96cbd13), Next: 1677557088}},  // Last MergeNetsplit block
-    {1735372, 1677557088, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 0}},           // First Shanghai block
+    {1735372, 1677557088, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 1706655072}},  // First Shanghai block
+    {1735372, 1706655071, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 1706655072}},  // Last Shanghai block
+    {1735372, 1706655072, ID{Hash: checksumToBytes(0x88cf81d9), Next: 0}},           // First Cancun block
+    {1735372, 2706655072, ID{Hash: checksumToBytes(0x88cf81d9), Next: 0}},           // Future Cancun block
     },
 },
 // Holesky test cases
@@ -116,7 +121,10 @@ func TestCreation(t *testing.T) {
 []testcase{
     {0, 0, ID{Hash: checksumToBytes(0xc61a6098), Next: 1696000704}},   // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople, Petersburg, Istanbul, Berlin, London, Paris block
     {123, 0, ID{Hash: checksumToBytes(0xc61a6098), Next: 1696000704}}, // First MergeNetsplit block
-    {123, 1696000704, ID{Hash: checksumToBytes(0xfd4f016b), Next: 0}},          // Last MergeNetsplit block
+    {123, 1696000704, ID{Hash: checksumToBytes(0xfd4f016b), Next: 1707305664}}, // First Shanghai block
+    {123, 1707305663, ID{Hash: checksumToBytes(0xfd4f016b), Next: 1707305664}}, // Last Shanghai block
+    {123, 1707305664, ID{Hash: checksumToBytes(0x9b192ad0), Next: 0}},          // First Cancun block
+    {123, 2707305664, ID{Hash: checksumToBytes(0x9b192ad0), Next: 0}},          // Future Cancun block
     },
 },
 }
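The updated tables encode the Cancun activation timestamps: before the fork the ID keeps the Shanghai hash with Next pointing at the Cancun time, and once the fork passes the hash rolls over and Next drops to zero until another fork is scheduled. A small sketch of producing an ID for a given head, assuming the mainnet config bundled with this release; the head number and timestamp are taken from the table rather than from a live chain:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/forkid"
        "github.com/ethereum/go-ethereum/params"
    )

    func main() {
        genesis := core.DefaultGenesisBlock().ToBlock()
        // Head number and timestamp picked at the Cancun boundary used above.
        id := forkid.NewID(params.MainnetChainConfig, genesis, 40000000, 1710338135)
        fmt.Printf("fork id hash=%x next=%d\n", id.Hash, id.Next)
    }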
@@ -135,6 +143,7 @@ func TestValidation(t *testing.T) {
 // Config that has not timestamp enabled
 legacyConfig := *params.MainnetChainConfig
 legacyConfig.ShanghaiTime = nil
+legacyConfig.CancunTime = nil
 
 tests := []struct {
     config *params.ChainConfig
@@ -207,14 +216,10 @@ func TestValidation(t *testing.T) {
 // at some future block 88888888, for itself, but past block for local. Local is incompatible.
 //
 // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
-//
-// TODO(karalabe): This testcase will fail once mainnet gets timestamped forks, make legacy chain config
 {&legacyConfig, 88888888, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 88888888}, ErrLocalIncompatibleOrStale},
 
 // Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
 // fork) at block 7279999, before Petersburg. Local is incompatible.
-//
-// TODO(karalabe): This testcase will fail once mainnet gets timestamped forks, make legacy chain config
 {&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale},
 
 //------------------------------------
@@ -291,34 +296,25 @@ func TestValidation(t *testing.T) {
 // Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
 // also Shanghai, but it's not yet aware of Cancun (e.g. non updated node before the fork).
 // In this case we don't know if Cancun passed yet or not.
-//
-// TODO(karalabe): Enable this when Cancun is specced
-//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, nil},
+{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}, nil},
 
 // Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
 // also Shanghai, and it's also aware of Cancun (e.g. updated node before the fork). We
 // don't know if Cancun passed yet (will pass) or not.
-//
-// TODO(karalabe): Enable this when Cancun is specced and update next timestamp
-//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
+{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}, nil},
 
 // Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
 // also Shanghai, and it's also aware of some random fork (e.g. misconfigured Cancun). As
 // neither forks passed at neither nodes, they may mismatch, but we still connect for now.
-//
-// TODO(karalabe): Enable this when Cancun is specced
-//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: math.MaxUint64}, nil},
+{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: math.MaxUint64}, nil},
 
 // Local is mainnet exactly on Cancun, remote announces Shanghai + knowledge about Cancun. Remote
 // is simply out of sync, accept.
-//
-// TODO(karalabe): Enable this when Cancun is specced, update local head and time, next timestamp
-// {params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
+{params.MainnetChainConfig, 21000000, 1710338135, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}, nil},
 
 // Local is mainnet Cancun, remote announces Shanghai + knowledge about Cancun. Remote
 // is simply out of sync, accept.
-// TODO(karalabe): Enable this when Cancun is specced, update local head and time, next timestamp
-//{params.MainnetChainConfig, 21123456, 1678123456, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
+{params.MainnetChainConfig, 21123456, 1710338136, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}, nil},
 
 // Local is mainnet Prague, remote announces Shanghai + knowledge about Cancun. Remote
 // is definitely out of sync. It may or may not need the Prague update, we don't know yet.
@@ -327,9 +323,7 @@ func TestValidation(t *testing.T) {
 //{params.MainnetChainConfig, 0, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},
 
 // Local is mainnet Shanghai, remote announces Cancun. Local is out of sync, accept.
-//
-// TODO(karalabe): Enable this when Cancun is specced, update remote checksum
-//{params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x00000000), Next: 0}, nil},
+{params.MainnetChainConfig, 21000000, 1700000000, ID{Hash: checksumToBytes(0x9f3d2254), Next: 0}, nil},
 
 // Local is mainnet Shanghai, remote announces Cancun, but is not aware of Prague. Local
 // out of sync. Local also knows about a future fork, but that is uncertain yet.
@@ -339,9 +333,7 @@ func TestValidation(t *testing.T) {
 
 // Local is mainnet Cancun. remote announces Shanghai but is not aware of further forks.
 // Remote needs software update.
-//
-// TODO(karalabe): Enable this when Cancun is specced, update local head and time
-//{params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, ErrRemoteStale},
+{params.MainnetChainConfig, 21000000, 1710338135, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}, ErrRemoteStale},
 
 // Local is mainnet Shanghai, and isn't aware of more forks. Remote announces Shanghai +
 // 0xffffffff. Local needs software update, reject.
@@ -349,24 +341,20 @@ func TestValidation(t *testing.T) {
 
 // Local is mainnet Shanghai, and is aware of Cancun. Remote announces Cancun +
 // 0xffffffff. Local needs software update, reject.
-//
-// TODO(karalabe): Enable this when Cancun is specced, update remote checksum
-//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(checksumUpdate(0x00000000, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
+{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(checksumUpdate(0x9f3d2254, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
 
 // Local is mainnet Shanghai, remote is random Shanghai.
 {params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0x12345678), Next: 0}, ErrLocalIncompatibleOrStale},
 
-// Local is mainnet Shanghai, far in the future. Remote announces Gopherium (non existing fork)
+// Local is mainnet Cancun, far in the future. Remote announces Gopherium (non existing fork)
 // at some future timestamp 8888888888, for itself, but past block for local. Local is incompatible.
 //
 // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
-{params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0xdce96c2d), Next: 8888888888}, ErrLocalIncompatibleOrStale},
+{params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0x9f3d2254), Next: 8888888888}, ErrLocalIncompatibleOrStale},
 
 // Local is mainnet Shanghai. Remote is also in Shanghai, but announces Gopherium (non existing
 // fork) at timestamp 1668000000, before Cancun. Local is incompatible.
-//
-// TODO(karalabe): Enable this when Cancun is specced
-//{params.MainnetChainConfig, 20999999, 1677999999, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, ErrLocalIncompatibleOrStale},
+{params.MainnetChainConfig, 20999999, 1699999999, ID{Hash: checksumToBytes(0x71147644), Next: 1700000000}, ErrLocalIncompatibleOrStale},
 }
 genesis := core.DefaultGenesisBlock().ToBlock()
 for i, tt := range tests {
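These validation cases describe how a node judges a peer's advertised fork ID against its own schedule. A sketch of how that check would be wired up in calling code, using the static filter pinned to genesis (a full node would use forkid.NewFilter over the live chain so the head advances); validateRemote is a hypothetical wrapper:

    package peers

    import (
        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/forkid"
        "github.com/ethereum/go-ethereum/params"
    )

    // validateRemote builds a filter from the local chain rules and applies it to a
    // peer's advertised fork ID; incompatibilities surface as errors such as
    // forkid.ErrLocalIncompatibleOrStale or forkid.ErrRemoteStale, as in the table above.
    func validateRemote(remote forkid.ID) error {
        genesis := core.DefaultGenesisBlock().ToBlock()
        filter := forkid.NewStaticFilter(params.MainnetChainConfig, genesis)
        return filter(remote)
    }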
@@ -10,6 +10,7 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/common/hexutil"
     "github.com/ethereum/go-ethereum/common/math"
+    "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/params"
 )
 
@@ -26,7 +27,7 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
     Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
     Mixhash    common.Hash           `json:"mixHash"`
     Coinbase   common.Address        `json:"coinbase"`
-    Alloc      map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
+    Alloc      map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
     Number     math.HexOrDecimal64   `json:"number"`
     GasUsed    math.HexOrDecimal64   `json:"gasUsed"`
     ParentHash common.Hash           `json:"parentHash"`
@@ -44,7 +45,7 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
     enc.Mixhash = g.Mixhash
     enc.Coinbase = g.Coinbase
     if g.Alloc != nil {
-        enc.Alloc = make(map[common.UnprefixedAddress]GenesisAccount, len(g.Alloc))
+        enc.Alloc = make(map[common.UnprefixedAddress]types.Account, len(g.Alloc))
         for k, v := range g.Alloc {
             enc.Alloc[common.UnprefixedAddress(k)] = v
         }
@@ -69,7 +70,7 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
     Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
     Mixhash    *common.Hash          `json:"mixHash"`
     Coinbase   *common.Address       `json:"coinbase"`
-    Alloc      map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
+    Alloc      map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
     Number     *math.HexOrDecimal64  `json:"number"`
     GasUsed    *math.HexOrDecimal64  `json:"gasUsed"`
     ParentHash *common.Hash          `json:"parentHash"`
@@ -110,7 +111,7 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
     if dec.Alloc == nil {
         return errors.New("missing required field 'alloc' for Genesis")
     }
-    g.Alloc = make(GenesisAlloc, len(dec.Alloc))
+    g.Alloc = make(types.GenesisAlloc, len(dec.Alloc))
     for k, v := range dec.Alloc {
         g.Alloc[common.Address(k)] = v
     }
110 core/genesis.go
@@ -18,7 +18,6 @@ package core
 
 import (
     "bytes"
-    "encoding/hex"
     "encoding/json"
     "errors"
     "fmt"
@@ -37,14 +36,21 @@ import (
     "github.com/ethereum/go-ethereum/params"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/trie"
-    "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+    "github.com/ethereum/go-ethereum/triedb"
+    "github.com/ethereum/go-ethereum/triedb/pathdb"
+    "github.com/holiman/uint256"
 )
 
 //go:generate go run github.com/fjl/gencodec -type Genesis -field-override genesisSpecMarshaling -out gen_genesis.go
-//go:generate go run github.com/fjl/gencodec -type GenesisAccount -field-override genesisAccountMarshaling -out gen_genesis_account.go
 
 var errGenesisNoConfig = errors.New("genesis has no chain configuration")
 
+// Deprecated: use types.GenesisAccount instead.
+type GenesisAccount = types.Account
+
+// Deprecated: use types.GenesisAlloc instead.
+type GenesisAlloc = types.GenesisAlloc
+
 // Genesis specifies the header fields, state of a genesis block. It also defines hard
 // fork switch-over blocks through the chain configuration.
 type Genesis struct {
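Because GenesisAccount and GenesisAlloc become type aliases rather than new types, existing core callers keep compiling while the canonical definitions live in core/types. A quick sketch of the equivalence; the values are illustrative:

    package main

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/types"
    )

    func main() {
        // The alias means both spellings denote the identical type.
        var acct core.GenesisAccount = types.Account{Balance: big.NewInt(1)}
        alloc := core.GenesisAlloc{common.Address{0x01}: acct} // same type as types.GenesisAlloc
        fmt.Println(len(alloc))
    }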
@@ -56,7 +62,7 @@ type Genesis struct {
     Difficulty *big.Int       `json:"difficulty" gencodec:"required"`
     Mixhash    common.Hash    `json:"mixHash"`
     Coinbase   common.Address `json:"coinbase"`
-    Alloc      GenesisAlloc   `json:"alloc" gencodec:"required"`
+    Alloc      types.GenesisAlloc `json:"alloc" gencodec:"required"`
 
     // These fields are used for consensus tests. Please don't use them
     // in actual genesis blocks.
@@ -106,29 +112,14 @@ func ReadGenesis(db ethdb.Database) (*Genesis, error) {
     return &genesis, nil
 }
 
-// GenesisAlloc specifies the initial state that is part of the genesis block.
-type GenesisAlloc map[common.Address]GenesisAccount
-
-func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
-    m := make(map[common.UnprefixedAddress]GenesisAccount)
-    if err := json.Unmarshal(data, &m); err != nil {
-        return err
-    }
-    *ga = make(GenesisAlloc)
-    for addr, a := range m {
-        (*ga)[common.Address(addr)] = a
-    }
-    return nil
-}
-
-// hash computes the state root according to the genesis specification.
-func (ga *GenesisAlloc) hash(isVerkle bool) (common.Hash, error) {
+// hashAlloc computes the state root according to the genesis specification.
+func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) {
     // If a genesis-time verkle trie is requested, create a trie config
     // with the verkle trie enabled so that the tree can be initialized
     // as such.
-    var config *trie.Config
+    var config *triedb.Config
     if isVerkle {
-        config = &trie.Config{
+        config = &triedb.Config{
             PathDB:   pathdb.Defaults,
             IsVerkle: true,
         }
@@ -142,7 +133,7 @@ func (ga *GenesisAlloc) hash(isVerkle bool) (common.Hash, error) {
     }
     for addr, account := range *ga {
         if account.Balance != nil {
-            statedb.AddBalance(addr, account.Balance)
+            statedb.AddBalance(addr, uint256.MustFromBig(account.Balance))
         }
         statedb.SetCode(addr, account.Code)
         statedb.SetNonce(addr, account.Nonce)
@@ -153,17 +144,17 @@ func (ga *GenesisAlloc) hash(isVerkle bool) (common.Hash, error) {
     return statedb.Commit(0, false)
 }
 
-// flush is very similar with hash, but the main difference is all the generated
+// flushAlloc is very similar with hash, but the main difference is all the generated
 // states will be persisted into the given database. Also, the genesis state
 // specification will be flushed as well.
-func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error {
+func flushAlloc(ga *types.GenesisAlloc, db ethdb.Database, triedb *triedb.Database, blockhash common.Hash) error {
     statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil)
     if err != nil {
         return err
     }
     for addr, account := range *ga {
         if account.Balance != nil {
-            statedb.AddBalance(addr, account.Balance)
+            statedb.AddBalance(addr, uint256.MustFromBig(account.Balance))
         }
         statedb.SetCode(addr, account.Code)
         statedb.SetNonce(addr, account.Nonce)
@@ -190,15 +181,6 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhas
     return nil
 }
 
-// GenesisAccount is an account in the state of the genesis block.
-type GenesisAccount struct {
-    Code       []byte                      `json:"code,omitempty"`
-    Storage    map[common.Hash]common.Hash `json:"storage,omitempty"`
-    Balance    *big.Int                    `json:"balance" gencodec:"required"`
-    Nonce      uint64                      `json:"nonce,omitempty"`
-    PrivateKey []byte                      `json:"secretKey,omitempty"` // for tests
-}
-
 // field type overrides for gencodec
 type genesisSpecMarshaling struct {
     Nonce      math.HexOrDecimal64
@@ -208,40 +190,12 @@ type genesisSpecMarshaling struct {
     GasUsed       math.HexOrDecimal64
     Number        math.HexOrDecimal64
     Difficulty    *math.HexOrDecimal256
-    Alloc         map[common.UnprefixedAddress]GenesisAccount
+    Alloc         map[common.UnprefixedAddress]types.Account
     BaseFee       *math.HexOrDecimal256
     ExcessBlobGas *math.HexOrDecimal64
     BlobGasUsed   *math.HexOrDecimal64
 }
 
-type genesisAccountMarshaling struct {
-    Code       hexutil.Bytes
-    Balance    *math.HexOrDecimal256
-    Nonce      math.HexOrDecimal64
-    Storage    map[storageJSON]storageJSON
-    PrivateKey hexutil.Bytes
-}
-
-// storageJSON represents a 256 bit byte array, but allows less than 256 bits when
-// unmarshaling from hex.
-type storageJSON common.Hash
-
-func (h *storageJSON) UnmarshalText(text []byte) error {
-    text = bytes.TrimPrefix(text, []byte("0x"))
-    if len(text) > 64 {
-        return fmt.Errorf("too many hex characters in storage key/value %q", text)
-    }
-    offset := len(h) - len(text)/2 // pad on the left
-    if _, err := hex.Decode(h[offset:], text); err != nil {
-        return fmt.Errorf("invalid hex storage key/value %q", text)
-    }
-    return nil
-}
-
-func (h storageJSON) MarshalText() ([]byte, error) {
-    return hexutil.Bytes(h[:]).MarshalText()
-}
-
 // GenesisMismatchError is raised when trying to overwrite an existing
 // genesis block with an incompatible one.
 type GenesisMismatchError struct {
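The account struct, its gencodec overrides and the hex storage-key codec are not dropped; they move to core/types, so alloc JSON now round-trips through types.GenesisAlloc. A sketch of decoding an alloc blob with the relocated codec — the JSON literal here is illustrative, not taken from the repository:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/core/types"
    )

    func main() {
        blob := []byte(`{"0x0000000000000000000000000000000000000001": {"balance": "1"}}`)
        var alloc types.GenesisAlloc
        if err := alloc.UnmarshalJSON(blob); err != nil {
            panic(err)
        }
        fmt.Println("accounts:", len(alloc))
    }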
@@ -271,11 +225,11 @@ type ChainOverrides struct {
 // error is a *params.ConfigCompatError and the new, unwritten config is returned.
 //
 // The returned chain configuration is never nil.
-func SetupGenesisBlock(db ethdb.Database, triedb *trie.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
+func SetupGenesisBlock(db ethdb.Database, triedb *triedb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
     return SetupGenesisBlockWithOverride(db, triedb, genesis, nil)
 }
 
-func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, genesis *Genesis, overrides *ChainOverrides) (*params.ChainConfig, common.Hash, error) {
+func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, genesis *Genesis, overrides *ChainOverrides) (*params.ChainConfig, common.Hash, error) {
     if genesis != nil && genesis.Config == nil {
         return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
     }
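Call sites change only in the database type they hand over: the trie database constructor now lives in the top-level triedb package. A minimal sketch of setting up the default mainnet genesis against an in-memory database:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/triedb"
    )

    func main() {
        db := rawdb.NewMemoryDatabase()
        // SetupGenesisBlock now takes *triedb.Database instead of *trie.Database.
        tdb := triedb.NewDatabase(db, triedb.HashDefaults)
        config, hash, err := core.SetupGenesisBlock(db, tdb, core.DefaultGenesisBlock())
        if err != nil {
            panic(err)
        }
        fmt.Println("chain id:", config.ChainID, "genesis:", hash)
    }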
@@ -412,6 +366,8 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
     return g.Config
 case ghash == params.MainnetGenesisHash:
     return params.MainnetChainConfig
+case ghash == params.HoleskyGenesisHash:
+    return params.HoleskyChainConfig
 case ghash == params.SepoliaGenesisHash:
     return params.SepoliaChainConfig
 case ghash == params.GoerliGenesisHash:
@@ -429,7 +385,7 @@ func (g *Genesis) IsVerkle() bool {
 
 // ToBlock returns the genesis block according to genesis specification.
 func (g *Genesis) ToBlock() *types.Block {
-    root, err := g.Alloc.hash(g.IsVerkle())
+    root, err := hashAlloc(&g.Alloc, g.IsVerkle())
     if err != nil {
         panic(err)
     }
@@ -488,7 +444,7 @@ func (g *Genesis) ToBlock() *types.Block {
 
 // Commit writes the block and state of a genesis specification to the database.
 // The block is committed as the canonical head block.
-func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block, error) {
+func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Block, error) {
     block := g.ToBlock()
     if block.Number().Sign() != 0 {
         return nil, errors.New("can't commit genesis block with number > 0")
@@ -503,10 +459,10 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block
     if config.Clique != nil && len(block.Extra()) < 32+crypto.SignatureLength {
         return nil, errors.New("can't start clique chain without signers")
     }
-    // All the checks has passed, flush the states derived from the genesis
+    // All the checks has passed, flushAlloc the states derived from the genesis
     // specification as well as the specification itself into the provided
     // database.
-    if err := g.Alloc.flush(db, triedb, block.Hash()); err != nil {
+    if err := flushAlloc(&g.Alloc, db, triedb, block.Hash()); err != nil {
         return nil, err
     }
     rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
@@ -522,7 +478,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block
 
 // MustCommit writes the genesis block and state to db, panicking on error.
 // The block is committed as the canonical head block.
-func (g *Genesis) MustCommit(db ethdb.Database, triedb *trie.Database) *types.Block {
+func (g *Genesis) MustCommit(db ethdb.Database, triedb *triedb.Database) *types.Block {
     block, err := g.Commit(db, triedb)
     if err != nil {
         panic(err)
@@ -590,7 +546,7 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet *common.Address) *Genesis {
     GasLimit:   gasLimit,
     BaseFee:    big.NewInt(params.InitialBaseFee),
     Difficulty: big.NewInt(1),
-    Alloc: map[common.Address]GenesisAccount{
+    Alloc: map[common.Address]types.Account{
         common.BytesToAddress([]byte{1}): {Balance: big.NewInt(1)}, // ECRecover
         common.BytesToAddress([]byte{2}): {Balance: big.NewInt(1)}, // SHA256
         common.BytesToAddress([]byte{3}): {Balance: big.NewInt(1)}, // RIPEMD
@@ -603,12 +559,12 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet *common.Address) *Genesis {
     },
 }
 if faucet != nil {
-    genesis.Alloc[*faucet] = GenesisAccount{Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))}
+    genesis.Alloc[*faucet] = types.Account{Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))}
 }
 return genesis
 }
 
-func decodePrealloc(data string) GenesisAlloc {
+func decodePrealloc(data string) types.GenesisAlloc {
 var p []struct {
     Addr    *big.Int
     Balance *big.Int
@@ -624,9 +580,9 @@ func decodePrealloc(data string) GenesisAlloc {
 if err := rlp.NewStream(strings.NewReader(data), 0).Decode(&p); err != nil {
     panic(err)
 }
-ga := make(GenesisAlloc, len(p))
+ga := make(types.GenesisAlloc, len(p))
 for _, account := range p {
-    acc := GenesisAccount{Balance: account.Balance}
+    acc := types.Account{Balance: account.Balance}
     if account.Misc != nil {
         acc.Nonce = account.Misc.Nonce
         acc.Code = account.Misc.Code
@@ -27,18 +27,19 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/consensus/ethash"
     "github.com/ethereum/go-ethereum/core/rawdb"
+    "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/params"
-    "github.com/ethereum/go-ethereum/trie"
-    "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+    "github.com/ethereum/go-ethereum/triedb"
+    "github.com/ethereum/go-ethereum/triedb/pathdb"
 )
 
 func TestInvalidCliqueConfig(t *testing.T) {
     block := DefaultGoerliGenesisBlock()
     block.ExtraData = []byte{}
     db := rawdb.NewMemoryDatabase()
-    if _, err := block.Commit(db, trie.NewDatabase(db, nil)); err == nil {
+    if _, err := block.Commit(db, triedb.NewDatabase(db, nil)); err == nil {
         t.Fatal("Expected error on invalid clique config")
     }
 }
@@ -53,7 +54,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
     customghash = common.HexToHash("0x89c99d90b79719238d2645c7642f2c9295246e80775b38cfd162b696817fbd50")
     customg     = Genesis{
         Config: &params.ChainConfig{HomesteadBlock: big.NewInt(3)},
-        Alloc: GenesisAlloc{
+        Alloc: types.GenesisAlloc{
             {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
         },
     }
@@ -71,7 +72,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
 {
     name: "genesis without ChainConfig",
     fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
-        return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), new(Genesis))
+        return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), new(Genesis))
     },
     wantErr:    errGenesisNoConfig,
     wantConfig: params.AllEthashProtocolChanges,
@@ -79,7 +80,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
 {
     name: "no block in DB, genesis == nil",
     fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
-        return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), nil)
+        return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil)
     },
     wantHash:   params.MainnetGenesisHash,
     wantConfig: params.MainnetChainConfig,
@@ -87,8 +88,8 @@ func testSetupGenesis(t *testing.T, scheme string) {
 {
     name: "mainnet block in DB, genesis == nil",
     fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
-        DefaultGenesisBlock().MustCommit(db, trie.NewDatabase(db, newDbConfig(scheme)))
-        return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), nil)
+        DefaultGenesisBlock().MustCommit(db, triedb.NewDatabase(db, newDbConfig(scheme)))
+        return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil)
     },
     wantHash:   params.MainnetGenesisHash,
     wantConfig: params.MainnetChainConfig,
@@ -96,7 +97,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
 {
     name: "custom block in DB, genesis == nil",
     fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
-        tdb := trie.NewDatabase(db, newDbConfig(scheme))
+        tdb := triedb.NewDatabase(db, newDbConfig(scheme))
         customg.Commit(db, tdb)
         return SetupGenesisBlock(db, tdb, nil)
     },
@@ -106,7 +107,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
 {
     name: "custom block in DB, genesis == goerli",
     fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
-        tdb := trie.NewDatabase(db, newDbConfig(scheme))
+        tdb := triedb.NewDatabase(db, newDbConfig(scheme))
         customg.Commit(db, tdb)
         return SetupGenesisBlock(db, tdb, DefaultGoerliGenesisBlock())
     },
@@ -117,7 +118,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
 {
     name: "compatible config in DB",
     fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
-        tdb := trie.NewDatabase(db, newDbConfig(scheme))
+        tdb := triedb.NewDatabase(db, newDbConfig(scheme))
         oldcustomg.Commit(db, tdb)
         return SetupGenesisBlock(db, tdb, &customg)
     },
@@ -129,7 +130,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
     fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
         // Commit the 'old' genesis block with Homestead transition at #2.
         // Advance to block #4, past the homestead transition block of customg.
-        tdb := trie.NewDatabase(db, newDbConfig(scheme))
+        tdb := triedb.NewDatabase(db, newDbConfig(scheme))
         oldcustomg.Commit(db, tdb)
 
         bc, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
@@ -188,7 +189,7 @@ func TestGenesisHashes(t *testing.T) {
 } {
     // Test via MustCommit
     db := rawdb.NewMemoryDatabase()
-    if have := c.genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)).Hash(); have != c.want {
+    if have := c.genesis.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults)).Hash(); have != c.want {
         t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex())
     }
     // Test via ToBlock
@@ -206,7 +207,7 @@ func TestGenesis_Commit(t *testing.T) {
 }
 
 db := rawdb.NewMemoryDatabase()
-genesisBlock := genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
+genesisBlock := genesis.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults))
 
 if genesis.Difficulty != nil {
     t.Fatalf("assumption wrong")
@@ -228,16 +229,16 @@ func TestGenesis_Commit(t *testing.T) {
 func TestReadWriteGenesisAlloc(t *testing.T) {
     var (
         db    = rawdb.NewMemoryDatabase()
-        alloc = &GenesisAlloc{
+        alloc = &types.GenesisAlloc{
            {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
            {2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}},
         }
-        hash, _ = alloc.hash(false)
+        hash, _ = hashAlloc(alloc, false)
     )
     blob, _ := json.Marshal(alloc)
     rawdb.WriteGenesisStateSpec(db, hash, blob)
 
-    var reload GenesisAlloc
+    var reload types.GenesisAlloc
     err := reload.UnmarshalJSON(rawdb.ReadGenesisStateSpec(db, hash))
     if err != nil {
         t.Fatalf("Failed to load genesis state %v", err)
@@ -256,11 +257,11 @@ func TestReadWriteGenesisAlloc(t *testing.T) {
     }
 }
 
-func newDbConfig(scheme string) *trie.Config {
+func newDbConfig(scheme string) *triedb.Config {
     if scheme == rawdb.HashScheme {
-        return trie.HashDefaults
+        return triedb.HashDefaults
     }
-    return &trie.Config{PathDB: pathdb.Defaults}
+    return &triedb.Config{PathDB: pathdb.Defaults}
 }
 
 func TestVerkleGenesisCommit(t *testing.T) {
@@ -298,7 +299,7 @@ func TestVerkleGenesisCommit(t *testing.T) {
     Config:     verkleConfig,
     Timestamp:  verkleTime,
     Difficulty: big.NewInt(0),
-    Alloc: GenesisAlloc{
+    Alloc: types.GenesisAlloc{
         {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
     },
 }
@@ -310,7 +311,7 @@ func TestVerkleGenesisCommit(t *testing.T) {
 }
 
 db := rawdb.NewMemoryDatabase()
-triedb := trie.NewDatabase(db, &trie.Config{IsVerkle: true, PathDB: pathdb.Defaults})
+triedb := triedb.NewDatabase(db, &triedb.Config{IsVerkle: true, PathDB: pathdb.Defaults})
 block := genesis.MustCommit(db, triedb)
 if !bytes.Equal(block.Root().Bytes(), expected) {
     t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got)
@@ -28,7 +28,7 @@ import (
     "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/params"
-    "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
 )
 
 func verifyUnbrokenCanonchain(hc *HeaderChain) error {
@@ -73,7 +73,7 @@ func TestHeaderInsertion(t *testing.T) {
     db    = rawdb.NewMemoryDatabase()
     gspec = &Genesis{BaseFee: big.NewInt(params.InitialBaseFee), Config: params.AllEthashProtocolChanges}
 )
-gspec.Commit(db, trie.NewDatabase(db, nil))
+gspec.Commit(db, triedb.NewDatabase(db, nil))
 hc, err := NewHeaderChain(db, gspec.Config, ethash.NewFaker(), func() bool { return false })
 if err != nil {
     t.Fatal(err)
@@ -278,23 +278,6 @@ func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
     }
 }
 
-// ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
-func ReadFastTxLookupLimit(db ethdb.KeyValueReader) *uint64 {
-    data, _ := db.Get(fastTxLookupLimitKey)
-    if len(data) != 8 {
-        return nil
-    }
-    number := binary.BigEndian.Uint64(data)
-    return &number
-}
-
-// WriteFastTxLookupLimit stores the txlookup limit used in fast sync into database.
-func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
-    if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
-        log.Crit("Failed to store transaction lookup limit for fast sync", "err", err)
-    }
-}
-
 // ReadHeaderRange returns the rlp-encoded headers, starting at 'number', and going
 // backwards towards genesis. This method assumes that the caller already has
 // placed a cap on count, to prevent DoS issues.
@@ -178,7 +178,7 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool
 //
 // There is a passed channel, the whole procedure will be interrupted if any
 // signal received.
-func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
+func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool, report bool) {
     // short circuit for invalid range
     if from >= to {
         return
@@ -188,13 +188,13 @@ func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan
         batch = db.NewBatch()
         start = time.Now()
         logged = start.Add(-7 * time.Second)

         // Since we iterate in reverse, we expect the first number to come
         // in to be [to-1]. Therefore, setting lastNum to means that the
-        // prqueue gap-evaluation will work correctly
+        // queue gap-evaluation will work correctly
         lastNum = to
         queue = prque.New[int64, *blockTxHashes](nil)
-        // for stats reporting
-        blocks, txs = 0, 0
+        blocks, txs = 0, 0 // for stats reporting
     )
     for chanDelivery := range hashesCh {
         // Push the delivery into the queue and process contiguous ranges.
@@ -240,11 +240,15 @@ func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan
         log.Crit("Failed writing batch to db", "error", err)
         return
     }
+    logger := log.Debug
+    if report {
+        logger = log.Info
+    }
     select {
     case <-interrupt:
-        log.Debug("Transaction indexing interrupted", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
+        logger("Transaction indexing interrupted", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
     default:
-        log.Debug("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
+        logger("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start)))
     }
 }
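Note on the hunks above: the new report argument only selects the logging verbosity, Info when the caller wants user-visible progress and Debug for background maintenance. A minimal sketch of the pattern, assuming go-ethereum's log package; reportProgress is a hypothetical helper, not part of this change:

package main

import "github.com/ethereum/go-ethereum/log"

// reportProgress mirrors the logger selection above: one call site, two verbosities.
func reportProgress(report bool, blocks, txs int) {
    logger := log.Debug
    if report {
        logger = log.Info
    }
    logger("Indexed transactions", "blocks", blocks, "txs", txs)
}

func main() {
    reportProgress(true, 128, 4096) // emitted at Info level
}

Keeping a single logging call site avoids duplicating the key/value list for the two levels.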
@@ -257,20 +261,20 @@ func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan
 //
 // There is a passed channel, the whole procedure will be interrupted if any
 // signal received.
-func IndexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}) {
-    indexTransactions(db, from, to, interrupt, nil)
+func IndexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, report bool) {
+    indexTransactions(db, from, to, interrupt, nil, report)
 }

 // indexTransactionsForTesting is the internal debug version with an additional hook.
 func indexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
-    indexTransactions(db, from, to, interrupt, hook)
+    indexTransactions(db, from, to, interrupt, hook, false)
 }

 // unindexTransactions removes txlookup indices of the specified block range.
 //
 // There is a passed channel, the whole procedure will be interrupted if any
 // signal received.
-func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
+func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool, report bool) {
     // short circuit for invalid range
     if from >= to {
         return
@@ -280,12 +284,12 @@ func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt ch
         batch = db.NewBatch()
         start = time.Now()
         logged = start.Add(-7 * time.Second)

         // we expect the first number to come in to be [from]. Therefore, setting
-        // nextNum to from means that the prqueue gap-evaluation will work correctly
+        // nextNum to from means that the queue gap-evaluation will work correctly
         nextNum = from
         queue = prque.New[int64, *blockTxHashes](nil)
-        // for stats reporting
-        blocks, txs = 0, 0
+        blocks, txs = 0, 0 // for stats reporting
     )
     // Otherwise spin up the concurrent iterator and unindexer
     for delivery := range hashesCh {
@@ -332,11 +336,15 @@ func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt ch
         log.Crit("Failed writing batch to db", "error", err)
         return
     }
+    logger := log.Debug
+    if report {
+        logger = log.Info
+    }
     select {
     case <-interrupt:
-        log.Debug("Transaction unindexing interrupted", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
+        logger("Transaction unindexing interrupted", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
     default:
-        log.Debug("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
+        logger("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start)))
     }
 }

@@ -345,11 +353,11 @@ func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt ch
 //
 // There is a passed channel, the whole procedure will be interrupted if any
 // signal received.
-func UnindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}) {
-    unindexTransactions(db, from, to, interrupt, nil)
+func UnindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, report bool) {
+    unindexTransactions(db, from, to, interrupt, nil, report)
 }

 // unindexTransactionsForTesting is the internal debug version with an additional hook.
 func unindexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) {
-    unindexTransactions(db, from, to, interrupt, hook)
+    unindexTransactions(db, from, to, interrupt, hook, false)
 }
@@ -162,18 +162,18 @@ func TestIndexTransactions(t *testing.T) {
             t.Fatalf("Transaction tail mismatch")
         }
     }
-    IndexTransactions(chainDb, 5, 11, nil)
+    IndexTransactions(chainDb, 5, 11, nil, false)
     verify(5, 11, true, 5)
     verify(0, 5, false, 5)

-    IndexTransactions(chainDb, 0, 5, nil)
+    IndexTransactions(chainDb, 0, 5, nil, false)
     verify(0, 11, true, 0)

-    UnindexTransactions(chainDb, 0, 5, nil)
+    UnindexTransactions(chainDb, 0, 5, nil, false)
     verify(5, 11, true, 5)
     verify(0, 5, false, 5)

-    UnindexTransactions(chainDb, 5, 11, nil)
+    UnindexTransactions(chainDb, 5, 11, nil, false)
     verify(0, 11, false, 11)

     // Testing corner cases
@@ -190,7 +190,7 @@ func TestIndexTransactions(t *testing.T) {
     })
     verify(9, 11, true, 9)
     verify(0, 9, false, 9)
-    IndexTransactions(chainDb, 0, 9, nil)
+    IndexTransactions(chainDb, 0, 9, nil, false)

     signal = make(chan struct{})
     var once2 sync.Once
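Because IndexTransactions and UnindexTransactions gained the extra flag, every caller now has to pass it; the tests above simply pass false. A hedged sketch of a hypothetical caller, using only the signatures shown in the hunks above (reindexRange is illustrative):

package main

import (
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb"
)

// reindexRange drops and rebuilds the lookup index for blocks [from, to),
// logging progress at Info level because report is set to true.
func reindexRange(db ethdb.Database, from, to uint64) {
    interrupt := make(chan struct{}) // close it to abort the run early
    rawdb.UnindexTransactions(db, from, to, interrupt, true)
    rawdb.IndexTransactions(db, from, to, interrupt, true)
}

func main() {
    reindexRange(rawdb.NewMemoryDatabase(), 0, 0) // empty range: both calls short-circuit
}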
@@ -657,7 +657,6 @@ func ReadChainMetadata(db ethdb.KeyValueStore) [][]string {
         {"snapshotRecoveryNumber", pp(ReadSnapshotRecoveryNumber(db))},
         {"snapshotRoot", fmt.Sprintf("%v", ReadSnapshotRoot(db))},
         {"txIndexTail", pp(ReadTxIndexTail(db))},
-        {"fastTxLookupLimit", pp(ReadFastTxLookupLimit(db))},
     }
     if b := ReadSkeletonSyncStatus(db); b != nil {
         data = append(data, []string{"SkeletonSyncStatus", string(b)})
@@ -80,6 +80,8 @@ var (
     txIndexTailKey = []byte("TransactionIndexTail")

     // fastTxLookupLimitKey tracks the transaction lookup limit during fast sync.
+    // This flag is deprecated, it's kept to avoid reporting errors when inspect
+    // database.
     fastTxLookupLimitKey = []byte("FastTransactionLookupLimit")

     // badBlockKey tracks the list of bad blocks seen by local
@@ -41,7 +41,7 @@ func getBlock(transactions int, uncles int, dataSize int) *types.Block {
         funds = big.NewInt(1_000_000_000_000_000_000)
         gspec = &Genesis{
             Config: params.TestChainConfig,
-            Alloc: GenesisAlloc{address: {Balance: funds}},
+            Alloc: types.GenesisAlloc{address: {Balance: funds}},
         }
     )
     // We need to generate as many blocks +1 as uncles
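The genesis allocation type is now referenced through core/types. A sketch of how the same spec might be written outside the core package, assuming the Alloc field accepts types.GenesisAlloc as the hunk above shows; the address and balance are illustrative values:

package main

import (
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/params"
)

func testGenesis() *core.Genesis {
    addr := common.HexToAddress("0x0000000000000000000000000000000000000001") // illustrative
    funds := big.NewInt(1_000_000_000_000_000_000)
    return &core.Genesis{
        Config: params.TestChainConfig,
        Alloc:  types.GenesisAlloc{addr: {Balance: funds}},
    }
}

func main() { _ = testGenesis() }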
@@ -30,6 +30,7 @@ import (
     "github.com/ethereum/go-ethereum/trie"
     "github.com/ethereum/go-ethereum/trie/trienode"
     "github.com/ethereum/go-ethereum/trie/utils"
+    "github.com/ethereum/go-ethereum/triedb"
 )

 const (
@@ -67,7 +68,7 @@ type Database interface {
     DiskDB() ethdb.KeyValueStore

     // TrieDB returns the underlying trie database for managing trie nodes.
-    TrieDB() *trie.Database
+    TrieDB() *triedb.Database
 }

 // Trie is a Ethereum Merkle Patricia trie.
@@ -150,17 +151,17 @@ func NewDatabase(db ethdb.Database) Database {
 // NewDatabaseWithConfig creates a backing store for state. The returned database
 // is safe for concurrent use and retains a lot of collapsed RLP trie nodes in a
 // large memory cache.
-func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
+func NewDatabaseWithConfig(db ethdb.Database, config *triedb.Config) Database {
     return &cachingDB{
         disk: db,
         codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
         codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
-        triedb: trie.NewDatabase(db, config),
+        triedb: triedb.NewDatabase(db, config),
     }
 }

 // NewDatabaseWithNodeDB creates a state database with an already initialized node database.
-func NewDatabaseWithNodeDB(db ethdb.Database, triedb *trie.Database) Database {
+func NewDatabaseWithNodeDB(db ethdb.Database, triedb *triedb.Database) Database {
     return &cachingDB{
         disk: db,
         codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
@@ -173,7 +174,7 @@ type cachingDB struct {
     disk ethdb.KeyValueStore
     codeSizeCache *lru.Cache[common.Hash, int]
     codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
-    triedb *trie.Database
+    triedb *triedb.Database
 }

 // OpenTrie opens the main account trie at a specific root hash.
@@ -260,6 +261,6 @@ func (db *cachingDB) DiskDB() ethdb.KeyValueStore {
 }

 // TrieDB retrieves any intermediate trie-node caching layer.
-func (db *cachingDB) TrieDB() *trie.Database {
+func (db *cachingDB) TrieDB() *triedb.Database {
     return db.triedb
 }
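The trie database now comes from the standalone triedb package instead of trie.Database. A minimal sketch of wiring it into the state layer against an in-memory backend, using only constructors that appear in these hunks; newStateDatabase is a hypothetical helper and the hash-based scheme is picked just to mirror the pruner hunks further down:

package main

import (
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/triedb"
)

// newStateDatabase builds the node database with triedb.NewDatabase and hands it
// to the state layer via NewDatabaseWithNodeDB.
func newStateDatabase() state.Database {
    diskdb := rawdb.NewMemoryDatabase()
    tdb := triedb.NewDatabase(diskdb, triedb.HashDefaults) // legacy hash-based scheme
    return state.NewDatabaseWithNodeDB(diskdb, tdb)
}

func main() { _ = newStateDatabase() }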
@@ -17,9 +17,8 @@
 package state

 import (
-    "math/big"
-
     "github.com/ethereum/go-ethereum/common"
+    "github.com/holiman/uint256"
 )

 // journalEntry is a modification entry in the state change journal that can be
@@ -103,13 +102,13 @@ type (
     selfDestructChange struct {
         account *common.Address
         prev bool // whether account had already self-destructed
-        prevbalance *big.Int
+        prevbalance *uint256.Int
     }

     // Changes to individual accounts.
     balanceChange struct {
         account *common.Address
-        prev *big.Int
+        prev *uint256.Int
     }
     nonceChange struct {
         account *common.Address
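Journal entries now keep balances as *uint256.Int (github.com/holiman/uint256) instead of *big.Int; account balances always fit in 256 bits, so the fixed-size type avoids big.Int's arbitrary-precision overhead. A small self-contained sketch of the type in use, independent of the journal itself:

package main

import (
    "fmt"

    "github.com/holiman/uint256"
)

func main() {
    balance := uint256.NewInt(1_000_000_000_000_000_000) // 1 ether in wei
    prev := new(uint256.Int).Set(balance)                // copy, as a journal entry would keep
    balance.Add(balance, uint256.NewInt(42))             // in-place arithmetic, no reallocation
    fmt.Println(prev, balance)
}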
@@ -27,17 +27,10 @@ import (
     bloomfilter "github.com/holiman/bloomfilter/v2"
 )

-// stateBloomHasher is a wrapper around a byte blob to satisfy the interface API
-// requirements of the bloom library used. It's used to convert a trie hash or
-// contract code hash into a 64 bit mini hash.
-type stateBloomHasher []byte
-
-func (f stateBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
-func (f stateBloomHasher) Sum(b []byte) []byte { panic("not implemented") }
-func (f stateBloomHasher) Reset() { panic("not implemented") }
-func (f stateBloomHasher) BlockSize() int { panic("not implemented") }
-func (f stateBloomHasher) Size() int { return 8 }
-func (f stateBloomHasher) Sum64() uint64 { return binary.BigEndian.Uint64(f) }
+// stateBloomHash is used to convert a trie hash or contract code hash into a 64 bit mini hash.
+func stateBloomHash(f []byte) uint64 {
+    return binary.BigEndian.Uint64(f)
+}

 // stateBloom is a bloom filter used during the state conversion(snapshot->state).
 // The keys of all generated entries will be recorded here so that in the pruning
@@ -113,10 +106,10 @@ func (bloom *stateBloom) Put(key []byte, value []byte) error {
         if !isCode {
             return errors.New("invalid entry")
         }
-        bloom.bloom.Add(stateBloomHasher(codeKey))
+        bloom.bloom.AddHash(stateBloomHash(codeKey))
         return nil
     }
-    bloom.bloom.Add(stateBloomHasher(key))
+    bloom.bloom.AddHash(stateBloomHash(key))
     return nil
 }

@@ -128,5 +121,5 @@ func (bloom *stateBloom) Delete(key []byte) error { panic("not supported") }
 // - If it says yes, the key may be contained
 // - If it says no, the key is definitely not contained.
 func (bloom *stateBloom) Contain(key []byte) bool {
-    return bloom.bloom.Contains(stateBloomHasher(key))
+    return bloom.bloom.ContainsHash(stateBloomHash(key))
 }
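With the hash.Hash64 shim types gone, callers reduce each key to a 64-bit mini hash themselves and hand it to the filter through AddHash and ContainsHash. A self-contained sketch of the pattern; the filter sizing below is illustrative, not the pruner's actual parameters:

package main

import (
    "encoding/binary"
    "fmt"

    bloomfilter "github.com/holiman/bloomfilter/v2"
)

func main() {
    filter, err := bloomfilter.New(8*1024*1024, 4) // m bits, k hash functions (illustrative)
    if err != nil {
        panic(err)
    }
    key := make([]byte, 32)              // stand-in for a trie node or contract code hash
    mini := binary.BigEndian.Uint64(key) // same reduction as stateBloomHash above
    filter.AddHash(mini)
    fmt.Println(filter.ContainsHash(mini)) // true; false positives possible, no false negatives
}

Passing a precomputed uint64 avoids allocating a wrapper value per key just to satisfy the hash.Hash64 interface.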
@@ -35,6 +35,7 @@ import (
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
 )

 const (
@@ -86,7 +87,7 @@ func NewPruner(db ethdb.Database, config Config) (*Pruner, error) {
         return nil, errors.New("failed to load head block")
     }
     // Offline pruning is only supported in legacy hash based scheme.
-    triedb := trie.NewDatabase(db, trie.HashDefaults)
+    triedb := triedb.NewDatabase(db, triedb.HashDefaults)

     snapconfig := snapshot.Config{
         CacheSize: 256,
@@ -121,7 +122,7 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
     // the trie nodes(and codes) belong to the active state will be filtered
     // out. A very small part of stale tries will also be filtered because of
     // the false-positive rate of bloom filter. But the assumption is held here
-    // that the false-positive is low enough(~0.05%). The probablity of the
+    // that the false-positive is low enough(~0.05%). The probability of the
     // dangling node is the state root is super low. So the dangling nodes in
     // theory will never ever be visited again.
     var (
@@ -366,7 +367,7 @@ func RecoverPruning(datadir string, db ethdb.Database) error {
         AsyncBuild: false,
     }
     // Offline pruning is only supported in legacy hash based scheme.
-    triedb := trie.NewDatabase(db, trie.HashDefaults)
+    triedb := triedb.NewDatabase(db, triedb.HashDefaults)
     snaptree, err := snapshot.New(snapconfig, db, triedb, headBlock.Root())
     if err != nil {
         return err // The relevant snapshot(s) might not exist
@@ -409,7 +410,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
     if genesis == nil {
         return errors.New("missing genesis block")
     }
-    t, err := trie.NewStateTrie(trie.StateTrieID(genesis.Root()), trie.NewDatabase(db, trie.HashDefaults))
+    t, err := trie.NewStateTrie(trie.StateTrieID(genesis.Root()), triedb.NewDatabase(db, triedb.HashDefaults))
     if err != nil {
         return err
     }
@@ -433,7 +434,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
             }
             if acc.Root != types.EmptyRootHash {
                 id := trie.StorageTrieID(genesis.Root(), common.BytesToHash(accIter.LeafKey()), acc.Root)
-                storageTrie, err := trie.NewStateTrie(id, trie.NewDatabase(db, trie.HashDefaults))
+                storageTrie, err := trie.NewStateTrie(id, triedb.NewDatabase(db, triedb.HashDefaults))
                 if err != nil {
                     return err
                 }
@@ -43,7 +43,7 @@ var (
     aggregatorMemoryLimit = uint64(4 * 1024 * 1024)

     // aggregatorItemLimit is an approximate number of items that will end up
-    // in the agregator layer before it's flushed out to disk. A plain account
+    // in the aggregator layer before it's flushed out to disk. A plain account
     // weighs around 14B (+hash), a storage slot 32B (+hash), a deleted slot
     // 0B (+hash). Slots are mostly set/unset in lockstep, so that average at
     // 16B (+hash). All in all, the average entry seems to be 15+32=47B. Use a
@@ -124,47 +124,20 @@ type diffLayer struct {
     lock sync.RWMutex
 }

-// destructBloomHasher is a wrapper around a common.Hash to satisfy the interface
-// API requirements of the bloom library used. It's used to convert a destruct
-// event into a 64 bit mini hash.
-type destructBloomHasher common.Hash
-
-func (h destructBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
-func (h destructBloomHasher) Sum(b []byte) []byte { panic("not implemented") }
-func (h destructBloomHasher) Reset() { panic("not implemented") }
-func (h destructBloomHasher) BlockSize() int { panic("not implemented") }
-func (h destructBloomHasher) Size() int { return 8 }
-func (h destructBloomHasher) Sum64() uint64 {
+// destructBloomHash is used to convert a destruct event into a 64 bit mini hash.
+func destructBloomHash(h common.Hash) uint64 {
     return binary.BigEndian.Uint64(h[bloomDestructHasherOffset : bloomDestructHasherOffset+8])
 }

-// accountBloomHasher is a wrapper around a common.Hash to satisfy the interface
-// API requirements of the bloom library used. It's used to convert an account
-// hash into a 64 bit mini hash.
-type accountBloomHasher common.Hash
-
-func (h accountBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
-func (h accountBloomHasher) Sum(b []byte) []byte { panic("not implemented") }
-func (h accountBloomHasher) Reset() { panic("not implemented") }
-func (h accountBloomHasher) BlockSize() int { panic("not implemented") }
-func (h accountBloomHasher) Size() int { return 8 }
-func (h accountBloomHasher) Sum64() uint64 {
+// accountBloomHash is used to convert an account hash into a 64 bit mini hash.
+func accountBloomHash(h common.Hash) uint64 {
     return binary.BigEndian.Uint64(h[bloomAccountHasherOffset : bloomAccountHasherOffset+8])
 }

-// storageBloomHasher is a wrapper around a [2]common.Hash to satisfy the interface
-// API requirements of the bloom library used. It's used to convert an account
-// hash into a 64 bit mini hash.
-type storageBloomHasher [2]common.Hash
-
-func (h storageBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") }
-func (h storageBloomHasher) Sum(b []byte) []byte { panic("not implemented") }
-func (h storageBloomHasher) Reset() { panic("not implemented") }
-func (h storageBloomHasher) BlockSize() int { panic("not implemented") }
-func (h storageBloomHasher) Size() int { return 8 }
-func (h storageBloomHasher) Sum64() uint64 {
-    return binary.BigEndian.Uint64(h[0][bloomStorageHasherOffset:bloomStorageHasherOffset+8]) ^
-        binary.BigEndian.Uint64(h[1][bloomStorageHasherOffset:bloomStorageHasherOffset+8])
+// storageBloomHash is used to convert an account hash and a storage hash into a 64 bit mini hash.
+func storageBloomHash(h0, h1 common.Hash) uint64 {
+    return binary.BigEndian.Uint64(h0[bloomStorageHasherOffset:bloomStorageHasherOffset+8]) ^
+        binary.BigEndian.Uint64(h1[bloomStorageHasherOffset:bloomStorageHasherOffset+8])
 }

 // newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low
@@ -233,14 +206,14 @@ func (dl *diffLayer) rebloom(origin *diskLayer) {
     }
     // Iterate over all the accounts and storage slots and index them
     for hash := range dl.destructSet {
-        dl.diffed.Add(destructBloomHasher(hash))
+        dl.diffed.AddHash(destructBloomHash(hash))
     }
     for hash := range dl.accountData {
-        dl.diffed.Add(accountBloomHasher(hash))
+        dl.diffed.AddHash(accountBloomHash(hash))
     }
     for accountHash, slots := range dl.storageData {
         for storageHash := range slots {
-            dl.diffed.Add(storageBloomHasher{accountHash, storageHash})
+            dl.diffed.AddHash(storageBloomHash(accountHash, storageHash))
         }
     }
     // Calculate the current false positive rate and update the error rate meter.
@@ -301,9 +274,9 @@ func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
     }
     // Check the bloom filter first whether there's even a point in reaching into
     // all the maps in all the layers below
-    hit := dl.diffed.Contains(accountBloomHasher(hash))
+    hit := dl.diffed.ContainsHash(accountBloomHash(hash))
     if !hit {
-        hit = dl.diffed.Contains(destructBloomHasher(hash))
+        hit = dl.diffed.ContainsHash(destructBloomHash(hash))
     }
     var origin *diskLayer
     if !hit {
@@ -372,9 +345,9 @@ func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
         dl.lock.RUnlock()
         return nil, ErrSnapshotStale
     }
-    hit := dl.diffed.Contains(storageBloomHasher{accountHash, storageHash})
+    hit := dl.diffed.ContainsHash(storageBloomHash(accountHash, storageHash))
     if !hit {
-        hit = dl.diffed.Contains(destructBloomHasher(accountHash))
+        hit = dl.diffed.ContainsHash(destructBloomHash(accountHash))
     }
     var origin *diskLayer
     if !hit {
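For storage entries the mini hash has to cover both the account hash and the slot hash, so storageBloomHash folds the two together with an XOR of 8-byte windows; the real code reads those windows at a package-level offset, while the sketch below uses a fixed offset of zero purely for illustration:

package main

import (
    "encoding/binary"
    "fmt"
)

// combinedMiniHash mimics the shape of storageBloomHash: take 8 bytes from each
// 32-byte hash and XOR them into a single 64-bit bloom key.
func combinedMiniHash(accountHash, storageHash [32]byte) uint64 {
    return binary.BigEndian.Uint64(accountHash[0:8]) ^ binary.BigEndian.Uint64(storageHash[0:8])
}

func main() {
    var a, s [32]byte
    a[7], s[7] = 1, 2
    fmt.Printf("%#x\n", combinedMiniHash(a, s)) // 0x3
}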
@@ -26,13 +26,13 @@ import (
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/rlp"
-    "github.com/ethereum/go-ethereum/trie"
+    "github.com/ethereum/go-ethereum/triedb"
 )

 // diskLayer is a low level persistent snapshot built on top of a key-value store.
 type diskLayer struct {
     diskdb ethdb.KeyValueStore // Key-value store containing the base snapshot
-    triedb *trie.Database // Trie node cache for reconstruction purposes
+    triedb *triedb.Database // Trie node cache for reconstruction purposes
     cache *fastcache.Cache // Cache to avoid hitting the disk for direct access

     root common.Hash // Root hash of the base snapshot
@@ -139,7 +139,7 @@ func TestDiskMerge(t *testing.T) {
     // Retrieve all the data through the disk layer and validate it
     base = snaps.Snapshot(diffRoot)
     if _, ok := base.(*diskLayer); !ok {
-        t.Fatalf("update not flattend into the disk layer")
+        t.Fatalf("update not flattened into the disk layer")
     }

     // assertAccount ensures that an account matches the given blob.
@@ -362,7 +362,7 @@ func TestDiskPartialMerge(t *testing.T) {
     // Retrieve all the data through the disk layer and validate it
     base = snaps.Snapshot(diffRoot)
     if _, ok := base.(*diskLayer); !ok {
-        t.Fatalf("test %d: update not flattend into the disk layer", i)
+        t.Fatalf("test %d: update not flattened into the disk layer", i)
     }
     assertAccount(accNoModNoCache, accNoModNoCache[:])
     assertAccount(accNoModCache, accNoModCache[:])
@@ -32,6 +32,7 @@ import (
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/trie"
     "github.com/ethereum/go-ethereum/trie/trienode"
+    "github.com/ethereum/go-ethereum/triedb"
 )

 var (
@@ -55,7 +56,7 @@ var (
 // generateSnapshot regenerates a brand new snapshot based on an existing state
 // database and head block asynchronously. The snapshot is returned immediately
 // and generation is continued in the background until done.
-func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) *diskLayer {
+func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *triedb.Database, cache int, root common.Hash) *diskLayer {
     // Create a new disk layer with an initialized state marker at zero
     var (
         stats = &generatorStats{start: time.Now()}
@@ -353,7 +354,7 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefi
     var resolver trie.NodeResolver
     if len(result.keys) > 0 {
         mdb := rawdb.NewMemoryDatabase()
-        tdb := trie.NewDatabase(mdb, trie.HashDefaults)
+        tdb := triedb.NewDatabase(mdb, triedb.HashDefaults)
         defer tdb.Close()
         snapTrie := trie.NewEmpty(tdb)
         for i, key := range result.keys {
@@ -18,7 +18,6 @@ package snapshot

 import (
     "fmt"
-    "math/big"
     "os"
     "testing"
     "time"
@@ -30,9 +29,11 @@ import (
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/trie"
-    "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
-    "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
     "github.com/ethereum/go-ethereum/trie/trienode"
+    "github.com/ethereum/go-ethereum/triedb"
+    "github.com/ethereum/go-ethereum/triedb/hashdb"
+    "github.com/ethereum/go-ethereum/triedb/pathdb"
+    "github.com/holiman/uint256"
     "golang.org/x/crypto/sha3"
 )

@@ -58,9 +59,9 @@ func testGeneration(t *testing.T, scheme string) {
     var helper = newHelper(scheme)
     stRoot := helper.makeStorageTrie(common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false)

-    helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
-    helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
-    helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+    helper.addTrieAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+    helper.addTrieAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
+    helper.addTrieAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})

     helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
     helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
@@ -97,16 +98,16 @@ func testGenerateExistentState(t *testing.T, scheme string) {
     var helper = newHelper(scheme)

     stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-    helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
-    helper.addSnapAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+    helper.addTrieAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+    helper.addSnapAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
     helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})

-    helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
-    helper.addSnapAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
+    helper.addTrieAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
+    helper.addSnapAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})

     stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-    helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
-    helper.addSnapAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+    helper.addTrieAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+    helper.addSnapAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
     helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})

     root, snap := helper.CommitAndGenerate()
@@ -155,20 +156,20 @@ func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) {

 type testHelper struct {
     diskdb ethdb.Database
-    triedb *trie.Database
+    triedb *triedb.Database
     accTrie *trie.StateTrie
     nodes *trienode.MergedNodeSet
 }

 func newHelper(scheme string) *testHelper {
     diskdb := rawdb.NewMemoryDatabase()
-    config := &trie.Config{}
+    config := &triedb.Config{}
     if scheme == rawdb.PathScheme {
         config.PathDB = &pathdb.Config{} // disable caching
     } else {
         config.HashDB = &hashdb.Config{} // disable caching
     }
-    triedb := trie.NewDatabase(diskdb, config)
+    triedb := triedb.NewDatabase(diskdb, config)
     accTrie, _ := trie.NewStateTrie(trie.StateTrieID(types.EmptyRootHash), triedb)
     return &testHelper{
         diskdb: diskdb,
@@ -259,28 +260,28 @@ func testGenerateExistentStateWithWrongStorage(t *testing.T, scheme string) {
     helper := newHelper(scheme)

     // Account one, empty root but non-empty database
-    helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
+    helper.addAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
     helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})

     // Account two, non empty root but empty database
     stRoot := helper.makeStorageTrie(hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-    helper.addAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+    helper.addAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})

     // Miss slots
     {
         // Account three, non empty root but misses slots in the beginning
         helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-        helper.addAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+        helper.addAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
         helper.addSnapStorage("acc-3", []string{"key-2", "key-3"}, []string{"val-2", "val-3"})

         // Account four, non empty root but misses slots in the middle
         helper.makeStorageTrie(hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-        helper.addAccount("acc-4", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+        helper.addAccount("acc-4", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
         helper.addSnapStorage("acc-4", []string{"key-1", "key-3"}, []string{"val-1", "val-3"})

         // Account five, non empty root but misses slots in the end
         helper.makeStorageTrie(hashData([]byte("acc-5")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-        helper.addAccount("acc-5", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+        helper.addAccount("acc-5", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
         helper.addSnapStorage("acc-5", []string{"key-1", "key-2"}, []string{"val-1", "val-2"})
     }

@@ -288,22 +289,22 @@ func testGenerateExistentStateWithWrongStorage(t *testing.T, scheme string) {
     {
         // Account six, non empty root but wrong slots in the beginning
         helper.makeStorageTrie(hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-        helper.addAccount("acc-6", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+        helper.addAccount("acc-6", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
         helper.addSnapStorage("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"badval-1", "val-2", "val-3"})

         // Account seven, non empty root but wrong slots in the middle
         helper.makeStorageTrie(hashData([]byte("acc-7")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-        helper.addAccount("acc-7", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+        helper.addAccount("acc-7", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
         helper.addSnapStorage("acc-7", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "badval-2", "val-3"})

         // Account eight, non empty root but wrong slots in the end
         helper.makeStorageTrie(hashData([]byte("acc-8")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-        helper.addAccount("acc-8", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+        helper.addAccount("acc-8", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
         helper.addSnapStorage("acc-8", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "badval-3"})

         // Account 9, non empty root but rotated slots
         helper.makeStorageTrie(hashData([]byte("acc-9")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-        helper.addAccount("acc-9", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+        helper.addAccount("acc-9", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
         helper.addSnapStorage("acc-9", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-3", "val-2"})
     }

@@ -311,17 +312,17 @@ func testGenerateExistentStateWithWrongStorage(t *testing.T, scheme string) {
     {
         // Account 10, non empty root but extra slots in the beginning
         helper.makeStorageTrie(hashData([]byte("acc-10")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-        helper.addAccount("acc-10", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+        helper.addAccount("acc-10", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
        helper.addSnapStorage("acc-10", []string{"key-0", "key-1", "key-2", "key-3"}, []string{"val-0", "val-1", "val-2", "val-3"})

         // Account 11, non empty root but extra slots in the middle
         helper.makeStorageTrie(hashData([]byte("acc-11")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-        helper.addAccount("acc-11", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+        helper.addAccount("acc-11", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
         helper.addSnapStorage("acc-11", []string{"key-1", "key-2", "key-2-1", "key-3"}, []string{"val-1", "val-2", "val-2-1", "val-3"})

         // Account 12, non empty root but extra slots in the end
         helper.makeStorageTrie(hashData([]byte("acc-12")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-        helper.addAccount("acc-12", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+        helper.addAccount("acc-12", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
         helper.addSnapStorage("acc-12", []string{"key-1", "key-2", "key-3", "key-4"}, []string{"val-1", "val-2", "val-3", "val-4"})
     }

@@ -366,25 +367,25 @@ func testGenerateExistentStateWithWrongAccounts(t *testing.T, scheme string) {

     // Missing accounts, only in the trie
     {
-        helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Beginning
-        helper.addTrieAccount("acc-4", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Middle
-        helper.addTrieAccount("acc-6", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // End
+        helper.addTrieAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Beginning
+        helper.addTrieAccount("acc-4", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Middle
+        helper.addTrieAccount("acc-6", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // End
     }

     // Wrong accounts
     {
-        helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
-        helper.addSnapAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")})
+        helper.addTrieAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+        helper.addSnapAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")})

-        helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
-        helper.addSnapAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
+        helper.addTrieAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
+        helper.addSnapAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
     }

     // Extra accounts, only in the snap
     {
-        helper.addSnapAccount("acc-0", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // before the beginning
-        helper.addSnapAccount("acc-5", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: common.Hex2Bytes("0x1234")}) // Middle
-        helper.addSnapAccount("acc-7", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // after the end
+        helper.addSnapAccount("acc-0", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // before the beginning
+        helper.addSnapAccount("acc-5", &types.StateAccount{Balance: uint256.NewInt(1), Root: types.EmptyRootHash, CodeHash: common.Hex2Bytes("0x1234")}) // Middle
+        helper.addSnapAccount("acc-7", &types.StateAccount{Balance: uint256.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // after the end
     }

     root, snap := helper.CommitAndGenerate()
@@ -418,9 +419,9 @@ func testGenerateCorruptAccountTrie(t *testing.T, scheme string) {
     // without any storage slots to keep the test smaller.
     helper := newHelper(scheme)

-    helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074
-    helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
-    helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4
+    helper.addTrieAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074
+    helper.addTrieAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+    helper.addTrieAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(3), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4

     root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978

@@ -463,10 +464,10 @@ func testGenerateMissingStorageTrie(t *testing.T, scheme string) {
         helper = newHelper(scheme)
     )
     stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
-    helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
-    helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+    helper.addTrieAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+    helper.addTrieAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
     stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
-    helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
+    helper.addTrieAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2

     root := helper.Commit()

@@ -503,10 +504,10 @@ func testGenerateCorruptStorageTrie(t *testing.T, scheme string) {
     helper := newHelper(scheme)

     stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
-    helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
-    helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+    helper.addTrieAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+    helper.addTrieAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
     stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
|
helper.addTrieAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
|
||||||
|
|
||||||
root := helper.Commit()
|
root := helper.Commit()
|
||||||
|
|
||||||
@ -546,7 +547,7 @@ func testGenerateWithExtraAccounts(t *testing.T, scheme string) {
|
|||||||
[]string{"val-1", "val-2", "val-3", "val-4", "val-5"},
|
[]string{"val-1", "val-2", "val-3", "val-4", "val-5"},
|
||||||
true,
|
true,
|
||||||
)
|
)
|
||||||
acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
|
acc := &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
|
||||||
val, _ := rlp.EncodeToBytes(acc)
|
val, _ := rlp.EncodeToBytes(acc)
|
||||||
helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
|
helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
|
||||||
|
|
||||||
@ -566,7 +567,7 @@ func testGenerateWithExtraAccounts(t *testing.T, scheme string) {
|
|||||||
[]string{"val-1", "val-2", "val-3", "val-4", "val-5"},
|
[]string{"val-1", "val-2", "val-3", "val-4", "val-5"},
|
||||||
true,
|
true,
|
||||||
)
|
)
|
||||||
acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
|
acc := &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
|
||||||
val, _ := rlp.EncodeToBytes(acc)
|
val, _ := rlp.EncodeToBytes(acc)
|
||||||
key := hashData([]byte("acc-2"))
|
key := hashData([]byte("acc-2"))
|
||||||
rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
|
rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
|
||||||
@ -622,7 +623,7 @@ func testGenerateWithManyExtraAccounts(t *testing.T, scheme string) {
|
|||||||
[]string{"val-1", "val-2", "val-3"},
|
[]string{"val-1", "val-2", "val-3"},
|
||||||
true,
|
true,
|
||||||
)
|
)
|
||||||
acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
|
acc := &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
|
||||||
val, _ := rlp.EncodeToBytes(acc)
|
val, _ := rlp.EncodeToBytes(acc)
|
||||||
helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
|
helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
|
||||||
|
|
||||||
@ -636,7 +637,7 @@ func testGenerateWithManyExtraAccounts(t *testing.T, scheme string) {
|
|||||||
{
|
{
|
||||||
// 100 accounts exist only in snapshot
|
// 100 accounts exist only in snapshot
|
||||||
for i := 0; i < 1000; i++ {
|
for i := 0; i < 1000; i++ {
|
||||||
acc := &types.StateAccount{Balance: big.NewInt(int64(i)), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}
|
acc := &types.StateAccount{Balance: uint256.NewInt(uint64(i)), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}
|
||||||
val, _ := rlp.EncodeToBytes(acc)
|
val, _ := rlp.EncodeToBytes(acc)
|
||||||
key := hashData([]byte(fmt.Sprintf("acc-%d", i)))
|
key := hashData([]byte(fmt.Sprintf("acc-%d", i)))
|
||||||
rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
|
rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
|
||||||
@ -678,7 +679,7 @@ func testGenerateWithExtraBeforeAndAfter(t *testing.T, scheme string) {
|
|||||||
}
|
}
|
||||||
helper := newHelper(scheme)
|
helper := newHelper(scheme)
|
||||||
{
|
{
|
||||||
acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}
|
acc := &types.StateAccount{Balance: uint256.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}
|
||||||
val, _ := rlp.EncodeToBytes(acc)
|
val, _ := rlp.EncodeToBytes(acc)
|
||||||
helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val)
|
helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val)
|
||||||
helper.accTrie.MustUpdate(common.HexToHash("0x07").Bytes(), val)
|
helper.accTrie.MustUpdate(common.HexToHash("0x07").Bytes(), val)
|
||||||
@ -720,7 +721,7 @@ func testGenerateWithMalformedSnapdata(t *testing.T, scheme string) {
|
|||||||
}
|
}
|
||||||
helper := newHelper(scheme)
|
helper := newHelper(scheme)
|
||||||
{
|
{
|
||||||
acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}
|
acc := &types.StateAccount{Balance: uint256.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}
|
||||||
val, _ := rlp.EncodeToBytes(acc)
|
val, _ := rlp.EncodeToBytes(acc)
|
||||||
helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val)
|
helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val)
|
||||||
|
|
||||||
@ -764,7 +765,7 @@ func testGenerateFromEmptySnap(t *testing.T, scheme string) {
|
|||||||
for i := 0; i < 400; i++ {
|
for i := 0; i < 400; i++ {
|
||||||
stRoot := helper.makeStorageTrie(hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
|
stRoot := helper.makeStorageTrie(hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
|
||||||
helper.addTrieAccount(fmt.Sprintf("acc-%d", i),
|
helper.addTrieAccount(fmt.Sprintf("acc-%d", i),
|
||||||
&types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
|
&types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
|
||||||
}
|
}
|
||||||
root, snap := helper.CommitAndGenerate()
|
root, snap := helper.CommitAndGenerate()
|
||||||
t.Logf("Root: %#x\n", root) // Root: 0x6f7af6d2e1a1bf2b84a3beb3f8b64388465fbc1e274ca5d5d3fc787ca78f59e4
|
t.Logf("Root: %#x\n", root) // Root: 0x6f7af6d2e1a1bf2b84a3beb3f8b64388465fbc1e274ca5d5d3fc787ca78f59e4
|
||||||
@ -806,7 +807,7 @@ func testGenerateWithIncompleteStorage(t *testing.T, scheme string) {
|
|||||||
for i := 0; i < 8; i++ {
|
for i := 0; i < 8; i++ {
|
||||||
accKey := fmt.Sprintf("acc-%d", i)
|
accKey := fmt.Sprintf("acc-%d", i)
|
||||||
stRoot := helper.makeStorageTrie(hashData([]byte(accKey)), stKeys, stVals, true)
|
stRoot := helper.makeStorageTrie(hashData([]byte(accKey)), stKeys, stVals, true)
|
||||||
helper.addAccount(accKey, &types.StateAccount{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
|
helper.addAccount(accKey, &types.StateAccount{Balance: uint256.NewInt(uint64(i)), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
|
||||||
var moddedKeys []string
|
var moddedKeys []string
|
||||||
var moddedVals []string
|
var moddedVals []string
|
||||||
for ii := 0; ii < 8; ii++ {
|
for ii := 0; ii < 8; ii++ {
|
||||||
@ -903,11 +904,11 @@ func testGenerateCompleteSnapshotWithDanglingStorage(t *testing.T, scheme string
|
|||||||
var helper = newHelper(scheme)
|
var helper = newHelper(scheme)
|
||||||
|
|
||||||
stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
|
stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
|
||||||
helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
|
helper.addAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
|
||||||
helper.addAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
|
helper.addAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
|
||||||
|
|
||||||
helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
|
helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
|
||||||
helper.addAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
|
helper.addAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
|
||||||
|
|
||||||
helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
|
helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
|
||||||
helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
|
helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
|
||||||
@ -943,11 +944,11 @@ func testGenerateBrokenSnapshotWithDanglingStorage(t *testing.T, scheme string)
|
|||||||
var helper = newHelper(scheme)
|
var helper = newHelper(scheme)
|
||||||
|
|
||||||
stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
|
stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
|
||||||
helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
|
helper.addTrieAccount("acc-1", &types.StateAccount{Balance: uint256.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
|
||||||
helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
|
helper.addTrieAccount("acc-2", &types.StateAccount{Balance: uint256.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()})
|
||||||
|
|
||||||
helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
|
helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
|
||||||
helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
|
helper.addTrieAccount("acc-3", &types.StateAccount{Balance: uint256.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()})
|
||||||
|
|
||||||
populateDangling(helper.diskdb)
|
populateDangling(helper.diskdb)
|
||||||
|
|
||||||
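The hunks above all make the same mechanical substitution: test balances move from math/big to the fixed-width github.com/holiman/uint256 type, so big.NewInt(n) becomes uint256.NewInt(n), and signed values such as loop indices are cast to uint64 because uint256.NewInt takes a uint64. A minimal, self-contained sketch of that conversion; the variable names and values here are illustrative, not part of this commit:

    package main

    import (
        "fmt"
        "math/big"

        "github.com/holiman/uint256"
    )

    func main() {
        // Old style: arbitrary-precision balance.
        legacy := big.NewInt(42)

        // New style: fixed 256-bit balance. NewInt takes a uint64, so signed
        // counters (e.g. a test's int loop index) need an explicit cast.
        bal := uint256.NewInt(uint64(legacy.Int64()))

        // Existing big.Int values can also be converted directly; the second
        // return value reports overflow (values above 2^256-1).
        conv, overflow := uint256.FromBig(legacy)
        fmt.Println(bal, conv, overflow)
    }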
@@ -30,7 +30,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
-"github.com/ethereum/go-ethereum/trie"
+"github.com/ethereum/go-ethereum/triedb"
)

const journalVersion uint64 = 0

@@ -120,7 +120,7 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
}

// loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
-func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, root common.Hash, cache int, recovery bool, noBuild bool) (snapshot, bool, error) {
+func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *triedb.Database, root common.Hash, cache int, recovery bool, noBuild bool) (snapshot, bool, error) {
// If snapshotting is disabled (initial sync in progress), don't do anything,
// wait for the chain to permit us to do something meaningful
if rawdb.ReadSnapshotDisabled(diskdb) {
@@ -30,7 +30,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
-"github.com/ethereum/go-ethereum/trie"
+"github.com/ethereum/go-ethereum/triedb"
)

var (

@@ -168,7 +168,7 @@ type Config struct {
type Tree struct {
config Config // Snapshots configurations
diskdb ethdb.KeyValueStore // Persistent database to store the snapshot
-triedb *trie.Database // In-memory cache to access the trie through
+triedb *triedb.Database // In-memory cache to access the trie through
layers map[common.Hash]snapshot // Collection of all known layers
lock sync.RWMutex

@@ -192,7 +192,7 @@ type Tree struct {
// state trie.
// - otherwise, the entire snapshot is considered invalid and will be recreated on
// a background thread.
-func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root common.Hash) (*Tree, error) {
+func New(config Config, diskdb ethdb.KeyValueStore, triedb *triedb.Database, root common.Hash) (*Tree, error) {
// Create a new, empty snapshot tree
snap := &Tree{
config: config,

@@ -258,6 +258,14 @@ func (t *Tree) Disable() {
for _, layer := range t.layers {
switch layer := layer.(type) {
case *diskLayer:
+
+layer.lock.RLock()
+generating := layer.genMarker != nil
+layer.lock.RUnlock()
+if !generating {
+// Generator is already aborted or finished
+break
+}
// If the base layer is generating, abort it
if layer.genAbort != nil {
abort := make(chan *generatorStats)
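Beyond the balance type, these hunks reflect the other theme of the merge: the trie database moves out of the trie package into a standalone triedb package, so loadSnapshot, the Tree struct and New now take a *triedb.Database. A rough wiring sketch under that assumption; the snapshot.Config literal (and its CacheSize value) is illustrative and not taken from this diff:

    package main

    import (
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/core/state/snapshot"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/triedb"
    )

    func main() {
        diskdb := rawdb.NewMemoryDatabase()

        // Previously trie.NewDatabase(diskdb, nil); the constructor now lives
        // in the standalone triedb package.
        tdb := triedb.NewDatabase(diskdb, nil)

        // snapshot.New now accepts *triedb.Database instead of *trie.Database.
        tree, err := snapshot.New(snapshot.Config{CacheSize: 256}, diskdb, tdb, types.EmptyRootHash)
        _, _ = tree, err
    }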
@@ -20,7 +20,6 @@ import (
crand "crypto/rand"
"encoding/binary"
"fmt"
-"math/big"
"math/rand"
"testing"
"time"

@@ -30,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
+"github.com/holiman/uint256"
)

// randomHash generates a random blob of data and returns it as a hash.

@@ -44,7 +44,7 @@ func randomHash() common.Hash {
// randomAccount generates a random account and returns it RLP encoded.
func randomAccount() []byte {
a := &types.StateAccount{
-Balance: big.NewInt(rand.Int63()),
+Balance: uint256.NewInt(rand.Uint64()),
Nonce: rand.Uint64(),
Root: randomHash(),
CodeHash: types.EmptyCodeHash[:],
@@ -20,7 +20,6 @@ import (
"bytes"
"fmt"
"io"
-"math/big"
"time"

"github.com/ethereum/go-ethereum/common"

@@ -29,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie/trienode"
+"github.com/holiman/uint256"
)

type Code []byte

@@ -93,7 +93,7 @@ type stateObject struct {

// empty returns whether the account is considered empty.
func (s *stateObject) empty() bool {
-return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, types.EmptyCodeHash.Bytes())
+return s.data.Nonce == 0 && s.data.Balance.IsZero() && bytes.Equal(s.data.CodeHash, types.EmptyCodeHash.Bytes())
}

// newObject creates a state object.

@@ -405,36 +405,36 @@ func (s *stateObject) commit() (*trienode.NodeSet, error) {

// AddBalance adds amount to s's balance.
// It is used to add funds to the destination account of a transfer.
-func (s *stateObject) AddBalance(amount *big.Int) {
+func (s *stateObject) AddBalance(amount *uint256.Int) {
// EIP161: We must check emptiness for the objects such that the account
// clearing (0,0,0 objects) can take effect.
-if amount.Sign() == 0 {
+if amount.IsZero() {
if s.empty() {
s.touch()
}
return
}
-s.SetBalance(new(big.Int).Add(s.Balance(), amount))
+s.SetBalance(new(uint256.Int).Add(s.Balance(), amount))
}

// SubBalance removes amount from s's balance.
// It is used to remove funds from the origin account of a transfer.
-func (s *stateObject) SubBalance(amount *big.Int) {
+func (s *stateObject) SubBalance(amount *uint256.Int) {
-if amount.Sign() == 0 {
+if amount.IsZero() {
return
}
-s.SetBalance(new(big.Int).Sub(s.Balance(), amount))
+s.SetBalance(new(uint256.Int).Sub(s.Balance(), amount))
}

-func (s *stateObject) SetBalance(amount *big.Int) {
+func (s *stateObject) SetBalance(amount *uint256.Int) {
s.db.journal.append(balanceChange{
account: &s.address,
-prev: new(big.Int).Set(s.data.Balance),
+prev: new(uint256.Int).Set(s.data.Balance),
})
s.setBalance(amount)
}

-func (s *stateObject) setBalance(amount *big.Int) {
+func (s *stateObject) setBalance(amount *uint256.Int) {
s.data.Balance = amount
}

@@ -533,7 +533,7 @@ func (s *stateObject) CodeHash() []byte {
return s.data.CodeHash
}

-func (s *stateObject) Balance() *big.Int {
+func (s *stateObject) Balance() *uint256.Int {
return s.data.Balance
}
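The stateObject hunks above swap the big.Int arithmetic idioms for their uint256 equivalents: amount.Sign() == 0 becomes amount.IsZero(), and new(big.Int).Add/Sub become new(uint256.Int).Add/Sub. A small stand-alone sketch of those idioms; the values are illustrative:

    package main

    import (
        "fmt"

        "github.com/holiman/uint256"
    )

    func main() {
        balance := uint256.NewInt(100)
        amount := uint256.NewInt(0)

        // big.Int's amount.Sign() == 0 check becomes amount.IsZero().
        if amount.IsZero() {
            fmt.Println("zero-value transfer, account is only touched")
        }

        // new(big.Int).Add(balance, x) becomes new(uint256.Int).Add(balance, x),
        // writing the sum into a freshly allocated 256-bit integer.
        updated := new(uint256.Int).Add(balance, uint256.NewInt(5))
        fmt.Println(updated.Uint64()) // 105
    }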
@@ -19,7 +19,6 @@ package state
import (
"bytes"
"encoding/json"
-"math/big"
"testing"

"github.com/ethereum/go-ethereum/common"

@@ -27,7 +26,8 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
-"github.com/ethereum/go-ethereum/trie"
+"github.com/ethereum/go-ethereum/triedb"
+"github.com/holiman/uint256"
)

type stateEnv struct {

@@ -43,17 +43,17 @@ func newStateEnv() *stateEnv {

func TestDump(t *testing.T) {
db := rawdb.NewMemoryDatabase()
-tdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
+tdb := NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
sdb, _ := New(types.EmptyRootHash, tdb, nil)
s := &stateEnv{db: db, state: sdb}

// generate a few entries
-obj1 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01}))
+obj1 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01}))
-obj1.AddBalance(big.NewInt(22))
+obj1.AddBalance(uint256.NewInt(22))
-obj2 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02}))
+obj2 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02}))
obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3})
-obj3 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x02}))
+obj3 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x02}))
-obj3.SetBalance(big.NewInt(44))
+obj3.SetBalance(uint256.NewInt(44))

// write some of them to the trie
s.state.updateStateObject(obj1)

@@ -100,19 +100,19 @@ func TestDump(t *testing.T) {

func TestIterativeDump(t *testing.T) {
db := rawdb.NewMemoryDatabase()
-tdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
+tdb := NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
sdb, _ := New(types.EmptyRootHash, tdb, nil)
s := &stateEnv{db: db, state: sdb}

// generate a few entries
-obj1 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01}))
+obj1 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01}))
-obj1.AddBalance(big.NewInt(22))
+obj1.AddBalance(uint256.NewInt(22))
-obj2 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02}))
+obj2 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02}))
obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3})
-obj3 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x02}))
+obj3 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x02}))
-obj3.SetBalance(big.NewInt(44))
+obj3.SetBalance(uint256.NewInt(44))
-obj4 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x00}))
+obj4 := s.state.getOrNewStateObject(common.BytesToAddress([]byte{0x00}))
-obj4.AddBalance(big.NewInt(1337))
+obj4.AddBalance(uint256.NewInt(1337))

// write some of them to the trie
s.state.updateStateObject(obj1)

@@ -208,7 +208,7 @@ func TestSnapshot2(t *testing.T) {

// db, trie are already non-empty values
so0 := state.getStateObject(stateobjaddr0)
-so0.SetBalance(big.NewInt(42))
+so0.SetBalance(uint256.NewInt(42))
so0.SetNonce(43)
so0.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e'}), []byte{'c', 'a', 'f', 'e'})
so0.selfDestructed = false

@@ -220,7 +220,7 @@ func TestSnapshot2(t *testing.T) {

// and one with deleted == true
so1 := state.getStateObject(stateobjaddr1)
-so1.SetBalance(big.NewInt(52))
+so1.SetBalance(uint256.NewInt(52))
so1.SetNonce(53)
so1.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e', '2'}), []byte{'c', 'a', 'f', 'e', '2'})
so1.selfDestructed = true
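Two API points in these test hunks are worth noting: NewDatabaseWithConfig now takes a *triedb.Config, and GetOrNewStateObject has become the unexported getOrNewStateObject, so code outside the state package reaches accounts through the StateDB methods instead. A minimal sketch of the external pattern under those assumptions; the address and balance are illustrative:

    package main

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/core/state"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/triedb"
        "github.com/holiman/uint256"
    )

    func main() {
        db := rawdb.NewMemoryDatabase()
        // trie.Config{Preimages: true} becomes triedb.Config{Preimages: true}.
        sdb := state.NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})

        // External callers mutate accounts via StateDB methods rather than
        // fetching the state object directly.
        statedb, _ := state.New(types.EmptyRootHash, sdb, nil)
        statedb.SetBalance(common.HexToAddress("0x01"), uint256.NewInt(22))
    }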
@@ -19,7 +19,6 @@ package state

import (
"fmt"
-"math/big"
"sort"
"time"

@@ -34,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/triestate"
+"github.com/holiman/uint256"
)

const (

@@ -289,12 +289,12 @@ func (s *StateDB) Empty(addr common.Address) bool {
}

// GetBalance retrieves the balance from the given address or 0 if object not found
-func (s *StateDB) GetBalance(addr common.Address) *big.Int {
+func (s *StateDB) GetBalance(addr common.Address) *uint256.Int {
stateObject := s.getStateObject(addr)
if stateObject != nil {
return stateObject.Balance()
}
-return common.Big0
+return common.U2560
}

// GetNonce retrieves the nonce from the given address or 0 if object not found

@@ -382,44 +382,44 @@ func (s *StateDB) HasSelfDestructed(addr common.Address) bool {
*/

// AddBalance adds amount to the account associated with addr.
-func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) {
+func (s *StateDB) AddBalance(addr common.Address, amount *uint256.Int) {
-stateObject := s.GetOrNewStateObject(addr)
+stateObject := s.getOrNewStateObject(addr)
if stateObject != nil {
stateObject.AddBalance(amount)
}
}

// SubBalance subtracts amount from the account associated with addr.
-func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) {
+func (s *StateDB) SubBalance(addr common.Address, amount *uint256.Int) {
-stateObject := s.GetOrNewStateObject(addr)
+stateObject := s.getOrNewStateObject(addr)
if stateObject != nil {
stateObject.SubBalance(amount)
}
}

-func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) {
+func (s *StateDB) SetBalance(addr common.Address, amount *uint256.Int) {
-stateObject := s.GetOrNewStateObject(addr)
+stateObject := s.getOrNewStateObject(addr)
if stateObject != nil {
stateObject.SetBalance(amount)
}
}

func (s *StateDB) SetNonce(addr common.Address, nonce uint64) {
-stateObject := s.GetOrNewStateObject(addr)
+stateObject := s.getOrNewStateObject(addr)
if stateObject != nil {
stateObject.SetNonce(nonce)
}
}

func (s *StateDB) SetCode(addr common.Address, code []byte) {
-stateObject := s.GetOrNewStateObject(addr)
+stateObject := s.getOrNewStateObject(addr)
if stateObject != nil {
stateObject.SetCode(crypto.Keccak256Hash(code), code)
}
}

func (s *StateDB) SetState(addr common.Address, key, value common.Hash) {
-stateObject := s.GetOrNewStateObject(addr)
+stateObject := s.getOrNewStateObject(addr)
if stateObject != nil {
stateObject.SetState(key, value)
}

@@ -440,7 +440,7 @@ func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common
if _, ok := s.stateObjectsDestruct[addr]; !ok {
s.stateObjectsDestruct[addr] = nil
}
-stateObject := s.GetOrNewStateObject(addr)
+stateObject := s.getOrNewStateObject(addr)
for k, v := range storage {
stateObject.SetState(k, v)
}

@@ -459,10 +459,10 @@ func (s *StateDB) SelfDestruct(addr common.Address) {
s.journal.append(selfDestructChange{
account: &addr,
prev: stateObject.selfDestructed,
-prevbalance: new(big.Int).Set(stateObject.Balance()),
+prevbalance: new(uint256.Int).Set(stateObject.Balance()),
})
stateObject.markSelfdestructed()
-stateObject.data.Balance = new(big.Int)
+stateObject.data.Balance = new(uint256.Int)
}

func (s *StateDB) Selfdestruct6780(addr common.Address) {

@@ -623,8 +623,8 @@ func (s *StateDB) setStateObject(object *stateObject) {
s.stateObjects[object.Address()] = object
}

-// GetOrNewStateObject retrieves a state object or create a new state object if nil.
+// getOrNewStateObject retrieves a state object or create a new state object if nil.
-func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
+func (s *StateDB) getOrNewStateObject(addr common.Address) *stateObject {
stateObject := s.getStateObject(addr)
if stateObject == nil {
stateObject, _ = s.createObject(addr)
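The statedb.go hunks above change the externally visible balance API: AddBalance, SubBalance and SetBalance now take *uint256.Int, GetBalance returns one, and the not-found case returns the shared zero value common.U2560 instead of common.Big0. A short usage sketch; the address and amounts are illustrative:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/core/state"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/holiman/uint256"
    )

    func main() {
        statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
        addr := common.HexToAddress("0xaaaa")

        statedb.AddBalance(addr, uint256.NewInt(42))
        statedb.SubBalance(addr, uint256.NewInt(2))

        // GetBalance now returns *uint256.Int; a missing account yields the
        // shared zero value rather than nil.
        fmt.Println(statedb.GetBalance(addr).Uint64()) // 40
    }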
@@ -22,7 +22,6 @@ import (
"errors"
"fmt"
"math"
-"math/big"
"math/rand"
"reflect"
"strings"

@@ -36,8 +35,10 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
-"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/triestate"
+"github.com/ethereum/go-ethereum/triedb"
+"github.com/ethereum/go-ethereum/triedb/pathdb"
+"github.com/holiman/uint256"
)

// A stateTest checks that the state changes are correctly captured. Instances

@@ -60,7 +61,7 @@ func newStateTestAction(addr common.Address, r *rand.Rand, index int) testAction
{
name: "SetBalance",
fn: func(a testAction, s *StateDB) {
-s.SetBalance(addr, big.NewInt(a.args[0]))
+s.SetBalance(addr, uint256.NewInt(uint64(a.args[0])))
},
args: make([]int64, 1),
},

@@ -181,7 +182,7 @@ func (test *stateTest) run() bool {
storageList = append(storageList, copy2DSet(states.Storages))
}
disk = rawdb.NewMemoryDatabase()
-tdb = trie.NewDatabase(disk, &trie.Config{PathDB: pathdb.Defaults})
+tdb = triedb.NewDatabase(disk, &triedb.Config{PathDB: pathdb.Defaults})
sdb = NewDatabaseWithNodeDB(disk, tdb)
byzantium = rand.Intn(2) == 0
)

@@ -252,7 +253,7 @@ func (test *stateTest) run() bool {
// - the account was indeed not present in trie
// - the account is present in new trie, nil->nil is regarded as invalid
// - the slots transition is correct
-func (test *stateTest) verifyAccountCreation(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, slots map[common.Hash][]byte) error {
+func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, slots map[common.Hash][]byte) error {
// Verify account change
addrHash := crypto.Keccak256Hash(addr.Bytes())
oBlob, err := otr.Get(addrHash.Bytes())

@@ -303,7 +304,7 @@ func (test *stateTest) verifyAccountCreation(next common.Hash, db *trie.Database
// - the account was indeed present in trie
// - the account in old trie matches the provided value
// - the slots transition is correct
-func (test *stateTest) verifyAccountUpdate(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, origin []byte, slots map[common.Hash][]byte) error {
+func (test *stateTest) verifyAccountUpdate(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, origin []byte, slots map[common.Hash][]byte) error {
// Verify account change
addrHash := crypto.Keccak256Hash(addr.Bytes())
oBlob, err := otr.Get(addrHash.Bytes())

@@ -357,7 +358,7 @@ func (test *stateTest) verifyAccountUpdate(next common.Hash, db *trie.Database,
return nil
}

-func (test *stateTest) verify(root common.Hash, next common.Hash, db *trie.Database, accountsOrigin map[common.Address][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error {
+func (test *stateTest) verify(root common.Hash, next common.Hash, db *triedb.Database, accountsOrigin map[common.Address][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error {
otr, err := trie.New(trie.StateTrieID(root), db)
if err != nil {
return err
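The import churn in these hunks shows the relocated database packages in one place: trie.NewDatabase/trie.Config and the trie/triedb/{hashdb,pathdb} packages become the standalone triedb package and triedb/{hashdb,pathdb}. A minimal sketch of the two database configurations these tests use:

    package main

    import (
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/triedb"
        "github.com/ethereum/go-ethereum/triedb/pathdb"
    )

    func main() {
        disk := rawdb.NewMemoryDatabase()

        // Hash scheme: pass a nil config, as TestUpdateLeaks does below.
        _ = triedb.NewDatabase(disk, nil)

        // Path scheme: select it through the config, as the state fuzz test
        // above now does.
        _ = triedb.NewDatabase(disk, &triedb.Config{PathDB: pathdb.Defaults})
    }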
|
@ -22,7 +22,6 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"math/big"
|
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"reflect"
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
@ -37,9 +36,10 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
|
|
||||||
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
|
|
||||||
"github.com/ethereum/go-ethereum/trie/trienode"
|
"github.com/ethereum/go-ethereum/trie/trienode"
|
||||||
|
"github.com/ethereum/go-ethereum/triedb"
|
||||||
|
"github.com/ethereum/go-ethereum/triedb/hashdb"
|
||||||
|
"github.com/ethereum/go-ethereum/triedb/pathdb"
|
||||||
"github.com/holiman/uint256"
|
"github.com/holiman/uint256"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -49,14 +49,14 @@ func TestUpdateLeaks(t *testing.T) {
|
|||||||
// Create an empty state database
|
// Create an empty state database
|
||||||
var (
|
var (
|
||||||
db = rawdb.NewMemoryDatabase()
|
db = rawdb.NewMemoryDatabase()
|
||||||
tdb = trie.NewDatabase(db, nil)
|
tdb = triedb.NewDatabase(db, nil)
|
||||||
)
|
)
|
||||||
state, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(db, tdb), nil)
|
state, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(db, tdb), nil)
|
||||||
|
|
||||||
// Update it with some accounts
|
// Update it with some accounts
|
||||||
for i := byte(0); i < 255; i++ {
|
for i := byte(0); i < 255; i++ {
|
||||||
addr := common.BytesToAddress([]byte{i})
|
addr := common.BytesToAddress([]byte{i})
|
||||||
state.AddBalance(addr, big.NewInt(int64(11*i)))
|
state.AddBalance(addr, uint256.NewInt(uint64(11*i)))
|
||||||
state.SetNonce(addr, uint64(42*i))
|
state.SetNonce(addr, uint64(42*i))
|
||||||
if i%2 == 0 {
|
if i%2 == 0 {
|
||||||
state.SetState(addr, common.BytesToHash([]byte{i, i, i}), common.BytesToHash([]byte{i, i, i, i}))
|
state.SetState(addr, common.BytesToHash([]byte{i, i, i}), common.BytesToHash([]byte{i, i, i, i}))
|
||||||
@ -85,13 +85,13 @@ func TestIntermediateLeaks(t *testing.T) {
|
|||||||
// Create two state databases, one transitioning to the final state, the other final from the beginning
|
// Create two state databases, one transitioning to the final state, the other final from the beginning
|
||||||
transDb := rawdb.NewMemoryDatabase()
|
transDb := rawdb.NewMemoryDatabase()
|
||||||
finalDb := rawdb.NewMemoryDatabase()
|
finalDb := rawdb.NewMemoryDatabase()
|
||||||
transNdb := trie.NewDatabase(transDb, nil)
|
transNdb := triedb.NewDatabase(transDb, nil)
|
||||||
finalNdb := trie.NewDatabase(finalDb, nil)
|
finalNdb := triedb.NewDatabase(finalDb, nil)
|
||||||
transState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(transDb, transNdb), nil)
|
transState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(transDb, transNdb), nil)
|
||||||
finalState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(finalDb, finalNdb), nil)
|
finalState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(finalDb, finalNdb), nil)
|
||||||
|
|
||||||
modify := func(state *StateDB, addr common.Address, i, tweak byte) {
|
modify := func(state *StateDB, addr common.Address, i, tweak byte) {
|
||||||
state.SetBalance(addr, big.NewInt(int64(11*i)+int64(tweak)))
|
state.SetBalance(addr, uint256.NewInt(uint64(11*i)+uint64(tweak)))
|
||||||
state.SetNonce(addr, uint64(42*i+tweak))
|
state.SetNonce(addr, uint64(42*i+tweak))
|
||||||
if i%2 == 0 {
|
if i%2 == 0 {
|
||||||
state.SetState(addr, common.Hash{i, i, i, 0}, common.Hash{})
|
state.SetState(addr, common.Hash{i, i, i, 0}, common.Hash{})
|
||||||
@ -166,8 +166,8 @@ func TestCopy(t *testing.T) {
|
|||||||
orig, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
|
orig, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
|
||||||
|
|
||||||
for i := byte(0); i < 255; i++ {
|
for i := byte(0); i < 255; i++ {
|
||||||
obj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
|
obj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||||
obj.AddBalance(big.NewInt(int64(i)))
|
obj.AddBalance(uint256.NewInt(uint64(i)))
|
||||||
orig.updateStateObject(obj)
|
orig.updateStateObject(obj)
|
||||||
}
|
}
|
||||||
orig.Finalise(false)
|
orig.Finalise(false)
|
||||||
@ -180,13 +180,13 @@ func TestCopy(t *testing.T) {
|
|||||||
|
|
||||||
// modify all in memory
|
// modify all in memory
|
||||||
for i := byte(0); i < 255; i++ {
|
for i := byte(0); i < 255; i++ {
|
||||||
origObj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
|
origObj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||||
copyObj := copy.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
|
copyObj := copy.getOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||||
ccopyObj := ccopy.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
|
ccopyObj := ccopy.getOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||||
|
|
||||||
origObj.AddBalance(big.NewInt(2 * int64(i)))
|
origObj.AddBalance(uint256.NewInt(2 * uint64(i)))
|
||||||
copyObj.AddBalance(big.NewInt(3 * int64(i)))
|
copyObj.AddBalance(uint256.NewInt(3 * uint64(i)))
|
||||||
ccopyObj.AddBalance(big.NewInt(4 * int64(i)))
|
ccopyObj.AddBalance(uint256.NewInt(4 * uint64(i)))
|
||||||
|
|
||||||
orig.updateStateObject(origObj)
|
orig.updateStateObject(origObj)
|
||||||
copy.updateStateObject(copyObj)
|
copy.updateStateObject(copyObj)
|
||||||
@ -208,17 +208,17 @@ func TestCopy(t *testing.T) {
|
|||||||
|
|
||||||
// Verify that the three states have been updated independently
|
// Verify that the three states have been updated independently
|
||||||
for i := byte(0); i < 255; i++ {
|
for i := byte(0); i < 255; i++ {
|
||||||
origObj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
|
origObj := orig.getOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||||
copyObj := copy.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
|
copyObj := copy.getOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||||
ccopyObj := ccopy.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
|
ccopyObj := ccopy.getOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||||
|
|
||||||
if want := big.NewInt(3 * int64(i)); origObj.Balance().Cmp(want) != 0 {
|
if want := uint256.NewInt(3 * uint64(i)); origObj.Balance().Cmp(want) != 0 {
|
||||||
t.Errorf("orig obj %d: balance mismatch: have %v, want %v", i, origObj.Balance(), want)
|
t.Errorf("orig obj %d: balance mismatch: have %v, want %v", i, origObj.Balance(), want)
|
||||||
}
|
}
|
||||||
if want := big.NewInt(4 * int64(i)); copyObj.Balance().Cmp(want) != 0 {
|
if want := uint256.NewInt(4 * uint64(i)); copyObj.Balance().Cmp(want) != 0 {
|
||||||
t.Errorf("copy obj %d: balance mismatch: have %v, want %v", i, copyObj.Balance(), want)
|
t.Errorf("copy obj %d: balance mismatch: have %v, want %v", i, copyObj.Balance(), want)
|
||||||
}
|
}
|
||||||
if want := big.NewInt(5 * int64(i)); ccopyObj.Balance().Cmp(want) != 0 {
|
if want := uint256.NewInt(5 * uint64(i)); ccopyObj.Balance().Cmp(want) != 0 {
|
||||||
t.Errorf("copy obj %d: balance mismatch: have %v, want %v", i, ccopyObj.Balance(), want)
|
t.Errorf("copy obj %d: balance mismatch: have %v, want %v", i, ccopyObj.Balance(), want)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -266,14 +266,14 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
|
|||||||
{
|
{
|
||||||
name: "SetBalance",
|
name: "SetBalance",
|
||||||
fn: func(a testAction, s *StateDB) {
|
fn: func(a testAction, s *StateDB) {
|
||||||
s.SetBalance(addr, big.NewInt(a.args[0]))
|
s.SetBalance(addr, uint256.NewInt(uint64(a.args[0])))
|
||||||
},
|
},
|
||||||
args: make([]int64, 1),
|
args: make([]int64, 1),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "AddBalance",
|
name: "AddBalance",
|
||||||
fn: func(a testAction, s *StateDB) {
|
fn: func(a testAction, s *StateDB) {
|
||||||
s.AddBalance(addr, big.NewInt(a.args[0]))
|
s.AddBalance(addr, uint256.NewInt(uint64(a.args[0])))
|
||||||
},
|
},
|
||||||
args: make([]int64, 1),
|
args: make([]int64, 1),
|
||||||
},
|
},
|
||||||
@ -531,12 +531,12 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
|
|||||||
|
|
||||||
func TestTouchDelete(t *testing.T) {
|
func TestTouchDelete(t *testing.T) {
|
||||||
s := newStateEnv()
|
s := newStateEnv()
|
||||||
s.state.GetOrNewStateObject(common.Address{})
|
s.state.getOrNewStateObject(common.Address{})
|
||||||
root, _ := s.state.Commit(0, false)
|
root, _ := s.state.Commit(0, false)
|
||||||
s.state, _ = New(root, s.state.db, s.state.snaps)
|
s.state, _ = New(root, s.state.db, s.state.snaps)
|
||||||
|
|
||||||
snapshot := s.state.Snapshot()
|
snapshot := s.state.Snapshot()
|
||||||
s.state.AddBalance(common.Address{}, new(big.Int))
|
s.state.AddBalance(common.Address{}, new(uint256.Int))
|
||||||
|
|
||||||
if len(s.state.journal.dirties) != 1 {
|
if len(s.state.journal.dirties) != 1 {
|
||||||
t.Fatal("expected one dirty state object")
|
t.Fatal("expected one dirty state object")
|
||||||
@ -552,7 +552,7 @@ func TestTouchDelete(t *testing.T) {
|
|||||||
func TestCopyOfCopy(t *testing.T) {
|
func TestCopyOfCopy(t *testing.T) {
|
||||||
state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
|
state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
|
||||||
addr := common.HexToAddress("aaaa")
|
addr := common.HexToAddress("aaaa")
|
||||||
state.SetBalance(addr, big.NewInt(42))
|
state.SetBalance(addr, uint256.NewInt(42))
|
||||||
|
|
||||||
if got := state.Copy().GetBalance(addr).Uint64(); got != 42 {
|
if got := state.Copy().GetBalance(addr).Uint64(); got != 42 {
|
||||||
t.Fatalf("1st copy fail, expected 42, got %v", got)
|
t.Fatalf("1st copy fail, expected 42, got %v", got)
|
||||||
@ -575,11 +575,11 @@ func TestCopyCommitCopy(t *testing.T) {
|
|||||||
skey := common.HexToHash("aaa")
|
skey := common.HexToHash("aaa")
|
||||||
sval := common.HexToHash("bbb")
|
sval := common.HexToHash("bbb")
|
||||||
|
|
||||||
state.SetBalance(addr, big.NewInt(42)) // Change the account trie
|
state.SetBalance(addr, uint256.NewInt(42)) // Change the account trie
|
||||||
state.SetCode(addr, []byte("hello")) // Change an external metadata
|
state.SetCode(addr, []byte("hello")) // Change an external metadata
|
||||||
state.SetState(addr, skey, sval) // Change the storage trie
|
state.SetState(addr, skey, sval) // Change the storage trie
|
||||||
|
|
||||||
if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
|
if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
|
||||||
t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
|
t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
|
||||||
}
|
}
|
||||||
if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
|
if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
|
||||||
@ -593,7 +593,7 @@ func TestCopyCommitCopy(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// Copy the non-committed state database and check pre/post commit balance
|
// Copy the non-committed state database and check pre/post commit balance
|
||||||
copyOne := state.Copy()
|
copyOne := state.Copy()
|
||||||
if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
|
if balance := copyOne.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
|
||||||
t.Fatalf("first copy pre-commit balance mismatch: have %v, want %v", balance, 42)
|
t.Fatalf("first copy pre-commit balance mismatch: have %v, want %v", balance, 42)
|
||||||
}
|
}
|
||||||
if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
|
if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
|
||||||
@ -607,7 +607,7 @@ func TestCopyCommitCopy(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// Copy the copy and check the balance once more
|
// Copy the copy and check the balance once more
|
||||||
copyTwo := copyOne.Copy()
|
copyTwo := copyOne.Copy()
|
||||||
if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
|
if balance := copyTwo.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
|
||||||
t.Fatalf("second copy balance mismatch: have %v, want %v", balance, 42)
|
t.Fatalf("second copy balance mismatch: have %v, want %v", balance, 42)
|
||||||
}
|
}
|
||||||
if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
|
if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
|
||||||
@ -622,7 +622,7 @@ func TestCopyCommitCopy(t *testing.T) {
|
|||||||
// Commit state, ensure states can be loaded from disk
|
// Commit state, ensure states can be loaded from disk
|
||||||
root, _ := state.Commit(0, false)
|
root, _ := state.Commit(0, false)
|
||||||
state, _ = New(root, tdb, nil)
|
state, _ = New(root, tdb, nil)
|
||||||
if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
|
if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
|
||||||
t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42)
|
t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42)
|
||||||
}
|
}
|
||||||
if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
|
if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
|
||||||
@@ -648,11 +648,11 @@ func TestCopyCopyCommitCopy(t *testing.T) {
 skey := common.HexToHash("aaa")
 sval := common.HexToHash("bbb")

-state.SetBalance(addr, big.NewInt(42)) // Change the account trie
+state.SetBalance(addr, uint256.NewInt(42)) // Change the account trie
 state.SetCode(addr, []byte("hello")) // Change an external metadata
 state.SetState(addr, skey, sval) // Change the storage trie

-if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
+if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
 t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
 }
 if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
@@ -666,7 +666,7 @@ func TestCopyCopyCommitCopy(t *testing.T) {
 }
 // Copy the non-committed state database and check pre/post commit balance
 copyOne := state.Copy()
-if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
+if balance := copyOne.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
 t.Fatalf("first copy balance mismatch: have %v, want %v", balance, 42)
 }
 if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
@@ -680,7 +680,7 @@ func TestCopyCopyCommitCopy(t *testing.T) {
 }
 // Copy the copy and check the balance once more
 copyTwo := copyOne.Copy()
-if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
+if balance := copyTwo.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
 t.Fatalf("second copy pre-commit balance mismatch: have %v, want %v", balance, 42)
 }
 if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
@@ -694,7 +694,7 @@ func TestCopyCopyCommitCopy(t *testing.T) {
 }
 // Copy the copy-copy and check the balance once more
 copyThree := copyTwo.Copy()
-if balance := copyThree.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
+if balance := copyThree.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
 t.Fatalf("third copy balance mismatch: have %v, want %v", balance, 42)
 }
 if code := copyThree.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
@@ -717,11 +717,11 @@ func TestCommitCopy(t *testing.T) {
 skey := common.HexToHash("aaa")
 sval := common.HexToHash("bbb")

-state.SetBalance(addr, big.NewInt(42)) // Change the account trie
+state.SetBalance(addr, uint256.NewInt(42)) // Change the account trie
 state.SetCode(addr, []byte("hello")) // Change an external metadata
 state.SetState(addr, skey, sval) // Change the storage trie

-if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
+if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
 t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
 }
 if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
@@ -736,7 +736,7 @@ func TestCommitCopy(t *testing.T) {
 // Copy the committed state database, the copied one is not functional.
 state.Commit(0, true)
 copied := state.Copy()
-if balance := copied.GetBalance(addr); balance.Cmp(big.NewInt(0)) != 0 {
+if balance := copied.GetBalance(addr); balance.Cmp(uint256.NewInt(0)) != 0 {
 t.Fatalf("unexpected balance: have %v", balance)
 }
 if code := copied.GetCode(addr); code != nil {
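All of the balance churn in these state tests comes from StateDB's balance getters and setters moving from *big.Int to *uint256.Int (github.com/holiman/uint256). A minimal, self-contained sketch of the comparison pattern the tests now use, assuming only that package:

package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

func main() {
	// uint256.NewInt takes a uint64 and returns a *uint256.Int, mirroring
	// the big.NewInt calls these tests used before the migration.
	want := uint256.NewInt(42)
	have := uint256.NewInt(42)

	// Cmp behaves like big.Int.Cmp: it returns -1, 0 or +1.
	if have.Cmp(want) != 0 {
		fmt.Printf("balance mismatch: have %v, want %v\n", have, want)
		return
	}
	fmt.Println("balances match")
}

Because uint256.NewInt takes a uint64 rather than an int64, the test helpers later in this diff also switch their casts from int64 to uint64.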
@@ -766,7 +766,7 @@ func TestDeleteCreateRevert(t *testing.T) {
 state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)

 addr := common.BytesToAddress([]byte("so"))
-state.SetBalance(addr, big.NewInt(1))
+state.SetBalance(addr, uint256.NewInt(1))

 root, _ := state.Commit(0, false)
 state, _ = New(root, state.db, state.snaps)
@@ -776,7 +776,7 @@ func TestDeleteCreateRevert(t *testing.T) {
 state.Finalise(true)

 id := state.Snapshot()
-state.SetBalance(addr, big.NewInt(2))
+state.SetBalance(addr, uint256.NewInt(2))
 state.RevertToSnapshot(id)

 // Commit the entire state and make sure we don't crash and have the correct state
@@ -799,34 +799,34 @@ func TestMissingTrieNodes(t *testing.T) {
 func testMissingTrieNodes(t *testing.T, scheme string) {
 // Create an initial state with a few accounts
 var (
-triedb *trie.Database
+tdb *triedb.Database
 memDb = rawdb.NewMemoryDatabase()
 )
 if scheme == rawdb.PathScheme {
-triedb = trie.NewDatabase(memDb, &trie.Config{PathDB: &pathdb.Config{
+tdb = triedb.NewDatabase(memDb, &triedb.Config{PathDB: &pathdb.Config{
 CleanCacheSize: 0,
 DirtyCacheSize: 0,
 }}) // disable caching
 } else {
-triedb = trie.NewDatabase(memDb, &trie.Config{HashDB: &hashdb.Config{
+tdb = triedb.NewDatabase(memDb, &triedb.Config{HashDB: &hashdb.Config{
 CleanCacheSize: 0,
 }}) // disable caching
 }
-db := NewDatabaseWithNodeDB(memDb, triedb)
+db := NewDatabaseWithNodeDB(memDb, tdb)

 var root common.Hash
 state, _ := New(types.EmptyRootHash, db, nil)
 addr := common.BytesToAddress([]byte("so"))
 {
-state.SetBalance(addr, big.NewInt(1))
+state.SetBalance(addr, uint256.NewInt(1))
 state.SetCode(addr, []byte{1, 2, 3})
 a2 := common.BytesToAddress([]byte("another"))
-state.SetBalance(a2, big.NewInt(100))
+state.SetBalance(a2, uint256.NewInt(100))
 state.SetCode(a2, []byte{1, 2, 4})
 root, _ = state.Commit(0, false)
 t.Logf("root: %x", root)
 // force-flush
-triedb.Commit(root, false)
+tdb.Commit(root, false)
 }
 // Create a new state on the old root
 state, _ = New(root, db, nil)
@@ -846,7 +846,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
 t.Errorf("expected %d, got %d", exp, got)
 }
 // Modify the state
-state.SetBalance(addr, big.NewInt(2))
+state.SetBalance(addr, uint256.NewInt(2))
 root, err := state.Commit(0, false)
 if err == nil {
 t.Fatalf("expected error, got root :%x", root)
@@ -1033,7 +1033,7 @@ func TestFlushOrderDataLoss(t *testing.T) {
 // Create a state trie with many accounts and slots
 var (
 memdb = rawdb.NewMemoryDatabase()
-triedb = trie.NewDatabase(memdb, nil)
+triedb = triedb.NewDatabase(memdb, nil)
 statedb = NewDatabaseWithNodeDB(memdb, triedb)
 state, _ = New(types.EmptyRootHash, statedb, nil)
 )
@@ -1105,7 +1105,7 @@ func TestStateDBTransientStorage(t *testing.T) {
 func TestResetObject(t *testing.T) {
 var (
 disk = rawdb.NewMemoryDatabase()
-tdb = trie.NewDatabase(disk, nil)
+tdb = triedb.NewDatabase(disk, nil)
 db = NewDatabaseWithNodeDB(disk, tdb)
 snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash)
 state, _ = New(types.EmptyRootHash, db, snaps)
@@ -1114,13 +1114,13 @@ func TestResetObject(t *testing.T) {
 slotB = common.HexToHash("0x2")
 )
 // Initialize account with balance and storage in first transaction.
-state.SetBalance(addr, big.NewInt(1))
+state.SetBalance(addr, uint256.NewInt(1))
 state.SetState(addr, slotA, common.BytesToHash([]byte{0x1}))
 state.IntermediateRoot(true)

 // Reset account and mutate balance and storages
 state.CreateAccount(addr)
-state.SetBalance(addr, big.NewInt(2))
+state.SetBalance(addr, uint256.NewInt(2))
 state.SetState(addr, slotB, common.BytesToHash([]byte{0x2}))
 root, _ := state.Commit(0, true)

@@ -1139,14 +1139,14 @@ func TestResetObject(t *testing.T) {
 func TestDeleteStorage(t *testing.T) {
 var (
 disk = rawdb.NewMemoryDatabase()
-tdb = trie.NewDatabase(disk, nil)
+tdb = triedb.NewDatabase(disk, nil)
 db = NewDatabaseWithNodeDB(disk, tdb)
 snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash)
 state, _ = New(types.EmptyRootHash, db, snaps)
 addr = common.HexToAddress("0x1")
 )
 // Initialize account and populate storage
-state.SetBalance(addr, big.NewInt(1))
+state.SetBalance(addr, uint256.NewInt(1))
 state.CreateAccount(addr)
 for i := 0; i < 1000; i++ {
 slot := common.Hash(uint256.NewInt(uint64(i)).Bytes32())
@@ -1158,7 +1158,7 @@ func TestDeleteStorage(t *testing.T) {
 fastState, _ := New(root, db, snaps)
 slowState, _ := New(root, db, nil)

-obj := fastState.GetOrNewStateObject(addr)
+obj := fastState.getOrNewStateObject(addr)
 storageRoot := obj.data.Root

 _, _, fastNodes, err := fastState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot)
@@ -24,7 +24,7 @@ import (
 "github.com/ethereum/go-ethereum/trie"
 )

-// NewStateSync create a new state trie download scheduler.
+// NewStateSync creates a new state trie download scheduler.
 func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(keys [][]byte, leaf []byte) error, scheme string) *trie.Sync {
 // Register the storage slot callback if the external callback is specified.
 var onSlot func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error
@@ -18,7 +18,6 @@ package state

 import (
 "bytes"
-"math/big"
 "testing"

 "github.com/ethereum/go-ethereum/common"
@@ -28,40 +27,42 @@ import (
 "github.com/ethereum/go-ethereum/ethdb"
 "github.com/ethereum/go-ethereum/rlp"
 "github.com/ethereum/go-ethereum/trie"
-"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
-"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+"github.com/ethereum/go-ethereum/triedb"
+"github.com/ethereum/go-ethereum/triedb/hashdb"
+"github.com/ethereum/go-ethereum/triedb/pathdb"
+"github.com/holiman/uint256"
 )

 // testAccount is the data associated with an account used by the state tests.
 type testAccount struct {
 address common.Address
-balance *big.Int
+balance *uint256.Int
 nonce uint64
 code []byte
 }

 // makeTestState create a sample test state to test node-wise reconstruction.
-func makeTestState(scheme string) (ethdb.Database, Database, *trie.Database, common.Hash, []*testAccount) {
+func makeTestState(scheme string) (ethdb.Database, Database, *triedb.Database, common.Hash, []*testAccount) {
 // Create an empty state
-config := &trie.Config{Preimages: true}
+config := &triedb.Config{Preimages: true}
 if scheme == rawdb.PathScheme {
 config.PathDB = pathdb.Defaults
 } else {
 config.HashDB = hashdb.Defaults
 }
 db := rawdb.NewMemoryDatabase()
-nodeDb := trie.NewDatabase(db, config)
+nodeDb := triedb.NewDatabase(db, config)
 sdb := NewDatabaseWithNodeDB(db, nodeDb)
 state, _ := New(types.EmptyRootHash, sdb, nil)

 // Fill it with some arbitrary data
 var accounts []*testAccount
 for i := byte(0); i < 96; i++ {
-obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
+obj := state.getOrNewStateObject(common.BytesToAddress([]byte{i}))
 acc := &testAccount{address: common.BytesToAddress([]byte{i})}

-obj.AddBalance(big.NewInt(int64(11 * i)))
-acc.balance = big.NewInt(int64(11 * i))
+obj.AddBalance(uint256.NewInt(uint64(11 * i)))
+acc.balance = uint256.NewInt(uint64(11 * i))

 obj.SetNonce(uint64(42 * i))
 acc.nonce = uint64(42 * i)
@@ -87,7 +88,7 @@ func makeTestState(scheme string) (ethdb.Database, Database, *trie.Database, com
 // checkStateAccounts cross references a reconstructed state with an expected
 // account array.
 func checkStateAccounts(t *testing.T, db ethdb.Database, scheme string, root common.Hash, accounts []*testAccount) {
-var config trie.Config
+var config triedb.Config
 if scheme == rawdb.PathScheme {
 config.PathDB = pathdb.Defaults
 }
@@ -114,7 +115,7 @@ func checkStateAccounts(t *testing.T, db ethdb.Database, scheme string, root com

 // checkStateConsistency checks that all data of a state root is present.
 func checkStateConsistency(db ethdb.Database, scheme string, root common.Hash) error {
-config := &trie.Config{Preimages: true}
+config := &triedb.Config{Preimages: true}
 if scheme == rawdb.PathScheme {
 config.PathDB = pathdb.Defaults
 }
@@ -130,8 +131,8 @@ func checkStateConsistency(db ethdb.Database, scheme string, root common.Hash) e

 // Tests that an empty state is not scheduled for syncing.
 func TestEmptyStateSync(t *testing.T) {
-dbA := trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)
-dbB := trie.NewDatabase(rawdb.NewMemoryDatabase(), &trie.Config{PathDB: pathdb.Defaults})
+dbA := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)
+dbB := triedb.NewDatabase(rawdb.NewMemoryDatabase(), &triedb.Config{PathDB: pathdb.Defaults})

 sync := NewStateSync(types.EmptyRootHash, rawdb.NewMemoryDatabase(), nil, dbA.Scheme())
 if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 {
@@ -237,7 +238,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool, s
 id := trie.StorageTrieID(srcRoot, common.BytesToHash(node.syncPath[0]), acc.Root)
 stTrie, err := trie.New(id, ndb)
 if err != nil {
-t.Fatalf("failed to retriev storage trie for path %x: %v", node.syncPath[1], err)
+t.Fatalf("failed to retrieve storage trie for path %x: %v", node.syncPath[1], err)
 }
 data, _, err := stTrie.GetNode(node.syncPath[1])
 if err != nil {
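The remaining churn in this file is the trie database package move: trie.Database and trie.Config become triedb.Database and triedb.Config, and the hashdb/pathdb sub-packages move from trie/triedb/... to triedb/... . A minimal construction sketch using only the identifiers visible in this diff (not a drop-in replacement for the test helpers):

package main

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/triedb"
	"github.com/ethereum/go-ethereum/triedb/hashdb"
	"github.com/ethereum/go-ethereum/triedb/pathdb"
)

func main() {
	memDb := rawdb.NewMemoryDatabase()

	// Hash-scheme node database, the legacy layout.
	hashDb := triedb.NewDatabase(memDb, &triedb.Config{HashDB: hashdb.Defaults})

	// Path-scheme node database, selected in the tests via rawdb.PathScheme.
	pathDb := triedb.NewDatabase(memDb, &triedb.Config{PathDB: pathdb.Defaults})

	_, _ = hashDb, pathDb
}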
@@ -24,6 +24,7 @@ import (
 "github.com/ethereum/go-ethereum/common"
 "github.com/ethereum/go-ethereum/core/rawdb"
 "github.com/ethereum/go-ethereum/core/types"
+"github.com/holiman/uint256"
 )

 func filledStateDB() *StateDB {
@@ -34,7 +35,7 @@ func filledStateDB() *StateDB {
 skey := common.HexToHash("aaa")
 sval := common.HexToHash("bbb")

-state.SetBalance(addr, big.NewInt(42)) // Change the account trie
+state.SetBalance(addr, uint256.NewInt(42)) // Change the account trie
 state.SetCode(addr, []byte("hello")) // Change an external metadata
 state.SetState(addr, skey, sval) // Change the storage trie
 for i := 0; i < 100; i++ {
@@ -217,6 +217,6 @@ func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *stat
 }
 vmenv.Reset(NewEVMTxContext(msg), statedb)
 statedb.AddAddressToAccessList(params.BeaconRootsStorageAddress)
-_, _, _ = vmenv.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.Big0)
+_, _, _ = vmenv.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.U2560)
 statedb.Finalise(true)
 }
@@ -117,12 +117,12 @@ func TestStateProcessorErrors(t *testing.T) {
 db = rawdb.NewMemoryDatabase()
 gspec = &Genesis{
 Config: config,
-Alloc: GenesisAlloc{
-common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{
+Alloc: types.GenesisAlloc{
+common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{
 Balance: big.NewInt(1000000000000000000), // 1 ether
 Nonce: 0,
 },
-common.HexToAddress("0xfd0810DD14796680f72adf1a371963d0745BCc64"): GenesisAccount{
+common.HexToAddress("0xfd0810DD14796680f72adf1a371963d0745BCc64"): types.Account{
 Balance: big.NewInt(1000000000000000000), // 1 ether
 Nonce: math.MaxUint64,
 },
@@ -232,7 +232,7 @@ func TestStateProcessorErrors(t *testing.T) {
 txs: []*types.Transaction{
 mkDynamicTx(0, common.Address{}, params.TxGas, bigNumber, bigNumber),
 },
-want: "could not apply tx 0 [0xd82a0c2519acfeac9a948258c47e784acd20651d9d80f9a1c67b4137651c3a24]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 1000000000000000000 want 2431633873983640103894990685182446064918669677978451844828609264166175722438635000",
+want: "could not apply tx 0 [0xd82a0c2519acfeac9a948258c47e784acd20651d9d80f9a1c67b4137651c3a24]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 required balance exceeds 256 bits",
 },
 { // ErrMaxInitCodeSizeExceeded
 txs: []*types.Transaction{
@@ -281,8 +281,8 @@ func TestStateProcessorErrors(t *testing.T) {
 IstanbulBlock: big.NewInt(0),
 MuirGlacierBlock: big.NewInt(0),
 },
-Alloc: GenesisAlloc{
-common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{
+Alloc: types.GenesisAlloc{
+common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{
 Balance: big.NewInt(1000000000000000000), // 1 ether
 Nonce: 0,
 },
@@ -319,8 +319,8 @@ func TestStateProcessorErrors(t *testing.T) {
 db = rawdb.NewMemoryDatabase()
 gspec = &Genesis{
 Config: config,
-Alloc: GenesisAlloc{
-common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{
+Alloc: types.GenesisAlloc{
+common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{
 Balance: big.NewInt(1000000000000000000), // 1 ether
 Nonce: 0,
 Code: common.FromHex("0xB0B0FACE"),
@@ -17,7 +17,6 @@
 package core

 import (
-"errors"
 "fmt"
 "math"
 "math/big"
@@ -26,7 +25,9 @@ import (
 cmath "github.com/ethereum/go-ethereum/common/math"
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/core/vm"
+"github.com/ethereum/go-ethereum/crypto/kzg4844"
 "github.com/ethereum/go-ethereum/params"
+"github.com/holiman/uint256"
 )

 // ExecutionResult includes all output after executing given evm
@@ -66,7 +67,7 @@ func (result *ExecutionResult) Revert() []byte {
 }

 // IntrinsicGas computes the 'intrinsic gas' for a message with the given data.
-func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028 bool, isEIP3860 bool) (uint64, error) {
+func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028, isEIP3860 bool) (uint64, error) {
 // Set the starting gas for the raw transaction
 var gas uint64
 if isContractCreation && isHomestead {
@@ -252,7 +253,11 @@ func (st *StateTransition) buyGas() error {
 mgval.Add(mgval, blobFee)
 }
 }
-if have, want := st.state.GetBalance(st.msg.From), balanceCheck; have.Cmp(want) < 0 {
+balanceCheckU256, overflow := uint256.FromBig(balanceCheck)
+if overflow {
+return fmt.Errorf("%w: address %v required balance exceeds 256 bits", ErrInsufficientFunds, st.msg.From.Hex())
+}
+if have, want := st.state.GetBalance(st.msg.From), balanceCheckU256; have.Cmp(want) < 0 {
 return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, st.msg.From.Hex(), have, want)
 }
 if err := st.gp.SubGas(st.msg.GasLimit); err != nil {
@@ -261,7 +266,8 @@ func (st *StateTransition) buyGas() error {
 st.gasRemaining += st.msg.GasLimit

 st.initialGas = st.msg.GasLimit
-st.state.SubBalance(st.msg.From, mgval)
+mgvalU256, _ := uint256.FromBig(mgval)
+st.state.SubBalance(st.msg.From, mgvalU256)
 return nil
 }

@@ -317,13 +323,18 @@ func (st *StateTransition) preCheck() error {
 }
 // Check the blob version validity
 if msg.BlobHashes != nil {
+// The to field of a blob tx type is mandatory, and a `BlobTx` transaction internally
+// has it as a non-nillable value, so any msg derived from blob transaction has it non-nil.
+// However, messages created through RPC (eth_call) don't have this restriction.
+if msg.To == nil {
+return ErrBlobTxCreate
+}
 if len(msg.BlobHashes) == 0 {
-return errors.New("blob transaction missing blob hashes")
+return ErrMissingBlobHashes
 }
 for i, hash := range msg.BlobHashes {
-if hash[0] != params.BlobTxHashVersion {
-return fmt.Errorf("blob %d hash version mismatch (have %d, supported %d)",
-i, hash[0], params.BlobTxHashVersion)
+if !kzg4844.IsValidVersionedHash(hash[:]) {
+return fmt.Errorf("blob %d has invalid hash version", i)
 }
 }
 }
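The preCheck change above swaps the hand-rolled hash[0] comparison for kzg4844.IsValidVersionedHash. For background, an EIP-4844 versioned hash is sha256(KZG commitment) with its first byte overwritten by the version identifier 0x01, so the validity check ultimately comes down to that leading byte. A rough standalone illustration of the format, using only the standard library rather than the kzg4844 package:

package main

import (
	"crypto/sha256"
	"fmt"
)

// blobCommitmentVersionKZG is the version byte defined by EIP-4844.
const blobCommitmentVersionKZG = byte(0x01)

// versionedHash mimics the EIP-4844 construction: sha256(commitment) with
// the leading byte replaced by the version identifier.
func versionedHash(commitment []byte) [32]byte {
	h := sha256.Sum256(commitment)
	h[0] = blobCommitmentVersionKZG
	return h
}

func main() {
	h := versionedHash([]byte("dummy commitment bytes"))
	// Checking the version byte is what the old hash[0] comparison did by hand.
	fmt.Printf("hash %x, version ok: %v\n", h, h[0] == blobCommitmentVersionKZG)
}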
@@ -404,7 +415,11 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
 st.gasRemaining -= gas

 // Check clause 6
-if msg.Value.Sign() > 0 && !st.evm.Context.CanTransfer(st.state, msg.From, msg.Value) {
+value, overflow := uint256.FromBig(msg.Value)
+if overflow {
+return nil, fmt.Errorf("%w: address %v", ErrInsufficientFundsForTransfer, msg.From.Hex())
+}
+if !value.IsZero() && !st.evm.Context.CanTransfer(st.state, msg.From, value) {
 return nil, fmt.Errorf("%w: address %v", ErrInsufficientFundsForTransfer, msg.From.Hex())
 }

@@ -423,11 +438,11 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
 vmerr error // vm errors do not effect consensus and are therefore not assigned to err
 )
 if contractCreation {
-ret, _, st.gasRemaining, vmerr = st.evm.Create(sender, msg.Data, st.gasRemaining, msg.Value)
+ret, _, st.gasRemaining, vmerr = st.evm.Create(sender, msg.Data, st.gasRemaining, value)
 } else {
 // Increment the nonce for the next transaction
 st.state.SetNonce(msg.From, st.state.GetNonce(sender.Address())+1)
-ret, st.gasRemaining, vmerr = st.evm.Call(sender, st.to(), msg.Data, st.gasRemaining, msg.Value)
+ret, st.gasRemaining, vmerr = st.evm.Call(sender, st.to(), msg.Data, st.gasRemaining, value)
 }

 var gasRefund uint64
@@ -446,14 +461,15 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
 effectiveTip = cmath.BigMin(msg.GasTipCap, new(big.Int).Sub(msg.GasFeeCap, st.evm.Context.BaseFee))
 }
 }
+effectiveTipU256, _ := uint256.FromBig(effectiveTip)

 if st.evm.Config.NoBaseFee && msg.GasFeeCap.Sign() == 0 && msg.GasTipCap.Sign() == 0 {
 // Skip fee payment when NoBaseFee is set and the fee fields
 // are 0. This avoids a negative effectiveTip being applied to
 // the coinbase when simulating calls.
 } else {
-fee := new(big.Int).SetUint64(st.gasUsed())
-fee.Mul(fee, effectiveTip)
+fee := new(uint256.Int).SetUint64(st.gasUsed())
+fee.Mul(fee, effectiveTipU256)
 st.state.AddBalance(st.evm.Context.Coinbase, fee)
 }

@@ -474,7 +490,8 @@ func (st *StateTransition) refundGas(refundQuotient uint64) uint64 {
 st.gasRemaining += refund

 // Return ETH for remaining gas, exchanged at the original rate.
-remaining := new(big.Int).Mul(new(big.Int).SetUint64(st.gasRemaining), st.msg.GasPrice)
+remaining := uint256.NewInt(st.gasRemaining)
+remaining = remaining.Mul(remaining, uint256.MustFromBig(st.msg.GasPrice))
 st.state.AddBalance(st.msg.From, remaining)

 // Also return remaining gas to the block gas counter so it is
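Several of the hunks above funnel big.Int amounts through uint256.FromBig and treat the overflow flag as an error, which is where the new "required balance exceeds 256 bits" message comes from. A small standalone sketch of that behaviour, assuming only the uint256 package:

package main

import (
	"fmt"
	"math/big"

	"github.com/holiman/uint256"
)

func main() {
	// 2^256 does not fit into a 256-bit unsigned integer, so FromBig
	// reports overflow; this is the condition buyGas turns into
	// "required balance exceeds 256 bits".
	tooBig := new(big.Int).Lsh(big.NewInt(1), 256)
	if _, overflow := uint256.FromBig(tooBig); overflow {
		fmt.Println("balance check overflows 256 bits")
	}

	// Values that fit convert cleanly and keep their exact magnitude.
	small, overflow := uint256.FromBig(big.NewInt(42))
	fmt.Println(small, overflow) // 42 false
}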
core/txindexer.go (new file)
@@ -0,0 +1,219 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>

package core

import (
"errors"
"fmt"

"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
)

// TxIndexProgress is the struct describing the progress for transaction indexing.
type TxIndexProgress struct {
Indexed uint64 // number of blocks whose transactions are indexed
Remaining uint64 // number of blocks whose transactions are not indexed yet
}

// Done returns an indicator if the transaction indexing is finished.
func (progress TxIndexProgress) Done() bool {
return progress.Remaining == 0
}

// txIndexer is the module responsible for maintaining transaction indexes
// according to the configured indexing range by users.
type txIndexer struct {
// limit is the maximum number of blocks from head whose tx indexes
// are reserved:
// * 0: means the entire chain should be indexed
// * N: means the latest N blocks [HEAD-N+1, HEAD] should be indexed
// and all others shouldn't.
limit uint64
db ethdb.Database
progress chan chan TxIndexProgress
term chan chan struct{}
closed chan struct{}
}

// newTxIndexer initializes the transaction indexer.
func newTxIndexer(limit uint64, chain *BlockChain) *txIndexer {
indexer := &txIndexer{
limit: limit,
db: chain.db,
progress: make(chan chan TxIndexProgress),
term: make(chan chan struct{}),
closed: make(chan struct{}),
}
go indexer.loop(chain)

var msg string
if limit == 0 {
msg = "entire chain"
} else {
msg = fmt.Sprintf("last %d blocks", limit)
}
log.Info("Initialized transaction indexer", "range", msg)

return indexer
}

// run executes the scheduled indexing/unindexing task in a separate thread.
// If the stop channel is closed, the task should be terminated as soon as
// possible, the done channel will be closed once the task is finished.
func (indexer *txIndexer) run(tail *uint64, head uint64, stop chan struct{}, done chan struct{}) {
defer func() { close(done) }()

// Short circuit if chain is empty and nothing to index.
if head == 0 {
return
}
// The tail flag is not existent, it means the node is just initialized
// and all blocks in the chain (part of them may from ancient store) are
// not indexed yet, index the chain according to the configured limit.
if tail == nil {
from := uint64(0)
if indexer.limit != 0 && head >= indexer.limit {
from = head - indexer.limit + 1
}
rawdb.IndexTransactions(indexer.db, from, head+1, stop, true)
return
}
// The tail flag is existent (which means indexes in [tail, head] should be
// present), while the whole chain are requested for indexing.
if indexer.limit == 0 || head < indexer.limit {
if *tail > 0 {
// It can happen when chain is rewound to a historical point which
// is even lower than the indexes tail, recap the indexing target
// to new head to avoid reading non-existent block bodies.
end := *tail
if end > head+1 {
end = head + 1
}
rawdb.IndexTransactions(indexer.db, 0, end, stop, true)
}
return
}
// The tail flag is existent, adjust the index range according to configured
// limit and the latest chain head.
if head-indexer.limit+1 < *tail {
// Reindex a part of missing indices and rewind index tail to HEAD-limit
rawdb.IndexTransactions(indexer.db, head-indexer.limit+1, *tail, stop, true)
} else {
// Unindex a part of stale indices and forward index tail to HEAD-limit
rawdb.UnindexTransactions(indexer.db, *tail, head-indexer.limit+1, stop, false)
}
}

// loop is the scheduler of the indexer, assigning indexing/unindexing tasks depending
// on the received chain event.
func (indexer *txIndexer) loop(chain *BlockChain) {
defer close(indexer.closed)

// Listening to chain events and manipulate the transaction indexes.
var (
stop chan struct{} // Non-nil if background routine is active.
done chan struct{} // Non-nil if background routine is active.
lastHead uint64 // The latest announced chain head (whose tx indexes are assumed created)
lastTail = rawdb.ReadTxIndexTail(indexer.db) // The oldest indexed block, nil means nothing indexed

headCh = make(chan ChainHeadEvent)
sub = chain.SubscribeChainHeadEvent(headCh)
)
defer sub.Unsubscribe()

// Launch the initial processing if chain is not empty (head != genesis).
// This step is useful in these scenarios that chain has no progress.
if head := rawdb.ReadHeadBlock(indexer.db); head != nil && head.Number().Uint64() != 0 {
stop = make(chan struct{})
done = make(chan struct{})
lastHead = head.Number().Uint64()
go indexer.run(rawdb.ReadTxIndexTail(indexer.db), head.NumberU64(), stop, done)
}
for {
select {
case head := <-headCh:
if done == nil {
stop = make(chan struct{})
done = make(chan struct{})
go indexer.run(rawdb.ReadTxIndexTail(indexer.db), head.Block.NumberU64(), stop, done)
}
lastHead = head.Block.NumberU64()
case <-done:
stop = nil
done = nil
lastTail = rawdb.ReadTxIndexTail(indexer.db)
case ch := <-indexer.progress:
ch <- indexer.report(lastHead, lastTail)
case ch := <-indexer.term:
if stop != nil {
close(stop)
}
if done != nil {
log.Info("Waiting background transaction indexer to exit")
<-done
}
close(ch)
return
}
}
}

// report returns the tx indexing progress.
func (indexer *txIndexer) report(head uint64, tail *uint64) TxIndexProgress {
total := indexer.limit
if indexer.limit == 0 || total > head {
total = head + 1 // genesis included
}
var indexed uint64
if tail != nil {
indexed = head - *tail + 1
}
// The value of indexed might be larger than total if some blocks need
// to be unindexed, avoiding a negative remaining.
var remaining uint64
if indexed < total {
remaining = total - indexed
}
return TxIndexProgress{
Indexed: indexed,
Remaining: remaining,
}
}

// txIndexProgress retrieves the tx indexing progress, or an error if the
// background tx indexer is already stopped.
func (indexer *txIndexer) txIndexProgress() (TxIndexProgress, error) {
ch := make(chan TxIndexProgress, 1)
select {
case indexer.progress <- ch:
return <-ch, nil
case <-indexer.closed:
return TxIndexProgress{}, errors.New("indexer is closed")
}
}

// close shutdown the indexer. Safe to be called for multiple times.
func (indexer *txIndexer) close() {
ch := make(chan struct{})
select {
case indexer.term <- ch:
<-ch
case <-indexer.closed:
}
}
core/txindexer_test.go (new file)
@@ -0,0 +1,243 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>

package core

import (
"math/big"
"os"
"testing"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
)

// TestTxIndexer tests the functionalities for managing transaction indexes.
func TestTxIndexer(t *testing.T) {
var (
testBankKey, _ = crypto.GenerateKey()
testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
testBankFunds = big.NewInt(1000000000000000000)

gspec = &Genesis{
Config: params.TestChainConfig,
Alloc: types.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
engine = ethash.NewFaker()
nonce = uint64(0)
chainHead = uint64(128)
)
_, blocks, receipts := GenerateChainWithGenesis(gspec, engine, int(chainHead), func(i int, gen *BlockGen) {
tx, _ := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("0xdeadbeef"), big.NewInt(1000), params.TxGas, big.NewInt(10*params.InitialBaseFee), nil), types.HomesteadSigner{}, testBankKey)
gen.AddTx(tx)
nonce += 1
})

// verifyIndexes checks if the transaction indexes are present or not
// of the specified block.
verifyIndexes := func(db ethdb.Database, number uint64, exist bool) {
if number == 0 {
return
}
block := blocks[number-1]
for _, tx := range block.Transactions() {
lookup := rawdb.ReadTxLookupEntry(db, tx.Hash())
if exist && lookup == nil {
t.Fatalf("missing %d %x", number, tx.Hash().Hex())
}
if !exist && lookup != nil {
t.Fatalf("unexpected %d %x", number, tx.Hash().Hex())
}
}
}
verify := func(db ethdb.Database, expTail uint64, indexer *txIndexer) {
tail := rawdb.ReadTxIndexTail(db)
if tail == nil {
t.Fatal("Failed to write tx index tail")
}
if *tail != expTail {
t.Fatalf("Unexpected tx index tail, want %v, got %d", expTail, *tail)
}
if *tail != 0 {
for number := uint64(0); number < *tail; number += 1 {
verifyIndexes(db, number, false)
}
}
for number := *tail; number <= chainHead; number += 1 {
verifyIndexes(db, number, true)
}
progress := indexer.report(chainHead, tail)
if !progress.Done() {
t.Fatalf("Expect fully indexed")
}
}

var cases = []struct {
limitA uint64
tailA uint64
limitB uint64
tailB uint64
limitC uint64
tailC uint64
}{
{
// LimitA: 0
// TailA: 0
//
// all blocks are indexed
limitA: 0,
tailA: 0,

// LimitB: 1
// TailB: 128
//
// block-128 is indexed
limitB: 1,
tailB: 128,

// LimitB: 64
// TailB: 65
//
// block [65, 128] are indexed
limitC: 64,
tailC: 65,
},
{
// LimitA: 64
// TailA: 65
//
// block [65, 128] are indexed
limitA: 64,
tailA: 65,

// LimitB: 1
// TailB: 128
//
// block-128 is indexed
limitB: 1,
tailB: 128,

// LimitB: 64
// TailB: 65
//
// block [65, 128] are indexed
limitC: 64,
tailC: 65,
},
{
// LimitA: 127
// TailA: 2
//
// block [2, 128] are indexed
limitA: 127,
tailA: 2,

// LimitB: 1
// TailB: 128
//
// block-128 is indexed
limitB: 1,
tailB: 128,

// LimitB: 64
// TailB: 65
//
// block [65, 128] are indexed
limitC: 64,
tailC: 65,
},
{
// LimitA: 128
// TailA: 1
//
// block [2, 128] are indexed
limitA: 128,
tailA: 1,

// LimitB: 1
// TailB: 128
//
// block-128 is indexed
limitB: 1,
tailB: 128,

// LimitB: 64
// TailB: 65
//
// block [65, 128] are indexed
limitC: 64,
tailC: 65,
},
{
// LimitA: 129
// TailA: 0
//
// block [0, 128] are indexed
limitA: 129,
tailA: 0,

// LimitB: 1
// TailB: 128
//
// block-128 is indexed
limitB: 1,
tailB: 128,

// LimitB: 64
// TailB: 65
//
// block [65, 128] are indexed
limitC: 64,
tailC: 65,
},
}
for _, c := range cases {
frdir := t.TempDir()
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0))

// Index the initial blocks from ancient store
indexer := &txIndexer{
limit: c.limitA,
db: db,
progress: make(chan chan TxIndexProgress),
}
indexer.run(nil, 128, make(chan struct{}), make(chan struct{}))
verify(db, c.tailA, indexer)

indexer.limit = c.limitB
indexer.run(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}), make(chan struct{}))
verify(db, c.tailB, indexer)

indexer.limit = c.limitC
indexer.run(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}), make(chan struct{}))
verify(db, c.tailC, indexer)

// Recover all indexes
indexer.limit = 0
indexer.run(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}), make(chan struct{}))
verify(db, 0, indexer)

db.Close()
os.RemoveAll(frdir)
}
}
@@ -268,7 +268,7 @@ func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta {
 // going up, crossing the smaller positive jump counter). As such, the pool
 // cares only about the min of the two delta values for eviction priority.
 //
-// priority = min(delta-basefee, delta-blobfee)
+// priority = min(deltaBasefee, deltaBlobfee)
 //
 // - The above very aggressive dimensionality and noise reduction should result
 // in transaction being grouped into a small number of buckets, the further
@@ -280,7 +280,7 @@ func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta {
 // with high fee caps since it could enable pool wars. As such, any positive
 // priority will be grouped together.
 //
-// priority = min(delta-basefee, delta-blobfee, 0)
+// priority = min(deltaBasefee, deltaBlobfee, 0)
 //
 // Optimisation tradeoffs:
 //
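A toy version of the clamped priority formula from the comment above: the pool keeps the smaller of the two fee deltas and caps anything positive at zero. The real pool derives the deltas from logarithmic fee jumps, so this sketch only shows the final reduction step:

package main

import "fmt"

// evictionPriority reduces the two per-dimension deltas to one bucket:
// priority = min(deltaBasefee, deltaBlobfee, 0).
func evictionPriority(deltaBasefee, deltaBlobfee int) int {
	prio := deltaBasefee
	if deltaBlobfee < prio {
		prio = deltaBlobfee
	}
	if prio > 0 {
		prio = 0 // all sufficiently-paying transactions share one bucket
	}
	return prio
}

func main() {
	fmt.Println(evictionPriority(3, 1))   // 0: both dimensions overpay
	fmt.Println(evictionPriority(-2, 4))  // -2: underpriced on the basefee axis
	fmt.Println(evictionPriority(-1, -5)) // -5: the worse dimension dominates
}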
@ -342,7 +342,7 @@ func (p *BlobPool) Filter(tx *types.Transaction) bool {
|
|||||||
// Init sets the gas price needed to keep a transaction in the pool and the chain
|
// Init sets the gas price needed to keep a transaction in the pool and the chain
|
||||||
// head to allow balance / nonce checks. The transaction journal will be loaded
|
// head to allow balance / nonce checks. The transaction journal will be loaded
|
||||||
// from disk and filtered based on the provided starting settings.
|
// from disk and filtered based on the provided starting settings.
|
||||||
func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error {
|
func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserve txpool.AddressReserver) error {
|
||||||
p.reserve = reserve
|
p.reserve = reserve
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -371,14 +371,14 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr
|
|||||||
}
|
}
|
||||||
p.head, p.state = head, state
|
p.head, p.state = head, state
|
||||||
|
|
||||||
// Index all transactions on disk and delete anything inprocessable
|
// Index all transactions on disk and delete anything unprocessable
|
||||||
var fails []uint64
|
var fails []uint64
|
||||||
index := func(id uint64, size uint32, blob []byte) {
|
index := func(id uint64, size uint32, blob []byte) {
|
||||||
if p.parseTransaction(id, size, blob) != nil {
|
if p.parseTransaction(id, size, blob) != nil {
|
||||||
fails = append(fails, id)
|
fails = append(fails, id)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
store, err := billy.Open(billy.Options{Path: queuedir}, newSlotter(), index)
|
store, err := billy.Open(billy.Options{Path: queuedir, Repair: true}, newSlotter(), index)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -386,6 +386,8 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr
|
|||||||
|
|
||||||
if len(fails) > 0 {
|
if len(fails) > 0 {
|
||||||
log.Warn("Dropping invalidated blob transactions", "ids", fails)
|
log.Warn("Dropping invalidated blob transactions", "ids", fails)
|
||||||
|
dropInvalidMeter.Mark(int64(len(fails)))
|
||||||
|
|
||||||
for _, id := range fails {
|
for _, id := range fails {
|
||||||
if err := p.store.Delete(id); err != nil {
|
if err := p.store.Delete(id); err != nil {
|
||||||
p.Close()
|
p.Close()
|
||||||
@ -400,7 +402,7 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr
|
|||||||
}
|
}
|
||||||
var (
|
var (
|
||||||
basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head))
|
basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head))
|
||||||
blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinBlobGasprice))
|
blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice)
|
||||||
)
|
)
|
||||||
if p.head.ExcessBlobGas != nil {
|
if p.head.ExcessBlobGas != nil {
|
||||||
blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(*p.head.ExcessBlobGas))
|
blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(*p.head.ExcessBlobGas))
|
||||||
@ -418,7 +420,7 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr
|
|||||||
basefeeGauge.Update(int64(basefee.Uint64()))
|
basefeeGauge.Update(int64(basefee.Uint64()))
|
||||||
blobfeeGauge.Update(int64(blobfee.Uint64()))
|
blobfeeGauge.Update(int64(blobfee.Uint64()))
|
||||||
|
|
||||||
p.SetGasTip(gasTip)
|
p.SetGasTip(new(big.Int).SetUint64(gasTip))
|
||||||
|
|
||||||
// Since the user might have modified their pool's capacity, evict anything
|
// Since the user might have modified their pool's capacity, evict anything
|
||||||
// above the current allowance
|
// above the current allowance
|
||||||
@@ -434,9 +436,11 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr
 // Close closes down the underlying persistent store.
 func (p *BlobPool) Close() error {
 	var errs []error
+	if p.limbo != nil { // Close might be invoked due to error in constructor, before p,limbo is set
 		if err := p.limbo.Close(); err != nil {
 			errs = append(errs, err)
 		}
+	}
 	if err := p.store.Close(); err != nil {
 		errs = append(errs, err)
 	}
@@ -456,7 +460,7 @@ func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
 	tx := new(types.Transaction)
 	if err := rlp.DecodeBytes(blob, tx); err != nil {
 		// This path is impossible unless the disk data representation changes
-		// across restarts. For that ever unprobable case, recover gracefully
+		// across restarts. For that ever improbable case, recover gracefully
 		// by ignoring this data entry.
 		log.Error("Failed to decode blob pool entry", "id", id, "err", err)
 		return err
@@ -467,11 +471,17 @@ func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
 	}
 
 	meta := newBlobTxMeta(id, size, tx)
+	if _, exists := p.lookup[meta.hash]; exists {
+		// This path is only possible after a crash, where deleted items are not
+		// removed via the normal shutdown-startup procedure and thus may get
+		// partially resurrected.
+		log.Error("Rejecting duplicate blob pool entry", "id", id, "hash", tx.Hash())
+		return errors.New("duplicate blob entry")
+	}
 	sender, err := p.signer.Sender(tx)
 	if err != nil {
 		// This path is impossible unless the signature validity changes across
-		// restarts. For that ever unprobable case, recover gracefully by ignoring
+		// restarts. For that ever improbable case, recover gracefully by ignoring
 		// this data entry.
 		log.Error("Failed to recover blob tx sender", "id", id, "hash", tx.Hash(), "err", err)
 		return err
@@ -530,15 +540,17 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 		}
 		delete(p.index, addr)
 		delete(p.spent, addr)
-		if inclusions != nil { // only during reorgs will the heap will be initialized
+		if inclusions != nil { // only during reorgs will the heap be initialized
 			heap.Remove(p.evict, p.evict.index[addr])
 		}
 		p.reserve(addr, false)
 
 		if gapped {
 			log.Warn("Dropping dangling blob transactions", "from", addr, "missing", next, "drop", nonces, "ids", ids)
+			dropDanglingMeter.Mark(int64(len(ids)))
 		} else {
 			log.Trace("Dropping filled blob transactions", "from", addr, "filled", nonces, "ids", ids)
+			dropFilledMeter.Mark(int64(len(ids)))
 		}
 		for _, id := range ids {
 			if err := p.store.Delete(id); err != nil {
@@ -569,6 +581,8 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 			txs = txs[1:]
 		}
 		log.Trace("Dropping overlapped blob transactions", "from", addr, "overlapped", nonces, "ids", ids, "left", len(txs))
+		dropOverlappedMeter.Mark(int64(len(ids)))
+
 		for _, id := range ids {
 			if err := p.store.Delete(id); err != nil {
 				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
@@ -583,7 +597,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 	txs[0].evictionBlobFeeJumps = txs[0].blobfeeJumps
 
 	for i := 1; i < len(txs); i++ {
-		// If there's no nonce gap, initialize the evicion thresholds as the
+		// If there's no nonce gap, initialize the eviction thresholds as the
 		// minimum between the cumulative thresholds and the current tx fees
 		if txs[i].nonce == txs[i-1].nonce+1 {
 			txs[i].evictionExecTip = txs[i-1].evictionExecTip
@@ -600,10 +614,30 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 			}
 			continue
 		}
-		// Sanity check that there's no double nonce. This case would be a coding
-		// error, but better know about it
+		// Sanity check that there's no double nonce. This case would generally
+		// be a coding error, so better know about it.
+		//
+		// Also, Billy behind the blobpool does not journal deletes. A process
+		// crash would result in previously deleted entities being resurrected.
+		// That could potentially cause a duplicate nonce to appear.
 		if txs[i].nonce == txs[i-1].nonce {
-			log.Error("Duplicate nonce blob transaction", "from", addr, "nonce", txs[i].nonce)
+			id := p.lookup[txs[i].hash]
+
+			log.Error("Dropping repeat nonce blob transaction", "from", addr, "nonce", txs[i].nonce, "id", id)
+			dropRepeatedMeter.Mark(1)
+
+			p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap)
+			p.stored -= uint64(txs[i].size)
+			delete(p.lookup, txs[i].hash)
+
+			if err := p.store.Delete(id); err != nil {
+				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+			}
+			txs = append(txs[:i], txs[i+1:]...)
+			p.index[addr] = txs
+
+			i--
+			continue
 		}
 		// Otherwise if there's a nonce gap evict all later transactions
 		var (
@@ -621,6 +655,8 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 			txs = txs[:i]
 
 			log.Error("Dropping gapped blob transactions", "from", addr, "missing", txs[i-1].nonce+1, "drop", nonces, "ids", ids)
+			dropGappedMeter.Mark(int64(len(ids)))
+
 			for _, id := range ids {
 				if err := p.store.Delete(id); err != nil {
 					log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
@@ -632,7 +668,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 	// Ensure that there's no over-draft, this is expected to happen when some
 	// transactions get included without publishing on the network
 	var (
-		balance = uint256.MustFromBig(p.state.GetBalance(addr))
+		balance = p.state.GetBalance(addr)
 		spent   = p.spent[addr]
 	)
 	if spent.Cmp(balance) > 0 {
@@ -657,7 +693,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 		if len(txs) == 0 {
 			delete(p.index, addr)
 			delete(p.spent, addr)
-			if inclusions != nil { // only during reorgs will the heap will be initialized
+			if inclusions != nil { // only during reorgs will the heap be initialized
 				heap.Remove(p.evict, p.evict.index[addr])
 			}
 			p.reserve(addr, false)
@@ -665,6 +701,8 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 			p.index[addr] = txs
 		}
 		log.Warn("Dropping overdrafted blob transactions", "from", addr, "balance", balance, "spent", spent, "drop", nonces, "ids", ids)
+		dropOverdraftedMeter.Mark(int64(len(ids)))
+
 		for _, id := range ids {
 			if err := p.store.Delete(id); err != nil {
 				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
@@ -695,6 +733,8 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 		p.index[addr] = txs
 
 		log.Warn("Dropping overcapped blob transactions", "from", addr, "kept", len(txs), "drop", nonces, "ids", ids)
+		dropOvercappedMeter.Mark(int64(len(ids)))
+
 		for _, id := range ids {
 			if err := p.store.Delete(id); err != nil {
 				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
@@ -711,7 +751,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 // offload removes a tracked blob transaction from the pool and moves it into the
 // limbo for tracking until finality.
 //
-// The method may log errors for various unexpcted scenarios but will not return
+// The method may log errors for various unexpected scenarios but will not return
 // any of it since there's no clear error case. Some errors may be due to coding
 // issues, others caused by signers mining MEV stuff or swapping transactions. In
 // all cases, the pool needs to continue operating.
@@ -769,7 +809,7 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
 			}
 		}
 		// Recheck the account's pooled transactions to drop included and
-		// invalidated one
+		// invalidated ones
 		p.recheck(addr, inclusions)
 	}
 	if len(adds) > 0 {
@@ -952,7 +992,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error {
 		return err
 	}
 
-	// Update the indixes and metrics
+	// Update the indices and metrics
 	meta := newBlobTxMeta(id, p.store.Size(id), tx)
 	if _, ok := p.index[addr]; !ok {
 		if err := p.reserve(addr, true); err != nil {
@@ -1019,6 +1059,8 @@ func (p *BlobPool) SetGasTip(tip *big.Int) {
 					}
 					// Clear out the transactions from the data store
 					log.Warn("Dropping underpriced blob transaction", "from", addr, "rejected", tx.nonce, "tip", tx.execTipCap, "want", tip, "drop", nonces, "ids", ids)
+					dropUnderpricedMeter.Mark(int64(len(ids)))
+
 					for _, id := range ids {
 						if err := p.store.Delete(id); err != nil {
 							log.Error("Failed to delete dropped transaction", "id", id, "err", err)
@@ -1161,7 +1203,7 @@ func (p *BlobPool) Get(hash common.Hash) *types.Transaction {
 }
 
 // Add inserts a set of blob transactions into the pool if they pass validation (both
-// consensus validity and pool restictions).
+// consensus validity and pool restrictions).
 func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
 	var (
 		adds = make([]*types.Transaction, 0, len(txs))
@@ -1181,10 +1223,10 @@ func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error
 }
 
 // Add inserts a new blob transaction into the pool if it passes validation (both
-// consensus validity and pool restictions).
+// consensus validity and pool restrictions).
 func (p *BlobPool) add(tx *types.Transaction) (err error) {
 	// The blob pool blocks on adding a transaction. This is because blob txs are
-	// only even pulled form the network, so this method will act as the overload
+	// only even pulled from the network, so this method will act as the overload
 	// protection for fetches.
 	waitStart := time.Now()
 	p.lock.Lock()
@@ -1198,6 +1240,22 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
 	// Ensure the transaction is valid from all perspectives
 	if err := p.validateTx(tx); err != nil {
 		log.Trace("Transaction validation failed", "hash", tx.Hash(), "err", err)
+		switch {
+		case errors.Is(err, txpool.ErrUnderpriced):
+			addUnderpricedMeter.Mark(1)
+		case errors.Is(err, core.ErrNonceTooLow):
+			addStaleMeter.Mark(1)
+		case errors.Is(err, core.ErrNonceTooHigh):
+			addGappedMeter.Mark(1)
+		case errors.Is(err, core.ErrInsufficientFunds):
+			addOverdraftedMeter.Mark(1)
+		case errors.Is(err, txpool.ErrAccountLimitExceeded):
+			addOvercappedMeter.Mark(1)
+		case errors.Is(err, txpool.ErrReplaceUnderpriced):
+			addNoreplaceMeter.Mark(1)
+		default:
+			addInvalidMeter.Mark(1)
+		}
 		return err
 	}
 	// If the address is not yet known, request exclusivity to track the account
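Editor's note (not part of the diff): the switch above only classifies rejections for metering; callers still receive the original error from validateTx. A hedged sketch of how the counted errors surface through Add, using only identifiers that appear in this diff:

	errs := pool.Add([]*types.Transaction{tx}, false, false)
	if len(errs) > 0 && errors.Is(errs[0], txpool.ErrUnderpriced) {
		// this rejection is what the blobpool/add/underpriced meter records
	}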
@@ -1205,6 +1263,7 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
 	from, _ := types.Sender(p.signer, tx) // already validated above
 	if _, ok := p.index[from]; !ok {
 		if err := p.reserve(from, true); err != nil {
+			addNonExclusiveMeter.Mark(1)
 			return err
 		}
 		defer func() {
@@ -1244,6 +1303,8 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
 	}
 	if len(p.index[from]) > offset {
 		// Transaction replaces a previously queued one
+		dropReplacedMeter.Mark(1)
+
 		prev := p.index[from][offset]
 		if err := p.store.Delete(prev.id); err != nil {
 			// Shitty situation, but try to recover gracefully instead of going boom
@@ -1322,6 +1383,7 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
 	}
 	p.updateStorageMetrics()
 
+	addValidMeter.Mark(1)
 	return nil
 }
 
@@ -1355,7 +1417,7 @@ func (p *BlobPool) drop() {
 	p.stored -= uint64(drop.size)
 	delete(p.lookup, drop.hash)
 
-	// Remove the transaction from the pool's evicion heap:
+	// Remove the transaction from the pool's eviction heap:
 	// - If the entire account was dropped, pop off the address
 	// - Otherwise, if the new tail has better eviction caps, fix the heap
 	if last {
@@ -1371,7 +1433,9 @@ func (p *BlobPool) drop() {
 		}
 	}
 	// Remove the transaction from the data store
-	log.Warn("Evicting overflown blob transaction", "from", from, "evicted", drop.nonce, "id", drop.id)
+	log.Debug("Evicting overflown blob transaction", "from", from, "evicted", drop.nonce, "id", drop.id)
+	dropOverflownMeter.Mark(1)
+
 	if err := p.store.Delete(drop.id); err != nil {
 		log.Error("Failed to drop evicted transaction", "id", drop.id, "err", err)
 	}
@@ -1379,7 +1443,15 @@ func (p *BlobPool) drop() {
 
 // Pending retrieves all currently processable transactions, grouped by origin
 // account and sorted by nonce.
-func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction {
+//
+// The transactions can also be pre-filtered by the dynamic fee components to
+// reduce allocations and load on downstream subsystems.
+func (p *BlobPool) Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction {
+	// If only plain transactions are requested, this pool is unsuitable as it
+	// contains none, don't even bother.
+	if filter.OnlyPlainTxs {
+		return nil
+	}
 	// Track the amount of time waiting to retrieve the list of pending blob txs
 	// from the pool and the amount of time actually spent on assembling the data.
 	// The latter will be pretty much moot, but we've kept it to have symmetric
@@ -1389,20 +1461,40 @@ func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTr
 	pendwaitHist.Update(time.Since(pendStart).Nanoseconds())
 	defer p.lock.RUnlock()
 
-	defer func(start time.Time) {
-		pendtimeHist.Update(time.Since(start).Nanoseconds())
-	}(time.Now())
+	execStart := time.Now()
+	defer func() {
+		pendtimeHist.Update(time.Since(execStart).Nanoseconds())
+	}()
 
-	pending := make(map[common.Address][]*txpool.LazyTransaction)
+	pending := make(map[common.Address][]*txpool.LazyTransaction, len(p.index))
 	for addr, txs := range p.index {
-		var lazies []*txpool.LazyTransaction
+		lazies := make([]*txpool.LazyTransaction, 0, len(txs))
 		for _, tx := range txs {
+			// If transaction filtering was requested, discard badly priced ones
+			if filter.MinTip != nil && filter.BaseFee != nil {
+				if tx.execFeeCap.Lt(filter.BaseFee) {
+					break // basefee too low, cannot be included, discard rest of txs from the account
+				}
+				tip := new(uint256.Int).Sub(tx.execFeeCap, filter.BaseFee)
+				if tip.Gt(tx.execTipCap) {
+					tip = tx.execTipCap
+				}
+				if tip.Lt(filter.MinTip) {
+					break // allowed or remaining tip too low, cannot be included, discard rest of txs from the account
+				}
+			}
+			if filter.BlobFee != nil {
+				if tx.blobFeeCap.Lt(filter.BlobFee) {
+					break // blobfee too low, cannot be included, discard rest of txs from the account
+				}
+			}
+			// Transaction was accepted according to the filter, append to the pending list
 			lazies = append(lazies, &txpool.LazyTransaction{
 				Pool:      p,
 				Hash:      tx.hash,
-				Time:      time.Now(), // TODO(karalabe): Maybe save these and use that?
-				GasFeeCap: tx.execFeeCap.ToBig(),
-				GasTipCap: tx.execTipCap.ToBig(),
+				Time:      execStart, // TODO(karalabe): Maybe save these and use that?
+				GasFeeCap: tx.execFeeCap,
+				GasTipCap: tx.execTipCap,
 				Gas:       tx.execGas,
 				BlobGas:   tx.blobGas,
 			})
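Editor's note (not part of the diff): Pending now takes a txpool.PendingFilter instead of the old enforceTips flag, and the returned LazyTransaction fee caps stay as uint256 values. A minimal usage sketch mirroring the benchmark added near the end of this diff; the concrete basefee and blobfee values are illustrative assumptions:

	pending := pool.Pending(txpool.PendingFilter{
		MinTip:  uint256.NewInt(1),
		BaseFee: uint256.NewInt(1050),
		BlobFee: uint256.NewInt(105),
	})
	for addr, lazies := range pending {
		_ = addr   // transactions stay grouped by sender and sorted by nonce
		_ = lazies // entries below the filter thresholds are skipped per account
	}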
@@ -1462,7 +1554,7 @@ func (p *BlobPool) updateStorageMetrics() {
 }
 
 // updateLimboMetrics retrieves a bunch of stats from the limbo store and pushes
-// // them out as metrics.
+// them out as metrics.
 func (p *BlobPool) updateLimboMetrics() {
 	stats := p.limbo.store.Infos()
 
@@ -51,21 +51,9 @@ var (
 	emptyBlob          = kzg4844.Blob{}
 	emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob)
 	emptyBlobProof, _  = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit)
-	emptyBlobVHash     = blobHash(emptyBlobCommit)
+	emptyBlobVHash     = kzg4844.CalcBlobHashV1(sha256.New(), &emptyBlobCommit)
 )
 
-func blobHash(commit kzg4844.Commitment) common.Hash {
-	hasher := sha256.New()
-	hasher.Write(commit[:])
-	hash := hasher.Sum(nil)
-
-	var vhash common.Hash
-	vhash[0] = params.BlobTxHashVersion
-	copy(vhash[1:], hash[1:])
-
-	return vhash
-}
-
 // Chain configuration with Cancun enabled.
 //
 // TODO(karalabe): replace with params.MainnetChainConfig after Cancun.
@@ -197,7 +185,7 @@ func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64,
 	return types.MustSignNewTx(key, types.LatestSigner(testChainConfig), blobtx)
 }
 
-// makeUnsignedTx is a utility method to construct a random blob tranasaction
+// makeUnsignedTx is a utility method to construct a random blob transaction
 // without signing it.
 func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64) *types.BlobTx {
 	return &types.BlobTx{
@@ -317,7 +305,16 @@ func verifyPoolInternals(t *testing.T, pool *BlobPool) {
 // - 1. A transaction that cannot be decoded must be dropped
 // - 2. A transaction that cannot be recovered (bad signature) must be dropped
 // - 3. All transactions after a nonce gap must be dropped
-// - 4. All transactions after an underpriced one (including it) must be dropped
+// - 4. All transactions after an already included nonce must be dropped
+// - 5. All transactions after an underpriced one (including it) must be dropped
+// - 6. All transactions after an overdrafting sequence must be dropped
+// - 7. All transactions exceeding the per-account limit must be dropped
+//
+// Furthermore, some strange corner-cases can also occur after a crash, as Billy's
+// simplicity also allows it to resurrect past deleted entities:
+//
+// - 8. Fully duplicate transactions (matching hash) must be dropped
+// - 9. Duplicate nonces from the same account must be dropped
 func TestOpenDrops(t *testing.T) {
 	log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
 
@@ -350,7 +347,7 @@ func TestOpenDrops(t *testing.T) {
 	badsig, _ := store.Put(blob)
 
 	// Insert a sequence of transactions with a nonce gap in between to verify
-	// that anything gapped will get evicted (case 3)
+	// that anything gapped will get evicted (case 3).
 	var (
 		gapper, _ = crypto.GenerateKey()
 
@@ -369,7 +366,7 @@ func TestOpenDrops(t *testing.T) {
 		}
 	}
 	// Insert a sequence of transactions with a gapped starting nonce to verify
-	// that the entire set will get dropped.
+	// that the entire set will get dropped (case 3).
 	var (
 		dangler, _ = crypto.GenerateKey()
 		dangling   = make(map[uint64]struct{})
@@ -382,7 +379,7 @@ func TestOpenDrops(t *testing.T) {
 		dangling[id] = struct{}{}
 	}
 	// Insert a sequence of transactions with already passed nonces to veirfy
-	// that the entire set will get dropped.
+	// that the entire set will get dropped (case 4).
 	var (
 		filler, _ = crypto.GenerateKey()
 		filled    = make(map[uint64]struct{})
@@ -394,8 +391,8 @@ func TestOpenDrops(t *testing.T) {
 		id, _ := store.Put(blob)
 		filled[id] = struct{}{}
 	}
-	// Insert a sequence of transactions with partially passed nonces to veirfy
-	// that the included part of the set will get dropped
+	// Insert a sequence of transactions with partially passed nonces to verify
+	// that the included part of the set will get dropped (case 4).
 	var (
 		overlapper, _ = crypto.GenerateKey()
 		overlapped    = make(map[uint64]struct{})
@@ -412,7 +409,7 @@ func TestOpenDrops(t *testing.T) {
 		}
 	}
 	// Insert a sequence of transactions with an underpriced first to verify that
-	// the entire set will get dropped (case 4).
+	// the entire set will get dropped (case 5).
 	var (
 		underpayer, _ = crypto.GenerateKey()
 		underpaid     = make(map[uint64]struct{})
@@ -431,7 +428,7 @@ func TestOpenDrops(t *testing.T) {
 	}
 
 	// Insert a sequence of transactions with an underpriced in between to verify
-	// that it and anything newly gapped will get evicted (case 4).
+	// that it and anything newly gapped will get evicted (case 5).
 	var (
 		outpricer, _ = crypto.GenerateKey()
 		outpriced    = make(map[uint64]struct{})
@@ -453,7 +450,7 @@ func TestOpenDrops(t *testing.T) {
 		}
 	}
 	// Insert a sequence of transactions fully overdrafted to verify that the
-	// entire set will get invalidated.
+	// entire set will get invalidated (case 6).
 	var (
 		exceeder, _ = crypto.GenerateKey()
 		exceeded    = make(map[uint64]struct{})
@@ -471,7 +468,7 @@ func TestOpenDrops(t *testing.T) {
 		exceeded[id] = struct{}{}
 	}
 	// Insert a sequence of transactions partially overdrafted to verify that part
-	// of the set will get invalidated.
+	// of the set will get invalidated (case 6).
 	var (
 		overdrafter, _ = crypto.GenerateKey()
 		overdrafted    = make(map[uint64]struct{})
@@ -493,7 +490,7 @@ func TestOpenDrops(t *testing.T) {
 		}
 	}
 	// Insert a sequence of transactions overflowing the account cap to verify
-	// that part of the set will get invalidated.
+	// that part of the set will get invalidated (case 7).
 	var (
 		overcapper, _ = crypto.GenerateKey()
 		overcapped    = make(map[uint64]struct{})
@@ -508,21 +505,59 @@ func TestOpenDrops(t *testing.T) {
 			overcapped[id] = struct{}{}
 		}
 	}
+	// Insert a batch of duplicated transactions to verify that only one of each
+	// version will remain (case 8).
+	var (
+		duplicater, _ = crypto.GenerateKey()
+		duplicated    = make(map[uint64]struct{})
+	)
+	for _, nonce := range []uint64{0, 1, 2} {
+		blob, _ := rlp.EncodeToBytes(makeTx(nonce, 1, 1, 1, duplicater))
+
+		for i := 0; i < int(nonce)+1; i++ {
+			id, _ := store.Put(blob)
+			if i == 0 {
+				valids[id] = struct{}{}
+			} else {
+				duplicated[id] = struct{}{}
+			}
+		}
+	}
+	// Insert a batch of duplicated nonces to verify that only one of each will
+	// remain (case 9).
+	var (
+		repeater, _ = crypto.GenerateKey()
+		repeated    = make(map[uint64]struct{})
+	)
+	for _, nonce := range []uint64{0, 1, 2} {
+		for i := 0; i < int(nonce)+1; i++ {
+			blob, _ := rlp.EncodeToBytes(makeTx(nonce, 1, uint64(i)+1 /* unique hashes */, 1, repeater))
+
+			id, _ := store.Put(blob)
+			if i == 0 {
+				valids[id] = struct{}{}
+			} else {
+				repeated[id] = struct{}{}
+			}
+		}
+	}
 	store.Close()
 
 	// Create a blob pool out of the pre-seeded data
 	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
-	statedb.AddBalance(crypto.PubkeyToAddress(gapper.PublicKey), big.NewInt(1000000))
-	statedb.AddBalance(crypto.PubkeyToAddress(dangler.PublicKey), big.NewInt(1000000))
-	statedb.AddBalance(crypto.PubkeyToAddress(filler.PublicKey), big.NewInt(1000000))
+	statedb.AddBalance(crypto.PubkeyToAddress(gapper.PublicKey), uint256.NewInt(1000000))
+	statedb.AddBalance(crypto.PubkeyToAddress(dangler.PublicKey), uint256.NewInt(1000000))
+	statedb.AddBalance(crypto.PubkeyToAddress(filler.PublicKey), uint256.NewInt(1000000))
 	statedb.SetNonce(crypto.PubkeyToAddress(filler.PublicKey), 3)
-	statedb.AddBalance(crypto.PubkeyToAddress(overlapper.PublicKey), big.NewInt(1000000))
+	statedb.AddBalance(crypto.PubkeyToAddress(overlapper.PublicKey), uint256.NewInt(1000000))
 	statedb.SetNonce(crypto.PubkeyToAddress(overlapper.PublicKey), 2)
-	statedb.AddBalance(crypto.PubkeyToAddress(underpayer.PublicKey), big.NewInt(1000000))
-	statedb.AddBalance(crypto.PubkeyToAddress(outpricer.PublicKey), big.NewInt(1000000))
-	statedb.AddBalance(crypto.PubkeyToAddress(exceeder.PublicKey), big.NewInt(1000000))
-	statedb.AddBalance(crypto.PubkeyToAddress(overdrafter.PublicKey), big.NewInt(1000000))
-	statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), big.NewInt(10000000))
+	statedb.AddBalance(crypto.PubkeyToAddress(underpayer.PublicKey), uint256.NewInt(1000000))
+	statedb.AddBalance(crypto.PubkeyToAddress(outpricer.PublicKey), uint256.NewInt(1000000))
+	statedb.AddBalance(crypto.PubkeyToAddress(exceeder.PublicKey), uint256.NewInt(1000000))
+	statedb.AddBalance(crypto.PubkeyToAddress(overdrafter.PublicKey), uint256.NewInt(1000000))
+	statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), uint256.NewInt(10000000))
+	statedb.AddBalance(crypto.PubkeyToAddress(duplicater.PublicKey), uint256.NewInt(1000000))
+	statedb.AddBalance(crypto.PubkeyToAddress(repeater.PublicKey), uint256.NewInt(1000000))
 	statedb.Commit(0, true)
 
 	chain := &testBlockChain{
@@ -532,7 +567,7 @@ func TestOpenDrops(t *testing.T) {
 		statedb: statedb,
 	}
 	pool := New(Config{Datadir: storage}, chain)
-	if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+	if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
 		t.Fatalf("failed to create blob pool: %v", err)
 	}
 	defer pool.Close()
@@ -566,6 +601,10 @@ func TestOpenDrops(t *testing.T) {
 			t.Errorf("partially overdrafted transaction remained in storage: %d", tx.id)
 		} else if _, ok := overcapped[tx.id]; ok {
 			t.Errorf("overcapped transaction remained in storage: %d", tx.id)
+		} else if _, ok := duplicated[tx.id]; ok {
+			t.Errorf("duplicated transaction remained in storage: %d", tx.id)
+		} else if _, ok := repeated[tx.id]; ok {
+			t.Errorf("repeated nonce transaction remained in storage: %d", tx.id)
 		} else {
 			alive[tx.id] = struct{}{}
 		}
@@ -596,7 +635,7 @@ func TestOpenDrops(t *testing.T) {
 
 // Tests that transactions loaded from disk are indexed correctly.
 //
-// - 1. Transactions must be groupped by sender, sorted by nonce
+// - 1. Transactions must be grouped by sender, sorted by nonce
 // - 2. Eviction thresholds are calculated correctly for the sequences
 // - 3. Balance usage of an account is totals across all transactions
 func TestOpenIndex(t *testing.T) {
@@ -610,7 +649,7 @@ func TestOpenIndex(t *testing.T) {
 	store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
 
 	// Insert a sequence of transactions with varying price points to check that
-	// the cumulative minimumw will be maintained.
+	// the cumulative minimum will be maintained.
 	var (
 		key, _ = crypto.GenerateKey()
 		addr   = crypto.PubkeyToAddress(key.PublicKey)
@@ -637,7 +676,7 @@ func TestOpenIndex(t *testing.T) {
 
 	// Create a blob pool out of the pre-seeded data
 	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
-	statedb.AddBalance(addr, big.NewInt(1_000_000_000))
+	statedb.AddBalance(addr, uint256.NewInt(1_000_000_000))
 	statedb.Commit(0, true)
 
 	chain := &testBlockChain{
@@ -647,7 +686,7 @@ func TestOpenIndex(t *testing.T) {
 		statedb: statedb,
 	}
 	pool := New(Config{Datadir: storage}, chain)
-	if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+	if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
 		t.Fatalf("failed to create blob pool: %v", err)
 	}
 	defer pool.Close()
@@ -737,9 +776,9 @@ func TestOpenHeap(t *testing.T) {
 
 	// Create a blob pool out of the pre-seeded data
 	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
-	statedb.AddBalance(addr1, big.NewInt(1_000_000_000))
-	statedb.AddBalance(addr2, big.NewInt(1_000_000_000))
-	statedb.AddBalance(addr3, big.NewInt(1_000_000_000))
+	statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000))
+	statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000))
+	statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000))
 	statedb.Commit(0, true)
 
 	chain := &testBlockChain{
@@ -749,7 +788,7 @@ func TestOpenHeap(t *testing.T) {
 		statedb: statedb,
 	}
 	pool := New(Config{Datadir: storage}, chain)
-	if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+	if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
 		t.Fatalf("failed to create blob pool: %v", err)
 	}
 	defer pool.Close()
@@ -817,9 +856,9 @@ func TestOpenCap(t *testing.T) {
 	for _, datacap := range []uint64{2 * (txAvgSize + blobSize), 100 * (txAvgSize + blobSize)} {
 		// Create a blob pool out of the pre-seeded data, but cap it to 2 blob transaction
 		statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
-		statedb.AddBalance(addr1, big.NewInt(1_000_000_000))
-		statedb.AddBalance(addr2, big.NewInt(1_000_000_000))
-		statedb.AddBalance(addr3, big.NewInt(1_000_000_000))
+		statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000))
+		statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000))
+		statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000))
 		statedb.Commit(0, true)
 
 		chain := &testBlockChain{
@@ -829,7 +868,7 @@ func TestOpenCap(t *testing.T) {
 			statedb: statedb,
 		}
 		pool := New(Config{Datadir: storage, Datacap: datacap}, chain)
-		if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+		if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
 			t.Fatalf("failed to create blob pool: %v", err)
 		}
 		// Verify that enough transactions have been dropped to get the pool's size
@@ -1189,6 +1228,24 @@ func TestAdd(t *testing.T) {
 				},
 			},
 		},
+		// Blob transactions that don't meet the min blob gas price should be rejected
+		{
+			seeds: map[string]seed{
+				"alice": {balance: 10000000},
+			},
+			adds: []addtx{
+				{ // New account, no previous txs, nonce 0, but blob fee cap too low
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 1, 0),
+					err:  txpool.ErrUnderpriced,
+				},
+				{ // Same as above but blob fee cap equals minimum, should be accepted
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 1, params.BlobTxMinBlobGasprice),
+					err:  nil,
+				},
+			},
+		},
 	}
 	for i, tt := range tests {
 		// Create a temporary folder for the persistent backend
@@ -1209,8 +1266,8 @@ func TestAdd(t *testing.T) {
 			keys[acc], _ = crypto.GenerateKey()
 			addrs[acc] = crypto.PubkeyToAddress(keys[acc].PublicKey)
 
-			// Seed the state database with this acocunt
-			statedb.AddBalance(addrs[acc], new(big.Int).SetUint64(seed.balance))
+			// Seed the state database with this account
+			statedb.AddBalance(addrs[acc], new(uint256.Int).SetUint64(seed.balance))
 			statedb.SetNonce(addrs[acc], seed.nonce)
 
 			// Sign the seed transactions and store them in the data store
@@ -1231,7 +1288,7 @@ func TestAdd(t *testing.T) {
 			statedb: statedb,
 		}
 		pool := New(Config{Datadir: storage}, chain)
-		if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+		if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
 			t.Fatalf("test %d: failed to create blob pool: %v", i, err)
 		}
 		verifyPoolInternals(t, pool)
@@ -1249,3 +1306,65 @@ func TestAdd(t *testing.T) {
 		pool.Close()
 	}
 }
+
+// Benchmarks the time it takes to assemble the lazy pending transaction list
+// from the pool contents.
+func BenchmarkPoolPending100Mb(b *testing.B) { benchmarkPoolPending(b, 100_000_000) }
+func BenchmarkPoolPending1GB(b *testing.B)   { benchmarkPoolPending(b, 1_000_000_000) }
+func BenchmarkPoolPending10GB(b *testing.B)  { benchmarkPoolPending(b, 10_000_000_000) }
+
+func benchmarkPoolPending(b *testing.B, datacap uint64) {
+	// Calculate the maximum number of transaction that would fit into the pool
+	// and generate a set of random accounts to seed them with.
+	capacity := datacap / params.BlobTxBlobGasPerBlob
+
+	var (
+		basefee    = uint64(1050)
+		blobfee    = uint64(105)
+		signer     = types.LatestSigner(testChainConfig)
+		statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+		chain      = &testBlockChain{
+			config:  testChainConfig,
+			basefee: uint256.NewInt(basefee),
+			blobfee: uint256.NewInt(blobfee),
+			statedb: statedb,
+		}
+		pool = New(Config{Datadir: ""}, chain)
+	)
+
+	if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
+		b.Fatalf("failed to create blob pool: %v", err)
+	}
+	// Fill the pool up with one random transaction from each account with the
+	// same price and everything to maximize the worst case scenario
+	for i := 0; i < int(capacity); i++ {
+		blobtx := makeUnsignedTx(0, 10, basefee+10, blobfee)
+		blobtx.R = uint256.NewInt(1)
+		blobtx.S = uint256.NewInt(uint64(100 + i))
+		blobtx.V = uint256.NewInt(0)
+		tx := types.NewTx(blobtx)
+		addr, err := types.Sender(signer, tx)
+		if err != nil {
+			b.Fatal(err)
+		}
+		statedb.AddBalance(addr, uint256.NewInt(1_000_000_000))
+		pool.add(tx)
+	}
+	statedb.Commit(0, true)
+	defer pool.Close()
+
+	// Benchmark assembling the pending
+	b.ResetTimer()
+	b.ReportAllocs()
+
+	for i := 0; i < b.N; i++ {
+		p := pool.Pending(txpool.PendingFilter{
+			MinTip:  uint256.NewInt(1),
+			BaseFee: chain.basefee,
+			BlobFee: chain.blobfee,
+		})
+		if len(p) != int(capacity) {
+			b.Fatalf("have %d want %d", len(p), capacity)
+		}
+	}
+}
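Editor's note (not part of the diff): the new benchmarks can be run on their own with the standard Go tooling, for example go test -run '^$' -bench PoolPending ./core/txpool/blobpool/ ; the package path is an assumption based on the upstream go-ethereum layout.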
@@ -30,7 +30,7 @@ type Config struct {
 // DefaultConfig contains the default configurations for the transaction pool.
 var DefaultConfig = Config{
 	Datadir:   "blobpool",
-	Datacap:   10 * 1024 * 1024 * 1024,
+	Datacap:   10 * 1024 * 1024 * 1024 / 4, // TODO(karalabe): /4 handicap for rollout, gradually bump back up to 10GB
 	PriceBump: 100, // either have patience or be aggressive, no mushy ground
 }
 
@@ -30,7 +30,7 @@ import (
 // transaction from each account to determine which account to evict from.
 //
 // The heap internally tracks a slice of cheapest transactions from each account
-// and a mapping from addresses to indices for direct removals/udates.
+// and a mapping from addresses to indices for direct removals/updates.
 //
 // The goal of the heap is to decide which account has the worst bottleneck to
 // evict transactions from.
@@ -53,7 +53,7 @@ func newLimbo(datadir string) (*limbo, error) {
 		index:  make(map[common.Hash]uint64),
 		groups: make(map[uint64]map[uint64]common.Hash),
 	}
-	// Index all limboed blobs on disk and delete anything inprocessable
+	// Index all limboed blobs on disk and delete anything unprocessable
 	var fails []uint64
 	index := func(id uint64, size uint32, data []byte) {
 		if l.parseBlob(id, data) != nil {
@@ -89,7 +89,7 @@ func (l *limbo) parseBlob(id uint64, data []byte) error {
 	item := new(limboBlob)
 	if err := rlp.DecodeBytes(data, item); err != nil {
 		// This path is impossible unless the disk data representation changes
-		// across restarts. For that ever unprobable case, recover gracefully
+		// across restarts. For that ever improbable case, recover gracefully
 		// by ignoring this data entry.
 		log.Error("Failed to decode blob limbo entry", "id", id, "err", err)
 		return err
@@ -172,7 +172,7 @@ func (l *limbo) pull(tx common.Hash) (*types.Transaction, error) {
 // update changes the block number under which a blob transaction is tracked. This
 // method should be used when a reorg changes a transaction's inclusion block.
 //
-// The method may log errors for various unexpcted scenarios but will not return
+// The method may log errors for various unexpected scenarios but will not return
 // any of it since there's no clear error case. Some errors may be due to coding
 // issues, others caused by signers mining MEV stuff or swapping transactions. In
 // all cases, the pool needs to continue operating.
@ -65,8 +65,8 @@ var (
|
|||||||
pooltipGauge = metrics.NewRegisteredGauge("blobpool/pooltip", nil)
|
pooltipGauge = metrics.NewRegisteredGauge("blobpool/pooltip", nil)
|
||||||
|
|
||||||
// addwait/time, resetwait/time and getwait/time track the rough health of
|
// addwait/time, resetwait/time and getwait/time track the rough health of
|
||||||
// the pool and whether or not it's capable of keeping up with the load from
|
// the pool and whether it's capable of keeping up with the load from the
|
||||||
// the network.
|
// network.
|
||||||
addwaitHist = metrics.NewRegisteredHistogram("blobpool/addwait", nil, metrics.NewExpDecaySample(1028, 0.015))
|
 	addwaitHist = metrics.NewRegisteredHistogram("blobpool/addwait", nil, metrics.NewExpDecaySample(1028, 0.015))
 	addtimeHist = metrics.NewRegisteredHistogram("blobpool/addtime", nil, metrics.NewExpDecaySample(1028, 0.015))
 	getwaitHist = metrics.NewRegisteredHistogram("blobpool/getwait", nil, metrics.NewExpDecaySample(1028, 0.015))
@@ -75,4 +75,31 @@ var (
 	pendtimeHist  = metrics.NewRegisteredHistogram("blobpool/pendtime", nil, metrics.NewExpDecaySample(1028, 0.015))
 	resetwaitHist = metrics.NewRegisteredHistogram("blobpool/resetwait", nil, metrics.NewExpDecaySample(1028, 0.015))
 	resettimeHist = metrics.NewRegisteredHistogram("blobpool/resettime", nil, metrics.NewExpDecaySample(1028, 0.015))
+
+	// The below metrics track various cases where transactions are dropped out
+	// of the pool. Most are exceptional, some are chain progression and some
+	// threshold cappings.
+	dropInvalidMeter     = metrics.NewRegisteredMeter("blobpool/drop/invalid", nil)     // Invalid transaction, consensus change or bugfix, neutral-ish
+	dropDanglingMeter    = metrics.NewRegisteredMeter("blobpool/drop/dangling", nil)    // First nonce gapped, bad
+	dropFilledMeter      = metrics.NewRegisteredMeter("blobpool/drop/filled", nil)      // State full-overlap, chain progress, ok
+	dropOverlappedMeter  = metrics.NewRegisteredMeter("blobpool/drop/overlapped", nil)  // State partial-overlap, chain progress, ok
+	dropRepeatedMeter    = metrics.NewRegisteredMeter("blobpool/drop/repeated", nil)    // Repeated nonce, bad
+	dropGappedMeter      = metrics.NewRegisteredMeter("blobpool/drop/gapped", nil)      // Non-first nonce gapped, bad
+	dropOverdraftedMeter = metrics.NewRegisteredMeter("blobpool/drop/overdrafted", nil) // Balance exceeded, bad
+	dropOvercappedMeter  = metrics.NewRegisteredMeter("blobpool/drop/overcapped", nil)  // Per-account cap exceeded, bad
+	dropOverflownMeter   = metrics.NewRegisteredMeter("blobpool/drop/overflown", nil)   // Global disk cap exceeded, neutral-ish
+	dropUnderpricedMeter = metrics.NewRegisteredMeter("blobpool/drop/underpriced", nil) // Gas tip changed, neutral
+	dropReplacedMeter    = metrics.NewRegisteredMeter("blobpool/drop/replaced", nil)    // Transaction replaced, neutral
+
+	// The below metrics track various outcomes of transactions being added to
+	// the pool.
+	addInvalidMeter      = metrics.NewRegisteredMeter("blobpool/add/invalid", nil)      // Invalid transaction, reject, neutral
+	addUnderpricedMeter  = metrics.NewRegisteredMeter("blobpool/add/underpriced", nil)  // Gas tip too low, neutral
+	addStaleMeter        = metrics.NewRegisteredMeter("blobpool/add/stale", nil)        // Nonce already filled, reject, bad-ish
+	addGappedMeter       = metrics.NewRegisteredMeter("blobpool/add/gapped", nil)       // Nonce gapped, reject, bad-ish
+	addOverdraftedMeter  = metrics.NewRegisteredMeter("blobpool/add/overdrafted", nil)  // Balance exceeded, reject, neutral
+	addOvercappedMeter   = metrics.NewRegisteredMeter("blobpool/add/overcapped", nil)   // Per-account cap exceeded, reject, neutral
+	addNoreplaceMeter    = metrics.NewRegisteredMeter("blobpool/add/noreplace", nil)    // Replacement fees or tips too low, neutral
+	addNonExclusiveMeter = metrics.NewRegisteredMeter("blobpool/add/nonexclusive", nil) // Plain transaction from same account exists, reject, neutral
+	addValidMeter        = metrics.NewRegisteredMeter("blobpool/add/valid", nil)        // Valid transaction, add, neutral
 )
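For context on how these counters are consumed: the new drop/add meters are ordinary go-ethereum metrics meters, so each eviction or admission site only needs to mark the meter matching its outcome. A minimal, self-contained sketch of that pattern (the meter name and call site below are illustrative, not taken from this diff):

```go
package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/metrics"
)

func main() {
    // Register a meter the same way the blobpool meters above are registered.
    // The name is made up for this example so it cannot clash with the pool's own.
    dropOvercapped := metrics.NewRegisteredMeter("example/drop/overcapped", nil)

    // At a hypothetical eviction site, one event is recorded per dropped transaction.
    dropOvercapped.Mark(1)

    fmt.Println("overcapped drops recorded:", dropOvercapped.Count())
}
```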
@@ -64,7 +64,7 @@ func BenchmarkDynamicFeeJumpCalculation(b *testing.B) {
 // Benchmarks how many priority recalculations can be done.
 func BenchmarkPriorityCalculation(b *testing.B) {
 	// The basefee and blob fee is constant for all transactions across a block,
-	// so we can assume theit absolute jump counts can be pre-computed.
+	// so we can assume their absolute jump counts can be pre-computed.
 	basefee := uint256.NewInt(17_200_000_000) // 17.2 Gwei is the 22.03.2023 zero-emission basefee, random number
 	blobfee := uint256.NewInt(123_456_789_000) // Completely random, no idea what this will be
 
@@ -54,4 +54,10 @@ var (
 	// ErrFutureReplacePending is returned if a future transaction replaces a pending
 	// one. Future transactions should only be able to replace other future transactions.
 	ErrFutureReplacePending = errors.New("future transaction tries to replace pending")
+
+	// ErrAlreadyReserved is returned if the sender address has a pending transaction
+	// in a different subpool. For example, this error is returned in response to any
+	// input transaction of non-blob type when a blob transaction from this sender
+	// remains pending (and vice-versa).
+	ErrAlreadyReserved = errors.New("address already reserved")
 )
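Since the reservation conflict now has an exported sentinel, callers can match it with errors.Is instead of comparing error strings. A small sketch, with a stand-in submit function in place of the real add path:

```go
package main

import (
    "errors"
    "fmt"

    "github.com/ethereum/go-ethereum/core/txpool"
)

// submit stands in for the pool's add path, which would surface this error when
// the sender is reserved by another subpool.
func submit() error {
    return txpool.ErrAlreadyReserved
}

func main() {
    if err := submit(); errors.Is(err, txpool.ErrAlreadyReserved) {
        // e.g. a blob transaction from this sender is still pending, blocking a
        // plain transaction (or vice-versa).
        fmt.Println("conflicting subpool reservation:", err)
    }
}
```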
@@ -164,7 +164,12 @@ func (journal *journal) rotate(all map[common.Address]types.Transactions) error
 		return err
 	}
 	journal.writer = sink
-	log.Info("Regenerated local transaction journal", "transactions", journaled, "accounts", len(all))
 
+	logger := log.Info
+	if len(all) == 0 {
+		logger = log.Debug
+	}
+	logger("Regenerated local transaction journal", "transactions", journaled, "accounts", len(all))
+
 	return nil
 }
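The journal change above demotes the "Regenerated local transaction journal" line to debug level when nothing was journaled, by picking the leveled log function first and emitting a single message. The same pattern in isolation (the report helper is illustrative, not from geth):

```go
package main

import "github.com/ethereum/go-ethereum/log"

// report shows the pattern: choose the leveled log function first, then emit once.
func report(journaled, accounts int) {
    logger := log.Info
    if accounts == 0 {
        logger = log.Debug // an empty journal is not worth an info-level line
    }
    logger("Regenerated local transaction journal", "transactions", journaled, "accounts", accounts)
}

func main() {
    report(0, 0)  // logged at debug level
    report(12, 3) // logged at info level
}
```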
@@ -37,6 +37,7 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/params"
+	"github.com/holiman/uint256"
 )
 
 const (
@@ -202,7 +203,7 @@ type LegacyPool struct {
 	config      Config
 	chainconfig *params.ChainConfig
 	chain       BlockChain
-	gasTip      atomic.Pointer[big.Int]
+	gasTip      atomic.Pointer[uint256.Int]
 	txFeed      event.Feed
 	signer      types.Signer
 	mu          sync.RWMutex
@@ -287,12 +288,12 @@ func (pool *LegacyPool) Filter(tx *types.Transaction) bool {
 // head to allow balance / nonce checks. The transaction journal will be loaded
 // from disk and filtered based on the provided starting settings. The internal
 // goroutines will be spun up and the pool deemed operational afterwards.
-func (pool *LegacyPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error {
+func (pool *LegacyPool) Init(gasTip uint64, head *types.Header, reserve txpool.AddressReserver) error {
 	// Set the address reserver to request exclusive access to pooled accounts
 	pool.reserve = reserve
 
 	// Set the basic pool parameters
-	pool.gasTip.Store(gasTip)
+	pool.gasTip.Store(uint256.NewInt(gasTip))
 
 	// Initialize the state with head block, or fallback to empty one in
 	// case the head state is not available (might occur when node is not
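With Init now taking a plain uint64 and the pool keeping the tip as an atomic *uint256.Int, the conversions happen at the edges: wrap on store, and call ToBig only where a big.Int API still needs it. A minimal sketch of that storage pattern, outside the pool (the 1 gwei value is an assumption for the example):

```go
package main

import (
    "fmt"
    "sync/atomic"

    "github.com/holiman/uint256"
)

func main() {
    // The legacy pool's gasTip field uses the same shape as this local variable.
    var gasTip atomic.Pointer[uint256.Int]

    gasTip.Store(uint256.NewInt(1_000_000_000)) // assume a 1 gwei minimum tip

    fmt.Println("as uint64:", gasTip.Load().Uint64())
    fmt.Println("as big.Int:", gasTip.Load().ToBig())
}
```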
@@ -433,11 +434,13 @@ func (pool *LegacyPool) SetGasTip(tip *big.Int) {
 	pool.mu.Lock()
 	defer pool.mu.Unlock()
 
-	old := pool.gasTip.Load()
-	pool.gasTip.Store(new(big.Int).Set(tip))
-
+	var (
+		newTip = uint256.MustFromBig(tip)
+		old    = pool.gasTip.Load()
+	)
+	pool.gasTip.Store(newTip)
 	// If the min miner fee increased, remove transactions below the new threshold
-	if tip.Cmp(old) > 0 {
+	if newTip.Cmp(old) > 0 {
 		// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
 		drop := pool.all.RemotesBelowTip(tip)
 		for _, tx := range drop {
@@ -445,7 +448,7 @@ func (pool *LegacyPool) SetGasTip(tip *big.Int) {
 		}
 		pool.priced.Removed(len(drop))
 	}
-	log.Info("Legacy pool tip threshold updated", "tip", tip)
+	log.Info("Legacy pool tip threshold updated", "tip", newTip)
 }
 
 // Nonce returns the next nonce of an account, with all transactions executable
@@ -515,24 +518,38 @@ func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction,
 }
 
 // Pending retrieves all currently processable transactions, grouped by origin
-// account and sorted by nonce. The returned transaction set is a copy and can be
-// freely modified by calling code.
+// account and sorted by nonce.
 //
-// The enforceTips parameter can be used to do an extra filtering on the pending
-// transactions and only return those whose **effective** tip is large enough in
-// the next pending execution environment.
-func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction {
+// The transactions can also be pre-filtered by the dynamic fee components to
+// reduce allocations and load on downstream subsystems.
+func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction {
+	// If only blob transactions are requested, this pool is unsuitable as it
+	// contains none, don't even bother.
+	if filter.OnlyBlobTxs {
+		return nil
+	}
 	pool.mu.Lock()
 	defer pool.mu.Unlock()
 
+	// Convert the new uint256.Int types to the old big.Int ones used by the legacy pool
+	var (
+		minTipBig  *big.Int
+		baseFeeBig *big.Int
+	)
+	if filter.MinTip != nil {
+		minTipBig = filter.MinTip.ToBig()
+	}
+	if filter.BaseFee != nil {
+		baseFeeBig = filter.BaseFee.ToBig()
+	}
 	pending := make(map[common.Address][]*txpool.LazyTransaction, len(pool.pending))
 	for addr, list := range pool.pending {
 		txs := list.Flatten()
 
 		// If the miner requests tip enforcement, cap the lists now
-		if enforceTips && !pool.locals.contains(addr) {
+		if minTipBig != nil && !pool.locals.contains(addr) {
 			for i, tx := range txs {
-				if tx.EffectiveGasTipIntCmp(pool.gasTip.Load(), pool.priced.urgent.baseFee) < 0 {
+				if tx.EffectiveGasTipIntCmp(minTipBig, baseFeeBig) < 0 {
 					txs = txs[:i]
 					break
 				}
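The capping loop above compares each transaction's effective tip against the filter's minimum: under EIP-1559 the tip a miner actually receives is the smaller of the tip cap and the fee cap minus the base fee, which is what EffectiveGasTipIntCmp evaluates. A plain big.Int reimplementation for illustration only (not the method the pool calls), with made-up fee values:

```go
package main

import (
    "fmt"
    "math/big"
)

// effectiveTip returns min(tipCap, feeCap - baseFee), the amount the miner keeps.
func effectiveTip(tipCap, feeCap, baseFee *big.Int) *big.Int {
    tip := new(big.Int).Sub(feeCap, baseFee)
    if tip.Cmp(tipCap) > 0 {
        tip.Set(tipCap)
    }
    return tip
}

func main() {
    // 2 gwei tip cap, 25 gwei fee cap, 24 gwei base fee: only 1 gwei of headroom
    // remains, so a MinTip filter of 2 gwei would truncate the account's list here.
    fmt.Println(effectiveTip(big.NewInt(2_000_000_000), big.NewInt(25_000_000_000), big.NewInt(24_000_000_000)))
}
```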
@@ -546,8 +563,8 @@ func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.L
 				Hash:      txs[i].Hash(),
 				Tx:        txs[i],
 				Time:      txs[i].Time(),
-				GasFeeCap: txs[i].GasFeeCap(),
-				GasTipCap: txs[i].GasTipCap(),
+				GasFeeCap: uint256.MustFromBig(txs[i].GasFeeCap()),
+				GasTipCap: uint256.MustFromBig(txs[i].GasTipCap()),
 				Gas:       txs[i].Gas(),
 				BlobGas:   txs[i].BlobGas(),
 			}
@@ -594,7 +611,7 @@ func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) erro
 			1<<types.AccessListTxType |
 			1<<types.DynamicFeeTxType,
 		MaxSize: txMaxSize,
-		MinTip:  pool.gasTip.Load(),
+		MinTip:  pool.gasTip.Load().ToBig(),
 	}
 	if local {
 		opts.MinTip = new(big.Int)
@@ -624,7 +641,7 @@ func (pool *LegacyPool) validateTx(tx *types.Transaction, local bool) error {
 		},
 		ExistingExpenditure: func(addr common.Address) *big.Int {
 			if list := pool.pending[addr]; list != nil {
-				return list.totalcost
+				return list.totalcost.ToBig()
 			}
 			return new(big.Int)
 		},
@@ -26,6 +26,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/event"
+	"github.com/holiman/uint256"
 )
 
 func pricedValuedTransaction(nonce uint64, value int64, gaslimit uint64, gasprice *big.Int, key *ecdsa.PrivateKey) *types.Transaction {
@@ -49,7 +50,7 @@ func fillPool(t testing.TB, pool *LegacyPool) {
 	nonExecutableTxs := types.Transactions{}
 	for i := 0; i < 384; i++ {
 		key, _ := crypto.GenerateKey()
-		pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(10000000000))
+		pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), uint256.NewInt(10000000000))
 		// Add executable ones
 		for j := 0; j < int(pool.config.AccountSlots); j++ {
 			executableTxs = append(executableTxs, pricedTransaction(uint64(j), 100000, big.NewInt(300), key))
@@ -84,14 +85,14 @@ func TestTransactionFutureAttack(t *testing.T) {
 	config.GlobalQueue = 100
 	config.GlobalSlots = 100
 	pool := New(config, blockchain)
-	pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 	fillPool(t, pool)
 	pending, _ := pool.Stats()
 	// Now, future transaction attack starts, let's add a bunch of expensive non-executables, and see if the pending-count drops
 	{
 		key, _ := crypto.GenerateKey()
-		pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000))
+		pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), uint256.NewInt(100000000000))
 		futureTxs := types.Transactions{}
 		for j := 0; j < int(pool.config.GlobalSlots+pool.config.GlobalQueue); j++ {
 			futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 100000, big.NewInt(500), key))
@@ -118,7 +119,7 @@ func TestTransactionFuture1559(t *testing.T) {
 	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
 	blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
 	pool := New(testTxPoolConfig, blockchain)
-	pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	// Create a number of test accounts, fund them and make transactions
@@ -128,7 +129,7 @@ func TestTransactionFuture1559(t *testing.T) {
 	// Now, future transaction attack starts, let's add a bunch of expensive non-executables, and see if the pending-count drops
 	{
 		key, _ := crypto.GenerateKey()
-		pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000))
+		pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), uint256.NewInt(100000000000))
 		futureTxs := types.Transactions{}
 		for j := 0; j < int(pool.config.GlobalSlots+pool.config.GlobalQueue); j++ {
 			futureTxs = append(futureTxs, dynamicFeeTx(1000+uint64(j), 100000, big.NewInt(200), big.NewInt(101), key))
@@ -151,7 +152,7 @@ func TestTransactionZAttack(t *testing.T) {
 	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
 	blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
 	pool := New(testTxPoolConfig, blockchain)
-	pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 	// Create a number of test accounts, fund them and make transactions
 	fillPool(t, pool)
@@ -161,7 +162,7 @@ func TestTransactionZAttack(t *testing.T) {
 	var ivpendingNum int
 	pendingtxs, _ := pool.Content()
 	for account, txs := range pendingtxs {
-		cur_balance := new(big.Int).Set(pool.currentState.GetBalance(account))
+		cur_balance := new(big.Int).Set(pool.currentState.GetBalance(account).ToBig())
 		for _, tx := range txs {
 			if cur_balance.Cmp(tx.Value()) <= 0 {
 				ivpendingNum++
@@ -182,7 +183,7 @@ func TestTransactionZAttack(t *testing.T) {
 	for j := 0; j < int(pool.config.GlobalQueue); j++ {
 		futureTxs := types.Transactions{}
 		key, _ := crypto.GenerateKey()
-		pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000))
+		pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), uint256.NewInt(100000000000))
 		futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 21000, big.NewInt(500), key))
 		pool.addRemotesSync(futureTxs)
 	}
@@ -190,7 +191,7 @@ func TestTransactionZAttack(t *testing.T) {
 	overDraftTxs := types.Transactions{}
 	{
 		key, _ := crypto.GenerateKey()
-		pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000))
+		pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), uint256.NewInt(100000000000))
 		for j := 0; j < int(pool.config.GlobalSlots); j++ {
 			overDraftTxs = append(overDraftTxs, pricedValuedTransaction(uint64(j), 600000000000, 21000, big.NewInt(500), key))
 		}
@@ -222,12 +223,12 @@ func BenchmarkFutureAttack(b *testing.B) {
 	config.GlobalQueue = 100
 	config.GlobalSlots = 100
 	pool := New(config, blockchain)
-	pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 	fillPool(b, pool)
 
 	key, _ := crypto.GenerateKey()
-	pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000))
+	pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), uint256.NewInt(100000000000))
 	futureTxs := types.Transactions{}
 
 	for n := 0; n < b.N; n++ {
@@ -39,6 +39,7 @@ import (
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/trie"
+	"github.com/holiman/uint256"
 )
 
 var (
@@ -163,7 +164,7 @@ func setupPoolWithConfig(config *params.ChainConfig) (*LegacyPool, *ecdsa.Privat
 
 	key, _ := crypto.GenerateKey()
 	pool := New(testTxPoolConfig, blockchain)
-	if err := pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()); err != nil {
+	if err := pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()); err != nil {
 		panic(err)
 	}
 	// wait for the pool to initialize
@@ -198,9 +199,6 @@ func validatePoolInternals(pool *LegacyPool) error {
 		if nonce := pool.pendingNonces.get(addr); nonce != last+1 {
 			return fmt.Errorf("pending nonce mismatch: have %v, want %v", nonce, last+1)
 		}
-		if txs.totalcost.Cmp(common.Big0) < 0 {
-			return fmt.Errorf("totalcost went negative: %v", txs.totalcost)
-		}
 	}
 	return nil
 }
@@ -255,7 +253,7 @@ func (c *testChain) State() (*state.StateDB, error) {
 		c.statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
 		// simulate that the new head block included tx0 and tx1
 		c.statedb.SetNonce(c.address, 2)
-		c.statedb.SetBalance(c.address, new(big.Int).SetUint64(params.Ether))
+		c.statedb.SetBalance(c.address, new(uint256.Int).SetUint64(params.Ether))
 		*c.trigger = false
 	}
 	return stdb, nil
@@ -275,14 +273,14 @@ func TestStateChangeDuringReset(t *testing.T) {
 	)
 
 	// setup pool with 2 transaction in it
-	statedb.SetBalance(address, new(big.Int).SetUint64(params.Ether))
+	statedb.SetBalance(address, new(uint256.Int).SetUint64(params.Ether))
 	blockchain := &testChain{newTestBlockChain(params.TestChainConfig, 1000000000, statedb, new(event.Feed)), address, &trigger}
 
 	tx0 := transaction(0, 100000, key)
 	tx1 := transaction(1, 100000, key)
 
 	pool := New(testTxPoolConfig, blockchain)
-	pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	nonce := pool.Nonce(address)
@@ -309,7 +307,7 @@ func TestStateChangeDuringReset(t *testing.T) {
 
 func testAddBalance(pool *LegacyPool, addr common.Address, amount *big.Int) {
 	pool.mu.Lock()
-	pool.currentState.AddBalance(addr, amount)
+	pool.currentState.AddBalance(addr, uint256.MustFromBig(amount))
 	pool.mu.Unlock()
 }
 
@@ -348,7 +346,7 @@ func TestInvalidTransactions(t *testing.T) {
 	}
 
 	tx = transaction(1, 100000, key)
-	pool.gasTip.Store(big.NewInt(1000))
+	pool.gasTip.Store(uint256.NewInt(1000))
 	if err, want := pool.addRemote(tx), txpool.ErrUnderpriced; !errors.Is(err, want) {
 		t.Errorf("want %v have %v", want, err)
 	}
@@ -470,7 +468,7 @@ func TestChainFork(t *testing.T) {
 	addr := crypto.PubkeyToAddress(key.PublicKey)
 	resetState := func() {
 		statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-		statedb.AddBalance(addr, big.NewInt(100000000000000))
+		statedb.AddBalance(addr, uint256.NewInt(100000000000000))
 
 		pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed))
 		<-pool.requestReset(nil, nil)
@@ -499,7 +497,7 @@ func TestDoubleNonce(t *testing.T) {
 	addr := crypto.PubkeyToAddress(key.PublicKey)
 	resetState := func() {
 		statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-		statedb.AddBalance(addr, big.NewInt(100000000000000))
+		statedb.AddBalance(addr, uint256.NewInt(100000000000000))
 
 		pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed))
 		<-pool.requestReset(nil, nil)
@@ -702,7 +700,7 @@ func TestPostponing(t *testing.T) {
 	blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
 
 	pool := New(testTxPoolConfig, blockchain)
-	pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	// Create two test accounts to produce different gap profiles with
@@ -919,7 +917,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
 	config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible)
 
 	pool := New(config, blockchain)
-	pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	// Create a number of test accounts and fund them (last one will be the local)
@@ -1012,7 +1010,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
 	config.NoLocals = nolocals
 
 	pool := New(config, blockchain)
-	pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	// Create two test accounts to ensure remotes expire but locals do not
@@ -1197,7 +1195,7 @@ func TestPendingGlobalLimiting(t *testing.T) {
 	config.GlobalSlots = config.AccountSlots * 10
 
 	pool := New(config, blockchain)
-	pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	// Create a number of test accounts and fund them
@@ -1301,7 +1299,7 @@ func TestCapClearsFromAll(t *testing.T) {
 	config.GlobalSlots = 8
 
 	pool := New(config, blockchain)
-	pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	// Create a number of test accounts and fund them
@@ -1334,7 +1332,7 @@ func TestPendingMinimumAllowance(t *testing.T) {
 	config.GlobalSlots = 1
 
 	pool := New(config, blockchain)
-	pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	// Create a number of test accounts and fund them
@@ -1380,7 +1378,7 @@ func TestRepricing(t *testing.T) {
 	blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
 
 	pool := New(testTxPoolConfig, blockchain)
-	pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	// Keep track of transaction events to ensure all executables get announced
@@ -1502,7 +1500,7 @@ func TestMinGasPriceEnforced(t *testing.T) {
 	txPoolConfig := DefaultConfig
 	txPoolConfig.NoLocals = true
 	pool := New(txPoolConfig, blockchain)
-	pool.Init(new(big.Int).SetUint64(txPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(txPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	key, _ := crypto.GenerateKey()
@@ -1673,7 +1671,7 @@ func TestRepricingKeepsLocals(t *testing.T) {
 	blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
 
 	pool := New(testTxPoolConfig, blockchain)
-	pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	// Create a number of test accounts and fund them
@@ -1751,7 +1749,7 @@ func TestUnderpricing(t *testing.T) {
 	config.GlobalQueue = 2
 
 	pool := New(config, blockchain)
-	pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	// Keep track of transaction events to ensure all executables get announced
@@ -1866,7 +1864,7 @@ func TestStableUnderpricing(t *testing.T) {
 	config.GlobalQueue = 0
 
 	pool := New(config, blockchain)
-	pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	// Keep track of transaction events to ensure all executables get announced
@@ -2095,7 +2093,7 @@ func TestDeduplication(t *testing.T) {
 	blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
 
 	pool := New(testTxPoolConfig, blockchain)
-	pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	// Create a test account to add transactions with
@@ -2162,7 +2160,7 @@ func TestReplacement(t *testing.T) {
 	blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
 
 	pool := New(testTxPoolConfig, blockchain)
-	pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	// Keep track of transaction events to ensure all executables get announced
@@ -2373,7 +2371,7 @@ func testJournaling(t *testing.T, nolocals bool) {
 	config.Rejournal = time.Second
 
 	pool := New(config, blockchain)
-	pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 
 	// Create two test accounts to ensure remotes expire but locals do not
 	local, _ := crypto.GenerateKey()
@@ -2411,7 +2409,7 @@ func testJournaling(t *testing.T, nolocals bool) {
 	blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
 
 	pool = New(config, blockchain)
-	pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 
 	pending, queued = pool.Stats()
 	if queued != 0 {
@@ -2438,7 +2436,7 @@ func testJournaling(t *testing.T, nolocals bool) {
 	statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
 	blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
 	pool = New(config, blockchain)
-	pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 
 	pending, queued = pool.Stats()
 	if pending != 0 {
@@ -2469,7 +2467,7 @@ func TestStatusCheck(t *testing.T) {
 	blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
 
 	pool := New(testTxPoolConfig, blockchain)
-	pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
+	pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
 	defer pool.Close()
 
 	// Create the test accounts to check various transaction statuses with
@@ -2662,7 +2660,7 @@ func BenchmarkMultiAccountBatchInsert(b *testing.B) {
 	for i := 0; i < b.N; i++ {
 		key, _ := crypto.GenerateKey()
 		account := crypto.PubkeyToAddress(key.PublicKey)
-		pool.currentState.AddBalance(account, big.NewInt(1000000))
+		pool.currentState.AddBalance(account, uint256.NewInt(1000000))
 		tx := transaction(uint64(0), 100000, key)
 		batches[i] = tx
 	}
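The test churn in this file is mostly mechanical: the StateDB balance helpers now take *uint256.Int, so literals go through uint256.NewInt and existing *big.Int values through uint256.MustFromBig, which panics on values that do not fit in 256 bits. The two conversions in isolation:

```go
package main

import (
    "fmt"
    "math/big"

    "github.com/holiman/uint256"
)

func main() {
    // Convert at the boundary, exactly as testAddBalance and the funding calls do.
    fromLiteral := uint256.NewInt(100000000000)
    fromBig := uint256.MustFromBig(big.NewInt(1000000))

    fmt.Println(fromLiteral.Uint64(), fromBig.Uint64())
}
```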
@@ -27,6 +27,8 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/holiman/uint256"
+	"golang.org/x/exp/slices"
 )
 
 // nonceHeap is a heap.Interface implementation over 64bit unsigned integers for
@@ -159,14 +161,14 @@ func (m *sortedMap) Cap(threshold int) types.Transactions {
 	}
 	// Otherwise gather and drop the highest nonce'd transactions
 	var drops types.Transactions
-	sort.Sort(*m.index)
+	slices.Sort(*m.index)
 	for size := len(m.items); size > threshold; size-- {
 		drops = append(drops, m.items[(*m.index)[size-1]])
 		delete(m.items, (*m.index)[size-1])
 	}
 	*m.index = (*m.index)[:threshold]
-	heap.Init(m.index)
+	// The sorted m.index slice is still a valid heap, so there is no need to
+	// reheap after deleting tail items.
 
 	// If we had a cache, shift the back
 	m.cacheMu.Lock()
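The reasoning behind dropping heap.Init in Cap is that an ascending-sorted slice already satisfies the min-heap invariant (every parent is less than or equal to its children), and truncating the tail preserves it. A small stand-alone check of that claim, using a simplified uint64 heap in place of the pool's nonceHeap and the Go 1.21 standard slices package rather than x/exp/slices:

```go
package main

import (
    "container/heap"
    "fmt"
    "slices"
)

// uintHeap is a simplified stand-in for the pool's nonceHeap: a min-heap over uint64s.
type uintHeap []uint64

func (h uintHeap) Len() int           { return len(h) }
func (h uintHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h uintHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *uintHeap) Push(x any)        { *h = append(*h, x.(uint64)) }
func (h *uintHeap) Pop() any {
    old := *h
    n := len(old)
    x := old[n-1]
    *h = old[:n-1]
    return x
}

func main() {
    // Sorting gives a valid min-heap; truncating the tail keeps it valid, so no
    // heap.Init is needed afterwards.
    h := uintHeap{9, 3, 7, 1, 5}
    slices.Sort(h)
    h = h[:3] // drop the largest entries, as Cap does

    heap.Push(&h, uint64(2)) // heap operations remain safe on the truncated slice
    fmt.Println(h)
}
```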
@@ -271,19 +273,19 @@ type list struct {
 	strict bool       // Whether nonces are strictly continuous or not
 	txs    *sortedMap // Heap indexed sorted hash map of the transactions
 
-	costcap   *big.Int // Price of the highest costing transaction (reset only if exceeds balance)
+	costcap   *uint256.Int // Price of the highest costing transaction (reset only if exceeds balance)
 	gascap    uint64   // Gas limit of the highest spending transaction (reset only if exceeds block limit)
-	totalcost *big.Int // Total cost of all transactions in the list
+	totalcost *uint256.Int // Total cost of all transactions in the list
 }
 
-// newList create a new transaction list for maintaining nonce-indexable fast,
+// newList creates a new transaction list for maintaining nonce-indexable fast,
 // gapped, sortable transaction lists.
 func newList(strict bool) *list {
 	return &list{
 		strict:    strict,
 		txs:       newSortedMap(),
-		costcap:   new(big.Int),
-		totalcost: new(big.Int),
+		costcap:   new(uint256.Int),
+		totalcost: new(uint256.Int),
 	}
 }
 
@@ -325,10 +327,15 @@ func (l *list) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Transa
 		l.subTotalCost([]*types.Transaction{old})
 	}
 	// Add new tx cost to totalcost
-	l.totalcost.Add(l.totalcost, tx.Cost())
+	cost, overflow := uint256.FromBig(tx.Cost())
+	if overflow {
+		return false, nil
+	}
+	l.totalcost.Add(l.totalcost, cost)
 
 	// Otherwise overwrite the old transaction with the current one
 	l.txs.Put(tx)
-	if cost := tx.Cost(); l.costcap.Cmp(cost) < 0 {
+	if l.costcap.Cmp(cost) < 0 {
 		l.costcap = cost
 	}
 	if gas := tx.Gas(); l.gascap < gas {
@@ -355,17 +362,17 @@ func (l *list) Forward(threshold uint64) types.Transactions {
 // a point in calculating all the costs or if the balance covers all. If the threshold
 // is lower than the costgas cap, the caps will be reset to a new high after removing
 // the newly invalidated transactions.
-func (l *list) Filter(costLimit *big.Int, gasLimit uint64) (types.Transactions, types.Transactions) {
+func (l *list) Filter(costLimit *uint256.Int, gasLimit uint64) (types.Transactions, types.Transactions) {
 	// If all transactions are below the threshold, short circuit
 	if l.costcap.Cmp(costLimit) <= 0 && l.gascap <= gasLimit {
 		return nil, nil
 	}
-	l.costcap = new(big.Int).Set(costLimit) // Lower the caps to the thresholds
+	l.costcap = new(uint256.Int).Set(costLimit) // Lower the caps to the thresholds
 	l.gascap = gasLimit
 
 	// Filter out all the transactions above the account's funds
 	removed := l.txs.Filter(func(tx *types.Transaction) bool {
-		return tx.Gas() > gasLimit || tx.Cost().Cmp(costLimit) > 0
+		return tx.Gas() > gasLimit || tx.Cost().Cmp(costLimit.ToBig()) > 0
 	})
 
 	if len(removed) == 0 {
@@ -456,7 +463,10 @@ func (l *list) LastElement() *types.Transaction {
 // total cost of all transactions.
 func (l *list) subTotalCost(txs []*types.Transaction) {
 	for _, tx := range txs {
-		l.totalcost.Sub(l.totalcost, tx.Cost())
+		_, underflow := l.totalcost.SubOverflow(l.totalcost, uint256.MustFromBig(tx.Cost()))
+		if underflow {
+			panic("totalcost underflow")
+		}
 	}
 }
 
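Cost bookkeeping in list.go is now done in uint256, so both directions are guarded explicitly: uint256.FromBig reports values wider than 256 bits (the Add path rejects such transactions) and SubOverflow reports an underflow instead of wrapping silently (the subtract path panics, since that would indicate corrupted accounting). The same two checks in isolation:

```go
package main

import (
    "fmt"
    "math/big"

    "github.com/holiman/uint256"
)

func main() {
    huge := new(big.Int).Lsh(big.NewInt(1), 300) // 2^300, does not fit in 256 bits

    if _, overflow := uint256.FromBig(huge); overflow {
        fmt.Println("cost exceeds 256 bits; such a transaction is rejected by Add")
    }

    total := uint256.NewInt(100)
    if _, underflow := total.SubOverflow(total, uint256.NewInt(200)); underflow {
        fmt.Println("totalcost underflow detected")
    }
}
```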
@@ -21,8 +21,10 @@ import (
 	"math/rand"
 	"testing"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/holiman/uint256"
 )
 
 // Tests that transactions can be added to strict lists and list contents and
@@ -51,6 +53,21 @@ func TestStrictListAdd(t *testing.T) {
 	}
 }
 
+// TestListAddVeryExpensive tests adding txs which exceed 256 bits in cost. It is
+// expected that the list does not panic.
+func TestListAddVeryExpensive(t *testing.T) {
+	key, _ := crypto.GenerateKey()
+	list := newList(true)
+	for i := 0; i < 3; i++ {
+		value := big.NewInt(100)
+		gasprice, _ := new(big.Int).SetString("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 0)
+		gaslimit := uint64(i)
+		tx, _ := types.SignTx(types.NewTransaction(uint64(i), common.Address{}, value, gaslimit, gasprice, nil), types.HomesteadSigner{}, key)
+		t.Logf("cost: %x bitlen: %d\n", tx.Cost(), tx.Cost().BitLen())
+		list.Add(tx, DefaultConfig.PriceBump)
+	}
+}
+
 func BenchmarkListAdd(b *testing.B) {
 	// Generate a list of transactions to insert
 	key, _ := crypto.GenerateKey()
@@ -60,7 +77,7 @@ func BenchmarkListAdd(b *testing.B) {
 		txs[i] = transaction(uint64(i), 0, key)
 	}
 	// Insert the transactions in a random order
-	priceLimit := big.NewInt(int64(DefaultConfig.PriceLimit))
+	priceLimit := uint256.NewInt(DefaultConfig.PriceLimit)
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
 		list := newList(true)
@@ -70,3 +87,25 @@ func BenchmarkListAdd(b *testing.B) {
 		}
 	}
 }
+
+func BenchmarkListCapOneTx(b *testing.B) {
+	// Generate a list of transactions to insert
+	key, _ := crypto.GenerateKey()
+
+	txs := make(types.Transactions, 32)
+	for i := 0; i < len(txs); i++ {
+		txs[i] = transaction(uint64(i), 0, key)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		list := newList(true)
+		// Insert the transactions in a random order
+		for _, v := range rand.Perm(len(txs)) {
+			list.Add(txs[v], DefaultConfig.PriceBump)
+		}
+		b.StartTimer()
+		list.Cap(list.Len() - 1)
+		b.StopTimer()
+	}
+}
@@ -24,6 +24,7 @@ import (
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/event"
+	"github.com/holiman/uint256"
 )
 
 // LazyTransaction contains a small subset of the transaction properties that is
@@ -35,8 +36,8 @@ type LazyTransaction struct {
 	Tx   *types.Transaction // Transaction if already resolved
 
 	Time      time.Time    // Time when the transaction was first seen
-	GasFeeCap *big.Int     // Maximum fee per gas the transaction may consume
-	GasTipCap *big.Int     // Maximum miner tip per gas the transaction can pay
+	GasFeeCap *uint256.Int // Maximum fee per gas the transaction may consume
+	GasTipCap *uint256.Int // Maximum miner tip per gas the transaction can pay
 
 	Gas     uint64 // Amount of gas required by the transaction
 	BlobGas uint64 // Amount of blob gas required by the transaction
@@ -44,12 +45,18 @@ type LazyTransaction struct {
 
 // Resolve retrieves the full transaction belonging to a lazy handle if it is still
 // maintained by the transaction pool.
+//
+// Note, the method will *not* cache the retrieved transaction if the original
+// pool has not cached it. The idea being, that if the tx was too big to insert
+// originally, silently saving it will cause more trouble down the line (and
+// indeed seems to have caused a memory bloat in the original implementation
+// which did just that).
 func (ltx *LazyTransaction) Resolve() *types.Transaction {
-	if ltx.Tx == nil {
-		ltx.Tx = ltx.Pool.Get(ltx.Hash)
+	if ltx.Tx != nil {
+		return ltx.Tx
 	}
-	return ltx.Tx
+	return ltx.Pool.Get(ltx.Hash)
 }
 
 // LazyResolver is a minimal interface needed for a transaction pool to satisfy
 // resolving lazy transactions. It's mostly a helper to avoid the entire sub-
|
|||||||
// may request (and relinquish) exclusive access to certain addresses.
|
// may request (and relinquish) exclusive access to certain addresses.
|
||||||
type AddressReserver func(addr common.Address, reserve bool) error
|
type AddressReserver func(addr common.Address, reserve bool) error
|
||||||
|
|
||||||
|
// PendingFilter is a collection of filter rules to allow retrieving a subset
|
||||||
|
// of transactions for announcement or mining.
|
||||||
|
//
|
||||||
|
// Note, the entries here are not arbitrary useful filters, rather each one has
|
||||||
|
// a very specific call site in mind and each one can be evaluated very cheaply
|
||||||
|
// by the pool implementations. Only add new ones that satisfy those constraints.
|
||||||
|
type PendingFilter struct {
|
||||||
|
MinTip *uint256.Int // Minimum miner tip required to include a transaction
|
||||||
|
BaseFee *uint256.Int // Minimum 1559 basefee needed to include a transaction
|
||||||
|
BlobFee *uint256.Int // Minimum 4844 blobfee needed to include a blob transaction
|
||||||
|
|
||||||
|
OnlyPlainTxs bool // Return only plain EVM transactions (peer-join announces, block space filling)
|
||||||
|
OnlyBlobTxs bool // Return only blob transactions (block blob-space filling)
|
||||||
|
}
|
||||||
|
|
||||||
// SubPool represents a specialized transaction pool that lives on its own (e.g.
|
// SubPool represents a specialized transaction pool that lives on its own (e.g.
|
||||||
// blob pool). Since independent of how many specialized pools we have, they do
|
// blob pool). Since independent of how many specialized pools we have, they do
|
||||||
// need to be updated in lockstep and assemble into one coherent view for block
|
// need to be updated in lockstep and assemble into one coherent view for block
|
||||||
// production, this interface defines the common methods that allow the primary
|
// production, this interface defines the common methods that allow the primary
|
||||||
// transaction pool to manage the subpools.
|
// transaction pool to manage the subpools.
|
||||||
type SubPool interface {
|
type SubPool interface {
|
||||||
// Filter is a selector used to decide whether a transaction whould be added
|
// Filter is a selector used to decide whether a transaction would be added
|
||||||
// to this particular subpool.
|
// to this particular subpool.
|
||||||
Filter(tx *types.Transaction) bool
|
Filter(tx *types.Transaction) bool
|
||||||
|
|
||||||
@ -80,7 +102,7 @@ type SubPool interface {
|
|||||||
// These should not be passed as a constructor argument - nor should the pools
|
// These should not be passed as a constructor argument - nor should the pools
|
||||||
// start by themselves - in order to keep multiple subpools in lockstep with
|
// start by themselves - in order to keep multiple subpools in lockstep with
|
||||||
// one another.
|
// one another.
|
||||||
Init(gasTip *big.Int, head *types.Header, reserve AddressReserver) error
|
Init(gasTip uint64, head *types.Header, reserve AddressReserver) error
|
||||||
|
|
||||||
// Close terminates any background processing threads and releases any held
|
// Close terminates any background processing threads and releases any held
|
||||||
// resources.
|
// resources.
|
||||||
@ -108,7 +130,10 @@ type SubPool interface {
|
|||||||
|
|
||||||
// Pending retrieves all currently processable transactions, grouped by origin
|
// Pending retrieves all currently processable transactions, grouped by origin
|
||||||
// account and sorted by nonce.
|
// account and sorted by nonce.
|
||||||
Pending(enforceTips bool) map[common.Address][]*LazyTransaction
|
//
|
||||||
|
// The transactions can also be pre-filtered by the dynamic fee components to
|
||||||
|
// reduce allocations and load on downstream subsystems.
|
||||||
|
Pending(filter PendingFilter) map[common.Address][]*LazyTransaction
|
||||||
|
|
||||||
// SubscribeTransactions subscribes to new transaction events. The subscriber
|
// SubscribeTransactions subscribes to new transaction events. The subscriber
|
||||||
// can decide whether to receive notifications only for newly seen transactions
|
// can decide whether to receive notifications only for newly seen transactions
|
||||||
|
@@ -72,11 +72,14 @@ type TxPool struct {
 
     subs event.SubscriptionScope // Subscription scope to unsubscribe all on shutdown
     quit chan chan error         // Quit channel to tear down the head updater
+    term chan struct{}           // Termination channel to detect a closed pool
+
+    sync chan chan error // Testing / simulator channel to block until internal reset is done
 }
 
 // New creates a new transaction pool to gather, sort and filter inbound
 // transactions from the network.
-func New(gasTip *big.Int, chain BlockChain, subpools []SubPool) (*TxPool, error) {
+func New(gasTip uint64, chain BlockChain, subpools []SubPool) (*TxPool, error) {
     // Retrieve the current head so that all subpools and this main coordinator
     // pool will have the same starting state, even if the chain moves forward
     // during initialization.
@@ -86,6 +89,8 @@ func New(gasTip *big.Int, chain BlockChain, subpools []SubPool) (*TxPool, error)
         subpools:     subpools,
         reservations: make(map[common.Address]SubPool),
         quit:         make(chan chan error),
+        term:         make(chan struct{}),
+        sync:         make(chan chan error),
     }
     for i, subpool := range subpools {
         if err := subpool.Init(gasTip, head, pool.reserver(i, subpool)); err != nil {
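Putting the new signature together with the subpool list, wiring the coordinator pool might look roughly like this. The legacypool/blobpool constructor names and default configs are assumed from the upstream subpool packages and are not part of this hunk:

```go
package nodesketch

import (
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/txpool"
    "github.com/ethereum/go-ethereum/core/txpool/blobpool"
    "github.com/ethereum/go-ethereum/core/txpool/legacypool"
)

// newTxPool shows the shape of the wiring only; chain must satisfy the
// BlockChain interfaces of each subpool as well as of the coordinator.
func newTxPool(gasTip uint64, chain *core.BlockChain) (*txpool.TxPool, error) {
    legacy := legacypool.New(legacypool.DefaultConfig, chain)
    blobs := blobpool.New(blobpool.DefaultConfig, chain)
    return txpool.New(gasTip, chain, []txpool.SubPool{legacy, blobs})
}
```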
@@ -117,7 +122,7 @@ func (p *TxPool) reserver(id int, subpool SubPool) AddressReserver {
                 log.Error("pool attempted to reserve already-owned address", "address", addr)
                 return nil // Ignore fault to give the pool a chance to recover while the bug gets fixed
             }
-            return errors.New("address already reserved")
+            return ErrAlreadyReserved
         }
         p.reservations[addr] = subpool
         if metrics.Enabled {
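Switching from an ad-hoc errors.New to the exported ErrAlreadyReserved sentinel lets subpools react to reservation clashes programmatically. A hedged sketch of a caller-side check, assuming the sentinel is exported from this package as the hunk suggests:

```go
package reservesketch

import (
    "errors"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/txpool"
)

// tryReserve sketches how a subpool might react to the new sentinel; reserve
// is the txpool.AddressReserver handed to it through Init.
func tryReserve(reserve txpool.AddressReserver, addr common.Address) (bool, error) {
    if err := reserve(addr, true); err != nil {
        if errors.Is(err, txpool.ErrAlreadyReserved) {
            // Another subpool already owns this sender; back off gracefully.
            return false, nil
        }
        return false, err
    }
    return true, nil
}
```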
@@ -174,6 +179,9 @@ func (p *TxPool) Close() error {
 // outside blockchain events as well as for various reporting and transaction
 // eviction events.
 func (p *TxPool) loop(head *types.Header, chain BlockChain) {
+    // Close the termination marker when the pool stops
+    defer close(p.term)
+
     // Subscribe to chain head events to trigger subpool resets
     var (
         newHeadCh = make(chan core.ChainHeadEvent)
@@ -190,13 +198,23 @@ func (p *TxPool) loop(head *types.Header, chain BlockChain) {
     var (
         resetBusy = make(chan struct{}, 1) // Allow 1 reset to run concurrently
         resetDone = make(chan *types.Header)
+
+        resetForced bool       // Whether a forced reset was requested, only used in simulator mode
+        resetWaiter chan error // Channel waiting on a forced reset, only used in simulator mode
     )
+    // Notify the live reset waiter to not block if the txpool is closed.
+    defer func() {
+        if resetWaiter != nil {
+            resetWaiter <- errors.New("pool already terminated")
+            resetWaiter = nil
+        }
+    }()
     var errc chan error
     for errc == nil {
         // Something interesting might have happened, run a reset if there is
         // one needed but none is running. The resetter will run on its own
         // goroutine to allow chain head events to be consumed contiguously.
-        if newHead != oldHead {
+        if newHead != oldHead || resetForced {
             // Try to inject a busy marker and start a reset if successful
             select {
             case resetBusy <- struct{}{}:
@@ -208,8 +226,17 @@ func (p *TxPool) loop(head *types.Header, chain BlockChain) {
                     resetDone <- newHead
                 }(oldHead, newHead)
 
+                // If the reset operation was explicitly requested, consider it
+                // being fulfilled and drop the request marker. If it was not,
+                // this is a noop.
+                resetForced = false
+
             default:
-                // Reset already running, wait until it finishes
+                // Reset already running, wait until it finishes.
+                //
+                // Note, this will not drop any forced reset request. If a forced
+                // reset was requested, but we were busy, then when the currently
+                // running reset finishes, a new one will be spun up.
             }
         }
         // Wait for the next chain head event or a previous reset finish
@@ -223,8 +250,26 @@ func (p *TxPool) loop(head *types.Header, chain BlockChain) {
             oldHead = head
             <-resetBusy
 
+            // If someone is waiting for a reset to finish, notify them, unless
+            // the forced op is still pending. In that case, wait another round
+            // of resets.
+            if resetWaiter != nil && !resetForced {
+                resetWaiter <- nil
+                resetWaiter = nil
+            }
+
         case errc = <-p.quit:
             // Termination requested, break out on the next loop round
+
+        case syncc := <-p.sync:
+            // Transaction pool is running inside a simulator, and we are about
+            // to create a new block. Request a forced sync operation to ensure
+            // that any running reset operation finishes to make block imports
+            // deterministic. On top of that, run a new reset operation to make
+            // transaction insertions deterministic instead of being stuck in a
+            // queue waiting for a reset.
+            resetForced = true
+            resetWaiter = syncc
         }
     }
     // Notify the closer of termination (no error possible for now)
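Taken together, the loop changes implement a small coalescing handshake: a one-slot busy marker serialises resets, the forced flag guarantees that a sync request triggers at least one fresh reset, and the waiter channel is only answered once no forced reset is outstanding. The standalone sketch below reproduces that pattern in isolation; the names and the run callback are invented for illustration and are not the pool's actual code.

```go
package coalesce

import "errors"

// loop coalesces work requests: triggers mark work as pending, sync requests
// force at least one more full run and are answered only after it completes.
func loop(trigger chan struct{}, sync chan chan error, quit chan struct{}, run func()) {
    var (
        busy    = make(chan struct{}, 1) // one-slot marker: at most one run in flight
        done    = make(chan struct{}, 1) // buffered so a run can finish after quit
        pending bool                     // a trigger arrived, no run started for it yet
        forced  bool                     // a flush was requested, no run started for it yet
        waiter  chan error               // whoever asked for the flush
    )
    defer func() {
        if waiter != nil {
            waiter <- errors.New("loop terminated") // mirror the pool: never leave a waiter hanging
        }
    }()
    for {
        if pending || forced {
            select {
            case busy <- struct{}{}:
                pending, forced = false, false
                go func() {
                    run()
                    done <- struct{}{}
                }()
            default:
                // A run is already in flight; keep the flags and retry after it finishes.
            }
        }
        select {
        case <-trigger:
            pending = true
        case <-done:
            <-busy
            if waiter != nil && !forced {
                waiter <- nil
                waiter = nil
            }
        case errc := <-sync:
            forced = true
            waiter = errc
        case <-quit:
            return
        }
    }
}
```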
@@ -308,10 +353,13 @@ func (p *TxPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
 
 // Pending retrieves all currently processable transactions, grouped by origin
 // account and sorted by nonce.
-func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*LazyTransaction {
+//
+// The transactions can also be pre-filtered by the dynamic fee components to
+// reduce allocations and load on downstream subsystems.
+func (p *TxPool) Pending(filter PendingFilter) map[common.Address][]*LazyTransaction {
     txs := make(map[common.Address][]*LazyTransaction)
     for _, subpool := range p.subpools {
-        for addr, set := range subpool.Pending(enforceTips) {
+        for addr, set := range subpool.Pending(filter) {
             txs[addr] = set
         }
     }
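On the consumer side the call changes from Pending(true) to a filter value. A sketch of a block-producer-style caller; the BaseFee field is assumed to sit alongside the fields shown earlier in this diff:

```go
package minersketch

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/txpool"
    "github.com/holiman/uint256"
)

// pendingPlain sketches the producer-side call: instead of Pending(true), the
// caller now describes what it wants and lets the subpools pre-filter.
func pendingPlain(pool *txpool.TxPool, baseFee *uint256.Int) map[common.Address][]*txpool.LazyTransaction {
    return pool.Pending(txpool.PendingFilter{
        OnlyPlainTxs: true,    // skip blob transactions entirely
        BaseFee:      baseFee, // drop anything unexecutable at the current basefee
    })
}
```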
@@ -415,3 +463,20 @@ func (p *TxPool) Status(hash common.Hash) TxStatus {
     }
     return TxStatusUnknown
 }
+
+// Sync is a helper method for unit tests or simulator runs where the chain events
+// are arriving in quick succession, without any time in between them to run the
+// internal background reset operations. This method will run an explicit reset
+// operation to ensure the pool stabilises, thus avoiding flakey behavior.
+//
+// Note, do not use this in production / live code. In live code, the pool is
+// meant to reset on a separate thread to avoid DoS vectors.
+func (p *TxPool) Sync() error {
+    sync := make(chan error)
+    select {
+    case p.sync <- sync:
+        return <-sync
+    case <-p.term:
+        return errors.New("pool already terminated")
+    }
+}
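Sync is strictly a test/simulator affordance. A hypothetical test helper showing the intended usage; the helper and its name are illustrative, not part of the change:

```go
package txpooltest

import (
    "testing"

    "github.com/ethereum/go-ethereum/core/txpool"
)

// waitPoolStable is a hypothetical test helper: after the harness has pushed a
// chain head event, block until the pool's internal reset has run so that
// subsequent assertions see a deterministic pending set.
func waitPoolStable(t *testing.T, pool *txpool.TxPool) {
    t.Helper()
    if err := pool.Sync(); err != nil {
        t.Fatalf("txpool failed to stabilise: %v", err)
    }
}
```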
Some files were not shown because too many files have changed in this diff.