Merge pull request #91 from openrelayxyz/merge/geth-v1.13.2

Merge/geth v1.13.2
AusIV 2023-09-28 12:10:00 -05:00 committed by GitHub
commit b549624096
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
74 changed files with 1080 additions and 744 deletions

View File

@@ -199,7 +199,6 @@ func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address,
 	if err != nil {
 		return nil, err
 	}
-
 	return stateDB.GetCode(contract), nil
 }
@@ -212,7 +211,6 @@ func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Addres
 	if err != nil {
 		return nil, err
 	}
-
 	return stateDB.GetBalance(contract), nil
 }
@@ -225,7 +223,6 @@ func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address,
 	if err != nil {
 		return 0, err
 	}
-
 	return stateDB.GetNonce(contract), nil
 }
@@ -238,7 +235,6 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres
 	if err != nil {
 		return nil, err
 	}
-
 	val := stateDB.GetState(contract, key)
 	return val[:], nil
 }
@@ -700,8 +696,10 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
 		}
 		block.AddTxWithChain(b.blockchain, tx)
 	})
-	stateDB, _ := b.blockchain.State()
-
+	stateDB, err := b.blockchain.State()
+	if err != nil {
+		return err
+	}
 	b.pendingBlock = blocks[0]
 	b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
 	b.pendingReceipts = receipts[0]
@@ -821,11 +819,12 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
 	blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
 		block.OffsetTime(int64(adjustment.Seconds()))
 	})
-	stateDB, _ := b.blockchain.State()
-
+	stateDB, err := b.blockchain.State()
+	if err != nil {
+		return err
+	}
 	b.pendingBlock = blocks[0]
 	b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
-
 	return nil
 }
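Note: the hunks above replace a discarded error from `blockchain.State()` with an explicit check. A minimal, self-contained sketch of the pattern (the `chain` and `stateDB` types below are hypothetical stand-ins, not geth's):

```go
package main

import (
	"errors"
	"fmt"
)

type stateDB struct{}

// chain is a hypothetical stand-in for *core.BlockChain; State can fail,
// for example when the head state has been pruned.
type chain struct{ broken bool }

func (c *chain) State() (*stateDB, error) {
	if c.broken {
		return nil, errors.New("head state missing")
	}
	return &stateDB{}, nil
}

// pendingState mirrors the fix: return the error instead of continuing
// with a nil state (the old `stateDB, _ := ...` would crash later).
func pendingState(c *chain) (*stateDB, error) {
	sdb, err := c.State()
	if err != nil {
		return nil, err
	}
	return sdb, nil
}

func main() {
	_, err := pendingState(&chain{broken: true})
	fmt.Println(err) // head state missing
}
```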

View File

@@ -20,6 +20,7 @@
 package keystore

 import (
+	"os"
 	"time"

 	"github.com/ethereum/go-ethereum/log"
@@ -77,7 +78,9 @@ func (w *watcher) loop() {
 	}
 	defer watcher.Close()
 	if err := watcher.Add(w.ac.keydir); err != nil {
-		logger.Warn("Failed to watch keystore folder", "err", err)
+		if !os.IsNotExist(err) {
+			logger.Warn("Failed to watch keystore folder", "err", err)
+		}
 		return
 	}
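Note: the watcher now ignores a missing keystore directory, which is expected on a fresh node. A small sketch of the same filter using only the standard library:

```go
package main

import (
	"fmt"
	"os"
)

// shouldWarn mirrors the check above: a missing directory is expected on
// first run (it is created when the first key is stored), so only other
// errors are worth a warning.
func shouldWarn(err error) bool {
	return err != nil && !os.IsNotExist(err)
}

func main() {
	_, err := os.Stat("/definitely/missing/keystore")
	fmt.Println(shouldWarn(err)) // false: not-exist is silently ignored
}
```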

View File

@@ -17,6 +17,7 @@
 package main

 import (
+	"errors"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -51,7 +52,14 @@ type resolver interface {
 	RequestENR(*enode.Node) (*enode.Node, error)
 }

-func newCrawler(input nodeSet, disc resolver, iters ...enode.Iterator) *crawler {
+func newCrawler(input nodeSet, bootnodes []*enode.Node, disc resolver, iters ...enode.Iterator) (*crawler, error) {
+	if len(input) == 0 {
+		input.add(bootnodes...)
+	}
+	if len(input) == 0 {
+		return nil, errors.New("no input nodes to start crawling")
+	}
 	c := &crawler{
 		input:  input,
 		output: make(nodeSet, len(input)),
@@ -67,7 +75,7 @@ func newCrawler(input nodeSet, disc resolver, iters ...enode.Iterator) *crawler
 	for id, n := range input {
 		c.output[id] = n
 	}
-	return c
+	return c, nil
 }

 func (c *crawler) run(timeout time.Duration, nthreads int) nodeSet {
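Note: `newCrawler` now seeds an empty input set from the bootnodes and refuses to start with nothing to crawl. A simplified, runnable sketch of that fallback (`nodeSet` is reduced to strings here; geth's maps node IDs to `*enode.Node`):

```go
package main

import (
	"errors"
	"fmt"
)

// nodeSet is simplified for illustration.
type nodeSet map[string]string

func (ns nodeSet) add(nodes ...string) {
	for _, n := range nodes {
		ns[n] = n
	}
}

// seedInput mirrors the fallback added to newCrawler above: an empty input
// set is seeded from bootnodes, and staying empty is a hard error instead
// of a silent no-op crawl.
func seedInput(input nodeSet, bootnodes []string) error {
	if len(input) == 0 {
		input.add(bootnodes...)
	}
	if len(input) == 0 {
		return errors.New("no input nodes to start crawling")
	}
	return nil
}

func main() {
	fmt.Println(seedInput(make(nodeSet), []string{"enode://a@1.2.3.4:30303"})) // <nil>
	fmt.Println(seedInput(make(nodeSet), nil))                                 // error
}
```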
func (c *crawler) run(timeout time.Duration, nthreads int) nodeSet { func (c *crawler) run(timeout time.Duration, nthreads int) nodeSet {

View File

@@ -143,7 +143,7 @@ var discoveryNodeFlags = []cli.Flag{
 func discv4Ping(ctx *cli.Context) error {
 	n := getNodeArg(ctx)
-	disc := startV4(ctx)
+	disc, _ := startV4(ctx)
 	defer disc.Close()

 	start := time.Now()
@@ -156,7 +156,7 @@ func discv4Ping(ctx *cli.Context) error {
 func discv4RequestRecord(ctx *cli.Context) error {
 	n := getNodeArg(ctx)
-	disc := startV4(ctx)
+	disc, _ := startV4(ctx)
 	defer disc.Close()

 	respN, err := disc.RequestENR(n)
@@ -169,7 +169,7 @@ func discv4RequestRecord(ctx *cli.Context) error {
 func discv4Resolve(ctx *cli.Context) error {
 	n := getNodeArg(ctx)
-	disc := startV4(ctx)
+	disc, _ := startV4(ctx)
 	defer disc.Close()

 	fmt.Println(disc.Resolve(n).String())
@@ -196,10 +196,13 @@ func discv4ResolveJSON(ctx *cli.Context) error {
 		nodeargs = append(nodeargs, n)
 	}

 	// Run the crawler.
-	disc := startV4(ctx)
+	disc, config := startV4(ctx)
 	defer disc.Close()
-	c := newCrawler(inputSet, disc, enode.IterNodes(nodeargs))
+	c, err := newCrawler(inputSet, config.Bootnodes, disc, enode.IterNodes(nodeargs))
+	if err != nil {
+		return err
+	}
 	c.revalidateInterval = 0
 	output := c.run(0, 1)
 	writeNodesJSON(nodesFile, output)
@@ -211,14 +214,18 @@ func discv4Crawl(ctx *cli.Context) error {
 		return errors.New("need nodes file as argument")
 	}
 	nodesFile := ctx.Args().First()
-	var inputSet nodeSet
+	inputSet := make(nodeSet)
 	if common.FileExist(nodesFile) {
 		inputSet = loadNodesJSON(nodesFile)
 	}

-	disc := startV4(ctx)
+	disc, config := startV4(ctx)
 	defer disc.Close()
-	c := newCrawler(inputSet, disc, disc.RandomNodes())
+	c, err := newCrawler(inputSet, config.Bootnodes, disc, disc.RandomNodes())
+	if err != nil {
+		return err
+	}
 	c.revalidateInterval = 10 * time.Minute
 	output := c.run(ctx.Duration(crawlTimeoutFlag.Name), ctx.Int(crawlParallelismFlag.Name))
 	writeNodesJSON(nodesFile, output)
@@ -238,14 +245,14 @@ func discv4Test(ctx *cli.Context) error {
 }

 // startV4 starts an ephemeral discovery V4 node.
-func startV4(ctx *cli.Context) *discover.UDPv4 {
+func startV4(ctx *cli.Context) (*discover.UDPv4, discover.Config) {
 	ln, config := makeDiscoveryConfig(ctx)
 	socket := listen(ctx, ln)
 	disc, err := discover.ListenV4(socket, ln, config)
 	if err != nil {
 		exit(err)
 	}
-	return disc
+	return disc, config
 }

 func makeDiscoveryConfig(ctx *cli.Context) (*enode.LocalNode, discover.Config) {
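Note: `startV4` (and `startV5` below) now hands the resolved `discover.Config` back to the caller so the crawl commands can reuse `config.Bootnodes`. A toy illustration of the shape, with stand-in types rather than geth's:

```go
package main

import "fmt"

// Config and Service are hypothetical stand-ins for discover.Config and
// *discover.UDPv4 from the diff above.
type Config struct{ Bootnodes []string }
type Service struct{}

// start mirrors the new startV4/startV5 shape: return the resolved config
// together with the running service instead of discarding it.
func start() (*Service, Config) {
	cfg := Config{Bootnodes: []string{"enode://example@127.0.0.1:30303"}}
	return &Service{}, cfg
}

func main() {
	svc, cfg := start()
	_ = svc
	fmt.Println(cfg.Bootnodes) // callers can now seed a crawler from these
}
```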

View File

@@ -81,7 +81,7 @@ var (
 func discv5Ping(ctx *cli.Context) error {
 	n := getNodeArg(ctx)
-	disc := startV5(ctx)
+	disc, _ := startV5(ctx)
 	defer disc.Close()

 	fmt.Println(disc.Ping(n))
@@ -90,7 +90,7 @@ func discv5Ping(ctx *cli.Context) error {
 func discv5Resolve(ctx *cli.Context) error {
 	n := getNodeArg(ctx)
-	disc := startV5(ctx)
+	disc, _ := startV5(ctx)
 	defer disc.Close()

 	fmt.Println(disc.Resolve(n))
@@ -102,14 +102,18 @@ func discv5Crawl(ctx *cli.Context) error {
 		return errors.New("need nodes file as argument")
 	}
 	nodesFile := ctx.Args().First()
-	var inputSet nodeSet
+	inputSet := make(nodeSet)
 	if common.FileExist(nodesFile) {
 		inputSet = loadNodesJSON(nodesFile)
 	}

-	disc := startV5(ctx)
+	disc, config := startV5(ctx)
 	defer disc.Close()
-	c := newCrawler(inputSet, disc, disc.RandomNodes())
+	c, err := newCrawler(inputSet, config.Bootnodes, disc, disc.RandomNodes())
+	if err != nil {
+		return err
+	}
 	c.revalidateInterval = 10 * time.Minute
 	output := c.run(ctx.Duration(crawlTimeoutFlag.Name), ctx.Int(crawlParallelismFlag.Name))
 	writeNodesJSON(nodesFile, output)
@@ -127,7 +131,7 @@ func discv5Test(ctx *cli.Context) error {
 }

 func discv5Listen(ctx *cli.Context) error {
-	disc := startV5(ctx)
+	disc, _ := startV5(ctx)
 	defer disc.Close()

 	fmt.Println(disc.Self())
@@ -135,12 +139,12 @@ func discv5Listen(ctx *cli.Context) error {
 }

 // startV5 starts an ephemeral discovery v5 node.
-func startV5(ctx *cli.Context) *discover.UDPv5 {
+func startV5(ctx *cli.Context) (*discover.UDPv5, discover.Config) {
 	ln, config := makeDiscoveryConfig(ctx)
 	socket := listen(ctx, ln)
 	disc, err := discover.ListenV5(socket, ln, config)
 	if err != nil {
 		exit(err)
 	}
-	return disc
+	return disc, config
 }

View File

@@ -120,6 +120,7 @@ func setupGeth(stack *node.Node) error {
 	if err != nil {
 		return err
 	}
+	backend.SetSynced()
 	_, err = backend.BlockChain().InsertChain(chain.blocks[1:])
 	return err

View File

@@ -25,7 +25,6 @@ import (
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/eth/tracers/logger"
-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/tests"

 	"github.com/urfave/cli/v2"
 )
@@ -41,10 +40,7 @@ func blockTestCmd(ctx *cli.Context) error {
 	if len(ctx.Args().First()) == 0 {
 		return errors.New("path-to-test argument required")
 	}
-	// Configure the go-ethereum logger
-	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
-	glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
-	log.Root().SetHandler(glogger)
+
 	var tracer vm.EVMLogger
 	// Configure the EVM logger
 	if ctx.Bool(MachineFlag.Name) {

View File

@@ -23,107 +23,116 @@ import (
 	"os"

 	"github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool"
+	"github.com/ethereum/go-ethereum/internal/debug"
 	"github.com/ethereum/go-ethereum/internal/flags"

 	"github.com/urfave/cli/v2"
 )

 var (
 	DebugFlag = &cli.BoolFlag{
 		Name:     "debug",
 		Usage:    "output full trace logs",
+		Category: flags.VMCategory,
 	}
-	MemProfileFlag = &cli.StringFlag{
-		Name:  "memprofile",
-		Usage: "creates a memory profile at the given path",
-	}
-	CPUProfileFlag = &cli.StringFlag{
-		Name:  "cpuprofile",
-		Usage: "creates a CPU profile at the given path",
-	}
 	StatDumpFlag = &cli.BoolFlag{
 		Name:     "statdump",
 		Usage:    "displays stack and heap memory information",
+		Category: flags.VMCategory,
 	}
 	CodeFlag = &cli.StringFlag{
 		Name:     "code",
 		Usage:    "EVM code",
+		Category: flags.VMCategory,
 	}
 	CodeFileFlag = &cli.StringFlag{
 		Name:     "codefile",
 		Usage:    "File containing EVM code. If '-' is specified, code is read from stdin ",
+		Category: flags.VMCategory,
 	}
 	GasFlag = &cli.Uint64Flag{
 		Name:     "gas",
 		Usage:    "gas limit for the evm",
 		Value:    10000000000,
+		Category: flags.VMCategory,
 	}
 	PriceFlag = &flags.BigFlag{
 		Name:     "price",
 		Usage:    "price set for the evm",
 		Value:    new(big.Int),
+		Category: flags.VMCategory,
 	}
 	ValueFlag = &flags.BigFlag{
 		Name:     "value",
 		Usage:    "value set for the evm",
 		Value:    new(big.Int),
+		Category: flags.VMCategory,
 	}
 	DumpFlag = &cli.BoolFlag{
 		Name:     "dump",
 		Usage:    "dumps the state after the run",
+		Category: flags.VMCategory,
 	}
 	InputFlag = &cli.StringFlag{
 		Name:     "input",
 		Usage:    "input for the EVM",
+		Category: flags.VMCategory,
 	}
 	InputFileFlag = &cli.StringFlag{
 		Name:     "inputfile",
 		Usage:    "file containing input for the EVM",
+		Category: flags.VMCategory,
 	}
-	VerbosityFlag = &cli.IntFlag{
-		Name:  "verbosity",
-		Usage: "sets the verbosity level",
-	}
 	BenchFlag = &cli.BoolFlag{
 		Name:     "bench",
 		Usage:    "benchmark the execution",
+		Category: flags.VMCategory,
 	}
 	CreateFlag = &cli.BoolFlag{
 		Name:     "create",
 		Usage:    "indicates the action should be create rather than call",
+		Category: flags.VMCategory,
 	}
 	GenesisFlag = &cli.StringFlag{
 		Name:     "prestate",
 		Usage:    "JSON file with prestate (genesis) config",
+		Category: flags.VMCategory,
 	}
 	MachineFlag = &cli.BoolFlag{
 		Name:     "json",
 		Usage:    "output trace logs in machine readable format (json)",
+		Category: flags.VMCategory,
 	}
 	SenderFlag = &cli.StringFlag{
 		Name:     "sender",
 		Usage:    "The transaction origin",
+		Category: flags.VMCategory,
 	}
 	ReceiverFlag = &cli.StringFlag{
 		Name:     "receiver",
 		Usage:    "The transaction receiver (execution context)",
+		Category: flags.VMCategory,
 	}
 	DisableMemoryFlag = &cli.BoolFlag{
 		Name:     "nomemory",
 		Value:    true,
 		Usage:    "disable memory output",
+		Category: flags.VMCategory,
 	}
 	DisableStackFlag = &cli.BoolFlag{
 		Name:     "nostack",
 		Usage:    "disable stack output",
+		Category: flags.VMCategory,
 	}
 	DisableStorageFlag = &cli.BoolFlag{
 		Name:     "nostorage",
 		Usage:    "disable storage output",
+		Category: flags.VMCategory,
 	}
 	DisableReturnDataFlag = &cli.BoolFlag{
 		Name:     "noreturndata",
 		Value:    true,
 		Usage:    "enable return data output",
+		Category: flags.VMCategory,
 	}
 )
@@ -183,34 +192,38 @@ var blockBuilderCommand = &cli.Command{
 	},
 }

+// vmFlags contains flags related to running the EVM.
+var vmFlags = []cli.Flag{
+	CodeFlag,
+	CodeFileFlag,
+	CreateFlag,
+	GasFlag,
+	PriceFlag,
+	ValueFlag,
+	InputFlag,
+	InputFileFlag,
+	GenesisFlag,
+	SenderFlag,
+	ReceiverFlag,
+}
+
+// traceFlags contains flags that configure tracing output.
+var traceFlags = []cli.Flag{
+	BenchFlag,
+	DebugFlag,
+	DumpFlag,
+	MachineFlag,
+	StatDumpFlag,
+	DisableMemoryFlag,
+	DisableStackFlag,
+	DisableStorageFlag,
+	DisableReturnDataFlag,
+}
+
 var app = flags.NewApp("the evm command line interface")

 func init() {
-	app.Flags = []cli.Flag{
-		BenchFlag,
-		CreateFlag,
-		DebugFlag,
-		VerbosityFlag,
-		CodeFlag,
-		CodeFileFlag,
-		GasFlag,
-		PriceFlag,
-		ValueFlag,
-		DumpFlag,
-		InputFlag,
-		InputFileFlag,
-		MemProfileFlag,
-		CPUProfileFlag,
-		StatDumpFlag,
-		GenesisFlag,
-		MachineFlag,
-		SenderFlag,
-		ReceiverFlag,
-		DisableMemoryFlag,
-		DisableStackFlag,
-		DisableStorageFlag,
-		DisableReturnDataFlag,
-	}
+	app.Flags = flags.Merge(vmFlags, traceFlags, debug.Flags)
 	app.Commands = []*cli.Command{
 		compileCommand,
 		disasmCommand,
@@ -221,6 +234,14 @@ func init() {
 		transactionCommand,
 		blockBuilderCommand,
 	}
+	app.Before = func(ctx *cli.Context) error {
+		flags.MigrateGlobalFlags(ctx)
+		return debug.Setup(ctx)
+	}
+	app.After = func(ctx *cli.Context) error {
+		debug.Exit()
+		return nil
+	}
 }

 func main() {
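Note: the per-command profiling and verbosity flags are replaced by the shared `internal/debug` flags, wired through the app's `Before`/`After` hooks. A minimal urfave/cli v2 sketch of that lifecycle (the printed actions stand in for `debug.Setup`/`debug.Exit`):

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

// Before runs global setup for every subcommand and After runs teardown,
// replacing hand-rolled per-command profiling code like the blocks removed
// from runner.go below.
func main() {
	app := &cli.App{
		Name: "demo",
		Before: func(ctx *cli.Context) error {
			fmt.Println("setup (e.g. start CPU profile, init logger)")
			return nil
		},
		After: func(ctx *cli.Context) error {
			fmt.Println("teardown (e.g. flush profiles)")
			return nil
		},
		Action: func(ctx *cli.Context) error {
			fmt.Println("run")
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```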
func main() { func main() {

View File

@@ -24,7 +24,6 @@ import (
 	"math/big"
 	"os"
 	goruntime "runtime"
-	"runtime/pprof"
 	"testing"
 	"time"
@@ -34,12 +33,10 @@ import (
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
-	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/core/vm/runtime"
 	"github.com/ethereum/go-ethereum/eth/tracers/logger"
 	"github.com/ethereum/go-ethereum/internal/flags"
-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/trie"
 	"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
@@ -52,6 +49,7 @@ var runCommand = &cli.Command{
 	Usage:       "run arbitrary evm binary",
 	ArgsUsage:   "<code>",
 	Description: `The run command runs arbitrary EVM code.`,
+	Flags:       flags.Merge(vmFlags, traceFlags),
 }

 // readGenesis will read the given JSON format genesis file and return
@@ -109,9 +107,6 @@ func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []by
 }

 func runCmd(ctx *cli.Context) error {
-	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
-	glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
-	log.Root().SetHandler(glogger)
 	logconfig := &logger.Config{
 		EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
 		DisableStack: ctx.Bool(DisableStackFlag.Name),
@@ -121,15 +116,14 @@ func runCmd(ctx *cli.Context) error {
 	}

 	var (
 		tracer      vm.EVMLogger
 		debugLogger *logger.StructLogger
 		statedb     *state.StateDB
 		chainConfig *params.ChainConfig
 		sender      = common.BytesToAddress([]byte("sender"))
 		receiver    = common.BytesToAddress([]byte("receiver"))
-		genesisConfig *core.Genesis
-		preimages     = ctx.Bool(DumpFlag.Name)
-		blobHashes    []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests
+		preimages   = ctx.Bool(DumpFlag.Name)
+		blobHashes  []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests
 	)
 	if ctx.Bool(MachineFlag.Name) {
 		tracer = logger.NewJSONLogger(logconfig, os.Stdout)
@@ -139,30 +133,30 @@ func runCmd(ctx *cli.Context) error {
 	} else {
 		debugLogger = logger.NewStructLogger(logconfig)
 	}
+
+	initialGas := ctx.Uint64(GasFlag.Name)
+	genesisConfig := new(core.Genesis)
+	genesisConfig.GasLimit = initialGas
 	if ctx.String(GenesisFlag.Name) != "" {
-		gen := readGenesis(ctx.String(GenesisFlag.Name))
-		genesisConfig = gen
-		db := rawdb.NewMemoryDatabase()
-		triedb := trie.NewDatabase(db, &trie.Config{
-			Preimages: preimages,
-			HashDB:    hashdb.Defaults,
-		})
-		defer triedb.Close()
-		genesis := gen.MustCommit(db, triedb)
-		sdb := state.NewDatabaseWithNodeDB(db, triedb)
-		statedb, _ = state.New(genesis.Root(), sdb, nil)
-		chainConfig = gen.Config
+		genesisConfig = readGenesis(ctx.String(GenesisFlag.Name))
+		if genesisConfig.GasLimit != 0 {
+			initialGas = genesisConfig.GasLimit
+		}
 	} else {
-		db := rawdb.NewMemoryDatabase()
-		triedb := trie.NewDatabase(db, &trie.Config{
-			Preimages: preimages,
-			HashDB:    hashdb.Defaults,
-		})
-		defer triedb.Close()
-		sdb := state.NewDatabaseWithNodeDB(db, triedb)
-		statedb, _ = state.New(types.EmptyRootHash, sdb, nil)
-		genesisConfig = new(core.Genesis)
+		genesisConfig.Config = params.AllEthashProtocolChanges
 	}
+
+	db := rawdb.NewMemoryDatabase()
+	triedb := trie.NewDatabase(db, &trie.Config{
+		Preimages: preimages,
+		HashDB:    hashdb.Defaults,
+	})
+	defer triedb.Close()
+	genesis := genesisConfig.MustCommit(db, triedb)
+	sdb := state.NewDatabaseWithNodeDB(db, triedb)
+	statedb, _ = state.New(genesis.Root(), sdb, nil)
+	chainConfig = genesisConfig.Config
+
 	if ctx.String(SenderFlag.Name) != "" {
 		sender = common.HexToAddress(ctx.String(SenderFlag.Name))
 	}
@@ -216,10 +210,6 @@ func runCmd(ctx *cli.Context) error {
 		}
 		code = common.Hex2Bytes(bin)
 	}
-	initialGas := ctx.Uint64(GasFlag.Name)
-	if genesisConfig.GasLimit != 0 {
-		initialGas = genesisConfig.GasLimit
-	}
 	runtimeConfig := runtime.Config{
 		Origin: sender,
 		State:  statedb,
@@ -236,19 +226,6 @@ func runCmd(ctx *cli.Context) error {
 		},
 	}

-	if cpuProfilePath := ctx.String(CPUProfileFlag.Name); cpuProfilePath != "" {
-		f, err := os.Create(cpuProfilePath)
-		if err != nil {
-			fmt.Println("could not create CPU profile: ", err)
-			os.Exit(1)
-		}
-		if err := pprof.StartCPUProfile(f); err != nil {
-			fmt.Println("could not start CPU profile: ", err)
-			os.Exit(1)
-		}
-		defer pprof.StopCPUProfile()
-	}
-
 	if chainConfig != nil {
 		runtimeConfig.ChainConfig = chainConfig
 	} else {
@@ -296,19 +273,6 @@ func runCmd(ctx *cli.Context) error {
 		fmt.Println(string(statedb.Dump(nil)))
 	}

-	if memProfilePath := ctx.String(MemProfileFlag.Name); memProfilePath != "" {
-		f, err := os.Create(memProfilePath)
-		if err != nil {
-			fmt.Println("could not create memory profile: ", err)
-			os.Exit(1)
-		}
-		if err := pprof.WriteHeapProfile(f); err != nil {
-			fmt.Println("could not write memory profile: ", err)
-			os.Exit(1)
-		}
-		f.Close()
-	}
-
 	if ctx.Bool(DebugFlag.Name) {
 		if debugLogger != nil {
 			fmt.Fprintln(os.Stderr, "#### TRACE ####")
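Note: `runCmd` now always builds one genesis, commits it once, and derives both the state and the chain config from it, instead of duplicating the database setup in both branches. A self-contained sketch using the same geth APIs as the diff (import paths and signatures as they appear above, i.e. as of this v1.13.2 merge):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
)

func main() {
	// Build a default genesis, as runCmd now does unconditionally.
	genesisConfig := new(core.Genesis)
	genesisConfig.GasLimit = 10_000_000_000
	genesisConfig.Config = params.AllEthashProtocolChanges

	// Commit it once and derive the state from its root.
	db := rawdb.NewMemoryDatabase()
	triedb := trie.NewDatabase(db, &trie.Config{HashDB: hashdb.Defaults})
	defer triedb.Close()

	genesis := genesisConfig.MustCommit(db, triedb)
	sdb := state.NewDatabaseWithNodeDB(db, triedb)
	statedb, err := state.New(genesis.Root(), sdb, nil)
	fmt.Println(statedb != nil, err)
}
```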

View File

@@ -28,7 +28,6 @@ import (
 	"github.com/ethereum/go-ethereum/core/state/snapshot"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/eth/tracers/logger"
-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/tests"

 	"github.com/urfave/cli/v2"
 )
@@ -52,11 +51,6 @@ type StatetestResult struct {
 }

 func stateTestCmd(ctx *cli.Context) error {
-	// Configure the go-ethereum logger
-	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
-	glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
-	log.Root().SetHandler(glogger)
-
 	// Configure the EVM logger
 	config := &logger.Config{
 		EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),

View File

@@ -50,8 +50,7 @@ var (
 		ArgsUsage: "<genesisPath>",
 		Flags: flags.Merge([]cli.Flag{
 			utils.CachePreimagesFlag,
-			utils.StateSchemeFlag,
-		}, utils.DatabasePathFlags),
+		}, utils.DatabaseFlags),
 		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
@@ -97,9 +96,8 @@ if one is set. Otherwise it prints the genesis from the datadir.`,
 			utils.MetricsInfluxDBOrganizationFlag,
 			utils.TxLookupLimitFlag,
 			utils.TransactionHistoryFlag,
-			utils.StateSchemeFlag,
 			utils.StateHistoryFlag,
-		}, utils.DatabasePathFlags),
+		}, utils.DatabaseFlags),
 		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.
@@ -115,8 +113,7 @@ processing will proceed even if an individual RLP-file import failure occurs.`,
 		Flags: flags.Merge([]cli.Flag{
 			utils.CacheFlag,
 			utils.SyncModeFlag,
-			utils.StateSchemeFlag,
-		}, utils.DatabasePathFlags),
+		}, utils.DatabaseFlags),
 		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
@@ -132,7 +129,7 @@ be gzipped.`,
 		Flags: flags.Merge([]cli.Flag{
 			utils.CacheFlag,
 			utils.SyncModeFlag,
-		}, utils.DatabasePathFlags),
+		}, utils.DatabaseFlags),
 		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
@@ -146,7 +143,7 @@ It's deprecated, please use "geth db import" instead.
 		Flags: flags.Merge([]cli.Flag{
 			utils.CacheFlag,
 			utils.SyncModeFlag,
-		}, utils.DatabasePathFlags),
+		}, utils.DatabaseFlags),
 		Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.
It's deprecated, please use "geth db export" instead.
@@ -165,8 +162,7 @@ It's deprecated, please use "geth db export" instead.
 			utils.IncludeIncompletesFlag,
 			utils.StartKeyFlag,
 			utils.DumpLimitFlag,
-			utils.StateSchemeFlag,
-		}, utils.DatabasePathFlags),
+		}, utils.DatabaseFlags),
 		Description: `
This command dumps out the state for a given block (or latest, if none provided).
`,
@@ -340,7 +336,8 @@ func exportChain(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()

-	chain, _ := utils.MakeChain(ctx, stack, true)
+	chain, db := utils.MakeChain(ctx, stack, true)
+	defer db.Close()
 	start := time.Now()

 	var err error
@@ -380,6 +377,7 @@ func importPreimages(ctx *cli.Context) error {
 	defer stack.Close()

 	db := utils.MakeChainDatabase(ctx, stack, false)
+	defer db.Close()
 	start := time.Now()

 	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
@@ -398,6 +396,7 @@ func exportPreimages(ctx *cli.Context) error {
 	defer stack.Close()

 	db := utils.MakeChainDatabase(ctx, stack, true)
+	defer db.Close()
 	start := time.Now()

 	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
@@ -409,6 +408,8 @@ func exportPreimages(ctx *cli.Context) error {
 func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
 	db := utils.MakeChainDatabase(ctx, stack, true)
+	defer db.Close()
+
 	var header *types.Header
 	if ctx.NArg() > 1 {
 		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())

View File

@@ -48,7 +48,7 @@ var (
 		Name:      "removedb",
 		Usage:     "Remove blockchain and state databases",
 		ArgsUsage: "",
-		Flags:     utils.DatabasePathFlags,
+		Flags:     utils.DatabaseFlags,
 		Description: `
Remove blockchain and state databases`,
 	}
@@ -77,7 +77,7 @@ Remove blockchain and state databases`,
 		ArgsUsage: "<prefix> <start>",
 		Flags: flags.Merge([]cli.Flag{
 			utils.SyncModeFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		}, utils.NetworkFlags, utils.DatabaseFlags),
 		Usage:       "Inspect the storage size for each type of data in the database",
 		Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
 	}
@@ -85,7 +85,7 @@ Remove blockchain and state databases`,
 		Action:    checkStateContent,
 		Name:      "check-state-content",
 		ArgsUsage: "<start (optional)>",
-		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
+		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
 		Usage:     "Verify that state data is cryptographically correct",
 		Description: `This command iterates the entire database for 32-byte keys, looking for rlp-encoded trie nodes.
For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates
@@ -97,7 +97,7 @@ a data corruption.`,
 		Usage: "Print leveldb statistics",
 		Flags: flags.Merge([]cli.Flag{
 			utils.SyncModeFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		}, utils.NetworkFlags, utils.DatabaseFlags),
 	}
 	dbCompactCmd = &cli.Command{
 		Action: dbCompact,
@@ -107,7 +107,7 @@ a data corruption.`,
 			utils.SyncModeFlag,
 			utils.CacheFlag,
 			utils.CacheDatabaseFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		}, utils.NetworkFlags, utils.DatabaseFlags),
 		Description: `This command performs a database compaction.
WARNING: This operation may take a very long time to finish, and may cause database
corruption if it is aborted during execution'!`,
@@ -119,7 +119,7 @@ corruption if it is aborted during execution'!`,
 		ArgsUsage: "<hex-encoded key>",
 		Flags: flags.Merge([]cli.Flag{
 			utils.SyncModeFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		}, utils.NetworkFlags, utils.DatabaseFlags),
 		Description: "This command looks up the specified database key from the database.",
 	}
 	dbDeleteCmd = &cli.Command{
@@ -129,7 +129,7 @@ corruption if it is aborted during execution'!`,
 		ArgsUsage: "<hex-encoded key>",
 		Flags: flags.Merge([]cli.Flag{
 			utils.SyncModeFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		}, utils.NetworkFlags, utils.DatabaseFlags),
 		Description: `This command deletes the specified database key from the database.
WARNING: This is a low-level operation which may cause database corruption!`,
 	}
@@ -140,7 +140,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
 		ArgsUsage: "<hex-encoded key> <hex-encoded value>",
 		Flags: flags.Merge([]cli.Flag{
 			utils.SyncModeFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		}, utils.NetworkFlags, utils.DatabaseFlags),
 		Description: `This command sets a given database key to the given value.
WARNING: This is a low-level operation which may cause database corruption!`,
 	}
@@ -151,8 +151,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
 		ArgsUsage: "<hex-encoded state root> <hex-encoded account hash> <hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
 		Flags: flags.Merge([]cli.Flag{
 			utils.SyncModeFlag,
-			utils.StateSchemeFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		}, utils.NetworkFlags, utils.DatabaseFlags),
 		Description: "This command looks up the specified database key from the database.",
 	}
 	dbDumpFreezerIndex = &cli.Command{
@@ -162,7 +161,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
 		ArgsUsage: "<freezer-type> <table-type> <start (int)> <end (int)>",
 		Flags: flags.Merge([]cli.Flag{
 			utils.SyncModeFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		}, utils.NetworkFlags, utils.DatabaseFlags),
 		Description: "This command displays information about the freezer index.",
 	}
 	dbImportCmd = &cli.Command{
@@ -172,7 +171,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
 		ArgsUsage: "<dumpfile> <start (optional)",
 		Flags: flags.Merge([]cli.Flag{
 			utils.SyncModeFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		}, utils.NetworkFlags, utils.DatabaseFlags),
 		Description: "The import command imports the specific chain data from an RLP encoded stream.",
 	}
 	dbExportCmd = &cli.Command{
@@ -182,7 +181,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
 		ArgsUsage: "<type> <dumpfile>",
 		Flags: flags.Merge([]cli.Flag{
 			utils.SyncModeFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		}, utils.NetworkFlags, utils.DatabaseFlags),
 		Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
 	}
 	dbMetadataCmd = &cli.Command{
@@ -191,7 +190,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
 		Usage: "Shows metadata about the chain status.",
 		Flags: flags.Merge([]cli.Flag{
 			utils.SyncModeFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		}, utils.NetworkFlags, utils.DatabaseFlags),
 		Description: "Shows metadata about the chain status.",
 	}
 )
@@ -595,6 +594,7 @@ func importLDBdata(ctx *cli.Context) error {
 		close(stop)
 	}()
 	db := utils.MakeChainDatabase(ctx, stack, false)
+	defer db.Close()
 	return utils.ImportLDBData(db, fName, int64(start), stop)
 }
@@ -691,6 +691,7 @@ func exportChaindata(ctx *cli.Context) error {
 		close(stop)
 	}()
 	db := utils.MakeChainDatabase(ctx, stack, true)
+	defer db.Close()
 	return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
 }
@@ -698,6 +699,8 @@ func showMetaData(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 	db := utils.MakeChainDatabase(ctx, stack, true)
+	defer db.Close()
+
 	ancients, err := db.Ancients()
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)

View File

@@ -96,7 +96,6 @@ var (
 		utils.SnapshotFlag,
 		utils.TxLookupLimitFlag,
 		utils.TransactionHistoryFlag,
-		utils.StateSchemeFlag,
 		utils.StateHistoryFlag,
 		utils.LightServeFlag,
 		utils.LightIngressFlag,
@@ -152,7 +151,7 @@ var (
 		utils.GpoMaxGasPriceFlag,
 		utils.GpoIgnoreGasPriceFlag,
 		configFileFlag,
-	}, utils.NetworkFlags, utils.DatabasePathFlags)
+	}, utils.NetworkFlags, utils.DatabaseFlags)

 	rpcFlags = []cli.Flag{
 		utils.HTTPEnabledFlag,

View File

@@ -51,7 +51,7 @@ var (
 		Action: pruneState,
 		Flags: flags.Merge([]cli.Flag{
 			utils.BloomFilterSizeFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		}, utils.NetworkFlags, utils.DatabaseFlags),
 		Description: `
geth snapshot prune-state <state-root>
will prune historical state data with the help of the state snapshot.
@@ -69,9 +69,7 @@ WARNING: it's only supported in hash mode(--state.scheme=hash)".
 		Usage:     "Recalculate state hash based on the snapshot for verification",
 		ArgsUsage: "<root>",
 		Action:    verifyState,
-		Flags: flags.Merge([]cli.Flag{
-			utils.StateSchemeFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
 		Description: `
geth snapshot verify-state <state-root>
will traverse the whole accounts and storages set based on the specified
@@ -84,7 +82,7 @@ In other words, this command does the snapshot to trie conversion.
 		Usage:     "Check that there is no 'dangling' snap storage",
 		ArgsUsage: "<root>",
 		Action:    checkDanglingStorage,
-		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
+		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
 		Description: `
geth snapshot check-dangling-storage <state-root> traverses the snap storage
data, and verifies that all snapshot storage data has a corresponding account.
@@ -95,7 +93,7 @@ data, and verifies that all snapshot storage data has a corresponding account.
 		Usage:     "Check all snapshot layers for the a specific account",
 		ArgsUsage: "<address | hash>",
 		Action:    checkAccount,
-		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
+		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
 		Description: `
geth snapshot inspect-account <address | hash> checks all snapshot layers and prints out
information about the specified address.
@@ -106,9 +104,7 @@ information about the specified address.
 		Usage:     "Traverse the state with given root hash and perform quick verification",
 		ArgsUsage: "<root>",
 		Action:    traverseState,
-		Flags: flags.Merge([]cli.Flag{
-			utils.StateSchemeFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
 		Description: `
geth snapshot traverse-state <state-root>
will traverse the whole state from the given state root and will abort if any
@@ -123,9 +119,7 @@ It's also usable without snapshot enabled.
 		Usage:     "Traverse the state with given root hash and perform detailed verification",
 		ArgsUsage: "<root>",
 		Action:    traverseRawState,
-		Flags: flags.Merge([]cli.Flag{
-			utils.StateSchemeFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
 		Description: `
geth snapshot traverse-rawstate <state-root>
will traverse the whole state from the given root and will abort if any referenced
@@ -146,8 +140,7 @@ It's also usable without snapshot enabled.
 			utils.ExcludeStorageFlag,
 			utils.StartKeyFlag,
 			utils.DumpLimitFlag,
-			utils.StateSchemeFlag,
-		}, utils.NetworkFlags, utils.DatabasePathFlags),
+		}, utils.NetworkFlags, utils.DatabaseFlags),
 		Description: `
This command is semantically equivalent to 'geth dump', but uses the snapshots
as the backend data source, making this command a lot faster.
@@ -252,7 +245,9 @@ func checkDanglingStorage(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()

-	return snapshot.CheckDanglingStorage(utils.MakeChainDatabase(ctx, stack, true))
+	db := utils.MakeChainDatabase(ctx, stack, true)
+	defer db.Close()
+	return snapshot.CheckDanglingStorage(db)
 }

 // traverseState is a helper function used for pruning verification.
@@ -332,6 +327,11 @@ func traverseState(ctx *cli.Context) error {
 			storageIter := trie.NewIterator(storageIt)
 			for storageIter.Next() {
 				slots += 1
+
+				if time.Since(lastReport) > time.Second*8 {
+					log.Info("Traversing state", "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start)))
+					lastReport = time.Now()
+				}
 			}
 			if storageIter.Err != nil {
 				log.Error("Failed to traverse storage trie", "root", acc.Root, "err", storageIter.Err)
@@ -486,6 +486,10 @@ func traverseRawState(ctx *cli.Context) error {
 			if storageIter.Leaf() {
 				slots += 1
 			}
+			if time.Since(lastReport) > time.Second*8 {
+				log.Info("Traversing state", "nodes", nodes, "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start)))
+				lastReport = time.Now()
+			}
 		}
 		if storageIter.Error() != nil {
 			log.Error("Failed to traverse storage trie", "root", acc.Root, "err", storageIter.Error())

View File

@@ -45,7 +45,7 @@ var (
 		Usage:     "verify the conversion of a MPT into a verkle tree",
 		ArgsUsage: "<root>",
 		Action:    verifyVerkle,
-		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
+		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
 		Description: `
geth verkle verify <state-root>
This command takes a root commitment and attempts to rebuild the tree.
@@ -56,7 +56,7 @@ This command takes a root commitment and attempts to rebuild the tree.
 		Usage:     "Dump a verkle tree to a DOT file",
 		ArgsUsage: "<root> <key1> [<key 2> ...]",
 		Action:    expandVerkle,
-		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
+		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
 		Description: `
geth verkle dump <state-root> <key 1> [<key 2> ...]
This command will produce a dot file representing the tree, rooted at <root>.
@@ -115,6 +115,7 @@ func verifyVerkle(ctx *cli.Context) error {
 	defer stack.Close()

 	chaindb := utils.MakeChainDatabase(ctx, stack, true)
+	defer chaindb.Close()
 	headBlock := rawdb.ReadHeadBlock(chaindb)
 	if headBlock == nil {
 		log.Error("Failed to load head block")
@@ -163,6 +164,7 @@ func expandVerkle(ctx *cli.Context) error {
 	defer stack.Close()

 	chaindb := utils.MakeChainDatabase(ctx, stack, true)
+	defer chaindb.Close()
 	var (
 		rootC   common.Hash
 		keylist [][]byte

View File

@@ -969,18 +969,19 @@ var (
 	// NetworkFlags is the flag group of all built-in supported networks.
 	NetworkFlags = append([]cli.Flag{MainnetFlag}, TestnetFlags...)

-	// DatabasePathFlags is the flag group of all database path flags.
-	DatabasePathFlags = []cli.Flag{
+	// DatabaseFlags is the flag group of all database flags.
+	DatabaseFlags = []cli.Flag{
 		DataDirFlag,
 		AncientFlag,
 		RemoteDBFlag,
+		StateSchemeFlag,
 		HttpHeaderFlag,
 	}
 )

 func init() {
 	if rawdb.PebbleEnabled {
-		DatabasePathFlags = append(DatabasePathFlags, DBEngineFlag)
+		DatabaseFlags = append(DatabaseFlags, DBEngineFlag)
 	}
 }
@@ -1006,7 +1007,7 @@ func MakeDataDir(ctx *cli.Context) string {
 // setNodeKey creates a node key from set command line flags, either loading it
 // from a file or as a specified hex value. If neither flags were provided, this
-// method returns nil and an emphemeral key is to be generated.
+// method returns nil and an ephemeral key is to be generated.
 func setNodeKey(ctx *cli.Context, cfg *p2p.Config) {
 	var (
 		hex = ctx.String(NodeKeyHexFlag.Name)
@@ -1039,35 +1040,45 @@ func setNodeUserIdent(ctx *cli.Context, cfg *node.Config) {
 // setBootstrapNodes creates a list of bootstrap nodes from the command line
 // flags, reverting to pre-configured ones if none have been specified.
+// Priority order for bootnodes configuration:
+//
+//  1. --bootnodes flag
+//  2. Config file
+//  3. Network preset flags (e.g. --goerli)
+//  4. default to mainnet nodes
 func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
 	urls := params.MainnetBootnodes
-	switch {
-	case ctx.IsSet(BootnodesFlag.Name):
+	if ctx.IsSet(BootnodesFlag.Name) {
 		urls = SplitAndTrim(ctx.String(BootnodesFlag.Name))
-	case ctx.Bool(HoleskyFlag.Name):
-		urls = params.HoleskyBootnodes
-	case ctx.Bool(SepoliaFlag.Name):
-		urls = params.SepoliaBootnodes
-	case ctx.Bool(GoerliFlag.Name):
-		urls = params.GoerliBootnodes
+	} else {
+		if cfg.BootstrapNodes != nil {
+			return // Already set by config file, don't apply defaults.
+		}
+		switch {
+		case ctx.Bool(HoleskyFlag.Name):
+			urls = params.HoleskyBootnodes
+		case ctx.Bool(SepoliaFlag.Name):
+			urls = params.SepoliaBootnodes
+		case ctx.Bool(GoerliFlag.Name):
+			urls = params.GoerliBootnodes
+		}
 	}
+	cfg.BootstrapNodes = mustParseBootnodes(urls)
+}

-	// don't apply defaults if BootstrapNodes is already set
-	if cfg.BootstrapNodes != nil {
-		return
-	}
-
-	cfg.BootstrapNodes = make([]*enode.Node, 0, len(urls))
+func mustParseBootnodes(urls []string) []*enode.Node {
+	nodes := make([]*enode.Node, 0, len(urls))
 	for _, url := range urls {
 		if url != "" {
 			node, err := enode.Parse(enode.ValidSchemes, url)
 			if err != nil {
 				log.Crit("Bootstrap URL invalid", "enode", url, "err", err)
-				continue
+				return nil
 			}
-			cfg.BootstrapNodes = append(cfg.BootstrapNodes, node)
+			nodes = append(nodes, node)
 		}
 	}
+	return nodes
 }
@@ -2129,7 +2140,7 @@ func DialRPCWithHeaders(endpoint string, headers []string) (*rpc.Client, error)
 	}
 	var opts []rpc.ClientOption
 	if len(headers) > 0 {
-		var customHeaders = make(http.Header)
+		customHeaders := make(http.Header)
 		for _, h := range headers {
 			kv := strings.Split(h, ":")
 			if len(kv) != 2 {
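Note: the rewritten `setBootstrapNodes` encodes an explicit priority order. A compact, runnable sketch of that resolution logic (string slices stand in for parsed enode URLs):

```go
package main

import "fmt"

// resolveBootnodes mirrors the priority order above:
// 1. --bootnodes flag, 2. config file, 3. network preset, 4. mainnet default.
func resolveBootnodes(flagSet bool, flagURLs, fileURLs, presetURLs, mainnet []string) []string {
	if flagSet {
		return flagURLs
	}
	if fileURLs != nil {
		return fileURLs // already set by config file, don't apply defaults
	}
	if presetURLs != nil {
		return presetURLs
	}
	return mainnet
}

func main() {
	mainnet := []string{"enode://mainnet@1.2.3.4:30303"}
	fmt.Println(resolveBootnodes(false, nil, nil, nil, mainnet))
	fmt.Println(resolveBootnodes(true, []string{"enode://cli@5.6.7.8:30303"}, nil, nil, mainnet))
}
```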

View File

@@ -17,6 +17,8 @@
 package asm

 import (
+	"encoding/hex"
+	"errors"
 	"fmt"
 	"math/big"
 	"os"
@@ -30,7 +32,7 @@ import (
 // and holds the tokens for the program.
 type Compiler struct {
 	tokens []token
-	binary []interface{}
+	out    []byte

 	labels map[string]int
@@ -50,12 +52,10 @@ func NewCompiler(debug bool) *Compiler {
 // Feed feeds tokens in to ch and are interpreted by
 // the compiler.
 //
-// feed is the first pass in the compile stage as it
-// collects the used labels in the program and keeps a
-// program counter which is used to determine the locations
-// of the jump dests. The labels can than be used in the
-// second stage to push labels and determine the right
-// position.
+// feed is the first pass in the compile stage as it collects the used labels in the
+// program and keeps a program counter which is used to determine the locations of the
+// jump dests. The labels can than be used in the second stage to push labels and
+// determine the right position.
 func (c *Compiler) Feed(ch <-chan token) {
 	var prev token
 	for i := range ch {
@@ -79,7 +79,6 @@ func (c *Compiler) Feed(ch <-chan token) {
 				c.pc++
 			}
 		}
-
 		c.tokens = append(c.tokens, i)
 		prev = i
 	}
@@ -88,12 +87,11 @@ func (c *Compiler) Feed(ch <-chan token) {
 	}
 }

-// Compile compiles the current tokens and returns a
-// binary string that can be interpreted by the EVM
-// and an error if it failed.
+// Compile compiles the current tokens and returns a binary string that can be interpreted
+// by the EVM and an error if it failed.
 //
-// compile is the second stage in the compile phase
-// which compiles the tokens to EVM instructions.
+// compile is the second stage in the compile phase which compiles the tokens to EVM
+// instructions.
 func (c *Compiler) Compile() (string, []error) {
 	var errors []error
 	// continue looping over the tokens until
@@ -105,16 +103,8 @@ func (c *Compiler) Compile() (string, []error) {
 	}

 	// turn the binary to hex
-	var bin strings.Builder
-	for _, v := range c.binary {
-		switch v := v.(type) {
-		case vm.OpCode:
-			bin.WriteString(fmt.Sprintf("%x", []byte{byte(v)}))
-		case []byte:
-			bin.WriteString(fmt.Sprintf("%x", v))
-		}
-	}
-	return bin.String(), errors
+	h := hex.EncodeToString(c.out)
+	return h, errors
 }

 // next returns the next token and increments the
@ -156,87 +146,114 @@ func (c *Compiler) compileLine() error {
return nil return nil
} }
// compileNumber compiles the number to bytes // parseNumber compiles the number to bytes
func (c *Compiler) compileNumber(element token) { func parseNumber(tok token) ([]byte, error) {
num := math.MustParseBig256(element.text).Bytes() if tok.typ != number {
if len(num) == 0 { panic("parseNumber of non-number token")
num = []byte{0}
} }
c.pushBin(num) num, ok := math.ParseBig256(tok.text)
if !ok {
return nil, errors.New("invalid number")
}
bytes := num.Bytes()
if len(bytes) == 0 {
bytes = []byte{0}
}
return bytes, nil
} }
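A quick sketch of what parseNumber now tolerates, courtesy of math.ParseBig256 (decimal and 0x-prefixed inputs both parse; zero normalises to a single zero byte instead of an empty slice):

    if num, ok := math.ParseBig256("0x10"); ok {
        fmt.Println(num.Bytes()) // [16]
    }
    zero, _ := math.ParseBig256("0")
    b := zero.Bytes() // empty slice for zero
    if len(b) == 0 {
        b = []byte{0} // parseNumber pads this to one zero byte
    }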
// compileElement compiles the element (push & label or both) // compileElement compiles the element (push & label or both)
// to a binary representation and may error if incorrect statements // to a binary representation and may error if incorrect statements
// were fed. // were fed.
func (c *Compiler) compileElement(element token) error { func (c *Compiler) compileElement(element token) error {
// check for a jump. jumps must be read and compiled switch {
// from right to left. case isJump(element.text):
if isJump(element.text) { return c.compileJump(element.text)
rvalue := c.next() case isPush(element.text):
switch rvalue.typ { return c.compilePush()
case number: default:
// TODO figure out how to return the error properly c.outputOpcode(toBinary(element.text))
c.compileNumber(rvalue)
case stringValue:
// strings are quoted, remove them.
c.pushBin(rvalue.text[1 : len(rvalue.text)-2])
case label:
c.pushBin(vm.PUSH4)
pos := big.NewInt(int64(c.labels[rvalue.text])).Bytes()
pos = append(make([]byte, 4-len(pos)), pos...)
c.pushBin(pos)
case lineEnd:
c.pos--
default:
return compileErr(rvalue, rvalue.text, "number, string or label")
}
// push the operation
c.pushBin(toBinary(element.text))
return nil return nil
} else if isPush(element.text) {
// handle pushes. pushes are read from left to right.
var value []byte
rvalue := c.next()
switch rvalue.typ {
case number:
value = math.MustParseBig256(rvalue.text).Bytes()
if len(value) == 0 {
value = []byte{0}
}
case stringValue:
value = []byte(rvalue.text[1 : len(rvalue.text)-1])
case label:
value = big.NewInt(int64(c.labels[rvalue.text])).Bytes()
value = append(make([]byte, 4-len(value)), value...)
default:
return compileErr(rvalue, rvalue.text, "number, string or label")
}
if len(value) > 32 {
return fmt.Errorf("%d type error: unsupported string or number with size > 32", rvalue.lineno)
}
c.pushBin(vm.OpCode(int(vm.PUSH1) - 1 + len(value)))
c.pushBin(value)
} else {
c.pushBin(toBinary(element.text))
} }
}
func (c *Compiler) compileJump(jumpType string) error {
rvalue := c.next()
switch rvalue.typ {
case number:
numBytes, err := parseNumber(rvalue)
if err != nil {
return err
}
c.outputBytes(numBytes)
case stringValue:
// strings are quoted, remove them.
str := rvalue.text[1 : len(rvalue.text)-2]
c.outputBytes([]byte(str))
case label:
c.outputOpcode(vm.PUSH4)
pos := big.NewInt(int64(c.labels[rvalue.text])).Bytes()
pos = append(make([]byte, 4-len(pos)), pos...)
c.outputBytes(pos)
case lineEnd:
// jump without argument is supported, it just takes the destination from the stack.
c.pos--
default:
return compileErr(rvalue, rvalue.text, "number, string or label")
}
// push the operation
c.outputOpcode(toBinary(jumpType))
return nil
}
func (c *Compiler) compilePush() error {
// handle pushes. pushes are read from left to right.
var value []byte
rvalue := c.next()
switch rvalue.typ {
case number:
value = math.MustParseBig256(rvalue.text).Bytes()
if len(value) == 0 {
value = []byte{0}
}
case stringValue:
value = []byte(rvalue.text[1 : len(rvalue.text)-1])
case label:
value = big.NewInt(int64(c.labels[rvalue.text])).Bytes()
value = append(make([]byte, 4-len(value)), value...)
default:
return compileErr(rvalue, rvalue.text, "number, string or label")
}
if len(value) > 32 {
return fmt.Errorf("%d: string or number size > 32 bytes", rvalue.lineno+1)
}
c.outputOpcode(vm.OpCode(int(vm.PUSH1) - 1 + len(value)))
c.outputBytes(value)
return nil return nil
} }
// compileLabel pushes a jumpdest to the binary slice. // compileLabel pushes a jumpdest to the binary slice.
func (c *Compiler) compileLabel() { func (c *Compiler) compileLabel() {
c.pushBin(vm.JUMPDEST) c.outputOpcode(vm.JUMPDEST)
} }
// pushBin pushes the value v to the binary stack. func (c *Compiler) outputOpcode(op vm.OpCode) {
func (c *Compiler) pushBin(v interface{}) {
if c.debug { if c.debug {
fmt.Printf("%d: %v\n", len(c.binary), v) fmt.Printf("%d: %v\n", len(c.out), op)
} }
c.binary = append(c.binary, v) c.out = append(c.out, byte(op))
}
// outputBytes appends the byte slice b to the binary output.
func (c *Compiler) outputBytes(b []byte) {
if c.debug {
fmt.Printf("%d: %x\n", len(c.out), b)
}
c.out = append(c.out, b...)
} }
// isPush returns whether the string op is either any of // isPush returns whether the string op is either any of
@ -263,13 +280,13 @@ type compileError struct {
} }
func (err compileError) Error() string { func (err compileError) Error() string {
return fmt.Sprintf("%d syntax error: unexpected %v, expected %v", err.lineno, err.got, err.want) return fmt.Sprintf("%d: syntax error: unexpected %v, expected %v", err.lineno, err.got, err.want)
} }
func compileErr(c token, got, want string) error { func compileErr(c token, got, want string) error {
return compileError{ return compileError{
got: got, got: got,
want: want, want: want,
lineno: c.lineno, lineno: c.lineno + 1,
} }
} }

View File

@ -54,6 +54,14 @@ func TestCompiler(t *testing.T) {
`, `,
output: "6300000006565b", output: "6300000006565b",
}, },
{
input: `
JUMP @label
label: ;; comment
ADD ;; comment
`,
output: "6300000006565b01",
},
} }
for _, test := range tests { for _, test := range tests {
ch := Lex([]byte(test.input), false) ch := Lex([]byte(test.input), false)
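For reference, the expected bytecode in the new test case decodes as 63 00000006 (PUSH4 0x00000006, the resolved label position), 56 (JUMP), 5b (JUMPDEST) and 01 (ADD), confirming that trailing ;; comments no longer shift label resolution.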

View File

@ -72,6 +72,16 @@ func TestLexer(t *testing.T) {
input: "@label123", input: "@label123",
tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}}, tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}},
}, },
// comment after label
{
input: "@label123 ;; comment",
tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}},
},
// comment after instruction
{
input: "push 3 ;; comment\nadd",
tokens: []token{{typ: lineStart}, {typ: element, text: "push"}, {typ: number, text: "3"}, {typ: lineEnd, text: "\n"}, {typ: lineStart, lineno: 1}, {typ: element, lineno: 1, text: "add"}, {typ: eof, lineno: 1}},
},
} }
for _, test := range tests { for _, test := range tests {

View File

@ -42,6 +42,8 @@ type token struct {
// is able to parse and return. // is able to parse and return.
type tokenType int type tokenType int
//go:generate go run golang.org/x/tools/cmd/stringer -type tokenType
const ( const (
eof tokenType = iota // end of file eof tokenType = iota // end of file
lineStart // emitted when a line starts lineStart // emitted when a line starts
@ -52,31 +54,13 @@ const (
labelDef // label definition is emitted when a new label is found labelDef // label definition is emitted when a new label is found
number // number is emitted when a number is found number // number is emitted when a number is found
stringValue // stringValue is emitted when a string has been found stringValue // stringValue is emitted when a string has been found
Numbers = "1234567890" // characters representing any decimal number
HexadecimalNumbers = Numbers + "aAbBcCdDeEfF" // characters representing any hexadecimal
Alpha = "abcdefghijklmnopqrstuwvxyzABCDEFGHIJKLMNOPQRSTUWVXYZ" // characters representing alphanumeric
) )
// String implements stringer const (
func (it tokenType) String() string { decimalNumbers = "1234567890" // characters representing any decimal number
if int(it) > len(stringtokenTypes) { hexNumbers = decimalNumbers + "aAbBcCdDeEfF" // characters representing any hexadecimal
return "invalid" alpha = "abcdefghijklmnopqrstuwvxyzABCDEFGHIJKLMNOPQRSTUWVXYZ" // characters representing alphanumeric
} )
return stringtokenTypes[it]
}
var stringtokenTypes = []string{
eof: "EOF",
lineStart: "new line",
lineEnd: "end of line",
invalidStatement: "invalid statement",
element: "element",
label: "label",
labelDef: "label definition",
number: "number",
stringValue: "string",
}
// lexer is the basic construct for parsing // lexer is the basic construct for parsing
// source code and turning them in to tokens. // source code and turning them in to tokens.
@ -200,7 +184,6 @@ func lexLine(l *lexer) stateFn {
l.emit(lineEnd) l.emit(lineEnd)
l.ignore() l.ignore()
l.lineno++ l.lineno++
l.emit(lineStart) l.emit(lineStart)
case r == ';' && l.peek() == ';': case r == ';' && l.peek() == ';':
return lexComment return lexComment
@ -225,6 +208,7 @@ func lexLine(l *lexer) stateFn {
// of the line and discards the text. // of the line and discards the text.
func lexComment(l *lexer) stateFn { func lexComment(l *lexer) stateFn {
l.acceptRunUntil('\n') l.acceptRunUntil('\n')
l.backup()
l.ignore() l.ignore()
return lexLine return lexLine
@ -234,7 +218,7 @@ func lexComment(l *lexer) stateFn {
// the lex text state function to advance the parsing // the lex text state function to advance the parsing
// process. // process.
func lexLabel(l *lexer) stateFn { func lexLabel(l *lexer) stateFn {
l.acceptRun(Alpha + "_" + Numbers) l.acceptRun(alpha + "_" + decimalNumbers)
l.emit(label) l.emit(label)
@ -253,9 +237,9 @@ func lexInsideString(l *lexer) stateFn {
} }
func lexNumber(l *lexer) stateFn { func lexNumber(l *lexer) stateFn {
acceptance := Numbers acceptance := decimalNumbers
if l.accept("xX") { if l.accept("xX") {
acceptance = HexadecimalNumbers acceptance = hexNumbers
} }
l.acceptRun(acceptance) l.acceptRun(acceptance)
@ -265,7 +249,7 @@ func lexNumber(l *lexer) stateFn {
} }
func lexElement(l *lexer) stateFn { func lexElement(l *lexer) stateFn {
l.acceptRun(Alpha + "_" + Numbers) l.acceptRun(alpha + "_" + decimalNumbers)
if l.peek() == ':' { if l.peek() == ':' {
l.emit(labelDef) l.emit(labelDef)

View File

@ -0,0 +1,31 @@
// Code generated by "stringer -type tokenType"; DO NOT EDIT.
package asm
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[eof-0]
_ = x[lineStart-1]
_ = x[lineEnd-2]
_ = x[invalidStatement-3]
_ = x[element-4]
_ = x[label-5]
_ = x[labelDef-6]
_ = x[number-7]
_ = x[stringValue-8]
}
const _tokenType_name = "eoflineStartlineEndinvalidStatementelementlabellabelDefnumberstringValue"
var _tokenType_index = [...]uint8{0, 3, 12, 19, 35, 42, 47, 55, 61, 72}
func (i tokenType) String() string {
if i < 0 || i >= tokenType(len(_tokenType_index)-1) {
return "tokenType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _tokenType_name[_tokenType_index[i]:_tokenType_index[i+1]]
}
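With the generated stringer in place, token types print by name and out-of-range values degrade gracefully; a minimal sketch:

    fmt.Println(number)        // "number"
    fmt.Println(tokenType(42)) // "tokenType(42)"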

View File

@ -337,17 +337,17 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
if err := bc.loadLastState(); err != nil { if err := bc.loadLastState(); err != nil {
return nil, err return nil, err
} }
// Make sure the state associated with the block is available // Make sure the state associated with the block is available, or log a
// notice if the state is unavailable and wait for state sync.
head := bc.CurrentBlock() head := bc.CurrentBlock()
if !bc.HasState(head.Root) { if !bc.HasState(head.Root) {
if head.Number.Uint64() == 0 { if head.Number.Uint64() == 0 {
// The genesis state is missing, which is only possible in the path-based // The genesis state is missing, which is only possible in the path-based
// scheme. This situation occurs when the state syncer overwrites it. // scheme. This situation occurs when the initial state sync is not finished
// // yet, or the chain head is rewound below the pivot point. In both scenarios,
// The solution is to reset the state to the genesis state. Although it may not // there is no possible recovery approach except for rerunning a snap sync.
// match the sync target, the state healer will later address and correct any // Do nothing here until the state syncer picks it up.
// inconsistencies. log.Info("Genesis state is missing, wait state sync")
bc.resetState()
} else { } else {
// Head state is missing, before the state recovery, find out the // Head state is missing, before the state recovery, find out the
// disk layer point of snapshot(if it's enabled). Make sure the // disk layer point of snapshot(if it's enabled). Make sure the
@ -630,28 +630,6 @@ func (bc *BlockChain) SetSafe(header *types.Header) {
} }
} }
// resetState resets the persistent state to genesis state if it's not present.
func (bc *BlockChain) resetState() {
// Short circuit if the genesis state is already present.
root := bc.genesisBlock.Root()
if bc.HasState(root) {
return
}
// Reset the state database to empty for committing genesis state.
// Note, it should only happen in path-based scheme and Reset function
// is also only call-able in this mode.
if bc.triedb.Scheme() == rawdb.PathScheme {
if err := bc.triedb.Reset(types.EmptyRootHash); err != nil {
log.Crit("Failed to clean state", "err", err) // Shouldn't happen
}
}
// Write genesis state into database.
if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil {
log.Crit("Failed to commit genesis state", "err", err)
}
log.Info("Reset state to genesis", "root", root)
}
// setHeadBeyondRoot rewinds the local chain to a new head with the extra condition // setHeadBeyondRoot rewinds the local chain to a new head with the extra condition
// that the rewind must pass the specified state root. This method is meant to be // that the rewind must pass the specified state root. This method is meant to be
// used when rewinding with snapshots enabled to ensure that we go back further than // used when rewinding with snapshots enabled to ensure that we go back further than
@ -687,7 +665,6 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
if newHeadBlock == nil { if newHeadBlock == nil {
log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash()) log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
newHeadBlock = bc.genesisBlock newHeadBlock = bc.genesisBlock
bc.resetState()
} else { } else {
// Block exists, keep rewinding until we find one with state, // Block exists, keep rewinding until we find one with state,
// keeping rewinding until we exceed the optional threshold // keeping rewinding until we exceed the optional threshold
@ -715,16 +692,14 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
} }
} }
if beyondRoot || newHeadBlock.NumberU64() == 0 { if beyondRoot || newHeadBlock.NumberU64() == 0 {
if newHeadBlock.NumberU64() == 0 { if !bc.HasState(newHeadBlock.Root()) && bc.stateRecoverable(newHeadBlock.Root()) {
bc.resetState()
} else if !bc.HasState(newHeadBlock.Root()) {
// Rewind to a block with recoverable state. If the state is // Rewind to a block with recoverable state. If the state is
// missing, run the state recovery here. // missing, run the state recovery here.
if err := bc.triedb.Recover(newHeadBlock.Root()); err != nil { if err := bc.triedb.Recover(newHeadBlock.Root()); err != nil {
log.Crit("Failed to rollback state", "err", err) // Shouldn't happen log.Crit("Failed to rollback state", "err", err) // Shouldn't happen
} }
log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
} }
log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
break break
} }
log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root()) log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root())
@ -739,6 +714,15 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
// to low, so it's safe to update in-memory markers directly. // to low, so it's safe to update in-memory markers directly.
bc.currentBlock.Store(newHeadBlock.Header()) bc.currentBlock.Store(newHeadBlock.Header())
headBlockGauge.Update(int64(newHeadBlock.NumberU64())) headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
// The head state is missing, which is only possible in the path-based
// scheme. This situation occurs when the chain head is rewound below
// the pivot point. In this scenario, there is no possible recovery
// approach except for rerunning a snap sync. Do nothing here until the
// state syncer picks it up.
if !bc.HasState(newHeadBlock.Root()) {
log.Info("Chain is stateless, wait state sync", "number", newHeadBlock.Number(), "hash", newHeadBlock.Hash())
}
} }
// Rewind the snap block in a simpleton way to the target head // Rewind the snap block in a simpleton way to the target head
if currentSnapBlock := bc.CurrentSnapBlock(); currentSnapBlock != nil && header.Number.Uint64() < currentSnapBlock.Number.Uint64() { if currentSnapBlock := bc.CurrentSnapBlock(); currentSnapBlock != nil && header.Number.Uint64() < currentSnapBlock.Number.Uint64() {
@ -838,7 +822,7 @@ func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error {
// Reset the trie database with the fresh snap synced state. // Reset the trie database with the fresh snap synced state.
root := block.Root() root := block.Root()
if bc.triedb.Scheme() == rawdb.PathScheme { if bc.triedb.Scheme() == rawdb.PathScheme {
if err := bc.triedb.Reset(root); err != nil { if err := bc.triedb.Enable(root); err != nil {
return err return err
} }
} }
@ -1402,6 +1386,11 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
// writeBlockWithState writes block, metadata and corresponding state data to the // writeBlockWithState writes block, metadata and corresponding state data to the
// database. // database.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error { func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error {
//begin PluGeth injection
var interval time.Duration
_ = pluginSetTrieFlushIntervalClone(interval) // this is being called here to engage a testing scenario
//end PluGeth injection
// Calculate the total difficulty of the block // Calculate the total difficulty of the block
ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
if ptd == nil { if ptd == nil {

View File

@ -630,13 +630,16 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan
request <- &Retrieval{Bit: bit, Sections: sections, Context: s.ctx} request <- &Retrieval{Bit: bit, Sections: sections, Context: s.ctx}
result := <-request result := <-request
// Deliver a result before s.Close() to avoid a deadlock
s.deliverSections(result.Bit, result.Sections, result.Bitsets)
if result.Error != nil { if result.Error != nil {
s.errLock.Lock() s.errLock.Lock()
s.err = result.Error s.err = result.Error
s.errLock.Unlock() s.errLock.Unlock()
s.Close() s.Close()
} }
s.deliverSections(result.Bit, result.Sections, result.Bitsets)
} }
} }
} }

View File

@ -107,6 +107,16 @@ func TestCreation(t *testing.T) {
{1735372, 1677557088, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 0}}, // First Shanghai block {1735372, 1677557088, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 0}}, // First Shanghai block
}, },
}, },
// Holesky test cases
{
params.HoleskyChainConfig,
core.DefaultHoleskyGenesisBlock().ToBlock(),
[]testcase{
{0, 0, ID{Hash: checksumToBytes(0xc61a6098), Next: 1696000704}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople, Petersburg, Istanbul, Berlin, London, Paris block
{123, 0, ID{Hash: checksumToBytes(0xc61a6098), Next: 1696000704}}, // First MergeNetsplit block
{123, 1696000704, ID{Hash: checksumToBytes(0xfd4f016b), Next: 0}}, // Last MergeNetsplit block
},
},
} }
for i, tt := range tests { for i, tt := range tests {
for j, ttt := range tt.cases { for j, ttt := range tt.cases {

View File

@ -587,10 +587,9 @@ func DefaultHoleskyGenesisBlock() *Genesis {
return &Genesis{ return &Genesis{
Config: params.HoleskyChainConfig, Config: params.HoleskyChainConfig,
Nonce: 0x1234, Nonce: 0x1234,
ExtraData: hexutil.MustDecode("0x686f77206d7563682069732074686520666973683f"),
GasLimit: 0x17d7840, GasLimit: 0x17d7840,
Difficulty: big.NewInt(0x01), Difficulty: big.NewInt(0x01),
Timestamp: 1694786100, Timestamp: 1695902100,
Alloc: decodePrealloc(holeskyAllocData), Alloc: decodePrealloc(holeskyAllocData),
} }
} }

View File

@ -76,3 +76,25 @@ func DeleteSkeletonHeader(db ethdb.KeyValueWriter, number uint64) {
log.Crit("Failed to delete skeleton header", "err", err) log.Crit("Failed to delete skeleton header", "err", err)
} }
} }
const (
StateSyncUnknown = uint8(0) // flags that the state snap sync status is unknown
StateSyncRunning = uint8(1) // flags that the state snap sync is not completed yet
StateSyncFinished = uint8(2) // flags that the state snap sync is completed
)
// ReadSnapSyncStatusFlag retrieves the state snap sync status flag.
func ReadSnapSyncStatusFlag(db ethdb.KeyValueReader) uint8 {
blob, err := db.Get(snapSyncStatusFlagKey)
if err != nil || len(blob) != 1 {
return StateSyncUnknown
}
return blob[0]
}
// WriteSnapSyncStatusFlag stores the state snap sync status flag into database.
func WriteSnapSyncStatusFlag(db ethdb.KeyValueWriter, flag uint8) {
if err := db.Put(snapSyncStatusFlagKey, []byte{flag}); err != nil {
log.Crit("Failed to store sync status flag", "err", err)
}
}
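A minimal round-trip sketch of the new flag, assuming rawdb's in-memory database helper:

    db := NewMemoryDatabase()
    WriteSnapSyncStatusFlag(db, StateSyncRunning)
    if ReadSnapSyncStatusFlag(db) == StateSyncRunning {
        // snap sync was left unfinished; the state syncer should resume
    }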

View File

@ -89,6 +89,16 @@ func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash)
return h.hash(data) == hash return h.hash(data) == hash
} }
// ExistsAccountTrieNode checks the presence of the account trie node with the
// specified node path, regardless of the node hash.
func ExistsAccountTrieNode(db ethdb.KeyValueReader, path []byte) bool {
has, err := db.Has(accountTrieNodeKey(path))
if err != nil {
return false
}
return has
}
// WriteAccountTrieNode writes the provided account trie node into database. // WriteAccountTrieNode writes the provided account trie node into database.
func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) { func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) {
if err := db.Put(accountTrieNodeKey(path), node); err != nil { if err := db.Put(accountTrieNodeKey(path), node); err != nil {
@ -127,6 +137,16 @@ func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path [
return h.hash(data) == hash return h.hash(data) == hash
} }
// ExistsStorageTrieNode checks the presence of the storage trie node with the
// specified account hash and node path, regardless of the node hash.
func ExistsStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) bool {
has, err := db.Has(storageTrieNodeKey(accountHash, path))
if err != nil {
return false
}
return has
}
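These checks exist for the path scheme, where a node is addressed by its path and its hash may legitimately change across rewrites; a hedged sketch of the intended call pattern:

    if ExistsAccountTrieNode(db, path) {
        // Some node occupies this path; it may still need healing if its
        // hash differs from the expected one (compare HasAccountTrieNode).
    }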
// WriteStorageTrieNode writes the provided storage trie node into database. // WriteStorageTrieNode writes the provided storage trie node into database.
func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) { func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) {
if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil { if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil {

View File

@ -253,7 +253,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
break break
} }
} }
// We are about to exit on error. Print database metdata beore exiting // We are about to exit on error. Print database metadata before exiting
printChainMetadata(db) printChainMetadata(db)
return nil, fmt.Errorf("gap in the chain between ancients [0 - #%d] and leveldb [#%d - #%d] ", return nil, fmt.Errorf("gap in the chain between ancients [0 - #%d] and leveldb [#%d - #%d] ",
frozen-1, number, head) frozen-1, number, head)
@ -555,7 +555,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey, lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey, snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey, uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, snapSyncStatusFlagKey,
} { } {
if bytes.Equal(key, meta) { if bytes.Equal(key, meta) {
metadata.Add(size) metadata.Add(size)

View File

@ -108,7 +108,11 @@ func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
// Leveldb uses LOCK as the filelock filename. To prevent the // Leveldb uses LOCK as the filelock filename. To prevent the
// name collision, we use FLOCK as the lock name. // name collision, we use FLOCK as the lock name.
lock := flock.New(flockFile) lock := flock.New(flockFile)
if locked, err := lock.TryLock(); err != nil { tryLock := lock.TryLock
if readonly {
tryLock = lock.TryRLock
}
if locked, err := tryLock(); err != nil {
return nil, err return nil, err
} else if !locked { } else if !locked {
return nil, errors.New("locking failed") return nil, errors.New("locking failed")

View File

@ -212,6 +212,9 @@ func (t *freezerTable) repair() error {
} }
// Ensure the index is a multiple of indexEntrySize bytes // Ensure the index is a multiple of indexEntrySize bytes
if overflow := stat.Size() % indexEntrySize; overflow != 0 { if overflow := stat.Size() % indexEntrySize; overflow != 0 {
if t.readonly {
return fmt.Errorf("index file(path: %s, name: %s) size is not a multiple of %d", t.path, t.name, indexEntrySize)
}
truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path
} }
// Retrieve the file sizes and prepare for truncation // Retrieve the file sizes and prepare for truncation
@ -270,6 +273,9 @@ func (t *freezerTable) repair() error {
// Keep truncating both files until they come in sync // Keep truncating both files until they come in sync
contentExp = int64(lastIndex.offset) contentExp = int64(lastIndex.offset)
for contentExp != contentSize { for contentExp != contentSize {
if t.readonly {
return fmt.Errorf("freezer table(path: %s, name: %s, num: %d) is corrupted", t.path, t.name, lastIndex.filenum)
}
verbose = true verbose = true
// Truncate the head file to the last offset pointer // Truncate the head file to the last offset pointer
if contentExp < contentSize { if contentExp < contentSize {

View File

@ -289,6 +289,57 @@ func TestFreezerReadonlyValidate(t *testing.T) {
} }
} }
func TestFreezerConcurrentReadonly(t *testing.T) {
t.Parallel()
tables := map[string]bool{"a": true}
dir := t.TempDir()
f, err := NewFreezer(dir, "", false, 2049, tables)
if err != nil {
t.Fatal("can't open freezer", err)
}
var item = make([]byte, 1024)
batch := f.tables["a"].newBatch()
items := uint64(10)
for i := uint64(0); i < items; i++ {
require.NoError(t, batch.AppendRaw(i, item))
}
require.NoError(t, batch.commit())
if loaded := f.tables["a"].items.Load(); loaded != items {
t.Fatalf("unexpected number of items in table, want: %d, have: %d", items, loaded)
}
require.NoError(t, f.Close())
var (
wg sync.WaitGroup
fs = make([]*Freezer, 5)
errs = make([]error, 5)
)
for i := 0; i < 5; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
f, err := NewFreezer(dir, "", true, 2049, tables)
if err == nil {
fs[i] = f
} else {
errs[i] = err
}
}(i)
}
wg.Wait()
for i := range fs {
if err := errs[i]; err != nil {
t.Fatal("failed to open freezer", err)
}
require.NoError(t, fs[i].Close())
}
}
func newFreezerForTesting(t *testing.T, tables map[string]bool) (*Freezer, string) { func newFreezerForTesting(t *testing.T, tables map[string]bool) (*Freezer, string) {
t.Helper() t.Helper()

View File

@ -91,6 +91,9 @@ var (
// transitionStatusKey tracks the eth2 transition status. // transitionStatusKey tracks the eth2 transition status.
transitionStatusKey = []byte("eth2-transition") transitionStatusKey = []byte("eth2-transition")
// snapSyncStatusFlagKey flags the status of snap sync.
snapSyncStatusFlagKey = []byte("SnapSyncStatus")
// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes). // Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td

View File

@ -446,6 +446,10 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefi
internal += time.Since(istart) internal += time.Since(istart)
} }
if iter.Err != nil { if iter.Err != nil {
// Trie errors should never happen. Still, in case of a bug, expose the
// error here, as the outer code will presume errors are interrupts, not
// some deeper issues.
log.Error("State snapshotter failed to iterate trie", "err", err)
return false, nil, iter.Err return false, nil, iter.Err
} }
// Delete all stale snapshot states remaining // Delete all stale snapshot states remaining

View File

@ -355,7 +355,13 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr
return err return err
} }
} }
// Initialize the state with the head block, or fall back to the empty one
// in case the head state is not available (might occur when the node is
// not fully synced).
state, err := p.chain.StateAt(head.Root) state, err := p.chain.StateAt(head.Root)
if err != nil {
state, err = p.chain.StateAt(types.EmptyRootHash)
}
if err != nil { if err != nil {
return err return err
} }
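The same head-state-or-empty fallback reappears in LegacyPool.Init below; as a standalone sketch (the one-method interface is an assumption for illustration):

    func stateAtOrEmpty(chain interface {
        StateAt(root common.Hash) (*state.StateDB, error)
    }, head *types.Header) (*state.StateDB, error) {
        statedb, err := chain.StateAt(head.Root)
        if err != nil {
            // Head state missing (e.g. snap sync still running): start empty.
            statedb, err = chain.StateAt(types.EmptyRootHash)
        }
        return statedb, err
    }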

View File

@ -44,7 +44,7 @@ type evictHeap struct {
index map[common.Address]int // Indices into the heap for replacements index map[common.Address]int // Indices into the heap for replacements
} }
// newPriceHeap creates a new heap of cheapets accounts in the blob pool to evict // newPriceHeap creates a new heap of cheapest accounts in the blob pool to evict
// from in case of over saturation. // from in case of over saturation.
func newPriceHeap(basefee *uint256.Int, blobfee *uint256.Int, index *map[common.Address][]*blobTxMeta) *evictHeap { func newPriceHeap(basefee *uint256.Int, blobfee *uint256.Int, index *map[common.Address][]*blobTxMeta) *evictHeap {
heap := &evictHeap{ heap := &evictHeap{

View File

@ -143,7 +143,7 @@ func (l *limbo) push(tx *types.Transaction, block uint64) error {
return errors.New("already tracked blob transaction") return errors.New("already tracked blob transaction")
} }
if err := l.setAndIndex(tx, block); err != nil { if err := l.setAndIndex(tx, block); err != nil {
log.Error("Failed to set and index liboed blobs", "tx", tx, "err", err) log.Error("Failed to set and index limboed blobs", "tx", tx, "err", err)
return err return err
} }
return nil return nil
@ -191,7 +191,7 @@ func (l *limbo) update(txhash common.Hash, block uint64) {
log.Trace("Blob transaction unchanged in limbo", "tx", txhash, "block", block) log.Trace("Blob transaction unchanged in limbo", "tx", txhash, "block", block)
return return
} }
// Retrieve the old blobs from the data store and write tehm back with a new // Retrieve the old blobs from the data store and write them back with a new
// block number. IF anything fails, there's not much to do, go on. // block number. IF anything fails, there's not much to do, go on.
item, err := l.getAndDrop(id) item, err := l.getAndDrop(id)
if err != nil { if err != nil {

View File

@ -27,7 +27,7 @@ import (
var log2_1_125 = math.Log2(1.125) var log2_1_125 = math.Log2(1.125)
// evictionPriority calculates the eviction priority based on the algorithm // evictionPriority calculates the eviction priority based on the algorithm
// described in the BlobPool docs for a both fee components. // described in the BlobPool docs for both fee components.
// //
// This method takes about 8ns on a very recent laptop CPU, recalculating about // This method takes about 8ns on a very recent laptop CPU, recalculating about
// 125 million transaction priority values per second. // 125 million transaction priority values per second.
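The heart of that algorithm is expressing fees on a log-1.125 scale, one unit per 12.5% fee step; a hypothetical standalone version of the conversion (the real code combines the basefee and blobfee components):

    // feeJumps converts a fee to a number of 12.5% steps,
    // using the identity log1.125(x) = log2(x) / log2(1.125).
    func feeJumps(fee float64) float64 {
        return math.Log2(fee) / log2_1_125
    }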

View File

@ -52,6 +52,6 @@ var (
ErrOversizedData = errors.New("oversized data") ErrOversizedData = errors.New("oversized data")
// ErrFutureReplacePending is returned if a future transaction replaces a pending // ErrFutureReplacePending is returned if a future transaction replaces a pending
// transaction. Future transactions should only be able to replace other future transactions. // one. Future transactions should only be able to replace other future transactions.
ErrFutureReplacePending = errors.New("future transaction tries to replace pending") ErrFutureReplacePending = errors.New("future transaction tries to replace pending")
) )

View File

@ -298,7 +298,20 @@ func (pool *LegacyPool) Init(gasTip *big.Int, head *types.Header, reserve txpool
// Set the basic pool parameters // Set the basic pool parameters
pool.gasTip.Store(gasTip) pool.gasTip.Store(gasTip)
pool.reset(nil, head)
// Initialize the state with the head block, or fall back to the empty one
// in case the head state is not available (might occur when the node is
// not fully synced).
statedb, err := pool.chain.StateAt(head.Root)
if err != nil {
statedb, err = pool.chain.StateAt(types.EmptyRootHash)
}
if err != nil {
return err
}
pool.currentHead.Store(head)
pool.currentState = statedb
pool.pendingNonces = newNoncer(statedb)
// Start the reorg loop early, so it can handle requests generated during // Start the reorg loop early, so it can handle requests generated during
// journal loading. // journal loading.
@ -406,7 +419,7 @@ func (pool *LegacyPool) Close() error {
} }
// Reset implements txpool.SubPool, allowing the legacy pool's internal state to be // Reset implements txpool.SubPool, allowing the legacy pool's internal state to be
// kept in sync with the main transacion pool's internal state. // kept in sync with the main transaction pool's internal state.
func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) { func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) {
wait := pool.requestReset(oldHead, newHead) wait := pool.requestReset(oldHead, newHead)
<-wait <-wait
@ -637,7 +650,7 @@ func (pool *LegacyPool) validateTx(tx *types.Transaction, local bool) error {
// pending or queued one, it overwrites the previous transaction if its price is higher. // pending or queued one, it overwrites the previous transaction if its price is higher.
// //
// If a newly added transaction is marked as local, its sending account will be // If a newly added transaction is marked as local, its sending account will be
// be added to the allowlist, preventing any associated transaction from being dropped // added to the allowlist, preventing any associated transaction from being dropped
// out of the pool due to pricing constraints. // out of the pool due to pricing constraints.
func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, err error) { func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
// If the transaction is already known, discard it // If the transaction is already known, discard it
@ -901,7 +914,7 @@ func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *typ
} }
// addLocals enqueues a batch of transactions into the pool if they are valid, marking the // addLocals enqueues a batch of transactions into the pool if they are valid, marking the
// senders as a local ones, ensuring they go around the local pricing constraints. // senders as local ones, ensuring they go around the local pricing constraints.
// //
// This method is used to add transactions from the RPC API and performs synchronous pool // This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation. // reorganization and event propagation.
@ -943,7 +956,7 @@ func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error {
} }
// Add enqueues a batch of transactions into the pool if they are valid. Depending // Add enqueues a batch of transactions into the pool if they are valid. Depending
// on the local flag, full pricing contraints will or will not be applied. // on the local flag, full pricing constraints will or will not be applied.
// //
// If sync is set, the method will block until all internal maintenance related // If sync is set, the method will block until all internal maintenance related
// to the add is finished. Only use this during tests for determinism! // to the add is finished. Only use this during tests for determinism!

View File

@ -70,7 +70,7 @@ type TxPool struct {
reservations map[common.Address]SubPool // Map with the account to pool reservations reservations map[common.Address]SubPool // Map with the account to pool reservations
reserveLock sync.Mutex // Lock protecting the account reservations reserveLock sync.Mutex // Lock protecting the account reservations
subs event.SubscriptionScope // Subscription scope to unscubscribe all on shutdown subs event.SubscriptionScope // Subscription scope to unsubscribe all on shutdown
quit chan chan error // Quit channel to tear down the head updater quit chan chan error // Quit channel to tear down the head updater
} }
@ -404,7 +404,7 @@ func (p *TxPool) Locals() []common.Address {
} }
// Status returns the known status (unknown/pending/queued) of a transaction // Status returns the known status (unknown/pending/queued) of a transaction
// identified by their hashes. // identified by its hash.
func (p *TxPool) Status(hash common.Hash) TxStatus { func (p *TxPool) Status(hash common.Hash) TxStatus {
for _, subpool := range p.subpools { for _, subpool := range p.subpools {
if status := subpool.Status(hash); status != TxStatusUnknown { if status := subpool.Status(hash); status != TxStatusUnknown {

View File

@ -114,7 +114,7 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types
if sidecar == nil { if sidecar == nil {
return fmt.Errorf("missing sidecar in blob transaction") return fmt.Errorf("missing sidecar in blob transaction")
} }
// Ensure the number of items in the blob transaction and vairous side // Ensure the number of items in the blob transaction and various side
// data match up before doing any expensive validations // data match up before doing any expensive validations
hashes := tx.BlobHashes() hashes := tx.BlobHashes()
if len(hashes) == 0 { if len(hashes) == 0 {
@ -182,7 +182,7 @@ type ValidationOptionsWithState struct {
// be rejected once the number of remaining slots reaches zero. // be rejected once the number of remaining slots reaches zero.
UsedAndLeftSlots func(addr common.Address) (int, int) UsedAndLeftSlots func(addr common.Address) (int, int)
// ExistingExpenditure is a mandatory callback to retrieve the cummulative // ExistingExpenditure is a mandatory callback to retrieve the cumulative
// cost of the already pooled transactions to check for overdrafts. // cost of the already pooled transactions to check for overdrafts.
ExistingExpenditure func(addr common.Address) *big.Int ExistingExpenditure func(addr common.Address) *big.Int
@ -237,7 +237,7 @@ func ValidateTransactionWithState(tx *types.Transaction, signer types.Signer, op
return fmt.Errorf("%w: balance %v, queued cost %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, balance, spent, cost, new(big.Int).Sub(need, balance)) return fmt.Errorf("%w: balance %v, queued cost %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, balance, spent, cost, new(big.Int).Sub(need, balance))
} }
// Transaction takes a new nonce value out of the pool. Ensure it doesn't // Transaction takes a new nonce value out of the pool. Ensure it doesn't
// overflow the number of permitted transactions from a single accoun // overflow the number of permitted transactions from a single account
// (i.e. max cancellable via out-of-bound transaction). // (i.e. max cancellable via out-of-bound transaction).
if used, left := opts.UsedAndLeftSlots(from); left <= 0 { if used, left := opts.UsedAndLeftSlots(from); left <= 0 {
return fmt.Errorf("%w: pooled %d txs", ErrAccountLimitExceeded, used) return fmt.Errorf("%w: pooled %d txs", ErrAccountLimitExceeded, used)

View File

@ -251,6 +251,7 @@ func opKeccak256(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) (
size.SetBytes(interpreter.hasherBuf[:]) size.SetBytes(interpreter.hasherBuf[:])
return nil, nil return nil, nil
} }
func opAddress(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opAddress(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Address().Bytes())) scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Address().Bytes()))
return nil, nil return nil, nil
@ -267,6 +268,7 @@ func opOrigin(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
scope.Stack.push(new(uint256.Int).SetBytes(interpreter.evm.Origin.Bytes())) scope.Stack.push(new(uint256.Int).SetBytes(interpreter.evm.Origin.Bytes()))
return nil, nil return nil, nil
} }
func opCaller(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { func opCaller(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Caller().Bytes())) scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Caller().Bytes()))
return nil, nil return nil, nil

View File

@ -204,7 +204,10 @@ func (b *EthAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.B
return nil, nil, errors.New("header not found") return nil, nil, errors.New("header not found")
} }
stateDb, err := b.eth.BlockChain().StateAt(header.Root) stateDb, err := b.eth.BlockChain().StateAt(header.Root)
return stateDb, header, err if err != nil {
return nil, nil, err
}
return stateDb, header, nil
} }
func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) { func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) {
@ -223,7 +226,10 @@ func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockN
return nil, nil, errors.New("hash is not currently canonical") return nil, nil, errors.New("hash is not currently canonical")
} }
stateDb, err := b.eth.BlockChain().StateAt(header.Root) stateDb, err := b.eth.BlockChain().StateAt(header.Root)
return stateDb, header, err if err != nil {
return nil, nil, err
}
return stateDb, header, nil
} }
return nil, nil, errors.New("invalid arguments; neither block nor hash specified") return nil, nil, errors.New("invalid arguments; neither block nor hash specified")
} }

View File

@ -387,7 +387,7 @@ func (s *Ethereum) shouldPreserve(header *types.Header) bool {
// r5 A [X] F G // r5 A [X] F G
// r6 [X] // r6 [X]
// //
// In the round5, the inturn signer E is offline, so the worst case // In the round5, the in-turn signer E is offline, so the worst case
// is A, F and G sign the block of round5 and reject the block of opponents // is A, F and G sign the block of round5 and reject the block of opponents
// and in the round6, the last available signer B is offline, the whole // and in the round6, the last available signer B is offline, the whole
// network is stuck. // network is stuck.
@ -474,7 +474,7 @@ func (s *Ethereum) Engine() consensus.Engine { return s.engine }
func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb } func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb }
func (s *Ethereum) IsListening() bool { return true } // Always listening func (s *Ethereum) IsListening() bool { return true } // Always listening
func (s *Ethereum) Downloader() *downloader.Downloader { return s.handler.downloader } func (s *Ethereum) Downloader() *downloader.Downloader { return s.handler.downloader }
func (s *Ethereum) Synced() bool { return s.handler.acceptTxs.Load() } func (s *Ethereum) Synced() bool { return s.handler.synced.Load() }
func (s *Ethereum) SetSynced() { s.handler.enableSyncedFeatures() } func (s *Ethereum) SetSynced() { s.handler.enableSyncedFeatures() }
func (s *Ethereum) ArchiveMode() bool { return s.config.NoPruning } func (s *Ethereum) ArchiveMode() bool { return s.config.NoPruning }
func (s *Ethereum) BloomIndexer() *core.ChainIndexer { return s.bloomIndexer } func (s *Ethereum) BloomIndexer() *core.ChainIndexer { return s.bloomIndexer }

View File

@ -403,7 +403,9 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int,
// subsequent state reads, explicitly disable the trie database; the state // subsequent state reads, explicitly disable the trie database; the state
// syncer is responsible for addressing and correcting any missing state. // syncer is responsible for addressing and correcting any missing state.
if d.blockchain.TrieDB().Scheme() == rawdb.PathScheme { if d.blockchain.TrieDB().Scheme() == rawdb.PathScheme {
d.blockchain.TrieDB().Reset(types.EmptyRootHash) if err := d.blockchain.TrieDB().Disable(); err != nil {
return err
}
} }
// Snap sync uses the snapshot namespace to store potentially flaky data until // Snap sync uses the snapshot namespace to store potentially flaky data until
// sync completely heals and finishes. Pause snapshot maintenance in the mean- // sync completely heals and finishes. Pause snapshot maintenance in the mean-
@ -1280,41 +1282,13 @@ func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error {
// keeps processing and scheduling them into the header chain and downloader's // keeps processing and scheduling them into the header chain and downloader's
// queue until the stream ends or a failure occurs. // queue until the stream ends or a failure occurs.
func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode bool) error { func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode bool) error {
// Keep a count of uncertain headers to roll back
var ( var (
rollback uint64 // Zero means no rollback (fine as you can't unroll the genesis) mode = d.getMode()
rollbackErr error gotHeaders = false // Wait for batches of headers to process
mode = d.getMode()
) )
defer func() {
if rollback > 0 {
lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
if mode != LightSync {
lastFastBlock = d.blockchain.CurrentSnapBlock().Number
lastBlock = d.blockchain.CurrentBlock().Number
}
if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block
// We're already unwinding the stack, only print the error to make it more visible
log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err)
}
curFastBlock, curBlock := common.Big0, common.Big0
if mode != LightSync {
curFastBlock = d.blockchain.CurrentSnapBlock().Number
curBlock = d.blockchain.CurrentBlock().Number
}
log.Warn("Rolled back chain segment",
"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
"snap", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
"block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr)
}
}()
// Wait for batches of headers to process
gotHeaders := false
for { for {
select { select {
case <-d.cancelCh: case <-d.cancelCh:
rollbackErr = errCanceled
return errCanceled return errCanceled
case task := <-d.headerProcCh: case task := <-d.headerProcCh:
@ -1363,8 +1337,6 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
} }
} }
} }
// Disable any rollback and return
rollback = 0
return nil return nil
} }
// Otherwise split the chunk of headers into batches and process them // Otherwise split the chunk of headers into batches and process them
@ -1375,7 +1347,6 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
// Terminate if something failed in between processing chunks // Terminate if something failed in between processing chunks
select { select {
case <-d.cancelCh: case <-d.cancelCh:
rollbackErr = errCanceled
return errCanceled return errCanceled
default: default:
} }
@ -1422,29 +1393,11 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
} }
if len(chunkHeaders) > 0 { if len(chunkHeaders) > 0 {
if n, err := d.lightchain.InsertHeaderChain(chunkHeaders); err != nil { if n, err := d.lightchain.InsertHeaderChain(chunkHeaders); err != nil {
rollbackErr = err
// If some headers were inserted, track them as uncertain
if mode == SnapSync && n > 0 && rollback == 0 {
rollback = chunkHeaders[0].Number.Uint64()
}
log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err) log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
return fmt.Errorf("%w: %v", errInvalidChain, err) return fmt.Errorf("%w: %v", errInvalidChain, err)
} }
// All verifications passed, track all headers within the allowed limits
if mode == SnapSync {
head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64()
if head-rollback > uint64(fsHeaderSafetyNet) {
rollback = head - uint64(fsHeaderSafetyNet)
} else {
rollback = 1
}
}
} }
if len(rejected) != 0 { if len(rejected) != 0 {
// Merge threshold reached, stop importing, but don't roll back
rollback = 0
log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd) log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd)
return ErrMergeTransition return ErrMergeTransition
} }
@ -1455,7 +1408,6 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
select { select {
case <-d.cancelCh: case <-d.cancelCh:
rollbackErr = errCanceled
return errCanceled return errCanceled
case <-time.After(time.Second): case <-time.After(time.Second):
} }
@ -1463,7 +1415,6 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
// Otherwise insert the headers for content retrieval // Otherwise insert the headers for content retrieval
inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin) inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin)
if len(inserts) != len(chunkHeaders) { if len(inserts) != len(chunkHeaders) {
rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunkHeaders))
return fmt.Errorf("%w: stale headers", errBadPeer) return fmt.Errorf("%w: stale headers", errBadPeer)
} }
} }

View File

@ -878,86 +878,6 @@ func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
assertOwnChain(t, tester, len(chain.blocks)) assertOwnChain(t, tester, len(chain.blocks))
} }
// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback66Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, SnapSync) }
func TestInvalidHeaderRollback67Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH67, SnapSync) }
func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
defer tester.terminate()
// Create a small enough block chain to download
targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
chain := testChainBase.shorten(targetBlocks)
// Attempt to sync with an attacker that feeds junk during the fast sync phase.
// This should result in the last fsHeaderSafetyNet headers being rolled back.
missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
fastAttacker := tester.newPeer("fast-attack", protocol, chain.blocks[1:])
fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}
if err := tester.sync("fast-attack", nil, mode); err == nil {
t.Fatalf("succeeded fast attacker synchronisation")
}
if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
}
// Attempt to sync with an attacker that feeds junk during the block import phase.
// This should result in both the last fsHeaderSafetyNet number of headers being
// rolled back, and also the pivot point being reverted to a non-block status.
missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
blockAttacker := tester.newPeer("block-attack", protocol, chain.blocks[1:])
fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} // Make sure the fast-attacker doesn't fill in
blockAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{}
if err := tester.sync("block-attack", nil, mode); err == nil {
t.Fatalf("succeeded block attacker synchronisation")
}
if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
}
if mode == SnapSync {
if head := tester.chain.CurrentBlock().Number.Uint64(); head != 0 {
t.Errorf("fast sync pivot block #%d not rolled back", head)
}
}
// Attempt to sync with an attacker that withholds promised blocks after the
// fast sync pivot point. This could be a trial to leave the node with a bad
// but already imported pivot block.
withholdAttacker := tester.newPeer("withhold-attack", protocol, chain.blocks[1:])
tester.downloader.syncInitHook = func(uint64, uint64) {
for i := missing; i < len(chain.blocks); i++ {
withholdAttacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{}
}
tester.downloader.syncInitHook = nil
}
if err := tester.sync("withhold-attack", nil, mode); err == nil {
t.Fatalf("succeeded withholding attacker synchronisation")
}
if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
}
if mode == SnapSync {
if head := tester.chain.CurrentBlock().Number.Uint64(); head != 0 {
t.Errorf("fast sync pivot block #%d not rolled back", head)
}
}
// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
// should also disable fast syncing for this process, verify that we did a fresh full
// sync. Note, we can't assert anything about the receipts since we won't purge the
// database of them, hence we can't use assertOwnChain.
tester.newPeer("valid", protocol, chain.blocks[1:])
if err := tester.sync("valid", nil, mode); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
assertOwnChain(t, tester, len(chain.blocks))
}
// Tests that a peer advertising a high TD doesn't get to stall the downloader // Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes. // afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack66Full(t *testing.T) { func TestHighTDStarvationAttack66Full(t *testing.T) {

View File

@ -423,7 +423,7 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) {
for _, peer := range s.peers.AllPeers() { for _, peer := range s.peers.AllPeers() {
s.idles[peer.id] = peer s.idles[peer.id] = peer
} }
// Nofity any tester listening for startup events // Notify any tester listening for startup events
if s.syncStarting != nil { if s.syncStarting != nil {
s.syncStarting() s.syncStarting()
} }


@ -24,8 +24,8 @@ import (
"sort" "sort"
"time" "time"
mapset "github.com/deckarep/golang-set/v2"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
@ -53,6 +53,9 @@ const (
// re-request them. // re-request them.
maxTxUnderpricedSetSize = 32768 maxTxUnderpricedSetSize = 32768
// maxTxUnderpricedTimeout is the max time a transaction should be stuck in the underpriced set.
maxTxUnderpricedTimeout = int64(5 * time.Minute / time.Second) // expressed in seconds to match the Unix timestamps it is compared against
// txArriveTimeout is the time allowance before an announced transaction is // txArriveTimeout is the time allowance before an announced transaction is
// explicitly requested. // explicitly requested.
txArriveTimeout = 500 * time.Millisecond txArriveTimeout = 500 * time.Millisecond
@ -148,7 +151,7 @@ type TxFetcher struct {
drop chan *txDrop drop chan *txDrop
quit chan struct{} quit chan struct{}
underpriced mapset.Set[common.Hash] // Transactions discarded as too cheap (don't re-fetch) underpriced *lru.Cache[common.Hash, int64] // Transactions discarded as too cheap (don't re-fetch)
// Stage 1: Waiting lists for newly discovered transactions that might be // Stage 1: Waiting lists for newly discovered transactions that might be
// broadcast without needing explicit request/reply round trips. // broadcast without needing explicit request/reply round trips.
@ -202,7 +205,7 @@ func NewTxFetcherForTests(
fetching: make(map[common.Hash]string), fetching: make(map[common.Hash]string),
requests: make(map[string]*txRequest), requests: make(map[string]*txRequest),
alternates: make(map[common.Hash]map[string]struct{}), alternates: make(map[common.Hash]map[string]struct{}),
underpriced: mapset.NewSet[common.Hash](), underpriced: lru.NewCache[common.Hash, int64](maxTxUnderpricedSetSize),
hasTx: hasTx, hasTx: hasTx,
addTxs: addTxs, addTxs: addTxs,
fetchTxs: fetchTxs, fetchTxs: fetchTxs,
@ -223,17 +226,16 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
// still valuable to check here because it runs concurrent to the internal // still valuable to check here because it runs concurrent to the internal
// loop, so anything caught here is time saved internally. // loop, so anything caught here is time saved internally.
var ( var (
unknowns = make([]common.Hash, 0, len(hashes)) unknowns = make([]common.Hash, 0, len(hashes))
duplicate, underpriced int64 duplicate int64
underpriced int64
) )
for _, hash := range hashes { for _, hash := range hashes {
switch { switch {
case f.hasTx(hash): case f.hasTx(hash):
duplicate++ duplicate++
case f.isKnownUnderpriced(hash):
case f.underpriced.Contains(hash):
underpriced++ underpriced++
default: default:
unknowns = append(unknowns, hash) unknowns = append(unknowns, hash)
} }
@ -245,10 +247,7 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
if len(unknowns) == 0 { if len(unknowns) == 0 {
return nil return nil
} }
announce := &txAnnounce{ announce := &txAnnounce{origin: peer, hashes: unknowns}
origin: peer,
hashes: unknowns,
}
select { select {
case f.notify <- announce: case f.notify <- announce:
return nil return nil
@ -257,6 +256,16 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
} }
} }
// isKnownUnderpriced reports whether a transaction hash was recently found to be underpriced.
func (f *TxFetcher) isKnownUnderpriced(hash common.Hash) bool {
prevTime, ok := f.underpriced.Peek(hash)
if ok && prevTime+maxTxUnderpricedTimeout < time.Now().Unix() {
f.underpriced.Remove(hash)
return false
}
return ok
}
// Enqueue imports a batch of received transaction into the transaction pool // Enqueue imports a batch of received transaction into the transaction pool
// and the fetcher. This method may be called by both transaction broadcasts and // and the fetcher. This method may be called by both transaction broadcasts and
// direct request replies. The differentiation is important so the fetcher can // direct request replies. The differentiation is important so the fetcher can
@ -300,10 +309,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
// Avoid re-request this transaction when we receive another // Avoid re-request this transaction when we receive another
// announcement. // announcement.
if errors.Is(err, txpool.ErrUnderpriced) || errors.Is(err, txpool.ErrReplaceUnderpriced) { if errors.Is(err, txpool.ErrUnderpriced) || errors.Is(err, txpool.ErrReplaceUnderpriced) {
for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize { f.underpriced.Add(batch[j].Hash(), batch[j].Time().Unix())
f.underpriced.Pop()
}
f.underpriced.Add(batch[j].Hash())
} }
// Track a few interesting failure types // Track a few interesting failure types
switch { switch {
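
The mapset-to-LRU switch changes what an "underpriced" mark means: instead of living forever in an unbounded set, a hash is now bounded by the cache capacity and aged out by the timeout checked in isKnownUnderpriced. A self-contained sketch of that expiry rule, using a plain map and Unix-second timestamps instead of the fetcher's internals (all names here are illustrative):

package main

import (
	"fmt"
	"time"
)

// underpricedTimeout mirrors the five-minute window above, expressed in
// Unix seconds so it can be compared against time.Now().Unix().
const underpricedTimeout = int64(5 * time.Minute / time.Second)

// underpricedSet is a stand-in for the bounded lru.Cache: it maps a
// transaction hash to the Unix second it was marked underpriced.
type underpricedSet map[[32]byte]int64

func (s underpricedSet) isKnownUnderpriced(hash [32]byte) bool {
	seen, ok := s[hash]
	if ok && seen+underpricedTimeout < time.Now().Unix() {
		delete(s, hash) // mark expired: allow the tx to be fetched again
		return false
	}
	return ok
}

func main() {
	set := underpricedSet{}
	var hash [32]byte
	set[hash] = time.Now().Unix() - 600       // marked ten minutes ago
	fmt.Println(set.isKnownUnderpriced(hash)) // false: the mark has aged out
}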


@ -1509,8 +1509,8 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
} }
case isUnderpriced: case isUnderpriced:
if fetcher.underpriced.Cardinality() != int(step) { if fetcher.underpriced.Len() != int(step) {
t.Errorf("step %d: underpriced set size mismatch: have %d, want %d", i, fetcher.underpriced.Cardinality(), step) t.Errorf("step %d: underpriced set size mismatch: have %d, want %d", i, fetcher.underpriced.Len(), step)
} }
default: default:


@ -100,8 +100,8 @@ type handler struct {
networkID uint64 networkID uint64
forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node
snapSync atomic.Bool // Flag whether snap sync is enabled (gets disabled if we already have blocks) snapSync atomic.Bool // Flag whether snap sync is enabled (gets disabled if we already have blocks)
acceptTxs atomic.Bool // Flag whether we're considered synchronised (enables transaction processing) synced atomic.Bool // Flag whether we're considered synchronised (enables transaction processing)
database ethdb.Database database ethdb.Database
txpool txPool txpool txPool
@ -163,32 +163,24 @@ func newHandler(config *handlerConfig) (*handler, error) {
fullBlock, snapBlock := h.chain.CurrentBlock(), h.chain.CurrentSnapBlock() fullBlock, snapBlock := h.chain.CurrentBlock(), h.chain.CurrentSnapBlock()
if fullBlock.Number.Uint64() == 0 && snapBlock.Number.Uint64() > 0 { if fullBlock.Number.Uint64() == 0 && snapBlock.Number.Uint64() > 0 {
h.snapSync.Store(true) h.snapSync.Store(true)
log.Warn("Switch sync mode from full sync to snap sync") log.Warn("Switch sync mode from full sync to snap sync", "reason", "snap sync incomplete")
} else if !h.chain.HasState(fullBlock.Root) {
h.snapSync.Store(true)
log.Warn("Switch sync mode from full sync to snap sync", "reason", "head state missing")
} }
} else { } else {
if h.chain.CurrentBlock().Number.Uint64() > 0 { head := h.chain.CurrentBlock()
if head.Number.Uint64() > 0 && h.chain.HasState(head.Root) {
// Print warning log if database is not empty to run snap sync. // Print warning log if database is not empty to run snap sync.
log.Warn("Switch sync mode from snap sync to full sync") log.Warn("Switch sync mode from snap sync to full sync", "reason", "snap sync complete")
} else { } else {
// If snap sync was requested and our database is empty, grant it // If snap sync was requested and our database is empty, grant it
h.snapSync.Store(true) h.snapSync.Store(true)
log.Info("Enabled snap sync", "head", head.Number, "hash", head.Hash())
} }
} }
// If sync succeeds, pass a callback to potentially disable snap sync mode
// and enable transaction propagation.
success := func() {
// If we were running snap sync and it finished, disable doing another
// round on next sync cycle
if h.snapSync.Load() {
log.Info("Snap sync complete, auto disabling")
h.snapSync.Store(false)
}
// If we've successfully finished a sync cycle, accept transactions from
// the network
h.enableSyncedFeatures()
}
// Construct the downloader (long sync) // Construct the downloader (long sync)
h.downloader = downloader.New(config.Database, h.eventMux, h.chain, nil, h.removePeer, success) h.downloader = downloader.New(config.Database, h.eventMux, h.chain, nil, h.removePeer, h.enableSyncedFeatures)
if ttd := h.chain.Config().TerminalTotalDifficulty; ttd != nil { if ttd := h.chain.Config().TerminalTotalDifficulty; ttd != nil {
if h.chain.Config().TerminalTotalDifficultyPassed { if h.chain.Config().TerminalTotalDifficultyPassed {
log.Info("Chain post-merge, sync via beacon client") log.Info("Chain post-merge, sync via beacon client")
@ -245,8 +237,8 @@ func newHandler(config *handlerConfig) (*handler, error) {
// accept each others' blocks until a restart. Unfortunately we haven't figured // accept each others' blocks until a restart. Unfortunately we haven't figured
// out a way yet where nodes can decide unilaterally whether the network is new // out a way yet where nodes can decide unilaterally whether the network is new
// or not. This should be fixed if we figure out a solution. // or not. This should be fixed if we figure out a solution.
if h.snapSync.Load() { if !h.synced.Load() {
log.Warn("Snap syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash()) log.Warn("Syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
return 0, nil return 0, nil
} }
if h.merger.TDDReached() { if h.merger.TDDReached() {
@ -272,11 +264,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
} }
return 0, nil return 0, nil
} }
n, err := h.chain.InsertChain(blocks) return h.chain.InsertChain(blocks)
if err == nil {
h.enableSyncedFeatures() // Mark initial sync done on any fetcher import
}
return n, err
} }
h.blockFetcher = fetcher.NewBlockFetcher(false, nil, h.chain.GetBlockByHash, validator, h.BroadcastBlock, heighter, nil, inserter, h.removePeer) h.blockFetcher = fetcher.NewBlockFetcher(false, nil, h.chain.GetBlockByHash, validator, h.BroadcastBlock, heighter, nil, inserter, h.removePeer)
@ -680,7 +668,15 @@ func (h *handler) txBroadcastLoop() {
// enableSyncedFeatures enables the post-sync functionalities when the initial // enableSyncedFeatures enables the post-sync functionalities when the initial
// sync is finished. // sync is finished.
func (h *handler) enableSyncedFeatures() { func (h *handler) enableSyncedFeatures() {
h.acceptTxs.Store(true) // Mark the local node as synced.
h.synced.Store(true)
// If we were running snap sync and it finished, disable doing another
// round on next sync cycle
if h.snapSync.Load() {
log.Info("Snap sync complete, auto disabling")
h.snapSync.Store(false)
}
if h.chain.TrieDB().Scheme() == rawdb.PathScheme { if h.chain.TrieDB().Scheme() == rawdb.PathScheme {
h.chain.TrieDB().SetBufferSize(pathdb.DefaultBufferSize) h.chain.TrieDB().SetBufferSize(pathdb.DefaultBufferSize)
} }


@ -51,7 +51,7 @@ func (h *ethHandler) PeerInfo(id enode.ID) interface{} {
// AcceptTxs retrieves whether transaction processing is enabled on the node // AcceptTxs retrieves whether transaction processing is enabled on the node
// or if inbound transactions should simply be dropped. // or if inbound transactions should simply be dropped.
func (h *ethHandler) AcceptTxs() bool { func (h *ethHandler) AcceptTxs() bool {
return h.acceptTxs.Load() return h.synced.Load()
} }
// Handle is invoked from a peer's message handler when it receives a new remote // Handle is invoked from a peer's message handler when it receives a new remote
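
Renaming acceptTxs to synced widens the flag's meaning: the same bit now gates both inbound transactions (via AcceptTxs) and propagated-block import in the fetcher's inserter. The gating itself is a single atomic load, roughly as below; the struct shape is an assumption for illustration:

import (
	"errors"
	"sync/atomic"

	"github.com/ethereum/go-ethereum/core/types"
)

type gatedHandler struct {
	synced atomic.Bool // flipped once the initial sync completes
}

func (h *gatedHandler) acceptTx(tx *types.Transaction) error {
	if !h.synced.Load() {
		return errors.New("still syncing, transaction dropped")
	}
	// ... hand the transaction to the pool ...
	return nil
}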


@ -248,7 +248,7 @@ func testRecvTransactions(t *testing.T, protocol uint) {
handler := newTestHandler() handler := newTestHandler()
defer handler.close() defer handler.close()
handler.handler.acceptTxs.Store(true) // mark synced to accept transactions handler.handler.synced.Store(true) // mark synced to accept transactions
txs := make(chan core.NewTxsEvent) txs := make(chan core.NewTxsEvent)
sub := handler.txpool.SubscribeNewTxsEvent(txs) sub := handler.txpool.SubscribeNewTxsEvent(txs)
@ -401,7 +401,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
sinks[i] = newTestHandler() sinks[i] = newTestHandler()
defer sinks[i].close() defer sinks[i].close()
sinks[i].handler.acceptTxs.Store(true) // mark synced to accept transactions sinks[i].handler.synced.Store(true) // mark synced to accept transactions
} }
// Interconnect all the sink handlers with the source handler // Interconnect all the sink handlers with the source handler
for i, sink := range sinks { for i, sink := range sinks {


@ -197,16 +197,25 @@ func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
return downloader.SnapSync, td return downloader.SnapSync, td
} }
// We are probably in full sync, but we might have rewound to before the // We are probably in full sync, but we might have rewound to before the
// snap sync pivot, check if we should reenable // snap sync pivot, check if we should re-enable snap sync.
head := cs.handler.chain.CurrentBlock()
if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil { if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil {
if head := cs.handler.chain.CurrentBlock(); head.Number.Uint64() < *pivot { if head.Number.Uint64() < *pivot {
block := cs.handler.chain.CurrentSnapBlock() block := cs.handler.chain.CurrentSnapBlock()
td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64()) td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64())
return downloader.SnapSync, td return downloader.SnapSync, td
} }
} }
// We are in a full sync, but the associated head state is missing. To complete
// the head state, forcefully rerun the snap sync. Note it doesn't mean the
// persistent state is corrupted, just mismatched with the head block.
if !cs.handler.chain.HasState(head.Root) {
block := cs.handler.chain.CurrentSnapBlock()
td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64())
log.Info("Reenabled snap sync as chain is stateless")
return downloader.SnapSync, td
}
// Nope, we're really full syncing // Nope, we're really full syncing
head := cs.handler.chain.CurrentBlock()
td := cs.handler.chain.GetTd(head.Hash(), head.Number.Uint64()) td := cs.handler.chain.GetTd(head.Hash(), head.Number.Uint64())
return downloader.FullSync, td return downloader.FullSync, td
} }
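
Condensed, the mode selection above has three branches: stay in snap sync while the recorded pivot is still ahead of the head, re-enable snap sync when the head state is missing, and only then fall back to full sync. A sketch with assumed helper signatures, not the actual method:

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/downloader"
)

// syncModeFor is an illustrative reduction of modeAndLocalHead: pivot is
// the stored snap sync pivot (nil if none) and hasState reports whether
// the state for a root is present locally.
func syncModeFor(head *types.Header, pivot *uint64, hasState func(common.Hash) bool) downloader.SyncMode {
	if pivot != nil && head.Number.Uint64() < *pivot {
		return downloader.SnapSync // still below the old pivot
	}
	if !hasState(head.Root) {
		return downloader.SnapSync // head state missing, resync it
	}
	return downloader.FullSync
}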
@ -242,13 +251,7 @@ func (h *handler) doSync(op *chainSyncOp) error {
if err != nil { if err != nil {
return err return err
} }
if h.snapSync.Load() { h.enableSyncedFeatures()
log.Info("Snap sync complete, auto disabling")
h.snapSync.Store(false)
}
// If we've successfully finished a sync cycle, enable accepting transactions
// from the network.
h.acceptTxs.Store(true)
head := h.chain.CurrentBlock() head := h.chain.CurrentBlock()
if head.Number.Uint64() > 0 { if head.Number.Uint64() > 0 {


@ -108,10 +108,10 @@ func (ec *Client) PeerCount(ctx context.Context) (uint64, error) {
return uint64(result), err return uint64(result), err
} }
// BlockReceipts returns the receipts of a given block number or hash // BlockReceipts returns the receipts of a given block number or hash.
func (ec *Client) BlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.Receipt, error) { func (ec *Client) BlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.Receipt, error) {
var r []*types.Receipt var r []*types.Receipt
err := ec.c.CallContext(ctx, &r, "eth_getBlockReceipts", blockNrOrHash) err := ec.c.CallContext(ctx, &r, "eth_getBlockReceipts", blockNrOrHash.String())
if err == nil && r == nil { if err == nil && r == nil {
return nil, ethereum.NotFound return nil, ethereum.NotFound
} }
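
With this fix the parameter travels in its string form ("latest", a hex block number, or a block hash), which the eth_getBlockReceipts handler accepts. Typical client-side use, with a placeholder endpoint:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	// Fetch all receipts of the latest block in a single round trip.
	receipts, err := client.BlockReceipts(context.Background(),
		rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("receipts in latest block:", len(receipts))
}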

go.mod

@ -46,7 +46,7 @@ require (
github.com/jackpal/go-nat-pmp v1.0.2 github.com/jackpal/go-nat-pmp v1.0.2
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267
github.com/julienschmidt/httprouter v1.3.0 github.com/julienschmidt/httprouter v1.3.0
github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c github.com/karalabe/usb v0.0.2
github.com/kylelemons/godebug v1.1.0 github.com/kylelemons/godebug v1.1.0
github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.16 github.com/mattn/go-isatty v0.0.16

go.sum

@ -362,8 +362,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c h1:AqsttAyEyIEsNz5WLRwuRwjiT5CMDUfLk6cFJDVPebs= github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk=
github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U= github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U=
github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw= github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw=


@ -87,8 +87,9 @@ var (
Category: flags.LoggingCategory, Category: flags.LoggingCategory,
} }
logRotateFlag = &cli.BoolFlag{ logRotateFlag = &cli.BoolFlag{
Name: "log.rotate", Name: "log.rotate",
Usage: "Enables log file rotation", Usage: "Enables log file rotation",
Category: flags.LoggingCategory,
} }
logMaxSizeMBsFlag = &cli.IntFlag{ logMaxSizeMBsFlag = &cli.IntFlag{
Name: "log.maxsize", Name: "log.maxsize",


@ -1158,8 +1158,12 @@ func (e *revertError) ErrorData() interface{} {
// //
// Note, this function doesn't make any changes in the state/blockchain and is // Note, this function doesn't make any changes in the state/blockchain and is
// useful to execute and retrieve values. // useful to execute and retrieve values.
func (s *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides) (hexutil.Bytes, error) { func (s *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides) (hexutil.Bytes, error) {
result, err := DoCall(ctx, s.b, args, blockNrOrHash, overrides, blockOverrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap()) if blockNrOrHash == nil {
latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
blockNrOrHash = &latest
}
result, err := DoCall(ctx, s.b, args, *blockNrOrHash, overrides, blockOverrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap())
if err != nil { if err != nil {
return nil, err return nil, err
} }
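
With the pointer signature, a JSON-RPC request that simply omits the block argument is now evaluated against the latest block instead of failing to bind the parameter. For example, via the raw RPC client (the call object uses dummy values):

import (
	"context"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

func callLatest(client *rpc.Client) (hexutil.Bytes, error) {
	var out hexutil.Bytes
	// No block parameter after the call object: the server now defaults
	// it to "latest" instead of rejecting the request.
	err := client.CallContext(context.Background(), &out, "eth_call", map[string]interface{}{
		"to":   "0x0000000000000000000000000000000000000000",
		"data": "0x",
	})
	return out, err
}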


@ -846,7 +846,7 @@ func TestCall(t *testing.T) {
}, },
} }
for i, tc := range testSuite { for i, tc := range testSuite {
result, err := api.Call(context.Background(), tc.call, rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides, &tc.blockOverrides) result, err := api.Call(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides, &tc.blockOverrides)
if tc.expectErr != nil { if tc.expectErr != nil {
if err == nil { if err == nil {
t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr) t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr)


@ -64,6 +64,7 @@ func (m *mockBackend) StateAtBlock(block *types.Block, reexec uint64, base *stat
} }
type testBlockChain struct { type testBlockChain struct {
root common.Hash
config *params.ChainConfig config *params.ChainConfig
statedb *state.StateDB statedb *state.StateDB
gasLimit uint64 gasLimit uint64
@ -89,6 +90,10 @@ func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
return bc.statedb, nil return bc.statedb, nil
} }
func (bc *testBlockChain) HasState(root common.Hash) bool {
return bc.root == root
}
func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
return bc.chainHeadFeed.Subscribe(ch) return bc.chainHeadFeed.Subscribe(ch)
} }
@ -302,7 +307,7 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) {
t.Fatalf("can't create new chain %v", err) t.Fatalf("can't create new chain %v", err)
} }
statedb, _ := state.New(bc.Genesis().Root(), bc.StateCache(), nil) statedb, _ := state.New(bc.Genesis().Root(), bc.StateCache(), nil)
blockchain := &testBlockChain{chainConfig, statedb, 10000000, new(event.Feed)} blockchain := &testBlockChain{bc.Genesis().Root(), chainConfig, statedb, 10000000, new(event.Feed)}
pool := legacypool.New(testTxPoolConfig, blockchain) pool := legacypool.New(testTxPoolConfig, blockchain)
txpool, _ := txpool.New(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain, []txpool.SubPool{pool}) txpool, _ := txpool.New(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain, []txpool.SubPool{pool})


@ -26,7 +26,7 @@ import (
// Genesis hashes to enforce below configs on. // Genesis hashes to enforce below configs on.
var ( var (
MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")
HoleskyGenesisHash = common.HexToHash("0xff9006519a8ce843ac9c28549d24211420b546e12ce2d170c77a8cca7964f23d") HoleskyGenesisHash = common.HexToHash("0xb5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4")
SepoliaGenesisHash = common.HexToHash("0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9") SepoliaGenesisHash = common.HexToHash("0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9")
GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a") GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a")
) )
@ -80,8 +80,7 @@ var (
TerminalTotalDifficulty: big.NewInt(0), TerminalTotalDifficulty: big.NewInt(0),
TerminalTotalDifficultyPassed: true, TerminalTotalDifficultyPassed: true,
MergeNetsplitBlock: nil, MergeNetsplitBlock: nil,
ShanghaiTime: newUint64(1694790240), ShanghaiTime: newUint64(1696000704),
CancunTime: newUint64(2000000000),
Ethash: new(EthashConfig), Ethash: new(EthashConfig),
} }
// SepoliaChainConfig contains the chain parameters to run a node on the Sepolia test network. // SepoliaChainConfig contains the chain parameters to run a node on the Sepolia test network.


@ -23,7 +23,7 @@ import (
const ( const (
VersionMajor = 1 // Major version component of the current release VersionMajor = 1 // Major version component of the current release
VersionMinor = 13 // Minor version component of the current release VersionMinor = 13 // Minor version component of the current release
VersionPatch = 1 // Patch version component of the current release VersionPatch = 2 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string VersionMeta = "stable" // Version metadata to append to the version string
) )


@ -191,7 +191,7 @@ func txFactory() {
"from": coinBase, "from": coinBase,
} }
for i := 0; i < 126; i ++ { for i := 0; i < 10; i ++ {
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
err = client.Call(&t3, "eth_sendTransaction", genericArg) err = client.Call(&t3, "eth_sendTransaction", genericArg)
if err != nil { if err != nil {
@ -293,7 +293,5 @@ func testGetContractCode(hash core.Hash) {
log.Error("Exit with error, return value from GetContractCode is divergent from control value") log.Error("Exit with error, return value from GetContractCode is divergent from control value")
os.Exit(1) os.Exit(1)
} }
log.Info("made it through checkGetContractCode")
} }


@ -56,7 +56,7 @@ if ps -p $pid1 > /dev/null; then
kill $pid1 kill $pid1
fi fi
sleep 255 sleep 25
if ps -p $pid0 > /dev/null; then if ps -p $pid0 > /dev/null; then
kill $pid0 kill $pid0


@ -189,6 +189,15 @@ func (db *Database) WritePreimages() {
} }
} }
// Preimage retrieves a cached trie node pre-image from memory. If it cannot be
// found cached, the method queries the persistent database for the content.
func (db *Database) Preimage(hash common.Hash) []byte {
if db.preimages == nil {
return nil
}
return db.preimages.preimage(hash)
}
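
Preimages map a keccak256-hashed trie key back to the raw key that produced it, so the new accessor lets callers recover, say, an account address from its hashed form, provided the node records preimages (e.g. geth's --cache.preimages flag). A brief usage sketch; the preimageReader interface is assumed here to stand in for the database type:

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// preimageReader is a minimal stand-in matching the accessor above.
type preimageReader interface {
	Preimage(hash common.Hash) []byte
}

func lookupRawKey(db preimageReader, rawKey []byte) {
	hashed := crypto.Keccak256Hash(rawKey)
	if pre := db.Preimage(hashed); pre != nil {
		fmt.Printf("preimage of %x is %x\n", hashed, pre)
	}
}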
// Cap iteratively flushes old but still referenced trie nodes until the total // Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold. The held pre-images accumulated // memory usage goes below the given threshold. The held pre-images accumulated
// up to this point will be flushed in case the size exceeds the threshold. // up to this point will be flushed in case the size exceeds the threshold.
@ -264,15 +273,27 @@ func (db *Database) Recoverable(root common.Hash) (bool, error) {
return pdb.Recoverable(root), nil return pdb.Recoverable(root), nil
} }
// Reset wipes all available journal from the persistent database and discard // Disable deactivates the database and invalidates all available state layers
// all caches and diff layers. Using the given root to create a new disk layer. // as stale to prevent access to the persistent state, which is in the syncing
// stage.
//
// It's only supported by path-based database and will return an error for others. // It's only supported by path-based database and will return an error for others.
func (db *Database) Reset(root common.Hash) error { func (db *Database) Disable() error {
pdb, ok := db.backend.(*pathdb.Database) pdb, ok := db.backend.(*pathdb.Database)
if !ok { if !ok {
return errors.New("not supported") return errors.New("not supported")
} }
return pdb.Reset(root) return pdb.Disable()
}
// Enable activates database and resets the state tree with the provided persistent
// state root once the state sync is finished.
func (db *Database) Enable(root common.Hash) error {
pdb, ok := db.backend.(*pathdb.Database)
if !ok {
return errors.New("not supported")
}
return pdb.Enable(root)
} }
// Journal commits an entire diff hierarchy to disk into a single journal entry. // Journal commits an entire diff hierarchy to disk into a single journal entry.
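
Taken together, Disable and Enable bracket a snap sync cycle on the path scheme: Disable freezes the stale layers before state download begins, and Enable rebuilds a fresh disk layer once the target root is fully persisted. Roughly, with syncedRoot as the assumed final state root and error handling abbreviated:

// db is a *trie.Database backed by the path scheme.
if err := db.Disable(); err != nil { // entering state sync: mark layers stale
	return err
}
// ... download and persist the target state ...
if err := db.Enable(syncedRoot); err != nil { // sync done: reset to the new root
	return err
}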


@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
) )
// ErrNotRequested is returned by the trie sync when it's requested to process a // ErrNotRequested is returned by the trie sync when it's requested to process a
@ -42,6 +43,16 @@ var ErrAlreadyProcessed = errors.New("already processed")
// memory if the node was configured with a significant number of peers. // memory if the node was configured with a significant number of peers.
const maxFetchesPerDepth = 16384 const maxFetchesPerDepth = 16384
var (
// deletionGauge is the metric to track how many trie node deletions
// are performed in total during the sync process.
deletionGauge = metrics.NewRegisteredGauge("trie/sync/delete", nil)
// lookupGauge is the metric to track how many trie node lookups are
// performed to determine if a node needs to be deleted.
lookupGauge = metrics.NewRegisteredGauge("trie/sync/lookup", nil)
)
// SyncPath is a path tuple identifying a particular trie node either in a single // SyncPath is a path tuple identifying a particular trie node either in a single
// trie (account) or a layered trie (account -> storage). // trie (account) or a layered trie (account -> storage).
// //
@ -93,9 +104,10 @@ type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Ha
// nodeRequest represents a scheduled or already in-flight trie node retrieval request. // nodeRequest represents a scheduled or already in-flight trie node retrieval request.
type nodeRequest struct { type nodeRequest struct {
hash common.Hash // Hash of the trie node to retrieve hash common.Hash // Hash of the trie node to retrieve
path []byte // Merkle path leading to this node for prioritization path []byte // Merkle path leading to this node for prioritization
data []byte // Data content of the node, cached until all subtrees complete data []byte // Data content of the node, cached until all subtrees complete
deletes [][]byte // List of internal path segments for trie nodes to delete
parent *nodeRequest // Parent state node referencing this entry parent *nodeRequest // Parent state node referencing this entry
deps int // Number of dependencies before allowed to commit this node deps int // Number of dependencies before allowed to commit this node
@ -125,18 +137,20 @@ type CodeSyncResult struct {
// syncMemBatch is an in-memory buffer of successfully downloaded but not yet // syncMemBatch is an in-memory buffer of successfully downloaded but not yet
// persisted data items. // persisted data items.
type syncMemBatch struct { type syncMemBatch struct {
nodes map[string][]byte // In-memory membatch of recently completed nodes nodes map[string][]byte // In-memory membatch of recently completed nodes
hashes map[string]common.Hash // Hashes of recently completed nodes hashes map[string]common.Hash // Hashes of recently completed nodes
codes map[common.Hash][]byte // In-memory membatch of recently completed codes deletes map[string]struct{} // List of paths for trie node to delete
size uint64 // Estimated batch-size of in-memory data. codes map[common.Hash][]byte // In-memory membatch of recently completed codes
size uint64 // Estimated batch-size of in-memory data.
} }
// newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes. // newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes.
func newSyncMemBatch() *syncMemBatch { func newSyncMemBatch() *syncMemBatch {
return &syncMemBatch{ return &syncMemBatch{
nodes: make(map[string][]byte), nodes: make(map[string][]byte),
hashes: make(map[string]common.Hash), hashes: make(map[string]common.Hash),
codes: make(map[common.Hash][]byte), deletes: make(map[string]struct{}),
codes: make(map[common.Hash][]byte),
} }
} }
@ -347,16 +361,23 @@ func (s *Sync) ProcessNode(result NodeSyncResult) error {
// Commit flushes the data stored in the internal membatch out to persistent // Commit flushes the data stored in the internal membatch out to persistent
// storage, returning any occurred error. // storage, returning any occurred error.
func (s *Sync) Commit(dbw ethdb.Batch) error { func (s *Sync) Commit(dbw ethdb.Batch) error {
// Dump the membatch into a database dbw // Flush the pending node writes into database batch.
for path, value := range s.membatch.nodes { for path, value := range s.membatch.nodes {
owner, inner := ResolvePath([]byte(path)) owner, inner := ResolvePath([]byte(path))
rawdb.WriteTrieNode(dbw, owner, inner, s.membatch.hashes[path], value, s.scheme) rawdb.WriteTrieNode(dbw, owner, inner, s.membatch.hashes[path], value, s.scheme)
} }
// Flush the pending node deletes into the database batch.
// Please note that each written and deleted node has a
// unique path, ensuring no duplication occurs.
for path := range s.membatch.deletes {
owner, inner := ResolvePath([]byte(path))
rawdb.DeleteTrieNode(dbw, owner, inner, common.Hash{} /* unused */, s.scheme)
}
// Flush the pending code writes into database batch.
for hash, value := range s.membatch.codes { for hash, value := range s.membatch.codes {
rawdb.WriteCode(dbw, hash, value) rawdb.WriteCode(dbw, hash, value)
} }
// Drop the membatch data and return s.membatch = newSyncMemBatch() // reset the batch
s.membatch = newSyncMemBatch()
return nil return nil
} }
@ -425,6 +446,39 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
node: node.Val, node: node.Val,
path: append(append([]byte(nil), req.path...), key...), path: append(append([]byte(nil), req.path...), key...),
}} }}
// Mark all internal nodes between shortNode and its **on-disk**
// child as invalid. This is essential in the case of path mode
// scheme; otherwise, state healing might overwrite existing child
// nodes silently while leaving a dangling parent node within the
// range of this internal path on disk. This would break the
// guarantee for state healing.
//
// While it's possible for this shortNode to overwrite a previously
// existing full node, the other branches of the fullNode can be
// retained as they remain untouched and complete.
//
// This step is only necessary for path mode, as there is no deletion
// in hash mode at all.
if _, ok := node.Val.(hashNode); ok && s.scheme == rawdb.PathScheme {
owner, inner := ResolvePath(req.path)
for i := 1; i < len(key); i++ {
// While checking for a non-existent item in Pebble can be less efficient
// without a bloom filter, the relatively low frequency of lookups makes
// the performance impact negligible.
var exists bool
if owner == (common.Hash{}) {
exists = rawdb.ExistsAccountTrieNode(s.database, append(inner, key[:i]...))
} else {
exists = rawdb.ExistsStorageTrieNode(s.database, owner, append(inner, key[:i]...))
}
if exists {
req.deletes = append(req.deletes, key[:i])
deletionGauge.Inc(1)
log.Debug("Detected dangling node", "owner", owner, "path", append(inner, key[:i]...))
}
}
lookupGauge.Inc(int64(len(key) - 1))
}
case *fullNode: case *fullNode:
for i := 0; i < 17; i++ { for i := 0; i < 17; i++ {
if node.Children[i] != nil { if node.Children[i] != nil {
@ -509,10 +563,19 @@ func (s *Sync) commitNodeRequest(req *nodeRequest) error {
// Write the node content to the membatch // Write the node content to the membatch
s.membatch.nodes[string(req.path)] = req.data s.membatch.nodes[string(req.path)] = req.data
s.membatch.hashes[string(req.path)] = req.hash s.membatch.hashes[string(req.path)] = req.hash
// The size tracking refers to the db-batch, not the in-memory data. // The size tracking refers to the db-batch, not the in-memory data.
// Therefore, we ignore the req.path, and account only for the hash+data if s.scheme == rawdb.PathScheme {
// which eventually is written to db. s.membatch.size += uint64(len(req.path) + len(req.data))
s.membatch.size += common.HashLength + uint64(len(req.data)) } else {
s.membatch.size += common.HashLength + uint64(len(req.data))
}
// Delete the internal nodes which are marked as invalid
for _, segment := range req.deletes {
path := append(req.path, segment...)
s.membatch.deletes[string(path)] = struct{}{}
s.membatch.size += uint64(len(path))
}
delete(s.nodeReqs, string(req.path)) delete(s.nodeReqs, string(req.path))
s.fetches[len(req.path)]-- s.fetches[len(req.path)]--
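
The deletion bookkeeping boils down to enumerating every strictly intermediate path between a short node and its on-disk child: for a short node at parentPath with compact key k, the candidates are parentPath+k[:1] through parentPath+k[:len(k)-1]. A pure-function sketch of that enumeration (names assumed):

// danglingCandidates lists the internal paths that may hold stale nodes
// when a short node at parentPath with the given key is committed in path
// mode; each returned path must be probed and, if present, deleted.
func danglingCandidates(parentPath, key []byte) [][]byte {
	candidates := make([][]byte, 0, len(key)-1)
	for i := 1; i < len(key); i++ {
		path := append(append([]byte(nil), parentPath...), key[:i]...)
		candidates = append(candidates, path)
	}
	return candidates
}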


@ -70,31 +70,53 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[str
// checkTrieContents cross references a reconstructed trie with an expected data // checkTrieContents cross references a reconstructed trie with an expected data
// content map. // content map.
func checkTrieContents(t *testing.T, db ethdb.Database, scheme string, root []byte, content map[string][]byte) { func checkTrieContents(t *testing.T, db ethdb.Database, scheme string, root []byte, content map[string][]byte, rawTrie bool) {
// Check root availability and trie contents // Check root availability and trie contents
ndb := newTestDatabase(db, scheme) ndb := newTestDatabase(db, scheme)
trie, err := NewStateTrie(TrieID(common.BytesToHash(root)), ndb) if err := checkTrieConsistency(db, scheme, common.BytesToHash(root), rawTrie); err != nil {
if err != nil {
t.Fatalf("failed to create trie at %x: %v", root, err)
}
if err := checkTrieConsistency(db, scheme, common.BytesToHash(root)); err != nil {
t.Fatalf("inconsistent trie at %x: %v", root, err) t.Fatalf("inconsistent trie at %x: %v", root, err)
} }
type reader interface {
MustGet(key []byte) []byte
}
var r reader
if rawTrie {
trie, err := New(TrieID(common.BytesToHash(root)), ndb)
if err != nil {
t.Fatalf("failed to create trie at %x: %v", root, err)
}
r = trie
} else {
trie, err := NewStateTrie(TrieID(common.BytesToHash(root)), ndb)
if err != nil {
t.Fatalf("failed to create trie at %x: %v", root, err)
}
r = trie
}
for key, val := range content { for key, val := range content {
if have := trie.MustGet([]byte(key)); !bytes.Equal(have, val) { if have := r.MustGet([]byte(key)); !bytes.Equal(have, val) {
t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val) t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
} }
} }
} }
// checkTrieConsistency checks that all nodes in a trie are indeed present. // checkTrieConsistency checks that all nodes in a trie are indeed present.
func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash) error { func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash, rawTrie bool) error {
ndb := newTestDatabase(db, scheme) ndb := newTestDatabase(db, scheme)
trie, err := NewStateTrie(TrieID(root), ndb) var it NodeIterator
if err != nil { if rawTrie {
return nil // Consider a non existent state consistent trie, err := New(TrieID(root), ndb)
if err != nil {
return nil // Consider a non existent state consistent
}
it = trie.MustNodeIterator(nil)
} else {
trie, err := NewStateTrie(TrieID(root), ndb)
if err != nil {
return nil // Consider a non existent state consistent
}
it = trie.MustNodeIterator(nil)
} }
it := trie.MustNodeIterator(nil)
for it.Next(true) { for it.Next(true) {
} }
return it.Error() return it.Error()
@ -205,7 +227,7 @@ func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) {
} }
} }
// Cross check that the two tries are in sync // Cross check that the two tries are in sync
checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
} }
// Tests that the trie scheduler can correctly reconstruct the state even if only // Tests that the trie scheduler can correctly reconstruct the state even if only
@ -271,7 +293,7 @@ func testIterativeDelayedSync(t *testing.T, scheme string) {
} }
} }
// Cross check that the two tries are in sync // Cross check that the two tries are in sync
checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
} }
// Tests that given a root hash, a trie can sync iteratively on a single thread, // Tests that given a root hash, a trie can sync iteratively on a single thread,
@ -341,7 +363,7 @@ func testIterativeRandomSync(t *testing.T, count int, scheme string) {
} }
} }
// Cross check that the two tries are in sync // Cross check that the two tries are in sync
checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
} }
// Tests that the trie scheduler can correctly reconstruct the state even if only // Tests that the trie scheduler can correctly reconstruct the state even if only
@ -413,7 +435,7 @@ func testIterativeRandomDelayedSync(t *testing.T, scheme string) {
} }
} }
// Cross check that the two tries are in sync // Cross check that the two tries are in sync
checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
} }
// Tests that a trie sync will not request nodes multiple times, even if they // Tests that a trie sync will not request nodes multiple times, even if they
@ -484,7 +506,7 @@ func testDuplicateAvoidanceSync(t *testing.T, scheme string) {
} }
} }
// Cross check that the two tries are in sync // Cross check that the two tries are in sync
checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
} }
// Tests that at any point in time during a sync, only complete sub-tries are in // Tests that at any point in time during a sync, only complete sub-tries are in
@ -569,7 +591,7 @@ func testIncompleteSync(t *testing.T, scheme string) {
nodeHash := addedHashes[i] nodeHash := addedHashes[i]
value := rawdb.ReadTrieNode(diskdb, owner, inner, nodeHash, scheme) value := rawdb.ReadTrieNode(diskdb, owner, inner, nodeHash, scheme)
rawdb.DeleteTrieNode(diskdb, owner, inner, nodeHash, scheme) rawdb.DeleteTrieNode(diskdb, owner, inner, nodeHash, scheme)
if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root); err == nil { if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root, false); err == nil {
t.Fatalf("trie inconsistency not caught, missing: %x", path) t.Fatalf("trie inconsistency not caught, missing: %x", path)
} }
rawdb.WriteTrieNode(diskdb, owner, inner, nodeHash, value, scheme) rawdb.WriteTrieNode(diskdb, owner, inner, nodeHash, value, scheme)
@ -643,7 +665,7 @@ func testSyncOrdering(t *testing.T, scheme string) {
} }
} }
// Cross check that the two tries are in sync // Cross check that the two tries are in sync
checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
// Check that the trie nodes have been requested path-ordered // Check that the trie nodes have been requested path-ordered
for i := 0; i < len(reqs)-1; i++ { for i := 0; i < len(reqs)-1; i++ {
@ -664,7 +686,7 @@ func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database
// The code requests are ignored here since there is no code // The code requests are ignored here since there is no code
// at the testing trie. // at the testing trie.
paths, nodes, _ := sched.Missing(1) paths, nodes, _ := sched.Missing(0)
var elements []trieElement var elements []trieElement
for i := 0; i < len(paths); i++ { for i := 0; i < len(paths); i++ {
elements = append(elements, trieElement{ elements = append(elements, trieElement{
@ -698,7 +720,7 @@ func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database
} }
batch.Write() batch.Write()
paths, nodes, _ = sched.Missing(1) paths, nodes, _ = sched.Missing(0)
elements = elements[:0] elements = elements[:0]
for i := 0; i < len(paths); i++ { for i := 0; i < len(paths); i++ {
elements = append(elements, trieElement{ elements = append(elements, trieElement{
@ -724,7 +746,7 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
// Create a destination trie and sync with the scheduler // Create a destination trie and sync with the scheduler
diskdb := rawdb.NewMemoryDatabase() diskdb := rawdb.NewMemoryDatabase()
syncWith(t, srcTrie.Hash(), diskdb, srcDb) syncWith(t, srcTrie.Hash(), diskdb, srcDb)
checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
// Push more modifications into the src trie, to see if dest trie can still // Push more modifications into the src trie, to see if dest trie can still
// sync with it(overwrite stale states) // sync with it(overwrite stale states)
@ -748,7 +770,7 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
srcTrie, _ = NewStateTrie(TrieID(root), srcDb) srcTrie, _ = NewStateTrie(TrieID(root), srcDb)
syncWith(t, srcTrie.Hash(), diskdb, srcDb) syncWith(t, srcTrie.Hash(), diskdb, srcDb)
checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff) checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff, false)
// Revert added modifications from the src trie, to see if dest trie can still // Revert added modifications from the src trie, to see if dest trie can still
// sync with it(overwrite reverted states) // sync with it(overwrite reverted states)
@ -772,5 +794,98 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
srcTrie, _ = NewStateTrie(TrieID(root), srcDb) srcTrie, _ = NewStateTrie(TrieID(root), srcDb)
syncWith(t, srcTrie.Hash(), diskdb, srcDb) syncWith(t, srcTrie.Hash(), diskdb, srcDb)
checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted) checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted, false)
}
// Tests that the state syncer can correctly catch up with a pivot move.
func TestPivotMove(t *testing.T) {
testPivotMove(t, rawdb.HashScheme, true)
testPivotMove(t, rawdb.HashScheme, false)
testPivotMove(t, rawdb.PathScheme, true)
testPivotMove(t, rawdb.PathScheme, false)
}
func testPivotMove(t *testing.T, scheme string, tiny bool) {
var (
srcDisk = rawdb.NewMemoryDatabase()
srcTrieDB = newTestDatabase(srcDisk, scheme)
srcTrie, _ = New(TrieID(types.EmptyRootHash), srcTrieDB)
deleteFn = func(key []byte, tr *Trie, states map[string][]byte) {
tr.Delete(key)
delete(states, string(key))
}
writeFn = func(key []byte, val []byte, tr *Trie, states map[string][]byte) {
if val == nil {
if tiny {
val = randBytes(4)
} else {
val = randBytes(32)
}
}
tr.Update(key, val)
states[string(key)] = common.CopyBytes(val)
}
copyStates = func(states map[string][]byte) map[string][]byte {
cpy := make(map[string][]byte)
for k, v := range states {
cpy[k] = v
}
return cpy
}
)
stateA := make(map[string][]byte)
writeFn([]byte{0x01, 0x23}, nil, srcTrie, stateA)
writeFn([]byte{0x01, 0x24}, nil, srcTrie, stateA)
writeFn([]byte{0x12, 0x33}, nil, srcTrie, stateA)
writeFn([]byte{0x12, 0x34}, nil, srcTrie, stateA)
writeFn([]byte{0x02, 0x34}, nil, srcTrie, stateA)
writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateA)
rootA, nodesA, _ := srcTrie.Commit(false)
if err := srcTrieDB.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil); err != nil {
panic(err)
}
if err := srcTrieDB.Commit(rootA, false); err != nil {
panic(err)
}
// Create a destination trie and sync with the scheduler
destDisk := rawdb.NewMemoryDatabase()
syncWith(t, rootA, destDisk, srcTrieDB)
checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateA, true)
// Delete element to collapse trie
stateB := copyStates(stateA)
srcTrie, _ = New(TrieID(rootA), srcTrieDB)
deleteFn([]byte{0x02, 0x34}, srcTrie, stateB)
deleteFn([]byte{0x13, 0x44}, srcTrie, stateB)
writeFn([]byte{0x01, 0x24}, nil, srcTrie, stateB)
rootB, nodesB, _ := srcTrie.Commit(false)
if err := srcTrieDB.Update(rootB, rootA, 0, trienode.NewWithNodeSet(nodesB), nil); err != nil {
panic(err)
}
if err := srcTrieDB.Commit(rootB, false); err != nil {
panic(err)
}
syncWith(t, rootB, destDisk, srcTrieDB)
checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateB, true)
// Add elements to expand trie
stateC := copyStates(stateB)
srcTrie, _ = New(TrieID(rootB), srcTrieDB)
writeFn([]byte{0x01, 0x24}, stateA[string([]byte{0x01, 0x24})], srcTrie, stateC)
writeFn([]byte{0x02, 0x34}, nil, srcTrie, stateC)
writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateC)
rootC, nodesC, _ := srcTrie.Commit(false)
if err := srcTrieDB.Update(rootC, rootB, 0, trienode.NewWithNodeSet(nodesC), nil); err != nil {
panic(err)
}
if err := srcTrieDB.Commit(rootC, false); err != nil {
panic(err)
}
syncWith(t, rootC, destDisk, srcTrieDB)
checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateC, true)
} }


@ -128,7 +128,8 @@ type Database struct {
// readOnly is the flag whether the mutation is allowed to be applied. // readOnly is the flag whether the mutation is allowed to be applied.
// It will be set automatically when the database is journaled during // It will be set automatically when the database is journaled during
// the shutdown to reject all following unexpected mutations. // the shutdown to reject all following unexpected mutations.
readOnly bool // Indicator if database is opened in read only mode readOnly bool // Flag if database is opened in read only mode
waitSync bool // Flag if database is deactivated due to initial state sync
bufferSize int // Memory allowance (in bytes) for caching dirty nodes bufferSize int // Memory allowance (in bytes) for caching dirty nodes
config *Config // Configuration for database config *Config // Configuration for database
diskdb ethdb.Database // Persistent storage for matured trie nodes diskdb ethdb.Database // Persistent storage for matured trie nodes
@ -179,6 +180,12 @@ func New(diskdb ethdb.Database, config *Config) *Database {
log.Warn("Truncated extra state histories", "number", pruned) log.Warn("Truncated extra state histories", "number", pruned)
} }
} }
// Disable database in case node is still in the initial state sync stage.
if rawdb.ReadSnapSyncStatusFlag(diskdb) == rawdb.StateSyncRunning && !db.readOnly {
if err := db.Disable(); err != nil {
log.Crit("Failed to disable database", "err", err) // impossible to happen
}
}
log.Warn("Path-based state scheme is an experimental feature") log.Warn("Path-based state scheme is an experimental feature")
return db return db
} }
@ -204,9 +211,9 @@ func (db *Database) Update(root common.Hash, parentRoot common.Hash, block uint6
db.lock.Lock() db.lock.Lock()
defer db.lock.Unlock() defer db.lock.Unlock()
// Short circuit if the database is in read only mode. // Short circuit if the mutation is not allowed.
if db.readOnly { if err := db.modifyAllowed(); err != nil {
return errSnapshotReadOnly return err
} }
if err := db.tree.add(root, parentRoot, block, nodes, states); err != nil { if err := db.tree.add(root, parentRoot, block, nodes, states); err != nil {
return err return err
@ -227,45 +234,59 @@ func (db *Database) Commit(root common.Hash, report bool) error {
db.lock.Lock() db.lock.Lock()
defer db.lock.Unlock() defer db.lock.Unlock()
// Short circuit if the database is in read only mode. // Short circuit if the mutation is not allowed.
if db.readOnly { if err := db.modifyAllowed(); err != nil {
return errSnapshotReadOnly return err
} }
return db.tree.cap(root, 0) return db.tree.cap(root, 0)
} }
// Reset rebuilds the database with the specified state as the base. // Disable deactivates the database and invalidates all available state layers
// // as stale to prevent access to the persistent state, which is in the syncing
// - if target state is empty, clear the stored state and all layers on top // stage.
// - if target state is non-empty, ensure the stored state matches with it func (db *Database) Disable() error {
// and clear all other layers on top.
func (db *Database) Reset(root common.Hash) error {
db.lock.Lock() db.lock.Lock()
defer db.lock.Unlock() defer db.lock.Unlock()
// Short circuit if the database is in read only mode. // Short circuit if the database is in read only mode.
if db.readOnly { if db.readOnly {
return errSnapshotReadOnly return errDatabaseReadOnly
} }
batch := db.diskdb.NewBatch() // Prevent duplicated disable operation.
root = types.TrieRootHash(root) if db.waitSync {
if root == types.EmptyRootHash { log.Error("Reject duplicated disable operation")
// Empty state is requested as the target, nuke out return nil
// the root node and leave all others as dangling.
rawdb.DeleteAccountTrieNode(batch, nil)
} else {
// Ensure the requested state is existent before any
// action is applied.
_, hash := rawdb.ReadAccountTrieNode(db.diskdb, nil)
if hash != root {
return fmt.Errorf("state is mismatched, local: %x, target: %x", hash, root)
}
} }
// Mark the disk layer as stale before applying any mutation. db.waitSync = true
// Mark the disk layer as stale to prevent access to persistent state.
db.tree.bottom().markStale() db.tree.bottom().markStale()
// Write the initial sync flag to persist it across restarts.
rawdb.WriteSnapSyncStatusFlag(db.diskdb, rawdb.StateSyncRunning)
log.Info("Disabled trie database due to state sync")
return nil
}
// Enable activates database and resets the state tree with the provided persistent
// state root once the state sync is finished.
func (db *Database) Enable(root common.Hash) error {
db.lock.Lock()
defer db.lock.Unlock()
// Short circuit if the database is in read only mode.
if db.readOnly {
return errDatabaseReadOnly
}
// Ensure the provided state root matches the stored one.
root = types.TrieRootHash(root)
_, stored := rawdb.ReadAccountTrieNode(db.diskdb, nil)
if stored != root {
return fmt.Errorf("state root mismatch: stored %x, synced %x", stored, root)
}
// Drop the stale state journal in persistent database and // Drop the stale state journal in persistent database and
// reset the persistent state id back to zero. // reset the persistent state id back to zero.
batch := db.diskdb.NewBatch()
rawdb.DeleteTrieJournal(batch) rawdb.DeleteTrieJournal(batch)
rawdb.WritePersistentStateID(batch, 0) rawdb.WritePersistentStateID(batch, 0)
if err := batch.Write(); err != nil { if err := batch.Write(); err != nil {
@ -282,8 +303,11 @@ func (db *Database) Reset(root common.Hash) error {
} }
// Re-construct a new disk layer backed by persistent state // Re-construct a new disk layer backed by persistent state
// with **empty clean cache and node buffer**. // with **empty clean cache and node buffer**.
dl := newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0)) db.tree.reset(newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0)))
db.tree.reset(dl)
// Re-enable the database as the final step.
db.waitSync = false
rawdb.WriteSnapSyncStatusFlag(db.diskdb, rawdb.StateSyncFinished)
log.Info("Rebuilt trie database", "root", root) log.Info("Rebuilt trie database", "root", root)
return nil return nil
} }
@ -296,7 +320,10 @@ func (db *Database) Recover(root common.Hash, loader triestate.TrieLoader) error
defer db.lock.Unlock() defer db.lock.Unlock()
// Short circuit if rollback operation is not supported. // Short circuit if rollback operation is not supported.
if db.readOnly || db.freezer == nil { if err := db.modifyAllowed(); err != nil {
return err
}
if db.freezer == nil {
return errors.New("state rollback is non-supported") return errors.New("state rollback is non-supported")
} }
// Short circuit if the target state is not recoverable. // Short circuit if the target state is not recoverable.
@ -424,3 +451,15 @@ func (db *Database) SetBufferSize(size int) error {
func (db *Database) Scheme() string { func (db *Database) Scheme() string {
return rawdb.PathScheme return rawdb.PathScheme
} }
// modifyAllowed reports whether mutation is allowed. This function
// assumes the db.lock is already held.
func (db *Database) modifyAllowed() error {
if db.readOnly {
return errDatabaseReadOnly
}
if db.waitSync {
return errDatabaseWaitSync
}
return nil
}


@@ -439,38 +439,39 @@ func TestDatabaseRecoverable(t *testing.T) {
 	}
 }
 
-func TestReset(t *testing.T) {
-	var (
-		tester = newTester(t)
-		index  = tester.bottomIndex()
-	)
+func TestDisable(t *testing.T) {
+	tester := newTester(t)
 	defer tester.release()
-	// Reset database to unknown target, should reject it
-	if err := tester.db.Reset(testutil.RandomHash()); err == nil {
-		t.Fatal("Failed to reject invalid reset")
-	}
-	// Reset database to state persisted in the disk
-	if err := tester.db.Reset(types.EmptyRootHash); err != nil {
-		t.Fatalf("Failed to reset database %v", err)
+
+	_, stored := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)
+	if err := tester.db.Disable(); err != nil {
+		t.Fatal("Failed to deactivate database")
+	}
+	if err := tester.db.Enable(types.EmptyRootHash); err == nil {
+		t.Fatalf("Invalid activation should be rejected")
+	}
+	if err := tester.db.Enable(stored); err != nil {
+		t.Fatal("Failed to activate database")
 	}
 	// Ensure journal is deleted from disk
 	if blob := rawdb.ReadTrieJournal(tester.db.diskdb); len(blob) != 0 {
 		t.Fatal("Failed to clean journal")
 	}
 	// Ensure all trie histories are removed
-	for i := 0; i <= index; i++ {
-		_, err := readHistory(tester.db.freezer, uint64(i+1))
-		if err == nil {
-			t.Fatalf("Failed to clean state history, index %d", i+1)
-		}
+	n, err := tester.db.freezer.Ancients()
+	if err != nil {
+		t.Fatal("Failed to clean state history")
+	}
+	if n != 0 {
+		t.Fatal("Failed to clean state history")
 	}
 	// Verify layer tree structure, single disk layer is expected
 	if tester.db.tree.len() != 1 {
 		t.Fatalf("Extra layer kept %d", tester.db.tree.len())
 	}
-	if tester.db.tree.bottom().rootHash() != types.EmptyRootHash {
-		t.Fatalf("Root hash is not matched exp %x got %x", types.EmptyRootHash, tester.db.tree.bottom().rootHash())
+	if tester.db.tree.bottom().rootHash() != stored {
+		t.Fatalf("Root hash is not matched exp %x got %x", stored, tester.db.tree.bottom().rootHash())
 	}
 }
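The rewritten test infers cleanliness from the freezer's item count instead of probing each history index. A hedged sketch of that assertion as a reusable helper, written against a hypothetical minimal slice of the freezer API:

package pathdb_test

import "testing"

// ancientStore is a hypothetical narrow view of the freezer: just
// enough to count how many items survived truncation.
type ancientStore interface {
	Ancients() (uint64, error)
}

// assertHistoriesDropped fails the test unless every state history
// was removed when the database was disabled.
func assertHistoriesDropped(t *testing.T, f ancientStore) {
	t.Helper()
	n, err := f.Ancients()
	if err != nil {
		t.Fatalf("Failed to read ancient count: %v", err)
	}
	if n != 0 {
		t.Fatalf("Failed to clean state history, %d items left", n)
	}
}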

View File

@@ -114,7 +114,7 @@ func (dl *diffLayer) node(owner common.Hash, path []byte, hash common.Hash, dept
 	if n.Hash != hash {
 		dirtyFalseMeter.Mark(1)
 		log.Error("Unexpected trie node in diff layer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
-		return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path)
+		return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path, n.Blob)
 	}
 	dirtyHitMeter.Mark(1)
 	dirtyNodeHitDepthHist.Update(int64(depth))

View File

@@ -150,7 +150,7 @@ func (dl *diskLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]b
 	if nHash != hash {
 		diskFalseMeter.Mark(1)
 		log.Error("Unexpected trie node in disk", "owner", owner, "path", path, "expect", hash, "got", nHash)
-		return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path)
+		return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path, nBlob)
 	}
 	if dl.cleans != nil && len(nBlob) > 0 {
 		dl.cleans.Set(key, nBlob)

View File

@@ -21,12 +21,17 @@ import (
 	"fmt"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
 )
 
 var (
-	// errSnapshotReadOnly is returned if the database is opened in read only mode
-	// and mutation is requested.
-	errSnapshotReadOnly = errors.New("read only")
+	// errDatabaseReadOnly is returned if the database is opened in read only mode
+	// to prevent any mutation.
+	errDatabaseReadOnly = errors.New("read only")
+
+	// errDatabaseWaitSync is returned if the initial state sync is not completed
+	// yet and database is disabled to prevent accessing state.
+	errDatabaseWaitSync = errors.New("waiting for sync")
 
 	// errSnapshotStale is returned from data accessors if the underlying layer
 	// layer had been invalidated due to the chain progressing forward far enough
@@ -46,6 +51,10 @@ var (
 	errUnexpectedNode = errors.New("unexpected node")
 )
 
-func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte) error {
-	return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x", errUnexpectedNode, loc, owner, path, expHash, gotHash)
+func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte, blob []byte) error {
+	blobHex := "nil"
+	if len(blob) > 0 {
+		blobHex = hexutil.Encode(blob)
+	}
+	return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x, blob: %s", errUnexpectedNode, loc, owner, path, expHash, gotHash, blobHex)
 }
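To see what the extra blob parameter buys in practice, the enriched message can be reproduced standalone; this sketch swaps common.Hash and hexutil for standard-library equivalents, so the types and values are illustrative only:

package main

import (
	"encoding/hex"
	"errors"
	"fmt"
)

var errUnexpectedNode = errors.New("unexpected node")

// newUnexpectedNodeError mimics the enriched constructor above: the
// raw node blob (or "nil") is appended to the message to aid debugging.
func newUnexpectedNodeError(loc string, exp, got, owner [32]byte, path, blob []byte) error {
	blobHex := "nil"
	if len(blob) > 0 {
		blobHex = "0x" + hex.EncodeToString(blob)
	}
	return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x, blob: %s",
		errUnexpectedNode, loc, owner, path, exp, got, blobHex)
}

func main() {
	err := newUnexpectedNodeError("disk", [32]byte{1}, [32]byte{2}, [32]byte{}, []byte{0x0b}, []byte{0xde, 0xad})
	fmt.Println(err)                               // ..., blob: 0xdead
	fmt.Println(errors.Is(err, errUnexpectedNode)) // true
}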

View File

@@ -356,7 +356,7 @@ func (db *Database) Journal(root common.Hash) error {
 	// Short circuit if the database is in read only mode.
 	if db.readOnly {
-		return errSnapshotReadOnly
+		return errDatabaseReadOnly
 	}
 	// Firstly write out the metadata of journal
 	journal := new(bytes.Buffer)

View File

@@ -71,7 +71,7 @@ func (b *nodebuffer) node(owner common.Hash, path []byte, hash common.Hash) (*tr
 	if n.Hash != hash {
 		dirtyFalseMeter.Mark(1)
 		log.Error("Unexpected trie node in node buffer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
-		return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path)
+		return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path, n.Blob)
 	}
 	return n, nil
 }