diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 0c4342c49..dbdcd1782 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -199,7 +199,6 @@ func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address, if err != nil { return nil, err } - return stateDB.GetCode(contract), nil } @@ -212,7 +211,6 @@ func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Addres if err != nil { return nil, err } - return stateDB.GetBalance(contract), nil } @@ -225,7 +223,6 @@ func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address, if err != nil { return 0, err } - return stateDB.GetNonce(contract), nil } @@ -238,7 +235,6 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres if err != nil { return nil, err } - val := stateDB.GetState(contract, key) return val[:], nil } @@ -700,8 +696,10 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa } block.AddTxWithChain(b.blockchain, tx) }) - stateDB, _ := b.blockchain.State() - + stateDB, err := b.blockchain.State() + if err != nil { + return err + } b.pendingBlock = blocks[0] b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil) b.pendingReceipts = receipts[0] @@ -821,11 +819,12 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error { blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) { block.OffsetTime(int64(adjustment.Seconds())) }) - stateDB, _ := b.blockchain.State() - + stateDB, err := b.blockchain.State() + if err != nil { + return err + } b.pendingBlock = blocks[0] b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil) - return nil } diff --git a/accounts/keystore/watch.go b/accounts/keystore/watch.go index 3f64b89c5..a9f87e7c3 100644 --- a/accounts/keystore/watch.go +++ b/accounts/keystore/watch.go @@ -20,6 +20,7 @@ package keystore import ( + "os" "time" "github.com/ethereum/go-ethereum/log" @@ -77,7 +78,9 @@ func (w *watcher) loop() { } defer watcher.Close() if err := watcher.Add(w.ac.keydir); err != nil { - logger.Warn("Failed to watch keystore folder", "err", err) + if !os.IsNotExist(err) { + logger.Warn("Failed to watch keystore folder", "err", err) + } return } diff --git a/cmd/devp2p/crawl.go b/cmd/devp2p/crawl.go index 8c0defff6..4288a5feb 100644 --- a/cmd/devp2p/crawl.go +++ b/cmd/devp2p/crawl.go @@ -17,6 +17,7 @@ package main import ( + "errors" "sync" "sync/atomic" "time" @@ -51,7 +52,14 @@ type resolver interface { RequestENR(*enode.Node) (*enode.Node, error) } -func newCrawler(input nodeSet, disc resolver, iters ...enode.Iterator) *crawler { +func newCrawler(input nodeSet, bootnodes []*enode.Node, disc resolver, iters ...enode.Iterator) (*crawler, error) { + if len(input) == 0 { + input.add(bootnodes...) 
+ } + if len(input) == 0 { + return nil, errors.New("no input nodes to start crawling") + } + c := &crawler{ input: input, output: make(nodeSet, len(input)), @@ -67,7 +75,7 @@ func newCrawler(input nodeSet, disc resolver, iters ...enode.Iterator) *crawler for id, n := range input { c.output[id] = n } - return c + return c, nil } func (c *crawler) run(timeout time.Duration, nthreads int) nodeSet { diff --git a/cmd/devp2p/discv4cmd.go b/cmd/devp2p/discv4cmd.go index 0117c7eb8..37b139dea 100644 --- a/cmd/devp2p/discv4cmd.go +++ b/cmd/devp2p/discv4cmd.go @@ -143,7 +143,7 @@ var discoveryNodeFlags = []cli.Flag{ func discv4Ping(ctx *cli.Context) error { n := getNodeArg(ctx) - disc := startV4(ctx) + disc, _ := startV4(ctx) defer disc.Close() start := time.Now() @@ -156,7 +156,7 @@ func discv4Ping(ctx *cli.Context) error { func discv4RequestRecord(ctx *cli.Context) error { n := getNodeArg(ctx) - disc := startV4(ctx) + disc, _ := startV4(ctx) defer disc.Close() respN, err := disc.RequestENR(n) @@ -169,7 +169,7 @@ func discv4RequestRecord(ctx *cli.Context) error { func discv4Resolve(ctx *cli.Context) error { n := getNodeArg(ctx) - disc := startV4(ctx) + disc, _ := startV4(ctx) defer disc.Close() fmt.Println(disc.Resolve(n).String()) @@ -196,10 +196,13 @@ func discv4ResolveJSON(ctx *cli.Context) error { nodeargs = append(nodeargs, n) } - // Run the crawler. - disc := startV4(ctx) + disc, config := startV4(ctx) defer disc.Close() - c := newCrawler(inputSet, disc, enode.IterNodes(nodeargs)) + + c, err := newCrawler(inputSet, config.Bootnodes, disc, enode.IterNodes(nodeargs)) + if err != nil { + return err + } c.revalidateInterval = 0 output := c.run(0, 1) writeNodesJSON(nodesFile, output) @@ -211,14 +214,18 @@ func discv4Crawl(ctx *cli.Context) error { return errors.New("need nodes file as argument") } nodesFile := ctx.Args().First() - var inputSet nodeSet + inputSet := make(nodeSet) if common.FileExist(nodesFile) { inputSet = loadNodesJSON(nodesFile) } - disc := startV4(ctx) + disc, config := startV4(ctx) defer disc.Close() - c := newCrawler(inputSet, disc, disc.RandomNodes()) + + c, err := newCrawler(inputSet, config.Bootnodes, disc, disc.RandomNodes()) + if err != nil { + return err + } c.revalidateInterval = 10 * time.Minute output := c.run(ctx.Duration(crawlTimeoutFlag.Name), ctx.Int(crawlParallelismFlag.Name)) writeNodesJSON(nodesFile, output) @@ -238,14 +245,14 @@ func discv4Test(ctx *cli.Context) error { } // startV4 starts an ephemeral discovery V4 node. 
-func startV4(ctx *cli.Context) *discover.UDPv4 { +func startV4(ctx *cli.Context) (*discover.UDPv4, discover.Config) { ln, config := makeDiscoveryConfig(ctx) socket := listen(ctx, ln) disc, err := discover.ListenV4(socket, ln, config) if err != nil { exit(err) } - return disc + return disc, config } func makeDiscoveryConfig(ctx *cli.Context) (*enode.LocalNode, discover.Config) { diff --git a/cmd/devp2p/discv5cmd.go b/cmd/devp2p/discv5cmd.go index c5e226f0d..0dac94526 100644 --- a/cmd/devp2p/discv5cmd.go +++ b/cmd/devp2p/discv5cmd.go @@ -81,7 +81,7 @@ var ( func discv5Ping(ctx *cli.Context) error { n := getNodeArg(ctx) - disc := startV5(ctx) + disc, _ := startV5(ctx) defer disc.Close() fmt.Println(disc.Ping(n)) @@ -90,7 +90,7 @@ func discv5Ping(ctx *cli.Context) error { func discv5Resolve(ctx *cli.Context) error { n := getNodeArg(ctx) - disc := startV5(ctx) + disc, _ := startV5(ctx) defer disc.Close() fmt.Println(disc.Resolve(n)) @@ -102,14 +102,18 @@ func discv5Crawl(ctx *cli.Context) error { return errors.New("need nodes file as argument") } nodesFile := ctx.Args().First() - var inputSet nodeSet + inputSet := make(nodeSet) if common.FileExist(nodesFile) { inputSet = loadNodesJSON(nodesFile) } - disc := startV5(ctx) + disc, config := startV5(ctx) defer disc.Close() - c := newCrawler(inputSet, disc, disc.RandomNodes()) + + c, err := newCrawler(inputSet, config.Bootnodes, disc, disc.RandomNodes()) + if err != nil { + return err + } c.revalidateInterval = 10 * time.Minute output := c.run(ctx.Duration(crawlTimeoutFlag.Name), ctx.Int(crawlParallelismFlag.Name)) writeNodesJSON(nodesFile, output) @@ -127,7 +131,7 @@ func discv5Test(ctx *cli.Context) error { } func discv5Listen(ctx *cli.Context) error { - disc := startV5(ctx) + disc, _ := startV5(ctx) defer disc.Close() fmt.Println(disc.Self()) @@ -135,12 +139,12 @@ func discv5Listen(ctx *cli.Context) error { } // startV5 starts an ephemeral discovery v5 node. 
-func startV5(ctx *cli.Context) *discover.UDPv5 { +func startV5(ctx *cli.Context) (*discover.UDPv5, discover.Config) { ln, config := makeDiscoveryConfig(ctx) socket := listen(ctx, ln) disc, err := discover.ListenV5(socket, ln, config) if err != nil { exit(err) } - return disc + return disc, config } diff --git a/cmd/devp2p/internal/ethtest/suite_test.go b/cmd/devp2p/internal/ethtest/suite_test.go index c5bcc3db1..7890c3134 100644 --- a/cmd/devp2p/internal/ethtest/suite_test.go +++ b/cmd/devp2p/internal/ethtest/suite_test.go @@ -120,6 +120,7 @@ func setupGeth(stack *node.Node) error { if err != nil { return err } + backend.SetSynced() _, err = backend.BlockChain().InsertChain(chain.blocks[1:]) return err diff --git a/cmd/evm/blockrunner.go b/cmd/evm/blockrunner.go index 0be5f6971..6612680dc 100644 --- a/cmd/evm/blockrunner.go +++ b/cmd/evm/blockrunner.go @@ -25,7 +25,6 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers/logger" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/tests" "github.com/urfave/cli/v2" ) @@ -41,10 +40,7 @@ func blockTestCmd(ctx *cli.Context) error { if len(ctx.Args().First()) == 0 { return errors.New("path-to-test argument required") } - // Configure the go-ethereum logger - glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) - glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) - log.Root().SetHandler(glogger) + var tracer vm.EVMLogger // Configure the EVM logger if ctx.Bool(MachineFlag.Name) { diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 024be62b9..1f6500b78 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -23,107 +23,116 @@ import ( "os" "github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool" + "github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/flags" "github.com/urfave/cli/v2" ) var ( DebugFlag = &cli.BoolFlag{ - Name: "debug", - Usage: "output full trace logs", - } - MemProfileFlag = &cli.StringFlag{ - Name: "memprofile", - Usage: "creates a memory profile at the given path", - } - CPUProfileFlag = &cli.StringFlag{ - Name: "cpuprofile", - Usage: "creates a CPU profile at the given path", + Name: "debug", + Usage: "output full trace logs", + Category: flags.VMCategory, } StatDumpFlag = &cli.BoolFlag{ - Name: "statdump", - Usage: "displays stack and heap memory information", + Name: "statdump", + Usage: "displays stack and heap memory information", + Category: flags.VMCategory, } CodeFlag = &cli.StringFlag{ - Name: "code", - Usage: "EVM code", + Name: "code", + Usage: "EVM code", + Category: flags.VMCategory, } CodeFileFlag = &cli.StringFlag{ - Name: "codefile", - Usage: "File containing EVM code. If '-' is specified, code is read from stdin ", + Name: "codefile", + Usage: "File containing EVM code. 
If '-' is specified, code is read from stdin ", + Category: flags.VMCategory, } GasFlag = &cli.Uint64Flag{ - Name: "gas", - Usage: "gas limit for the evm", - Value: 10000000000, + Name: "gas", + Usage: "gas limit for the evm", + Value: 10000000000, + Category: flags.VMCategory, } PriceFlag = &flags.BigFlag{ - Name: "price", - Usage: "price set for the evm", - Value: new(big.Int), + Name: "price", + Usage: "price set for the evm", + Value: new(big.Int), + Category: flags.VMCategory, } ValueFlag = &flags.BigFlag{ - Name: "value", - Usage: "value set for the evm", - Value: new(big.Int), + Name: "value", + Usage: "value set for the evm", + Value: new(big.Int), + Category: flags.VMCategory, } DumpFlag = &cli.BoolFlag{ - Name: "dump", - Usage: "dumps the state after the run", + Name: "dump", + Usage: "dumps the state after the run", + Category: flags.VMCategory, } InputFlag = &cli.StringFlag{ - Name: "input", - Usage: "input for the EVM", + Name: "input", + Usage: "input for the EVM", + Category: flags.VMCategory, } InputFileFlag = &cli.StringFlag{ - Name: "inputfile", - Usage: "file containing input for the EVM", - } - VerbosityFlag = &cli.IntFlag{ - Name: "verbosity", - Usage: "sets the verbosity level", + Name: "inputfile", + Usage: "file containing input for the EVM", + Category: flags.VMCategory, } BenchFlag = &cli.BoolFlag{ - Name: "bench", - Usage: "benchmark the execution", + Name: "bench", + Usage: "benchmark the execution", + Category: flags.VMCategory, } CreateFlag = &cli.BoolFlag{ - Name: "create", - Usage: "indicates the action should be create rather than call", + Name: "create", + Usage: "indicates the action should be create rather than call", + Category: flags.VMCategory, } GenesisFlag = &cli.StringFlag{ - Name: "prestate", - Usage: "JSON file with prestate (genesis) config", + Name: "prestate", + Usage: "JSON file with prestate (genesis) config", + Category: flags.VMCategory, } MachineFlag = &cli.BoolFlag{ - Name: "json", - Usage: "output trace logs in machine readable format (json)", + Name: "json", + Usage: "output trace logs in machine readable format (json)", + Category: flags.VMCategory, } SenderFlag = &cli.StringFlag{ - Name: "sender", - Usage: "The transaction origin", + Name: "sender", + Usage: "The transaction origin", + Category: flags.VMCategory, } ReceiverFlag = &cli.StringFlag{ - Name: "receiver", - Usage: "The transaction receiver (execution context)", + Name: "receiver", + Usage: "The transaction receiver (execution context)", + Category: flags.VMCategory, } DisableMemoryFlag = &cli.BoolFlag{ - Name: "nomemory", - Value: true, - Usage: "disable memory output", + Name: "nomemory", + Value: true, + Usage: "disable memory output", + Category: flags.VMCategory, } DisableStackFlag = &cli.BoolFlag{ - Name: "nostack", - Usage: "disable stack output", + Name: "nostack", + Usage: "disable stack output", + Category: flags.VMCategory, } DisableStorageFlag = &cli.BoolFlag{ - Name: "nostorage", - Usage: "disable storage output", + Name: "nostorage", + Usage: "disable storage output", + Category: flags.VMCategory, } DisableReturnDataFlag = &cli.BoolFlag{ - Name: "noreturndata", - Value: true, - Usage: "enable return data output", + Name: "noreturndata", + Value: true, + Usage: "enable return data output", + Category: flags.VMCategory, } ) @@ -183,34 +192,38 @@ var blockBuilderCommand = &cli.Command{ }, } +// vmFlags contains flags related to running the EVM. 
+var vmFlags = []cli.Flag{ + CodeFlag, + CodeFileFlag, + CreateFlag, + GasFlag, + PriceFlag, + ValueFlag, + InputFlag, + InputFileFlag, + GenesisFlag, + SenderFlag, + ReceiverFlag, +} + +// traceFlags contains flags that configure tracing output. +var traceFlags = []cli.Flag{ + BenchFlag, + DebugFlag, + DumpFlag, + MachineFlag, + StatDumpFlag, + DisableMemoryFlag, + DisableStackFlag, + DisableStorageFlag, + DisableReturnDataFlag, +} + var app = flags.NewApp("the evm command line interface") func init() { - app.Flags = []cli.Flag{ - BenchFlag, - CreateFlag, - DebugFlag, - VerbosityFlag, - CodeFlag, - CodeFileFlag, - GasFlag, - PriceFlag, - ValueFlag, - DumpFlag, - InputFlag, - InputFileFlag, - MemProfileFlag, - CPUProfileFlag, - StatDumpFlag, - GenesisFlag, - MachineFlag, - SenderFlag, - ReceiverFlag, - DisableMemoryFlag, - DisableStackFlag, - DisableStorageFlag, - DisableReturnDataFlag, - } + app.Flags = flags.Merge(vmFlags, traceFlags, debug.Flags) app.Commands = []*cli.Command{ compileCommand, disasmCommand, @@ -221,6 +234,14 @@ func init() { transactionCommand, blockBuilderCommand, } + app.Before = func(ctx *cli.Context) error { + flags.MigrateGlobalFlags(ctx) + return debug.Setup(ctx) + } + app.After = func(ctx *cli.Context) error { + debug.Exit() + return nil + } } func main() { diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index ac8432bad..017388efb 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -24,7 +24,6 @@ import ( "math/big" "os" goruntime "runtime" - "runtime/pprof" "testing" "time" @@ -34,12 +33,10 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm/runtime" "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/internal/flags" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/triedb/hashdb" @@ -52,6 +49,7 @@ var runCommand = &cli.Command{ Usage: "run arbitrary evm binary", ArgsUsage: "", Description: `The run command runs arbitrary EVM code.`, + Flags: flags.Merge(vmFlags, traceFlags), } // readGenesis will read the given JSON format genesis file and return @@ -109,9 +107,6 @@ func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []by } func runCmd(ctx *cli.Context) error { - glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) - glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) - log.Root().SetHandler(glogger) logconfig := &logger.Config{ EnableMemory: !ctx.Bool(DisableMemoryFlag.Name), DisableStack: ctx.Bool(DisableStackFlag.Name), @@ -121,15 +116,14 @@ func runCmd(ctx *cli.Context) error { } var ( - tracer vm.EVMLogger - debugLogger *logger.StructLogger - statedb *state.StateDB - chainConfig *params.ChainConfig - sender = common.BytesToAddress([]byte("sender")) - receiver = common.BytesToAddress([]byte("receiver")) - genesisConfig *core.Genesis - preimages = ctx.Bool(DumpFlag.Name) - blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests + tracer vm.EVMLogger + debugLogger *logger.StructLogger + statedb *state.StateDB + chainConfig *params.ChainConfig + sender = common.BytesToAddress([]byte("sender")) + receiver = common.BytesToAddress([]byte("receiver")) + preimages = ctx.Bool(DumpFlag.Name) + blobHashes 
[]common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests ) if ctx.Bool(MachineFlag.Name) { tracer = logger.NewJSONLogger(logconfig, os.Stdout) @@ -139,30 +133,30 @@ func runCmd(ctx *cli.Context) error { } else { debugLogger = logger.NewStructLogger(logconfig) } + + initialGas := ctx.Uint64(GasFlag.Name) + genesisConfig := new(core.Genesis) + genesisConfig.GasLimit = initialGas if ctx.String(GenesisFlag.Name) != "" { - gen := readGenesis(ctx.String(GenesisFlag.Name)) - genesisConfig = gen - db := rawdb.NewMemoryDatabase() - triedb := trie.NewDatabase(db, &trie.Config{ - Preimages: preimages, - HashDB: hashdb.Defaults, - }) - defer triedb.Close() - genesis := gen.MustCommit(db, triedb) - sdb := state.NewDatabaseWithNodeDB(db, triedb) - statedb, _ = state.New(genesis.Root(), sdb, nil) - chainConfig = gen.Config + genesisConfig = readGenesis(ctx.String(GenesisFlag.Name)) + if genesisConfig.GasLimit != 0 { + initialGas = genesisConfig.GasLimit + } } else { - db := rawdb.NewMemoryDatabase() - triedb := trie.NewDatabase(db, &trie.Config{ - Preimages: preimages, - HashDB: hashdb.Defaults, - }) - defer triedb.Close() - sdb := state.NewDatabaseWithNodeDB(db, triedb) - statedb, _ = state.New(types.EmptyRootHash, sdb, nil) - genesisConfig = new(core.Genesis) + genesisConfig.Config = params.AllEthashProtocolChanges } + + db := rawdb.NewMemoryDatabase() + triedb := trie.NewDatabase(db, &trie.Config{ + Preimages: preimages, + HashDB: hashdb.Defaults, + }) + defer triedb.Close() + genesis := genesisConfig.MustCommit(db, triedb) + sdb := state.NewDatabaseWithNodeDB(db, triedb) + statedb, _ = state.New(genesis.Root(), sdb, nil) + chainConfig = genesisConfig.Config + if ctx.String(SenderFlag.Name) != "" { sender = common.HexToAddress(ctx.String(SenderFlag.Name)) } @@ -216,10 +210,6 @@ func runCmd(ctx *cli.Context) error { } code = common.Hex2Bytes(bin) } - initialGas := ctx.Uint64(GasFlag.Name) - if genesisConfig.GasLimit != 0 { - initialGas = genesisConfig.GasLimit - } runtimeConfig := runtime.Config{ Origin: sender, State: statedb, @@ -236,19 +226,6 @@ func runCmd(ctx *cli.Context) error { }, } - if cpuProfilePath := ctx.String(CPUProfileFlag.Name); cpuProfilePath != "" { - f, err := os.Create(cpuProfilePath) - if err != nil { - fmt.Println("could not create CPU profile: ", err) - os.Exit(1) - } - if err := pprof.StartCPUProfile(f); err != nil { - fmt.Println("could not start CPU profile: ", err) - os.Exit(1) - } - defer pprof.StopCPUProfile() - } - if chainConfig != nil { runtimeConfig.ChainConfig = chainConfig } else { @@ -296,19 +273,6 @@ func runCmd(ctx *cli.Context) error { fmt.Println(string(statedb.Dump(nil))) } - if memProfilePath := ctx.String(MemProfileFlag.Name); memProfilePath != "" { - f, err := os.Create(memProfilePath) - if err != nil { - fmt.Println("could not create memory profile: ", err) - os.Exit(1) - } - if err := pprof.WriteHeapProfile(f); err != nil { - fmt.Println("could not write memory profile: ", err) - os.Exit(1) - } - f.Close() - } - if ctx.Bool(DebugFlag.Name) { if debugLogger != nil { fmt.Fprintln(os.Stderr, "#### TRACE ####") diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index 85931f040..8a07fccdf 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -28,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers/logger" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/tests" 
"github.com/urfave/cli/v2" ) @@ -52,11 +51,6 @@ type StatetestResult struct { } func stateTestCmd(ctx *cli.Context) error { - // Configure the go-ethereum logger - glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) - glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) - log.Root().SetHandler(glogger) - // Configure the EVM logger config := &logger.Config{ EnableMemory: !ctx.Bool(DisableMemoryFlag.Name), diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index fad2c71e6..aebcc29eb 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -50,8 +50,7 @@ var ( ArgsUsage: "", Flags: flags.Merge([]cli.Flag{ utils.CachePreimagesFlag, - utils.StateSchemeFlag, - }, utils.DatabasePathFlags), + }, utils.DatabaseFlags), Description: ` The init command initializes a new genesis block and definition for the network. This is a destructive action and changes the network in which you will be @@ -97,9 +96,8 @@ if one is set. Otherwise it prints the genesis from the datadir.`, utils.MetricsInfluxDBOrganizationFlag, utils.TxLookupLimitFlag, utils.TransactionHistoryFlag, - utils.StateSchemeFlag, utils.StateHistoryFlag, - }, utils.DatabasePathFlags), + }, utils.DatabaseFlags), Description: ` The import command imports blocks from an RLP-encoded form. The form can be one file with several RLP-encoded blocks, or several files can be used. @@ -115,8 +113,7 @@ processing will proceed even if an individual RLP-file import failure occurs.`, Flags: flags.Merge([]cli.Flag{ utils.CacheFlag, utils.SyncModeFlag, - utils.StateSchemeFlag, - }, utils.DatabasePathFlags), + }, utils.DatabaseFlags), Description: ` Requires a first argument of the file to write to. Optional second and third arguments control the first and @@ -132,7 +129,7 @@ be gzipped.`, Flags: flags.Merge([]cli.Flag{ utils.CacheFlag, utils.SyncModeFlag, - }, utils.DatabasePathFlags), + }, utils.DatabaseFlags), Description: ` The import-preimages command imports hash preimages from an RLP encoded stream. It's deprecated, please use "geth db import" instead. @@ -146,7 +143,7 @@ It's deprecated, please use "geth db import" instead. Flags: flags.Merge([]cli.Flag{ utils.CacheFlag, utils.SyncModeFlag, - }, utils.DatabasePathFlags), + }, utils.DatabaseFlags), Description: ` The export-preimages command exports hash preimages to an RLP encoded stream. It's deprecated, please use "geth db export" instead. @@ -165,8 +162,7 @@ It's deprecated, please use "geth db export" instead. utils.IncludeIncompletesFlag, utils.StartKeyFlag, utils.DumpLimitFlag, - utils.StateSchemeFlag, - }, utils.DatabasePathFlags), + }, utils.DatabaseFlags), Description: ` This command dumps out the state for a given block (or latest, if none provided). 
`, @@ -340,7 +336,8 @@ func exportChain(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - chain, _ := utils.MakeChain(ctx, stack, true) + chain, db := utils.MakeChain(ctx, stack, true) + defer db.Close() start := time.Now() var err error @@ -380,6 +377,7 @@ func importPreimages(ctx *cli.Context) error { defer stack.Close() db := utils.MakeChainDatabase(ctx, stack, false) + defer db.Close() start := time.Now() if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil { @@ -398,6 +396,7 @@ func exportPreimages(ctx *cli.Context) error { defer stack.Close() db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() start := time.Now() if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil { @@ -409,6 +408,8 @@ func exportPreimages(ctx *cli.Context) error { func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) { db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + var header *types.Header if ctx.NArg() > 1 { return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg()) diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index a1868eb8c..6f802716c 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -48,7 +48,7 @@ var ( Name: "removedb", Usage: "Remove blockchain and state databases", ArgsUsage: "", - Flags: utils.DatabasePathFlags, + Flags: utils.DatabaseFlags, Description: ` Remove blockchain and state databases`, } @@ -77,7 +77,7 @@ Remove blockchain and state databases`, ArgsUsage: " ", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Usage: "Inspect the storage size for each type of data in the database", Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`, } @@ -85,7 +85,7 @@ Remove blockchain and state databases`, Action: checkStateContent, Name: "check-state-content", ArgsUsage: "", - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Usage: "Verify that state data is cryptographically correct", Description: `This command iterates the entire database for 32-byte keys, looking for rlp-encoded trie nodes. For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates @@ -97,7 +97,7 @@ a data corruption.`, Usage: "Print leveldb statistics", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), } dbCompactCmd = &cli.Command{ Action: dbCompact, @@ -107,7 +107,7 @@ a data corruption.`, utils.SyncModeFlag, utils.CacheFlag, utils.CacheDatabaseFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: `This command performs a database compaction. 
WARNING: This operation may take a very long time to finish, and may cause database corruption if it is aborted during execution'!`, @@ -119,7 +119,7 @@ corruption if it is aborted during execution'!`, ArgsUsage: "", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: "This command looks up the specified database key from the database.", } dbDeleteCmd = &cli.Command{ @@ -129,7 +129,7 @@ corruption if it is aborted during execution'!`, ArgsUsage: "", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: `This command deletes the specified database key from the database. WARNING: This is a low-level operation which may cause database corruption!`, } @@ -140,7 +140,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, ArgsUsage: " ", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: `This command sets a given database key to the given value. WARNING: This is a low-level operation which may cause database corruption!`, } @@ -151,8 +151,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, ArgsUsage: " ", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - utils.StateSchemeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: "This command looks up the specified database key from the database.", } dbDumpFreezerIndex = &cli.Command{ @@ -162,7 +161,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, ArgsUsage: " ", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: "This command displays information about the freezer index.", } dbImportCmd = &cli.Command{ @@ -172,7 +171,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, ArgsUsage: " ", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.", } dbMetadataCmd = &cli.Command{ @@ -191,7 +190,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, Usage: "Shows metadata about the chain status.", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: "Shows metadata about the chain status.", } ) @@ -595,6 +594,7 @@ func importLDBdata(ctx *cli.Context) error { close(stop) }() db := utils.MakeChainDatabase(ctx, stack, false) + defer db.Close() return utils.ImportLDBData(db, fName, int64(start), stop) } @@ -691,6 +691,7 @@ func exportChaindata(ctx *cli.Context) error { close(stop) }() db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop) } @@ -698,6 +699,8 @@ func showMetaData(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + ancients, err := db.Ancients() if err != nil { fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err) diff 
--git a/cmd/geth/main.go b/cmd/geth/main.go index d2da8c152..da143edec 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -96,7 +96,6 @@ var ( utils.SnapshotFlag, utils.TxLookupLimitFlag, utils.TransactionHistoryFlag, - utils.StateSchemeFlag, utils.StateHistoryFlag, utils.LightServeFlag, utils.LightIngressFlag, @@ -152,7 +151,7 @@ var ( utils.GpoMaxGasPriceFlag, utils.GpoIgnoreGasPriceFlag, configFileFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags) + }, utils.NetworkFlags, utils.DatabaseFlags) rpcFlags = []cli.Flag{ utils.HTTPEnabledFlag, diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index 5e1c78473..641348251 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -51,7 +51,7 @@ var ( Action: pruneState, Flags: flags.Merge([]cli.Flag{ utils.BloomFilterSizeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth snapshot prune-state will prune historical state data with the help of the state snapshot. @@ -69,9 +69,7 @@ WARNING: it's only supported in hash mode(--state.scheme=hash)". Usage: "Recalculate state hash based on the snapshot for verification", ArgsUsage: "", Action: verifyState, - Flags: flags.Merge([]cli.Flag{ - utils.StateSchemeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth snapshot verify-state will traverse the whole accounts and storages set based on the specified @@ -84,7 +82,7 @@ In other words, this command does the snapshot to trie conversion. Usage: "Check that there is no 'dangling' snap storage", ArgsUsage: "", Action: checkDanglingStorage, - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth snapshot check-dangling-storage traverses the snap storage data, and verifies that all snapshot storage data has a corresponding account. @@ -95,7 +93,7 @@ data, and verifies that all snapshot storage data has a corresponding account. Usage: "Check all snapshot layers for the a specific account", ArgsUsage: "
", Action: checkAccount, - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth snapshot inspect-account
checks all snapshot layers and prints out information about the specified address. @@ -106,9 +104,7 @@ information about the specified address. Usage: "Traverse the state with given root hash and perform quick verification", ArgsUsage: "", Action: traverseState, - Flags: flags.Merge([]cli.Flag{ - utils.StateSchemeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth snapshot traverse-state will traverse the whole state from the given state root and will abort if any @@ -123,9 +119,7 @@ It's also usable without snapshot enabled. Usage: "Traverse the state with given root hash and perform detailed verification", ArgsUsage: "", Action: traverseRawState, - Flags: flags.Merge([]cli.Flag{ - utils.StateSchemeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth snapshot traverse-rawstate will traverse the whole state from the given root and will abort if any referenced @@ -146,8 +140,7 @@ It's also usable without snapshot enabled. utils.ExcludeStorageFlag, utils.StartKeyFlag, utils.DumpLimitFlag, - utils.StateSchemeFlag, - }, utils.NetworkFlags, utils.DatabasePathFlags), + }, utils.NetworkFlags, utils.DatabaseFlags), Description: ` This command is semantically equivalent to 'geth dump', but uses the snapshots as the backend data source, making this command a lot faster. @@ -252,7 +245,9 @@ func checkDanglingStorage(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() - return snapshot.CheckDanglingStorage(utils.MakeChainDatabase(ctx, stack, true)) + db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + return snapshot.CheckDanglingStorage(db) } // traverseState is a helper function used for pruning verification. @@ -332,6 +327,11 @@ func traverseState(ctx *cli.Context) error { storageIter := trie.NewIterator(storageIt) for storageIter.Next() { slots += 1 + + if time.Since(lastReport) > time.Second*8 { + log.Info("Traversing state", "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + lastReport = time.Now() + } } if storageIter.Err != nil { log.Error("Failed to traverse storage trie", "root", acc.Root, "err", storageIter.Err) @@ -486,6 +486,10 @@ func traverseRawState(ctx *cli.Context) error { if storageIter.Leaf() { slots += 1 } + if time.Since(lastReport) > time.Second*8 { + log.Info("Traversing state", "nodes", nodes, "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + lastReport = time.Now() + } } if storageIter.Error() != nil { log.Error("Failed to traverse storage trie", "root", acc.Root, "err", storageIter.Error()) diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go index 9ba2b4167..aa79889e8 100644 --- a/cmd/geth/verkle.go +++ b/cmd/geth/verkle.go @@ -45,7 +45,7 @@ var ( Usage: "verify the conversion of a MPT into a verkle tree", ArgsUsage: "", Action: verifyVerkle, - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth verkle verify This command takes a root commitment and attempts to rebuild the tree. @@ -56,7 +56,7 @@ This command takes a root commitment and attempts to rebuild the tree. 
Usage: "Dump a verkle tree to a DOT file", ArgsUsage: " [ ...]", Action: expandVerkle, - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags), Description: ` geth verkle dump [ ...] This command will produce a dot file representing the tree, rooted at . @@ -115,6 +115,7 @@ func verifyVerkle(ctx *cli.Context) error { defer stack.Close() chaindb := utils.MakeChainDatabase(ctx, stack, true) + defer chaindb.Close() headBlock := rawdb.ReadHeadBlock(chaindb) if headBlock == nil { log.Error("Failed to load head block") @@ -163,6 +164,7 @@ func expandVerkle(ctx *cli.Context) error { defer stack.Close() chaindb := utils.MakeChainDatabase(ctx, stack, true) + defer chaindb.Close() var ( rootC common.Hash keylist [][]byte diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index b04e468d6..055c7a1c5 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -969,18 +969,19 @@ var ( // NetworkFlags is the flag group of all built-in supported networks. NetworkFlags = append([]cli.Flag{MainnetFlag}, TestnetFlags...) - // DatabasePathFlags is the flag group of all database path flags. - DatabasePathFlags = []cli.Flag{ + // DatabaseFlags is the flag group of all database flags. + DatabaseFlags = []cli.Flag{ DataDirFlag, AncientFlag, RemoteDBFlag, + StateSchemeFlag, HttpHeaderFlag, } ) func init() { if rawdb.PebbleEnabled { - DatabasePathFlags = append(DatabasePathFlags, DBEngineFlag) + DatabaseFlags = append(DatabaseFlags, DBEngineFlag) } } @@ -1006,7 +1007,7 @@ func MakeDataDir(ctx *cli.Context) string { // setNodeKey creates a node key from set command line flags, either loading it // from a file or as a specified hex value. If neither flags were provided, this -// method returns nil and an emphemeral key is to be generated. +// method returns nil and an ephemeral key is to be generated. func setNodeKey(ctx *cli.Context, cfg *p2p.Config) { var ( hex = ctx.String(NodeKeyHexFlag.Name) @@ -1039,35 +1040,45 @@ func setNodeUserIdent(ctx *cli.Context, cfg *node.Config) { // setBootstrapNodes creates a list of bootstrap nodes from the command line // flags, reverting to pre-configured ones if none have been specified. +// Priority order for bootnodes configuration: +// +// 1. --bootnodes flag +// 2. Config file +// 3. Network preset flags (e.g. --goerli) +// 4. default to mainnet nodes func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) { urls := params.MainnetBootnodes - switch { - case ctx.IsSet(BootnodesFlag.Name): + if ctx.IsSet(BootnodesFlag.Name) { urls = SplitAndTrim(ctx.String(BootnodesFlag.Name)) - case ctx.Bool(HoleskyFlag.Name): - urls = params.HoleskyBootnodes - case ctx.Bool(SepoliaFlag.Name): - urls = params.SepoliaBootnodes - case ctx.Bool(GoerliFlag.Name): - urls = params.GoerliBootnodes + } else { + if cfg.BootstrapNodes != nil { + return // Already set by config file, don't apply defaults. 
+ } + switch { + case ctx.Bool(HoleskyFlag.Name): + urls = params.HoleskyBootnodes + case ctx.Bool(SepoliaFlag.Name): + urls = params.SepoliaBootnodes + case ctx.Bool(GoerliFlag.Name): + urls = params.GoerliBootnodes + } } + cfg.BootstrapNodes = mustParseBootnodes(urls) +} - // don't apply defaults if BootstrapNodes is already set - if cfg.BootstrapNodes != nil { - return - } - - cfg.BootstrapNodes = make([]*enode.Node, 0, len(urls)) +func mustParseBootnodes(urls []string) []*enode.Node { + nodes := make([]*enode.Node, 0, len(urls)) for _, url := range urls { if url != "" { node, err := enode.Parse(enode.ValidSchemes, url) if err != nil { log.Crit("Bootstrap URL invalid", "enode", url, "err", err) - continue + return nil } - cfg.BootstrapNodes = append(cfg.BootstrapNodes, node) + nodes = append(nodes, node) } } + return nodes } // setBootstrapNodesV5 creates a list of bootstrap nodes from the command line @@ -2129,7 +2140,7 @@ func DialRPCWithHeaders(endpoint string, headers []string) (*rpc.Client, error) } var opts []rpc.ClientOption if len(headers) > 0 { - var customHeaders = make(http.Header) + customHeaders := make(http.Header) for _, h := range headers { kv := strings.Split(h, ":") if len(kv) != 2 { diff --git a/core/asm/compiler.go b/core/asm/compiler.go index 4b1d37920..75bf726c9 100644 --- a/core/asm/compiler.go +++ b/core/asm/compiler.go @@ -17,6 +17,8 @@ package asm import ( + "encoding/hex" + "errors" "fmt" "math/big" "os" @@ -30,7 +32,7 @@ import ( // and holds the tokens for the program. type Compiler struct { tokens []token - binary []interface{} + out []byte labels map[string]int @@ -50,12 +52,10 @@ func NewCompiler(debug bool) *Compiler { // Feed feeds tokens in to ch and are interpreted by // the compiler. // -// feed is the first pass in the compile stage as it -// collects the used labels in the program and keeps a -// program counter which is used to determine the locations -// of the jump dests. The labels can than be used in the -// second stage to push labels and determine the right -// position. +// feed is the first pass in the compile stage as it collects the used labels in the +// program and keeps a program counter which is used to determine the locations of the +// jump dests. The labels can than be used in the second stage to push labels and +// determine the right position. func (c *Compiler) Feed(ch <-chan token) { var prev token for i := range ch { @@ -79,7 +79,6 @@ func (c *Compiler) Feed(ch <-chan token) { c.pc++ } } - c.tokens = append(c.tokens, i) prev = i } @@ -88,12 +87,11 @@ func (c *Compiler) Feed(ch <-chan token) { } } -// Compile compiles the current tokens and returns a -// binary string that can be interpreted by the EVM -// and an error if it failed. +// Compile compiles the current tokens and returns a binary string that can be interpreted +// by the EVM and an error if it failed. // -// compile is the second stage in the compile phase -// which compiles the tokens to EVM instructions. +// compile is the second stage in the compile phase which compiles the tokens to EVM +// instructions. 
func (c *Compiler) Compile() (string, []error) { var errors []error // continue looping over the tokens until @@ -105,16 +103,8 @@ func (c *Compiler) Compile() (string, []error) { } // turn the binary to hex - var bin strings.Builder - for _, v := range c.binary { - switch v := v.(type) { - case vm.OpCode: - bin.WriteString(fmt.Sprintf("%x", []byte{byte(v)})) - case []byte: - bin.WriteString(fmt.Sprintf("%x", v)) - } - } - return bin.String(), errors + h := hex.EncodeToString(c.out) + return h, errors } // next returns the next token and increments the @@ -156,87 +146,114 @@ func (c *Compiler) compileLine() error { return nil } -// compileNumber compiles the number to bytes -func (c *Compiler) compileNumber(element token) { - num := math.MustParseBig256(element.text).Bytes() - if len(num) == 0 { - num = []byte{0} +// parseNumber compiles the number to bytes +func parseNumber(tok token) ([]byte, error) { + if tok.typ != number { + panic("parseNumber of non-number token") } - c.pushBin(num) + num, ok := math.ParseBig256(tok.text) + if !ok { + return nil, errors.New("invalid number") + } + bytes := num.Bytes() + if len(bytes) == 0 { + bytes = []byte{0} + } + return bytes, nil } // compileElement compiles the element (push & label or both) // to a binary representation and may error if incorrect statements // where fed. func (c *Compiler) compileElement(element token) error { - // check for a jump. jumps must be read and compiled - // from right to left. - if isJump(element.text) { - rvalue := c.next() - switch rvalue.typ { - case number: - // TODO figure out how to return the error properly - c.compileNumber(rvalue) - case stringValue: - // strings are quoted, remove them. - c.pushBin(rvalue.text[1 : len(rvalue.text)-2]) - case label: - c.pushBin(vm.PUSH4) - pos := big.NewInt(int64(c.labels[rvalue.text])).Bytes() - pos = append(make([]byte, 4-len(pos)), pos...) - c.pushBin(pos) - case lineEnd: - c.pos-- - default: - return compileErr(rvalue, rvalue.text, "number, string or label") - } - // push the operation - c.pushBin(toBinary(element.text)) + switch { + case isJump(element.text): + return c.compileJump(element.text) + case isPush(element.text): + return c.compilePush() + default: + c.outputOpcode(toBinary(element.text)) return nil - } else if isPush(element.text) { - // handle pushes. pushes are read from left to right. - var value []byte - - rvalue := c.next() - switch rvalue.typ { - case number: - value = math.MustParseBig256(rvalue.text).Bytes() - if len(value) == 0 { - value = []byte{0} - } - case stringValue: - value = []byte(rvalue.text[1 : len(rvalue.text)-1]) - case label: - value = big.NewInt(int64(c.labels[rvalue.text])).Bytes() - value = append(make([]byte, 4-len(value)), value...) - default: - return compileErr(rvalue, rvalue.text, "number, string or label") - } - - if len(value) > 32 { - return fmt.Errorf("%d type error: unsupported string or number with size > 32", rvalue.lineno) - } - - c.pushBin(vm.OpCode(int(vm.PUSH1) - 1 + len(value))) - c.pushBin(value) - } else { - c.pushBin(toBinary(element.text)) } +} +func (c *Compiler) compileJump(jumpType string) error { + rvalue := c.next() + switch rvalue.typ { + case number: + numBytes, err := parseNumber(rvalue) + if err != nil { + return err + } + c.outputBytes(numBytes) + + case stringValue: + // strings are quoted, remove them. 
+ str := rvalue.text[1 : len(rvalue.text)-2] + c.outputBytes([]byte(str)) + + case label: + c.outputOpcode(vm.PUSH4) + pos := big.NewInt(int64(c.labels[rvalue.text])).Bytes() + pos = append(make([]byte, 4-len(pos)), pos...) + c.outputBytes(pos) + + case lineEnd: + // push without argument is supported, it just takes the destination from the stack. + c.pos-- + + default: + return compileErr(rvalue, rvalue.text, "number, string or label") + } + // push the operation + c.outputOpcode(toBinary(jumpType)) + return nil +} + +func (c *Compiler) compilePush() error { + // handle pushes. pushes are read from left to right. + var value []byte + rvalue := c.next() + switch rvalue.typ { + case number: + value = math.MustParseBig256(rvalue.text).Bytes() + if len(value) == 0 { + value = []byte{0} + } + case stringValue: + value = []byte(rvalue.text[1 : len(rvalue.text)-1]) + case label: + value = big.NewInt(int64(c.labels[rvalue.text])).Bytes() + value = append(make([]byte, 4-len(value)), value...) + default: + return compileErr(rvalue, rvalue.text, "number, string or label") + } + if len(value) > 32 { + return fmt.Errorf("%d: string or number size > 32 bytes", rvalue.lineno+1) + } + c.outputOpcode(vm.OpCode(int(vm.PUSH1) - 1 + len(value))) + c.outputBytes(value) return nil } // compileLabel pushes a jumpdest to the binary slice. func (c *Compiler) compileLabel() { - c.pushBin(vm.JUMPDEST) + c.outputOpcode(vm.JUMPDEST) } -// pushBin pushes the value v to the binary stack. -func (c *Compiler) pushBin(v interface{}) { +func (c *Compiler) outputOpcode(op vm.OpCode) { if c.debug { - fmt.Printf("%d: %v\n", len(c.binary), v) + fmt.Printf("%d: %v\n", len(c.out), op) } - c.binary = append(c.binary, v) + c.out = append(c.out, byte(op)) +} + +// output pushes the value v to the binary stack. +func (c *Compiler) outputBytes(b []byte) { + if c.debug { + fmt.Printf("%d: %x\n", len(c.out), b) + } + c.out = append(c.out, b...) 
} // isPush returns whether the string op is either any of @@ -263,13 +280,13 @@ type compileError struct { } func (err compileError) Error() string { - return fmt.Sprintf("%d syntax error: unexpected %v, expected %v", err.lineno, err.got, err.want) + return fmt.Sprintf("%d: syntax error: unexpected %v, expected %v", err.lineno, err.got, err.want) } func compileErr(c token, got, want string) error { return compileError{ got: got, want: want, - lineno: c.lineno, + lineno: c.lineno + 1, } } diff --git a/core/asm/compiler_test.go b/core/asm/compiler_test.go index ce9df436b..3d64c96bc 100644 --- a/core/asm/compiler_test.go +++ b/core/asm/compiler_test.go @@ -54,6 +54,14 @@ func TestCompiler(t *testing.T) { `, output: "6300000006565b", }, + { + input: ` + JUMP @label +label: ;; comment + ADD ;; comment +`, + output: "6300000006565b01", + }, } for _, test := range tests { ch := Lex([]byte(test.input), false) diff --git a/core/asm/lex_test.go b/core/asm/lex_test.go index 53e05fbbb..173031521 100644 --- a/core/asm/lex_test.go +++ b/core/asm/lex_test.go @@ -72,6 +72,16 @@ func TestLexer(t *testing.T) { input: "@label123", tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}}, }, + // comment after label + { + input: "@label123 ;; comment", + tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}}, + }, + // comment after instruction + { + input: "push 3 ;; comment\nadd", + tokens: []token{{typ: lineStart}, {typ: element, text: "push"}, {typ: number, text: "3"}, {typ: lineEnd, text: "\n"}, {typ: lineStart, lineno: 1}, {typ: element, lineno: 1, text: "add"}, {typ: eof, lineno: 1}}, + }, } for _, test := range tests { diff --git a/core/asm/lexer.go b/core/asm/lexer.go index d1b79a1fb..e025c6f36 100644 --- a/core/asm/lexer.go +++ b/core/asm/lexer.go @@ -42,6 +42,8 @@ type token struct { // is able to parse and return. type tokenType int +//go:generate go run golang.org/x/tools/cmd/stringer -type tokenType + const ( eof tokenType = iota // end of file lineStart // emitted when a line starts @@ -52,31 +54,13 @@ const ( labelDef // label definition is emitted when a new label is found number // number is emitted when a number is found stringValue // stringValue is emitted when a string has been found - - Numbers = "1234567890" // characters representing any decimal number - HexadecimalNumbers = Numbers + "aAbBcCdDeEfF" // characters representing any hexadecimal - Alpha = "abcdefghijklmnopqrstuwvxyzABCDEFGHIJKLMNOPQRSTUWVXYZ" // characters representing alphanumeric ) -// String implements stringer -func (it tokenType) String() string { - if int(it) > len(stringtokenTypes) { - return "invalid" - } - return stringtokenTypes[it] -} - -var stringtokenTypes = []string{ - eof: "EOF", - lineStart: "new line", - lineEnd: "end of line", - invalidStatement: "invalid statement", - element: "element", - label: "label", - labelDef: "label definition", - number: "number", - stringValue: "string", -} +const ( + decimalNumbers = "1234567890" // characters representing any decimal number + hexNumbers = decimalNumbers + "aAbBcCdDeEfF" // characters representing any hexadecimal + alpha = "abcdefghijklmnopqrstuwvxyzABCDEFGHIJKLMNOPQRSTUWVXYZ" // characters representing alphanumeric +) // lexer is the basic construct for parsing // source code and turning them in to tokens. 
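[Reviewer note] The hunk above unexports the lexer's character-set constants and drops the hand-written String method in favor of a generated stringer (see core/asm/tokentype_string.go later in this patch). A minimal in-package sketch of the generated behavior, written as a hypothetical test, not part of the patch:

package asm

import "testing"

// Sketch only: exercises the String method generated for tokenType.
func TestTokenTypeString(t *testing.T) {
	// In-range values map to their identifier names.
	if got := number.String(); got != "number" {
		t.Errorf("number.String() = %q, want %q", got, "number")
	}
	// Out-of-range values fall back to a numeric form instead of "invalid".
	if got := tokenType(42).String(); got != "tokenType(42)" {
		t.Errorf("tokenType(42).String() = %q, want %q", got, "tokenType(42)")
	}
}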
@@ -200,7 +184,6 @@ func lexLine(l *lexer) stateFn { l.emit(lineEnd) l.ignore() l.lineno++ - l.emit(lineStart) case r == ';' && l.peek() == ';': return lexComment @@ -225,6 +208,7 @@ func lexLine(l *lexer) stateFn { // of the line and discards the text. func lexComment(l *lexer) stateFn { l.acceptRunUntil('\n') + l.backup() l.ignore() return lexLine @@ -234,7 +218,7 @@ func lexComment(l *lexer) stateFn { // the lex text state function to advance the parsing // process. func lexLabel(l *lexer) stateFn { - l.acceptRun(Alpha + "_" + Numbers) + l.acceptRun(alpha + "_" + decimalNumbers) l.emit(label) @@ -253,9 +237,9 @@ func lexInsideString(l *lexer) stateFn { } func lexNumber(l *lexer) stateFn { - acceptance := Numbers + acceptance := decimalNumbers if l.accept("xX") { - acceptance = HexadecimalNumbers + acceptance = hexNumbers } l.acceptRun(acceptance) @@ -265,7 +249,7 @@ func lexNumber(l *lexer) stateFn { } func lexElement(l *lexer) stateFn { - l.acceptRun(Alpha + "_" + Numbers) + l.acceptRun(alpha + "_" + decimalNumbers) if l.peek() == ':' { l.emit(labelDef) diff --git a/core/asm/tokentype_string.go b/core/asm/tokentype_string.go new file mode 100644 index 000000000..ade76aa36 --- /dev/null +++ b/core/asm/tokentype_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type tokenType"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[eof-0] + _ = x[lineStart-1] + _ = x[lineEnd-2] + _ = x[invalidStatement-3] + _ = x[element-4] + _ = x[label-5] + _ = x[labelDef-6] + _ = x[number-7] + _ = x[stringValue-8] +} + +const _tokenType_name = "eoflineStartlineEndinvalidStatementelementlabellabelDefnumberstringValue" + +var _tokenType_index = [...]uint8{0, 3, 12, 19, 35, 42, 47, 55, 61, 72} + +func (i tokenType) String() string { + if i < 0 || i >= tokenType(len(_tokenType_index)-1) { + return "tokenType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tokenType_name[_tokenType_index[i]:_tokenType_index[i+1]] +} diff --git a/core/blockchain.go b/core/blockchain.go index 8935dc1a7..5bc06d0e7 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -337,17 +337,17 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis if err := bc.loadLastState(); err != nil { return nil, err } - // Make sure the state associated with the block is available + // Make sure the state associated with the block is available, or log out + // if there is no available state, waiting for state sync. head := bc.CurrentBlock() if !bc.HasState(head.Root) { if head.Number.Uint64() == 0 { // The genesis state is missing, which is only possible in the path-based - // scheme. This situation occurs when the state syncer overwrites it. - // - // The solution is to reset the state to the genesis state. Although it may not - // match the sync target, the state healer will later address and correct any - // inconsistencies. - bc.resetState() + // scheme. This situation occurs when the initial state sync is not finished + // yet, or the chain head is rewound below the pivot point. In both scenario, + // there is no possible recovery approach except for rerunning a snap sync. + // Do nothing here until the state syncer picks it up. + log.Info("Genesis state is missing, wait state sync") } else { // Head state is missing, before the state recovery, find out the // disk layer point of snapshot(if it's enabled). 
Make sure the @@ -630,28 +630,6 @@ func (bc *BlockChain) SetSafe(header *types.Header) { } } -// resetState resets the persistent state to genesis state if it's not present. -func (bc *BlockChain) resetState() { - // Short circuit if the genesis state is already present. - root := bc.genesisBlock.Root() - if bc.HasState(root) { - return - } - // Reset the state database to empty for committing genesis state. - // Note, it should only happen in path-based scheme and Reset function - // is also only call-able in this mode. - if bc.triedb.Scheme() == rawdb.PathScheme { - if err := bc.triedb.Reset(types.EmptyRootHash); err != nil { - log.Crit("Failed to clean state", "err", err) // Shouldn't happen - } - } - // Write genesis state into database. - if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil { - log.Crit("Failed to commit genesis state", "err", err) - } - log.Info("Reset state to genesis", "root", root) -} - // setHeadBeyondRoot rewinds the local chain to a new head with the extra condition // that the rewind must pass the specified state root. This method is meant to be // used when rewinding with snapshots enabled to ensure that we go back further than @@ -687,7 +665,6 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha if newHeadBlock == nil { log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash()) newHeadBlock = bc.genesisBlock - bc.resetState() } else { // Block exists, keep rewinding until we find one with state, // keeping rewinding until we exceed the optional threshold @@ -715,16 +692,14 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha } } if beyondRoot || newHeadBlock.NumberU64() == 0 { - if newHeadBlock.NumberU64() == 0 { - bc.resetState() - } else if !bc.HasState(newHeadBlock.Root()) { + if !bc.HasState(newHeadBlock.Root()) && bc.stateRecoverable(newHeadBlock.Root()) { // Rewind to a block with recoverable state. If the state is // missing, run the state recovery here. if err := bc.triedb.Recover(newHeadBlock.Root()); err != nil { log.Crit("Failed to rollback state", "err", err) // Shouldn't happen } + log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) } - log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) break } log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root()) @@ -739,6 +714,15 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha // to low, so it's safe to update in-memory markers directly. bc.currentBlock.Store(newHeadBlock.Header()) headBlockGauge.Update(int64(newHeadBlock.NumberU64())) + + // The head state is missing, which is only possible in the path-based + // scheme. This situation occurs when the chain head is rewound below + // the pivot point. In this scenario, there is no possible recovery + // approach except for rerunning a snap sync. Do nothing here until the + // state syncer picks it up. 
+ if !bc.HasState(newHeadBlock.Root()) { + log.Info("Chain is stateless, wait state sync", "number", newHeadBlock.Number(), "hash", newHeadBlock.Hash()) + } } // Rewind the snap block in a simpleton way to the target head if currentSnapBlock := bc.CurrentSnapBlock(); currentSnapBlock != nil && header.Number.Uint64() < currentSnapBlock.Number.Uint64() { @@ -838,7 +822,7 @@ func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error { // Reset the trie database with the fresh snap synced state. root := block.Root() if bc.triedb.Scheme() == rawdb.PathScheme { - if err := bc.triedb.Reset(root); err != nil { + if err := bc.triedb.Enable(root); err != nil { return err } } diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go index d8f932041..cf799c832 100644 --- a/core/bloombits/matcher.go +++ b/core/bloombits/matcher.go @@ -630,13 +630,16 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan request <- &Retrieval{Bit: bit, Sections: sections, Context: s.ctx} result := <-request + + // Deliver a result before s.Close() to avoid a deadlock + s.deliverSections(result.Bit, result.Sections, result.Bitsets) + if result.Error != nil { s.errLock.Lock() s.err = result.Error s.errLock.Unlock() s.Close() } - s.deliverSections(result.Bit, result.Sections, result.Bitsets) } } } diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index 3d49b2ece..db634bc14 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -107,6 +107,16 @@ func TestCreation(t *testing.T) { {1735372, 1677557088, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 0}}, // First Shanghai block }, }, + // Holesky test cases + { + params.HoleskyChainConfig, + core.DefaultHoleskyGenesisBlock().ToBlock(), + []testcase{ + {0, 0, ID{Hash: checksumToBytes(0xc61a6098), Next: 1696000704}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople, Petersburg, Istanbul, Berlin, London, Paris block + {123, 0, ID{Hash: checksumToBytes(0xc61a6098), Next: 1696000704}}, // First MergeNetsplit block + {123, 1696000704, ID{Hash: checksumToBytes(0xfd4f016b), Next: 0}}, // Last MergeNetsplit block + }, + }, } for i, tt := range tests { for j, ttt := range tt.cases { diff --git a/core/genesis.go b/core/genesis.go index 86a3e42a6..baace3f99 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -587,10 +587,9 @@ func DefaultHoleskyGenesisBlock() *Genesis { return &Genesis{ Config: params.HoleskyChainConfig, Nonce: 0x1234, - ExtraData: hexutil.MustDecode("0x686f77206d7563682069732074686520666973683f"), GasLimit: 0x17d7840, Difficulty: big.NewInt(0x01), - Timestamp: 1694786100, + Timestamp: 1695902100, Alloc: decodePrealloc(holeskyAllocData), } } diff --git a/core/rawdb/accessors_sync.go b/core/rawdb/accessors_sync.go index 7a7374e16..2dc08b3b7 100644 --- a/core/rawdb/accessors_sync.go +++ b/core/rawdb/accessors_sync.go @@ -76,3 +76,25 @@ func DeleteSkeletonHeader(db ethdb.KeyValueWriter, number uint64) { log.Crit("Failed to delete skeleton header", "err", err) } } + +const ( + StateSyncUnknown = uint8(0) // flags the state snap sync is unknown + StateSyncRunning = uint8(1) // flags the state snap sync is not completed yet + StateSyncFinished = uint8(2) // flags the state snap sync is completed +) + +// ReadSnapSyncStatusFlag retrieves the state snap sync status flag. 
+func ReadSnapSyncStatusFlag(db ethdb.KeyValueReader) uint8 { + blob, err := db.Get(snapSyncStatusFlagKey) + if err != nil || len(blob) != 1 { + return StateSyncUnknown + } + return blob[0] +} + +// WriteSnapSyncStatusFlag stores the state snap sync status flag into database. +func WriteSnapSyncStatusFlag(db ethdb.KeyValueWriter, flag uint8) { + if err := db.Put(snapSyncStatusFlagKey, []byte{flag}); err != nil { + log.Crit("Failed to store sync status flag", "err", err) + } +} diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go index f5c2f8899..ea437b811 100644 --- a/core/rawdb/accessors_trie.go +++ b/core/rawdb/accessors_trie.go @@ -89,6 +89,16 @@ func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) return h.hash(data) == hash } +// ExistsAccountTrieNode checks the presence of the account trie node with the +// specified node path, regardless of the node hash. +func ExistsAccountTrieNode(db ethdb.KeyValueReader, path []byte) bool { + has, err := db.Has(accountTrieNodeKey(path)) + if err != nil { + return false + } + return has +} + // WriteAccountTrieNode writes the provided account trie node into database. func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) { if err := db.Put(accountTrieNodeKey(path), node); err != nil { @@ -127,6 +137,16 @@ func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path [ return h.hash(data) == hash } +// ExistsStorageTrieNode checks the presence of the storage trie node with the +// specified account hash and node path, regardless of the node hash. +func ExistsStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) bool { + has, err := db.Has(storageTrieNodeKey(accountHash, path)) + if err != nil { + return false + } + return has +} + // WriteStorageTrieNode writes the provided storage trie node into database. func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) { if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil { diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 7a7845638..e97eeb2aa 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -253,7 +253,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st break } } - // We are about to exit on error. Print database metdata beore exiting + // We are about to exit on error. Print database metadata before exiting printChainMetadata(db) return nil, fmt.Errorf("gap in the chain between ancients [0 - #%d] and leveldb [#%d - #%d] ", frozen-1, number, head) @@ -555,7 +555,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey, snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey, uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey, - persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, + persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, snapSyncStatusFlagKey, } { if bytes.Equal(key, meta) { metadata.Add(size) diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index eb10ef54b..6e58cc440 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -108,7 +108,11 @@ func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize ui // Leveldb uses LOCK as the filelock filename. To prevent the // name collision, we use FLOCK as the lock name. 
lock := flock.New(flockFile) - if locked, err := lock.TryLock(); err != nil { + tryLock := lock.TryLock + if readonly { + tryLock = lock.TryRLock + } + if locked, err := tryLock(); err != nil { return nil, err } else if !locked { return nil, errors.New("locking failed") diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go index fc6316c95..cb32d61ae 100644 --- a/core/rawdb/freezer_table.go +++ b/core/rawdb/freezer_table.go @@ -212,6 +212,9 @@ func (t *freezerTable) repair() error { } // Ensure the index is a multiple of indexEntrySize bytes if overflow := stat.Size() % indexEntrySize; overflow != 0 { + if t.readonly { + return fmt.Errorf("index file(path: %s, name: %s) size is not a multiple of %d", t.path, t.name, indexEntrySize) + } truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path } // Retrieve the file sizes and prepare for truncation @@ -270,6 +273,9 @@ func (t *freezerTable) repair() error { // Keep truncating both files until they come in sync contentExp = int64(lastIndex.offset) for contentExp != contentSize { + if t.readonly { + return fmt.Errorf("freezer table(path: %s, name: %s, num: %d) is corrupted", t.path, t.name, lastIndex.filenum) + } verbose = true // Truncate the head file to the last offset pointer if contentExp < contentSize { diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go index 6183d2274..e1f975ae0 100644 --- a/core/rawdb/freezer_test.go +++ b/core/rawdb/freezer_test.go @@ -289,6 +289,57 @@ func TestFreezerReadonlyValidate(t *testing.T) { } } +func TestFreezerConcurrentReadonly(t *testing.T) { + t.Parallel() + + tables := map[string]bool{"a": true} + dir := t.TempDir() + + f, err := NewFreezer(dir, "", false, 2049, tables) + if err != nil { + t.Fatal("can't open freezer", err) + } + var item = make([]byte, 1024) + batch := f.tables["a"].newBatch() + items := uint64(10) + for i := uint64(0); i < items; i++ { + require.NoError(t, batch.AppendRaw(i, item)) + } + require.NoError(t, batch.commit()) + if loaded := f.tables["a"].items.Load(); loaded != items { + t.Fatalf("unexpected number of items in table, want: %d, have: %d", items, loaded) + } + require.NoError(t, f.Close()) + + var ( + wg sync.WaitGroup + fs = make([]*Freezer, 5) + errs = make([]error, 5) + ) + for i := 0; i < 5; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + f, err := NewFreezer(dir, "", true, 2049, tables) + if err == nil { + fs[i] = f + } else { + errs[i] = err + } + }(i) + } + + wg.Wait() + + for i := range fs { + if err := errs[i]; err != nil { + t.Fatal("failed to open freezer", err) + } + require.NoError(t, fs[i].Close()) + } +} + func newFreezerForTesting(t *testing.T, tables map[string]bool) (*Freezer, string) { t.Helper() diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 7269fe5d5..8e82459e8 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -91,6 +91,9 @@ var ( // transitionStatusKey tracks the eth2 transition status. transitionStatusKey = []byte("eth2-transition") + // snapSyncStatusFlagKey flags the status of snap sync. + snapSyncStatusFlagKey = []byte("SnapSyncStatus") + // Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go index 40264b092..f54debebe 100644 --- a/core/state/snapshot/generate.go +++ b/core/state/snapshot/generate.go @@ -446,6 +446,10 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefi internal += time.Since(istart) } if iter.Err != nil { + // Trie errors should never happen. Still, in case of a bug, expose the + // error here, as the outer code will presume errors are interrupts, not + // some deeper issues. + log.Error("State snapshotter failed to iterate trie", "err", iter.Err) return false, nil, iter.Err } // Delete all stale snapshot states remaining diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index 042ff3be2..36916c3f0 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -355,7 +355,13 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr return err } } + // Initialize the state with head block, or fallback to empty one in + // case the head state is not available(might occur when node is not + // fully synced). state, err := p.chain.StateAt(head.Root) + if err != nil { + state, err = p.chain.StateAt(types.EmptyRootHash) + } if err != nil { return err } diff --git a/core/txpool/blobpool/evictheap.go b/core/txpool/blobpool/evictheap.go index 7607a911c..df594099f 100644 --- a/core/txpool/blobpool/evictheap.go +++ b/core/txpool/blobpool/evictheap.go @@ -44,7 +44,7 @@ type evictHeap struct { index map[common.Address]int // Indices into the heap for replacements } -// newPriceHeap creates a new heap of cheapets accounts in the blob pool to evict +// newPriceHeap creates a new heap of cheapest accounts in the blob pool to evict // from in case of over saturation. func newPriceHeap(basefee *uint256.Int, blobfee *uint256.Int, index *map[common.Address][]*blobTxMeta) *evictHeap { heap := &evictHeap{ diff --git a/core/txpool/blobpool/limbo.go b/core/txpool/blobpool/limbo.go index 2d62593de..d1fe9c739 100644 --- a/core/txpool/blobpool/limbo.go +++ b/core/txpool/blobpool/limbo.go @@ -143,7 +143,7 @@ func (l *limbo) push(tx *types.Transaction, block uint64) error { return errors.New("already tracked blob transaction") } if err := l.setAndIndex(tx, block); err != nil { - log.Error("Failed to set and index liboed blobs", "tx", tx, "err", err) + log.Error("Failed to set and index limboed blobs", "tx", tx, "err", err) return err } return nil @@ -191,7 +191,7 @@ func (l *limbo) update(txhash common.Hash, block uint64) { log.Trace("Blob transaction unchanged in limbo", "tx", txhash, "block", block) return } - // Retrieve the old blobs from the data store and write tehm back with a new + // Retrieve the old blobs from the data store and write them back with a new // block number. IF anything fails, there's not much to do, go on. item, err := l.getAndDrop(id) if err != nil { diff --git a/core/txpool/blobpool/priority.go b/core/txpool/blobpool/priority.go index 18e545c2a..a8332bd9b 100644 --- a/core/txpool/blobpool/priority.go +++ b/core/txpool/blobpool/priority.go @@ -27,7 +27,7 @@ import ( var log2_1_125 = math.Log2(1.125) // evictionPriority calculates the eviction priority based on the algorithm -// described in the BlobPool docs for a both fee components. +// described in the BlobPool docs for both fee components.
// // This method takes about 8ns on a very recent laptop CPU, recalculating about // 125 million transaction priority values per second. diff --git a/core/txpool/errors.go b/core/txpool/errors.go index bc26550f7..61daa999f 100644 --- a/core/txpool/errors.go +++ b/core/txpool/errors.go @@ -52,6 +52,6 @@ var ( ErrOversizedData = errors.New("oversized data") // ErrFutureReplacePending is returned if a future transaction replaces a pending - // transaction. Future transactions should only be able to replace other future transactions. + // one. Future transactions should only be able to replace other future transactions. ErrFutureReplacePending = errors.New("future transaction tries to replace pending") ) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 00e326c4b..2430028f9 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -298,7 +298,20 @@ func (pool *LegacyPool) Init(gasTip *big.Int, head *types.Header, reserve txpool // Set the basic pool parameters pool.gasTip.Store(gasTip) - pool.reset(nil, head) + + // Initialize the state with head block, or fallback to empty one in + // case the head state is not available(might occur when node is not + // fully synced). + statedb, err := pool.chain.StateAt(head.Root) + if err != nil { + statedb, err = pool.chain.StateAt(types.EmptyRootHash) + } + if err != nil { + return err + } + pool.currentHead.Store(head) + pool.currentState = statedb + pool.pendingNonces = newNoncer(statedb) // Start the reorg loop early, so it can handle requests generated during // journal loading. @@ -406,7 +419,7 @@ func (pool *LegacyPool) Close() error { } // Reset implements txpool.SubPool, allowing the legacy pool's internal state to be -// kept in sync with the main transacion pool's internal state. +// kept in sync with the main transaction pool's internal state. func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) { wait := pool.requestReset(oldHead, newHead) <-wait @@ -637,7 +650,7 @@ func (pool *LegacyPool) validateTx(tx *types.Transaction, local bool) error { // pending or queued one, it overwrites the previous transaction if its price is higher. // // If a newly added transaction is marked as local, its sending account will be -// be added to the allowlist, preventing any associated transaction from being dropped +// added to the allowlist, preventing any associated transaction from being dropped // out of the pool due to pricing constraints. func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, err error) { // If the transaction is already known, discard it @@ -901,7 +914,7 @@ func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *typ } // addLocals enqueues a batch of transactions into the pool if they are valid, marking the -// senders as a local ones, ensuring they go around the local pricing constraints. +// senders as local ones, ensuring they go around the local pricing constraints. // // This method is used to add transactions from the RPC API and performs synchronous pool // reorganization and event propagation. @@ -943,7 +956,7 @@ func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error { } // Add enqueues a batch of transactions into the pool if they are valid. Depending -// on the local flag, full pricing contraints will or will not be applied. +// on the local flag, full pricing constraints will or will not be applied. 
// // If sync is set, the method will block until all internal maintenance related // to the add is finished. Only use this during tests for determinism! diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index e40b41405..cacae7bc0 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -70,7 +70,7 @@ type TxPool struct { reservations map[common.Address]SubPool // Map with the account to pool reservations reserveLock sync.Mutex // Lock protecting the account reservations - subs event.SubscriptionScope // Subscription scope to unscubscribe all on shutdown + subs event.SubscriptionScope // Subscription scope to unsubscribe all on shutdown quit chan chan error // Quit channel to tear down the head updater } @@ -404,7 +404,7 @@ func (p *TxPool) Locals() []common.Address { } // Status returns the known status (unknown/pending/queued) of a transaction -// identified by their hashes. +// identified by its hash. func (p *TxPool) Status(hash common.Hash) TxStatus { for _, subpool := range p.subpools { if status := subpool.Status(hash); status != TxStatusUnknown { diff --git a/core/txpool/validation.go b/core/txpool/validation.go index 630d5340c..0df363d81 100644 --- a/core/txpool/validation.go +++ b/core/txpool/validation.go @@ -114,7 +114,7 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types if sidecar == nil { return fmt.Errorf("missing sidecar in blob transaction") } - // Ensure the number of items in the blob transaction and vairous side + // Ensure the number of items in the blob transaction and various side // data match up before doing any expensive validations hashes := tx.BlobHashes() if len(hashes) == 0 { @@ -182,7 +182,7 @@ type ValidationOptionsWithState struct { // be rejected once the number of remaining slots reaches zero. UsedAndLeftSlots func(addr common.Address) (int, int) - // ExistingExpenditure is a mandatory callback to retrieve the cummulative + // ExistingExpenditure is a mandatory callback to retrieve the cumulative // cost of the already pooled transactions to check for overdrafts. ExistingExpenditure func(addr common.Address) *big.Int @@ -237,7 +237,7 @@ func ValidateTransactionWithState(tx *types.Transaction, signer types.Signer, op return fmt.Errorf("%w: balance %v, queued cost %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, balance, spent, cost, new(big.Int).Sub(need, balance)) } // Transaction takes a new nonce value out of the pool. Ensure it doesn't - // overflow the number of permitted transactions from a single accoun + // overflow the number of permitted transactions from a single account // (i.e. max cancellable via out-of-bound transaction). 
if used, left := opts.UsedAndLeftSlots(from); left <= 0 { return fmt.Errorf("%w: pooled %d txs", ErrAccountLimitExceeded, used) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 2105201fc..56ff35020 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -251,6 +251,7 @@ func opKeccak256(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ( size.SetBytes(interpreter.hasherBuf[:]) return nil, nil } + func opAddress(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Address().Bytes())) return nil, nil @@ -267,6 +268,7 @@ func opOrigin(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b scope.Stack.push(new(uint256.Int).SetBytes(interpreter.evm.Origin.Bytes())) return nil, nil } + func opCaller(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Caller().Bytes())) return nil, nil diff --git a/eth/api_backend.go b/eth/api_backend.go index dea745382..a0c14f133 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -204,7 +204,10 @@ func (b *EthAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.B return nil, nil, errors.New("header not found") } stateDb, err := b.eth.BlockChain().StateAt(header.Root) - return stateDb, header, err + if err != nil { + return nil, nil, err + } + return stateDb, header, nil } func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) { @@ -223,7 +226,10 @@ func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockN return nil, nil, errors.New("hash is not currently canonical") } stateDb, err := b.eth.BlockChain().StateAt(header.Root) - return stateDb, header, err + if err != nil { + return nil, nil, err + } + return stateDb, header, nil } return nil, nil, errors.New("invalid arguments; neither block nor hash specified") } diff --git a/eth/backend.go b/eth/backend.go index 38c0fa974..af0351779 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -387,7 +387,7 @@ func (s *Ethereum) shouldPreserve(header *types.Header) bool { // r5 A [X] F G // r6 [X] // - // In the round5, the inturn signer E is offline, so the worst case + // In the round5, the in-turn signer E is offline, so the worst case // is A, F and G sign the block of round5 and reject the block of opponents // and in the round6, the last available signer B is offline, the whole // network is stuck. 
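The eth/api_backend.go hunks above are more than cosmetic: `return stateDb, header, err` could previously hand callers a non-nil header together with a non-nil error, and code that nil-checks only one of the two values can then act on half-valid data. A small illustrative sketch of the tightened contract, using generic placeholder types rather than geth's own:

    package sketch

    import "errors"

    type header struct{}
    type stateDB struct{}

    var errNoState = errors.New("state not found")

    // stateAndHeader normalizes the failure path: either both results are
    // valid and the error is nil, or both results are nil. The old shape
    // (return state, h, err) could leak a non-nil header next to an error.
    func stateAndHeader(haveState bool) (*stateDB, *header, error) {
        h := &header{}
        if !haveState {
            return nil, nil, errNoState
        }
        return &stateDB{}, h, nil
    }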
@@ -474,7 +474,7 @@ func (s *Ethereum) Engine() consensus.Engine { return s.engine } func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb } func (s *Ethereum) IsListening() bool { return true } // Always listening func (s *Ethereum) Downloader() *downloader.Downloader { return s.handler.downloader } -func (s *Ethereum) Synced() bool { return s.handler.acceptTxs.Load() } +func (s *Ethereum) Synced() bool { return s.handler.synced.Load() } func (s *Ethereum) SetSynced() { s.handler.enableSyncedFeatures() } func (s *Ethereum) ArchiveMode() bool { return s.config.NoPruning } func (s *Ethereum) BloomIndexer() *core.ChainIndexer { return s.bloomIndexer } diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 1e4f35ccd..7fed48bdb 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -403,7 +403,9 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, // subsequent state reads, explicitly disable the trie database and state // syncer is responsible to address and correct any state missing. if d.blockchain.TrieDB().Scheme() == rawdb.PathScheme { - d.blockchain.TrieDB().Reset(types.EmptyRootHash) + if err := d.blockchain.TrieDB().Disable(); err != nil { + return err + } } // Snap sync uses the snapshot namespace to store potentially flaky data until // sync completely heals and finishes. Pause snapshot maintenance in the mean- @@ -1280,41 +1282,13 @@ func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error { // keeps processing and scheduling them into the header chain and downloader's // queue until the stream ends or a failure occurs. func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode bool) error { - // Keep a count of uncertain headers to roll back var ( - rollback uint64 // Zero means no rollback (fine as you can't unroll the genesis) - rollbackErr error - mode = d.getMode() + mode = d.getMode() + gotHeaders = false // Wait for batches of headers to process ) - defer func() { - if rollback > 0 { - lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0 - if mode != LightSync { - lastFastBlock = d.blockchain.CurrentSnapBlock().Number - lastBlock = d.blockchain.CurrentBlock().Number - } - if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block - // We're already unwinding the stack, only print the error to make it more visible - log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err) - } - curFastBlock, curBlock := common.Big0, common.Big0 - if mode != LightSync { - curFastBlock = d.blockchain.CurrentSnapBlock().Number - curBlock = d.blockchain.CurrentBlock().Number - } - log.Warn("Rolled back chain segment", - "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number), - "snap", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), - "block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr) - } - }() - // Wait for batches of headers to process - gotHeaders := false - for { select { case <-d.cancelCh: - rollbackErr = errCanceled return errCanceled case task := <-d.headerProcCh: @@ -1363,8 +1337,6 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode } } } - // Disable any rollback and return - rollback = 0 return nil } // Otherwise split the chunk of headers into batches and process them @@ -1375,7 +1347,6 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, 
beaconMode // Terminate if something failed in between processing chunks select { case <-d.cancelCh: - rollbackErr = errCanceled return errCanceled default: } @@ -1422,29 +1393,11 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode } if len(chunkHeaders) > 0 { if n, err := d.lightchain.InsertHeaderChain(chunkHeaders); err != nil { - rollbackErr = err - - // If some headers were inserted, track them as uncertain - if mode == SnapSync && n > 0 && rollback == 0 { - rollback = chunkHeaders[0].Number.Uint64() - } log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err) return fmt.Errorf("%w: %v", errInvalidChain, err) } - // All verifications passed, track all headers within the allowed limits - if mode == SnapSync { - head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64() - if head-rollback > uint64(fsHeaderSafetyNet) { - rollback = head - uint64(fsHeaderSafetyNet) - } else { - rollback = 1 - } - } } if len(rejected) != 0 { - // Merge threshold reached, stop importing, but don't roll back - rollback = 0 - log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd) return ErrMergeTransition } @@ -1455,7 +1408,6 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { select { case <-d.cancelCh: - rollbackErr = errCanceled return errCanceled case <-time.After(time.Second): } @@ -1463,7 +1415,6 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode // Otherwise insert the headers for content retrieval inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin) if len(inserts) != len(chunkHeaders) { - rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunkHeaders)) return fmt.Errorf("%w: stale headers", errBadPeer) } } diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 06c22afff..ffe445ea8 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -878,86 +878,6 @@ func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { assertOwnChain(t, tester, len(chain.blocks)) } -// Tests that upon detecting an invalid header, the recent ones are rolled back -// for various failure scenarios. Afterwards a full sync is attempted to make -// sure no state was corrupted. -func TestInvalidHeaderRollback66Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, SnapSync) } -func TestInvalidHeaderRollback67Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH67, SnapSync) } - -func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - // Create a small enough block chain to download - targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks - chain := testChainBase.shorten(targetBlocks) - - // Attempt to sync with an attacker that feeds junk during the fast sync phase. - // This should result in the last fsHeaderSafetyNet headers being rolled back. 
- missing := fsHeaderSafetyNet + MaxHeaderFetch + 1 - - fastAttacker := tester.newPeer("fast-attack", protocol, chain.blocks[1:]) - fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} - - if err := tester.sync("fast-attack", nil, mode); err == nil { - t.Fatalf("succeeded fast attacker synchronisation") - } - if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch { - t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch) - } - // Attempt to sync with an attacker that feeds junk during the block import phase. - // This should result in both the last fsHeaderSafetyNet number of headers being - // rolled back, and also the pivot point being reverted to a non-block status. - missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1 - - blockAttacker := tester.newPeer("block-attack", protocol, chain.blocks[1:]) - fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} // Make sure the fast-attacker doesn't fill in - blockAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} - - if err := tester.sync("block-attack", nil, mode); err == nil { - t.Fatalf("succeeded block attacker synchronisation") - } - if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { - t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) - } - if mode == SnapSync { - if head := tester.chain.CurrentBlock().Number.Uint64(); head != 0 { - t.Errorf("fast sync pivot block #%d not rolled back", head) - } - } - // Attempt to sync with an attacker that withholds promised blocks after the - // fast sync pivot point. This could be a trial to leave the node with a bad - // but already imported pivot block. - withholdAttacker := tester.newPeer("withhold-attack", protocol, chain.blocks[1:]) - - tester.downloader.syncInitHook = func(uint64, uint64) { - for i := missing; i < len(chain.blocks); i++ { - withholdAttacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{} - } - tester.downloader.syncInitHook = nil - } - if err := tester.sync("withhold-attack", nil, mode); err == nil { - t.Fatalf("succeeded withholding attacker synchronisation") - } - if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { - t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) - } - if mode == SnapSync { - if head := tester.chain.CurrentBlock().Number.Uint64(); head != 0 { - t.Errorf("fast sync pivot block #%d not rolled back", head) - } - } - // Synchronise with the valid peer and make sure sync succeeds. Since the last rollback - // should also disable fast syncing for this process, verify that we did a fresh full - // sync. Note, we can't assert anything about the receipts since we won't purge the - // database of them, hence we can't use assertOwnChain. - tester.newPeer("valid", protocol, chain.blocks[1:]) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) -} - // Tests that a peer advertising a high TD doesn't get to stall the downloader // afterwards by not sending any useful hashes. 
func TestHighTDStarvationAttack66Full(t *testing.T) { diff --git a/eth/downloader/skeleton.go b/eth/downloader/skeleton.go index 59df82bd9..a07e1695f 100644 --- a/eth/downloader/skeleton.go +++ b/eth/downloader/skeleton.go @@ -423,7 +423,7 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) { for _, peer := range s.peers.AllPeers() { s.idles[peer.id] = peer } - // Nofity any tester listening for startup events + // Notify any tester listening for startup events if s.syncStarting != nil { s.syncStarting() } diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go index 95fef0cde..a11b5e216 100644 --- a/eth/fetcher/tx_fetcher.go +++ b/eth/fetcher/tx_fetcher.go @@ -24,8 +24,8 @@ import ( "sort" "time" - mapset "github.com/deckarep/golang-set/v2" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" @@ -53,6 +53,9 @@ const ( // re-request them. maxTxUnderpricedSetSize = 32768 + // maxTxUnderpricedTimeout is the max time a transaction should be stuck in the underpriced set. + maxTxUnderpricedTimeout = int64(5 * time.Minute) + // txArriveTimeout is the time allowance before an announced transaction is // explicitly requested. txArriveTimeout = 500 * time.Millisecond @@ -148,7 +151,7 @@ type TxFetcher struct { drop chan *txDrop quit chan struct{} - underpriced mapset.Set[common.Hash] // Transactions discarded as too cheap (don't re-fetch) + underpriced *lru.Cache[common.Hash, int64] // Transactions discarded as too cheap (don't re-fetch) // Stage 1: Waiting lists for newly discovered transactions that might be // broadcast without needing explicit request/reply round trips. @@ -202,7 +205,7 @@ func NewTxFetcherForTests( fetching: make(map[common.Hash]string), requests: make(map[string]*txRequest), alternates: make(map[common.Hash]map[string]struct{}), - underpriced: mapset.NewSet[common.Hash](), + underpriced: lru.NewCache[common.Hash, int64](maxTxUnderpricedSetSize), hasTx: hasTx, addTxs: addTxs, fetchTxs: fetchTxs, @@ -223,17 +226,16 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error { // still valuable to check here because it runs concurrent to the internal // loop, so anything caught here is time saved internally. var ( - unknowns = make([]common.Hash, 0, len(hashes)) - duplicate, underpriced int64 + unknowns = make([]common.Hash, 0, len(hashes)) + duplicate int64 + underpriced int64 ) for _, hash := range hashes { switch { case f.hasTx(hash): duplicate++ - - case f.underpriced.Contains(hash): + case f.isKnownUnderpriced(hash): underpriced++ - default: unknowns = append(unknowns, hash) } @@ -245,10 +247,7 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error { if len(unknowns) == 0 { return nil } - announce := &txAnnounce{ - origin: peer, - hashes: unknowns, - } + announce := &txAnnounce{origin: peer, hashes: unknowns} select { case f.notify <- announce: return nil @@ -257,6 +256,16 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error { } } +// isKnownUnderpriced reports whether a transaction hash was recently found to be underpriced. 
+func (f *TxFetcher) isKnownUnderpriced(hash common.Hash) bool { + prevTime, ok := f.underpriced.Peek(hash) + if ok && prevTime+maxTxUnderpricedTimeout < time.Now().Unix() { + f.underpriced.Remove(hash) + return false + } + return ok +} + // Enqueue imports a batch of received transaction into the transaction pool // and the fetcher. This method may be called by both transaction broadcasts and // direct request replies. The differentiation is important so the fetcher can @@ -300,10 +309,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) // Avoid re-request this transaction when we receive another // announcement. if errors.Is(err, txpool.ErrUnderpriced) || errors.Is(err, txpool.ErrReplaceUnderpriced) { - for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize { - f.underpriced.Pop() - } - f.underpriced.Add(batch[j].Hash()) + f.underpriced.Add(batch[j].Hash(), batch[j].Time().Unix()) } // Track a few interesting failure types switch { diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go index 1715def99..980c1a6c2 100644 --- a/eth/fetcher/tx_fetcher_test.go +++ b/eth/fetcher/tx_fetcher_test.go @@ -1509,8 +1509,8 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) { } case isUnderpriced: - if fetcher.underpriced.Cardinality() != int(step) { - t.Errorf("step %d: underpriced set size mismatch: have %d, want %d", i, fetcher.underpriced.Cardinality(), step) + if fetcher.underpriced.Len() != int(step) { + t.Errorf("step %d: underpriced set size mismatch: have %d, want %d", i, fetcher.underpriced.Len(), step) } default: diff --git a/eth/handler.go b/eth/handler.go index a629ec5ee..59040442e 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -100,8 +100,8 @@ type handler struct { networkID uint64 forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node - snapSync atomic.Bool // Flag whether snap sync is enabled (gets disabled if we already have blocks) - acceptTxs atomic.Bool // Flag whether we're considered synchronised (enables transaction processing) + snapSync atomic.Bool // Flag whether snap sync is enabled (gets disabled if we already have blocks) + synced atomic.Bool // Flag whether we're considered synchronised (enables transaction processing) database ethdb.Database txpool txPool @@ -163,32 +163,24 @@ func newHandler(config *handlerConfig) (*handler, error) { fullBlock, snapBlock := h.chain.CurrentBlock(), h.chain.CurrentSnapBlock() if fullBlock.Number.Uint64() == 0 && snapBlock.Number.Uint64() > 0 { h.snapSync.Store(true) - log.Warn("Switch sync mode from full sync to snap sync") + log.Warn("Switch sync mode from full sync to snap sync", "reason", "snap sync incomplete") + } else if !h.chain.HasState(fullBlock.Root) { + h.snapSync.Store(true) + log.Warn("Switch sync mode from full sync to snap sync", "reason", "head state missing") } } else { - if h.chain.CurrentBlock().Number.Uint64() > 0 { + head := h.chain.CurrentBlock() + if head.Number.Uint64() > 0 && h.chain.HasState(head.Root) { // Print warning log if database is not empty to run snap sync. - log.Warn("Switch sync mode from snap sync to full sync") + log.Warn("Switch sync mode from snap sync to full sync", "reason", "snap sync complete") } else { // If snap sync was requested and our database is empty, grant it h.snapSync.Store(true) + log.Info("Enabled snap sync", "head", head.Number, "hash", head.Hash()) } } - // If sync succeeds, pass a callback to potentially disable snap sync mode - // and enable transaction propagation. 
- success := func() { - // If we were running snap sync and it finished, disable doing another - // round on next sync cycle - if h.snapSync.Load() { - log.Info("Snap sync complete, auto disabling") - h.snapSync.Store(false) - } - // If we've successfully finished a sync cycle, accept transactions from - // the network - h.enableSyncedFeatures() - } // Construct the downloader (long sync) - h.downloader = downloader.New(config.Database, h.eventMux, h.chain, nil, h.removePeer, success) + h.downloader = downloader.New(config.Database, h.eventMux, h.chain, nil, h.removePeer, h.enableSyncedFeatures) if ttd := h.chain.Config().TerminalTotalDifficulty; ttd != nil { if h.chain.Config().TerminalTotalDifficultyPassed { log.Info("Chain post-merge, sync via beacon client") @@ -245,8 +237,8 @@ func newHandler(config *handlerConfig) (*handler, error) { // accept each others' blocks until a restart. Unfortunately we haven't figured // out a way yet where nodes can decide unilaterally whether the network is new // or not. This should be fixed if we figure out a solution. - if h.snapSync.Load() { - log.Warn("Snap syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash()) + if !h.synced.Load() { + log.Warn("Syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash()) return 0, nil } if h.merger.TDDReached() { @@ -272,11 +264,7 @@ func newHandler(config *handlerConfig) (*handler, error) { } return 0, nil } - n, err := h.chain.InsertChain(blocks) - if err == nil { - h.enableSyncedFeatures() // Mark initial sync done on any fetcher import - } - return n, err + return h.chain.InsertChain(blocks) } h.blockFetcher = fetcher.NewBlockFetcher(false, nil, h.chain.GetBlockByHash, validator, h.BroadcastBlock, heighter, nil, inserter, h.removePeer) @@ -680,7 +668,15 @@ func (h *handler) txBroadcastLoop() { // enableSyncedFeatures enables the post-sync functionalities when the initial // sync is finished. func (h *handler) enableSyncedFeatures() { - h.acceptTxs.Store(true) + // Mark the local node as synced. + h.synced.Store(true) + + // If we were running snap sync and it finished, disable doing another + // round on next sync cycle + if h.snapSync.Load() { + log.Info("Snap sync complete, auto disabling") + h.snapSync.Store(false) + } if h.chain.TrieDB().Scheme() == rawdb.PathScheme { h.chain.TrieDB().SetBufferSize(pathdb.DefaultBufferSize) } diff --git a/eth/handler_eth.go b/eth/handler_eth.go index 3a5e6608b..2aba16f92 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -51,7 +51,7 @@ func (h *ethHandler) PeerInfo(id enode.ID) interface{} { // AcceptTxs retrieves whether transaction processing is enabled on the node // or if inbound transactions should simply be dropped. 
func (h *ethHandler) AcceptTxs() bool { - return h.acceptTxs.Load() + return h.synced.Load() } // Handle is invoked from a peer's message handler when it receives a new remote diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index 41619fe30..a16abc5ed 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -248,7 +248,7 @@ func testRecvTransactions(t *testing.T, protocol uint) { handler := newTestHandler() defer handler.close() - handler.handler.acceptTxs.Store(true) // mark synced to accept transactions + handler.handler.synced.Store(true) // mark synced to accept transactions txs := make(chan core.NewTxsEvent) sub := handler.txpool.SubscribeNewTxsEvent(txs) @@ -401,7 +401,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) { sinks[i] = newTestHandler() defer sinks[i].close() - sinks[i].handler.acceptTxs.Store(true) // mark synced to accept transactions + sinks[i].handler.synced.Store(true) // mark synced to accept transactions } // Interconnect all the sink handlers with the source handler for i, sink := range sinks { diff --git a/eth/sync.go b/eth/sync.go index ba7a7427a..c7ba7c93d 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -197,16 +197,25 @@ func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) { return downloader.SnapSync, td } // We are probably in full sync, but we might have rewound to before the - // snap sync pivot, check if we should reenable + // snap sync pivot, check if we should re-enable snap sync. + head := cs.handler.chain.CurrentBlock() if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil { - if head := cs.handler.chain.CurrentBlock(); head.Number.Uint64() < *pivot { + if head.Number.Uint64() < *pivot { block := cs.handler.chain.CurrentSnapBlock() td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64()) return downloader.SnapSync, td } } + // We are in a full sync, but the associated head state is missing. To complete + // the head state, forcefully rerun the snap sync. Note it doesn't mean the + // persistent state is corrupted, just mismatch with the head block. + if !cs.handler.chain.HasState(head.Root) { + block := cs.handler.chain.CurrentSnapBlock() + td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64()) + log.Info("Reenabled snap sync as chain is stateless") + return downloader.SnapSync, td + } // Nope, we're really full syncing - head := cs.handler.chain.CurrentBlock() td := cs.handler.chain.GetTd(head.Hash(), head.Number.Uint64()) return downloader.FullSync, td } @@ -242,13 +251,7 @@ func (h *handler) doSync(op *chainSyncOp) error { if err != nil { return err } - if h.snapSync.Load() { - log.Info("Snap sync complete, auto disabling") - h.snapSync.Store(false) - } - // If we've successfully finished a sync cycle, enable accepting transactions - // from the network. - h.acceptTxs.Store(true) + h.enableSyncedFeatures() head := h.chain.CurrentBlock() if head.Number.Uint64() > 0 { diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index a21d8ff67..af373b993 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -108,10 +108,10 @@ func (ec *Client) PeerCount(ctx context.Context) (uint64, error) { return uint64(result), err } -// BlockReceipts returns the receipts of a given block number or hash +// BlockReceipts returns the receipts of a given block number or hash. 
func (ec *Client) BlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.Receipt, error) { var r []*types.Receipt - err := ec.c.CallContext(ctx, &r, "eth_getBlockReceipts", blockNrOrHash) + err := ec.c.CallContext(ctx, &r, "eth_getBlockReceipts", blockNrOrHash.String()) if err == nil && r == nil { return nil, ethereum.NotFound } diff --git a/go.mod b/go.mod index 32693c00a..2490cd6e3 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( github.com/jackpal/go-nat-pmp v1.0.2 github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 github.com/julienschmidt/httprouter v1.3.0 - github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c + github.com/karalabe/usb v0.0.2 github.com/kylelemons/godebug v1.1.0 github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-isatty v0.0.16 diff --git a/go.sum b/go.sum index cce5e4022..57596642f 100644 --- a/go.sum +++ b/go.sum @@ -362,8 +362,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c h1:AqsttAyEyIEsNz5WLRwuRwjiT5CMDUfLk6cFJDVPebs= -github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4= +github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U= github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw= diff --git a/internal/debug/flags.go b/internal/debug/flags.go index 52a634245..736fede94 100644 --- a/internal/debug/flags.go +++ b/internal/debug/flags.go @@ -87,8 +87,9 @@ var ( Category: flags.LoggingCategory, } logRotateFlag = &cli.BoolFlag{ - Name: "log.rotate", - Usage: "Enables log file rotation", + Name: "log.rotate", + Usage: "Enables log file rotation", + Category: flags.LoggingCategory, } logMaxSizeMBsFlag = &cli.IntFlag{ Name: "log.maxsize", diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 733e671e0..e2911c6b1 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1158,8 +1158,12 @@ func (e *revertError) ErrorData() interface{} { // // Note, this function doesn't make and changes in the state/blockchain and is // useful to execute and retrieve values. 
-func (s *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides) (hexutil.Bytes, error) { - result, err := DoCall(ctx, s.b, args, blockNrOrHash, overrides, blockOverrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap()) +func (s *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides) (hexutil.Bytes, error) { + if blockNrOrHash == nil { + latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) + blockNrOrHash = &latest + } + result, err := DoCall(ctx, s.b, args, *blockNrOrHash, overrides, blockOverrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap()) if err != nil { return nil, err } diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index fc135c377..846a4347a 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -846,7 +846,7 @@ func TestCall(t *testing.T) { }, } for i, tc := range testSuite { - result, err := api.Call(context.Background(), tc.call, rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides, &tc.blockOverrides) + result, err := api.Call(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides, &tc.blockOverrides) if tc.expectErr != nil { if err == nil { t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr) diff --git a/miner/miner_test.go b/miner/miner_test.go index 489bc46a9..36d5166c6 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -64,6 +64,7 @@ func (m *mockBackend) StateAtBlock(block *types.Block, reexec uint64, base *stat } type testBlockChain struct { + root common.Hash config *params.ChainConfig statedb *state.StateDB gasLimit uint64 @@ -89,6 +90,10 @@ func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { return bc.statedb, nil } +func (bc *testBlockChain) HasState(root common.Hash) bool { + return bc.root == root +} + func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { return bc.chainHeadFeed.Subscribe(ch) } @@ -302,7 +307,7 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) { t.Fatalf("can't create new chain %v", err) } statedb, _ := state.New(bc.Genesis().Root(), bc.StateCache(), nil) - blockchain := &testBlockChain{chainConfig, statedb, 10000000, new(event.Feed)} + blockchain := &testBlockChain{bc.Genesis().Root(), chainConfig, statedb, 10000000, new(event.Feed)} pool := legacypool.New(testTxPoolConfig, blockchain) txpool, _ := txpool.New(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain, []txpool.SubPool{pool}) diff --git a/params/config.go b/params/config.go index f50386242..ac55d3771 100644 --- a/params/config.go +++ b/params/config.go @@ -26,7 +26,7 @@ import ( // Genesis hashes to enforce below configs on. 
var ( MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") - HoleskyGenesisHash = common.HexToHash("0xff9006519a8ce843ac9c28549d24211420b546e12ce2d170c77a8cca7964f23d") + HoleskyGenesisHash = common.HexToHash("0xb5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4") SepoliaGenesisHash = common.HexToHash("0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9") GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a") ) @@ -80,8 +80,7 @@ var ( TerminalTotalDifficulty: big.NewInt(0), TerminalTotalDifficultyPassed: true, MergeNetsplitBlock: nil, - ShanghaiTime: newUint64(1694790240), - CancunTime: newUint64(2000000000), + ShanghaiTime: newUint64(1696000704), Ethash: new(EthashConfig), } // SepoliaChainConfig contains the chain parameters to run a node on the Sepolia test network. diff --git a/params/version.go b/params/version.go index d75165f3c..5941701b6 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 13 // Minor version component of the current release - VersionPatch = 1 // Patch version component of the current release + VersionPatch = 2 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) diff --git a/trie/database.go b/trie/database.go index 2915ff948..1e59f0908 100644 --- a/trie/database.go +++ b/trie/database.go @@ -189,6 +189,15 @@ func (db *Database) WritePreimages() { } } +// Preimage retrieves a cached trie node pre-image from memory. If it cannot be +// found cached, the method queries the persistent database for the content. +func (db *Database) Preimage(hash common.Hash) []byte { + if db.preimages == nil { + return nil + } + return db.preimages.preimage(hash) +} + // Cap iteratively flushes old but still referenced trie nodes until the total // memory usage goes below the given threshold. The held pre-images accumulated // up to this point will be flushed in case the size exceeds the threshold. @@ -264,15 +273,27 @@ func (db *Database) Recoverable(root common.Hash) (bool, error) { return pdb.Recoverable(root), nil } -// Reset wipes all available journal from the persistent database and discard -// all caches and diff layers. Using the given root to create a new disk layer. +// Disable deactivates the database and invalidates all available state layers +// as stale to prevent access to the persistent state, which is in the syncing +// stage. +// // It's only supported by path-based database and will return an error for others. -func (db *Database) Reset(root common.Hash) error { +func (db *Database) Disable() error { pdb, ok := db.backend.(*pathdb.Database) if !ok { return errors.New("not supported") } - return pdb.Reset(root) + return pdb.Disable() +} + +// Enable activates database and resets the state tree with the provided persistent +// state root once the state sync is finished. +func (db *Database) Enable(root common.Hash) error { + pdb, ok := db.backend.(*pathdb.Database) + if !ok { + return errors.New("not supported") + } + return pdb.Enable(root) } // Journal commits an entire diff hierarchy to disk into a single journal entry. 
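The Reset-to-Disable/Enable split in trie/database.go pairs with two call sites elsewhere in this diff: the downloader now calls TrieDB().Disable() before state sync starts, and SnapSyncCommitHead calls triedb.Enable(root) once it finishes. A condensed sketch of that lifecycle; the download callback and the surrounding error handling are assumptions for illustration, not PR code:

    package sketch

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/trie"
    )

    // snapSyncStateLifecycle condenses the call sites from this diff: the path
    // database is disabled (all layers marked stale) for the duration of the
    // state download, then re-enabled on the freshly synced root.
    func snapSyncStateLifecycle(triedb *trie.Database, download func() (common.Hash, error)) error {
        if triedb.Scheme() == rawdb.PathScheme {
            if err := triedb.Disable(); err != nil { // invalidate stale layers
                return err
            }
        }
        root, err := download() // snap sync fills and heals the state
        if err != nil {
            return err
        }
        if triedb.Scheme() == rawdb.PathScheme {
            return triedb.Enable(root) // reset the state tree to the synced root
        }
        return nil
    }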
diff --git a/trie/sync.go b/trie/sync.go
index 4f5584599..9da070607 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -27,6 +27,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
 )
 
 // ErrNotRequested is returned by the trie sync when it's requested to process a
@@ -42,6 +43,16 @@ var ErrAlreadyProcessed = errors.New("already processed")
 // memory if the node was configured with a significant number of peers.
 const maxFetchesPerDepth = 16384
 
+var (
+	// deletionGauge is the metric to track how many trie node deletions
+	// are performed in total during the sync process.
+	deletionGauge = metrics.NewRegisteredGauge("trie/sync/delete", nil)
+
+	// lookupGauge is the metric to track how many trie node lookups are
+	// performed to determine if a node needs to be deleted.
+	lookupGauge = metrics.NewRegisteredGauge("trie/sync/lookup", nil)
+)
+
 // SyncPath is a path tuple identifying a particular trie node either in a single
 // trie (account) or a layered trie (account -> storage).
 //
@@ -93,9 +104,10 @@ type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Ha
 
 // nodeRequest represents a scheduled or already in-flight trie node retrieval request.
 type nodeRequest struct {
-	hash common.Hash // Hash of the trie node to retrieve
-	path []byte      // Merkle path leading to this node for prioritization
-	data []byte      // Data content of the node, cached until all subtrees complete
+	hash    common.Hash // Hash of the trie node to retrieve
+	path    []byte      // Merkle path leading to this node for prioritization
+	data    []byte      // Data content of the node, cached until all subtrees complete
+	deletes [][]byte    // List of internal path segments for trie nodes to delete
 
 	parent *nodeRequest // Parent state node referencing this entry
 	deps   int          // Number of dependencies before allowed to commit this node
@@ -125,18 +137,20 @@ type CodeSyncResult struct {
 // syncMemBatch is an in-memory buffer of successfully downloaded but not yet
 // persisted data items.
 type syncMemBatch struct {
-	nodes  map[string][]byte      // In-memory membatch of recently completed nodes
-	hashes map[string]common.Hash // Hashes of recently completed nodes
-	codes  map[common.Hash][]byte // In-memory membatch of recently completed codes
-	size   uint64                 // Estimated batch-size of in-memory data.
+	nodes   map[string][]byte      // In-memory membatch of recently completed nodes
+	hashes  map[string]common.Hash // Hashes of recently completed nodes
+	deletes map[string]struct{}    // Set of paths of trie nodes to delete
+	codes   map[common.Hash][]byte // In-memory membatch of recently completed codes
+	size    uint64                 // Estimated batch-size of in-memory data.
 }
 
 // newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes.
 func newSyncMemBatch() *syncMemBatch {
 	return &syncMemBatch{
-		nodes:  make(map[string][]byte),
-		hashes: make(map[string]common.Hash),
-		codes:  make(map[common.Hash][]byte),
+		nodes:   make(map[string][]byte),
+		hashes:  make(map[string]common.Hash),
+		deletes: make(map[string]struct{}),
+		codes:   make(map[common.Hash][]byte),
 	}
 }
 
@@ -347,16 +361,23 @@ func (s *Sync) ProcessNode(result NodeSyncResult) error {
 // Commit flushes the data stored in the internal membatch out to persistent
 // storage, returning any occurred error.
 func (s *Sync) Commit(dbw ethdb.Batch) error {
-	// Dump the membatch into a database dbw
+	// Flush the pending node writes into the database batch.
 	for path, value := range s.membatch.nodes {
 		owner, inner := ResolvePath([]byte(path))
 		rawdb.WriteTrieNode(dbw, owner, inner, s.membatch.hashes[path], value, s.scheme)
 	}
+	// Flush the pending node deletes into the database batch.
+	// Please note that each written and deleted node has a
+	// unique path, ensuring no duplication occurs.
+	for path := range s.membatch.deletes {
+		owner, inner := ResolvePath([]byte(path))
+		rawdb.DeleteTrieNode(dbw, owner, inner, common.Hash{} /* unused */, s.scheme)
+	}
+	// Flush the pending code writes into the database batch.
 	for hash, value := range s.membatch.codes {
 		rawdb.WriteCode(dbw, hash, value)
 	}
-	// Drop the membatch data and return
-	s.membatch = newSyncMemBatch()
+	s.membatch = newSyncMemBatch() // reset the batch
 	return nil
 }
 
@@ -425,6 +446,39 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
 			node: node.Val,
 			path: append(append([]byte(nil), req.path...), key...),
 		}}
+		// Mark all internal nodes between the shortNode and its **on-disk**
+		// child as invalid. This is essential in the case of path mode
+		// scheme; otherwise, state healing might overwrite existing child
+		// nodes silently while leaving a dangling parent node within the
+		// range of this internal path on disk. This would break the
+		// guarantee for state healing.
+		//
+		// While it's possible for this shortNode to overwrite a previously
+		// existing full node, the other branches of the fullNode can be
+		// retained as they remain untouched and complete.
+		//
+		// This step is only necessary for path mode, as there is no deletion
+		// in hash mode at all.
+		if _, ok := node.Val.(hashNode); ok && s.scheme == rawdb.PathScheme {
+			owner, inner := ResolvePath(req.path)
+			for i := 1; i < len(key); i++ {
+				// While checking for a non-existent item in Pebble can be less efficient
+				// without a bloom filter, the relatively low frequency of lookups makes
+				// the performance impact negligible.
+				var exists bool
+				if owner == (common.Hash{}) {
+					exists = rawdb.ExistsAccountTrieNode(s.database, append(inner, key[:i]...))
+				} else {
+					exists = rawdb.ExistsStorageTrieNode(s.database, owner, append(inner, key[:i]...))
+				}
+				if exists {
+					req.deletes = append(req.deletes, key[:i])
+					deletionGauge.Inc(1)
+					log.Debug("Detected dangling node", "owner", owner, "path", append(inner, key[:i]...))
+				}
+			}
+			lookupGauge.Inc(int64(len(key) - 1))
+		}
 	case *fullNode:
 		for i := 0; i < 17; i++ {
 			if node.Children[i] != nil {
@@ -509,10 +563,19 @@ func (s *Sync) commitNodeRequest(req *nodeRequest) error {
 	// Write the node content to the membatch
 	s.membatch.nodes[string(req.path)] = req.data
 	s.membatch.hashes[string(req.path)] = req.hash
+
 	// The size tracking refers to the db-batch, not the in-memory data.
-	// Therefore, we ignore the req.path, and account only for the hash+data
-	// which eventually is written to db.
-	s.membatch.size += common.HashLength + uint64(len(req.data))
+	if s.scheme == rawdb.PathScheme {
+		s.membatch.size += uint64(len(req.path) + len(req.data))
+	} else {
+		s.membatch.size += common.HashLength + uint64(len(req.data))
+	}
+	// Delete the internal nodes which are marked as invalid
+	for _, segment := range req.deletes {
+		path := append(req.path, segment...)
+		s.membatch.deletes[string(path)] = struct{}{}
+		s.membatch.size += uint64(len(path))
+	}
 	delete(s.nodeReqs, string(req.path))
 	s.fetches[len(req.path)]--
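To make the deletion rule concrete: when healing commits a shortNode at path 0x01 whose key compresses the nibbles 2, 3, 4, its child lands at path 0x01234, and any stale node persisted at the strictly-internal paths 0x012 and 0x0123 must go. A standalone sketch of just that path arithmetic; danglingCandidates is a hypothetical helper mirroring the loop in children above:

    package main

    import "fmt"

    // danglingCandidates returns every strictly-internal path prefix between a
    // shortNode at `parent` and its child reached via the compressed key `key`.
    func danglingCandidates(parent, key []byte) [][]byte {
        var paths [][]byte
        for i := 1; i < len(key); i++ {
            paths = append(paths, append(append([]byte(nil), parent...), key[:i]...))
        }
        return paths
    }

    func main() {
        // shortNode at nibble path [0 1] with key [2 3 4]: the child sits at
        // [0 1 2 3 4], so [0 1 2] and [0 1 2 3] may hold dangling nodes.
        for _, p := range danglingCandidates([]byte{0x0, 0x1}, []byte{0x2, 0x3, 0x4}) {
            fmt.Printf("check/delete path %x\n", p)
        }
    }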
diff --git a/trie/sync_test.go b/trie/sync_test.go
index dd3506559..3b7986ef6 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -70,31 +70,53 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[str
 
 // checkTrieContents cross references a reconstructed trie with an expected data
 // content map.
-func checkTrieContents(t *testing.T, db ethdb.Database, scheme string, root []byte, content map[string][]byte) {
+func checkTrieContents(t *testing.T, db ethdb.Database, scheme string, root []byte, content map[string][]byte, rawTrie bool) {
 	// Check root availability and trie contents
 	ndb := newTestDatabase(db, scheme)
-	trie, err := NewStateTrie(TrieID(common.BytesToHash(root)), ndb)
-	if err != nil {
-		t.Fatalf("failed to create trie at %x: %v", root, err)
-	}
-	if err := checkTrieConsistency(db, scheme, common.BytesToHash(root)); err != nil {
+	if err := checkTrieConsistency(db, scheme, common.BytesToHash(root), rawTrie); err != nil {
 		t.Fatalf("inconsistent trie at %x: %v", root, err)
 	}
+	type reader interface {
+		MustGet(key []byte) []byte
+	}
+	var r reader
+	if rawTrie {
+		trie, err := New(TrieID(common.BytesToHash(root)), ndb)
+		if err != nil {
+			t.Fatalf("failed to create trie at %x: %v", root, err)
+		}
+		r = trie
+	} else {
+		trie, err := NewStateTrie(TrieID(common.BytesToHash(root)), ndb)
+		if err != nil {
+			t.Fatalf("failed to create trie at %x: %v", root, err)
+		}
+		r = trie
+	}
 	for key, val := range content {
-		if have := trie.MustGet([]byte(key)); !bytes.Equal(have, val) {
+		if have := r.MustGet([]byte(key)); !bytes.Equal(have, val) {
 			t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
 		}
 	}
 }
 
 // checkTrieConsistency checks that all nodes in a trie are indeed present.
-func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash) error {
+func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash, rawTrie bool) error {
 	ndb := newTestDatabase(db, scheme)
-	trie, err := NewStateTrie(TrieID(root), ndb)
-	if err != nil {
-		return nil // Consider a non existent state consistent
+	var it NodeIterator
+	if rawTrie {
+		trie, err := New(TrieID(root), ndb)
+		if err != nil {
+			return nil // Consider a non-existent state consistent
+		}
+		it = trie.MustNodeIterator(nil)
+	} else {
+		trie, err := NewStateTrie(TrieID(root), ndb)
+		if err != nil {
+			return nil // Consider a non-existent state consistent
+		}
+		it = trie.MustNodeIterator(nil)
 	}
-	it := trie.MustNodeIterator(nil)
 	for it.Next(true) {
 	}
 	return it.Error()
@@ -205,7 +227,7 @@ func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) {
 		}
 	}
 	// Cross check that the two tries are in sync
-	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
 }
 
 // Tests that the trie scheduler can correctly reconstruct the state even if only
@@ -271,7 +293,7 @@ func testIterativeDelayedSync(t *testing.T, scheme string) {
 		}
 	}
 	// Cross check that the two tries are in sync
-	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
 }
 
 // Tests that given a root hash, a trie can sync iteratively on a single thread,
@@ -341,7 +363,7 @@ func testIterativeRandomSync(t *testing.T, count int, scheme string) {
 		}
 	}
 	// Cross check that the two tries are in sync
-	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
 }
 
 // Tests that the trie scheduler can correctly reconstruct the state even if only
@@ -413,7 +435,7 @@ func testIterativeRandomDelayedSync(t *testing.T, scheme string) {
 		}
 	}
 	// Cross check that the two tries are in sync
-	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
 }
 
 // Tests that a trie sync will not request nodes multiple times, even if they
@@ -484,7 +506,7 @@ func testDuplicateAvoidanceSync(t *testing.T, scheme string) {
 		}
 	}
 	// Cross check that the two tries are in sync
-	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
 }
 
 // Tests that at any point in time during a sync, only complete sub-tries are in
@@ -569,7 +591,7 @@ func testIncompleteSync(t *testing.T, scheme string) {
 		nodeHash := addedHashes[i]
 		value := rawdb.ReadTrieNode(diskdb, owner, inner, nodeHash, scheme)
 		rawdb.DeleteTrieNode(diskdb, owner, inner, nodeHash, scheme)
-		if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root); err == nil {
+		if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root, false); err == nil {
 			t.Fatalf("trie inconsistency not caught, missing: %x", path)
 		}
 		rawdb.WriteTrieNode(diskdb, owner, inner, nodeHash, value, scheme)
@@ -643,7 +665,7 @@ func testSyncOrdering(t *testing.T, scheme string) {
 		}
 	}
 	// Cross check that the two tries are in sync
-	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
 
 	// Check that the trie nodes have been requested path-ordered
 	for i := 0; i < len(reqs)-1; i++ {
@@ -664,7 +686,7 @@ func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database
 
 	// The code requests are ignored here since there is no code
 	// at the testing trie.
-	paths, nodes, _ := sched.Missing(1)
+	paths, nodes, _ := sched.Missing(0)
 	var elements []trieElement
 	for i := 0; i < len(paths); i++ {
 		elements = append(elements, trieElement{
@@ -698,7 +720,7 @@ func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database
 		}
 		batch.Write()
 
-		paths, nodes, _ = sched.Missing(1)
+		paths, nodes, _ = sched.Missing(0)
 		elements = elements[:0]
 		for i := 0; i < len(paths); i++ {
 			elements = append(elements, trieElement{
@@ -724,7 +746,7 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
 	// Create a destination trie and sync with the scheduler
 	diskdb := rawdb.NewMemoryDatabase()
 	syncWith(t, srcTrie.Hash(), diskdb, srcDb)
-	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
 
 	// Push more modifications into the src trie, to see if dest trie can still
 	// sync with it(overwrite stale states)
@@ -748,7 +770,7 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
 	srcTrie, _ = NewStateTrie(TrieID(root), srcDb)
 
 	syncWith(t, srcTrie.Hash(), diskdb, srcDb)
-	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff)
+	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff, false)
 
 	// Revert added modifications from the src trie, to see if dest trie can still
 	// sync with it(overwrite reverted states)
@@ -772,5 +794,98 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
 	srcTrie, _ = NewStateTrie(TrieID(root), srcDb)
 
 	syncWith(t, srcTrie.Hash(), diskdb, srcDb)
-	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted)
+	checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted, false)
+}
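The syncWith helper now drains the scheduler with Missing(0) rather than one node per round; a max of zero appears to place no cap on the returned batch, so each round fetches everything currently pending. A condensed sketch of that drain loop, with the peer fetch replaced by a hypothetical retrieve callback:

    package example

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/ethdb"
        "github.com/ethereum/go-ethereum/trie"
    )

    // drain loops until the scheduler reports nothing missing, committing the
    // membatch to disk after every round. retrieve stands in for a peer fetch.
    func drain(sched *trie.Sync, db ethdb.Database, retrieve func(path string, hash common.Hash) []byte) error {
        paths, nodes, _ := sched.Missing(0) // 0 = no cap on the batch
        for len(paths) > 0 {
            for i, path := range paths {
                if err := sched.ProcessNode(trie.NodeSyncResult{Path: path, Data: retrieve(path, nodes[i])}); err != nil {
                    return err
                }
            }
            batch := db.NewBatch()
            if err := sched.Commit(batch); err != nil {
                return err
            }
            if err := batch.Write(); err != nil {
                return err
            }
            paths, nodes, _ = sched.Missing(0)
        }
        return nil
    }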
+
+// Tests if the state syncer can correctly catch up with the pivot move.
+func TestPivotMove(t *testing.T) {
+	testPivotMove(t, rawdb.HashScheme, true)
+	testPivotMove(t, rawdb.HashScheme, false)
+	testPivotMove(t, rawdb.PathScheme, true)
+	testPivotMove(t, rawdb.PathScheme, false)
+}
+
+func testPivotMove(t *testing.T, scheme string, tiny bool) {
+	var (
+		srcDisk    = rawdb.NewMemoryDatabase()
+		srcTrieDB  = newTestDatabase(srcDisk, scheme)
+		srcTrie, _ = New(TrieID(types.EmptyRootHash), srcTrieDB)
+
+		deleteFn = func(key []byte, tr *Trie, states map[string][]byte) {
+			tr.Delete(key)
+			delete(states, string(key))
+		}
+		writeFn = func(key []byte, val []byte, tr *Trie, states map[string][]byte) {
+			if val == nil {
+				if tiny {
+					val = randBytes(4)
+				} else {
+					val = randBytes(32)
+				}
+			}
+			tr.Update(key, val)
+			states[string(key)] = common.CopyBytes(val)
+		}
+		copyStates = func(states map[string][]byte) map[string][]byte {
+			cpy := make(map[string][]byte)
+			for k, v := range states {
+				cpy[k] = v
+			}
+			return cpy
+		}
+	)
+	stateA := make(map[string][]byte)
+	writeFn([]byte{0x01, 0x23}, nil, srcTrie, stateA)
+	writeFn([]byte{0x01, 0x24}, nil, srcTrie, stateA)
+	writeFn([]byte{0x12, 0x33}, nil, srcTrie, stateA)
+	writeFn([]byte{0x12, 0x34}, nil, srcTrie, stateA)
+	writeFn([]byte{0x02, 0x34}, nil, srcTrie, stateA)
+	writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateA)
+
+	rootA, nodesA, _ := srcTrie.Commit(false)
+	if err := srcTrieDB.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil); err != nil {
+		panic(err)
+	}
+	if err := srcTrieDB.Commit(rootA, false); err != nil {
+		panic(err)
+	}
+	// Create a destination trie and sync with the scheduler
+	destDisk := rawdb.NewMemoryDatabase()
+	syncWith(t, rootA, destDisk, srcTrieDB)
+	checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateA, true)
+
+	// Delete elements to collapse the trie
+	stateB := copyStates(stateA)
+	srcTrie, _ = New(TrieID(rootA), srcTrieDB)
+	deleteFn([]byte{0x02, 0x34}, srcTrie, stateB)
+	deleteFn([]byte{0x13, 0x44}, srcTrie, stateB)
+	writeFn([]byte{0x01, 0x24}, nil, srcTrie, stateB)
+
+	rootB, nodesB, _ := srcTrie.Commit(false)
+	if err := srcTrieDB.Update(rootB, rootA, 0, trienode.NewWithNodeSet(nodesB), nil); err != nil {
+		panic(err)
+	}
+	if err := srcTrieDB.Commit(rootB, false); err != nil {
+		panic(err)
+	}
+	syncWith(t, rootB, destDisk, srcTrieDB)
+	checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateB, true)
+
+	// Add elements to expand the trie
+	stateC := copyStates(stateB)
+	srcTrie, _ = New(TrieID(rootB), srcTrieDB)
+
+	writeFn([]byte{0x01, 0x24}, stateA[string([]byte{0x01, 0x24})], srcTrie, stateC)
+	writeFn([]byte{0x02, 0x34}, nil, srcTrie, stateC)
+	writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateC)
+
+	rootC, nodesC, _ := srcTrie.Commit(false)
+	if err := srcTrieDB.Update(rootC, rootB, 0, trienode.NewWithNodeSet(nodesC), nil); err != nil {
+		panic(err)
+	}
+	if err := srcTrieDB.Commit(rootC, false); err != nil {
+		panic(err)
+	}
+	syncWith(t, rootC, destDisk, srcTrieDB)
+	checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateC, true)
 }
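The tiny variant stores 4-byte instead of 32-byte values, which matters because a trie node whose RLP encoding is shorter than 32 bytes is embedded in its parent rather than stored (and synced) as a standalone hashed node, so both trie shapes get exercised. A rough self-contained illustration of that threshold; the two-item list only approximates a leaf encoding:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/rlp"
    )

    func main() {
        for _, val := range [][]byte{make([]byte, 4), make([]byte, 32)} {
            // Approximate leaf shape: [compact-encoded key, value].
            enc, _ := rlp.EncodeToBytes([][]byte{{0x20, 0x12, 0x34}, val})
            fmt.Printf("value len %2d -> leaf RLP %2d bytes, embedded: %v\n",
                len(val), len(enc), len(enc) < 32)
        }
    }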
diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go
index 18cc36ffc..dc64414e9 100644
--- a/trie/triedb/pathdb/database.go
+++ b/trie/triedb/pathdb/database.go
@@ -128,7 +128,8 @@ type Database struct {
 	// readOnly is the flag whether the mutation is allowed to be applied.
 	// It will be set automatically when the database is journaled during
 	// the shutdown to reject all following unexpected mutations.
-	readOnly   bool           // Indicator if database is opened in read only mode
+	readOnly   bool           // Flag if database is opened in read only mode
+	waitSync   bool           // Flag if database is deactivated due to initial state sync
 	bufferSize int            // Memory allowance (in bytes) for caching dirty nodes
 	config     *Config        // Configuration for database
 	diskdb     ethdb.Database // Persistent storage for matured trie nodes
@@ -179,6 +180,12 @@ func New(diskdb ethdb.Database, config *Config) *Database {
 			log.Warn("Truncated extra state histories", "number", pruned)
 		}
 	}
+	// Disable database in case the node is still in the initial state sync stage.
+	if rawdb.ReadSnapSyncStatusFlag(diskdb) == rawdb.StateSyncRunning && !db.readOnly {
+		if err := db.Disable(); err != nil {
+			log.Crit("Failed to disable database", "err", err) // impossible to happen
+		}
+	}
 	log.Warn("Path-based state scheme is an experimental feature")
 	return db
 }
@@ -204,9 +211,9 @@ func (db *Database) Update(root common.Hash, parentRoot common.Hash, block uint6
 	db.lock.Lock()
 	defer db.lock.Unlock()
 
-	// Short circuit if the database is in read only mode.
-	if db.readOnly {
-		return errSnapshotReadOnly
+	// Short circuit if the mutation is not allowed.
+	if err := db.modifyAllowed(); err != nil {
+		return err
 	}
 	if err := db.tree.add(root, parentRoot, block, nodes, states); err != nil {
 		return err
@@ -227,45 +234,59 @@ func (db *Database) Commit(root common.Hash, report bool) error {
 	db.lock.Lock()
 	defer db.lock.Unlock()
 
-	// Short circuit if the database is in read only mode.
-	if db.readOnly {
-		return errSnapshotReadOnly
+	// Short circuit if the mutation is not allowed.
+	if err := db.modifyAllowed(); err != nil {
+		return err
 	}
 	return db.tree.cap(root, 0)
 }
 
-// Reset rebuilds the database with the specified state as the base.
-//
-// - if target state is empty, clear the stored state and all layers on top
-// - if target state is non-empty, ensure the stored state matches with it
-//   and clear all other layers on top.
-func (db *Database) Reset(root common.Hash) error {
+// Disable deactivates the database and invalidates all available state layers
+// as stale to prevent access to the persistent state, which is in the syncing
+// stage.
+func (db *Database) Disable() error {
 	db.lock.Lock()
 	defer db.lock.Unlock()
 
 	// Short circuit if the database is in read only mode.
 	if db.readOnly {
-		return errSnapshotReadOnly
+		return errDatabaseReadOnly
 	}
-	batch := db.diskdb.NewBatch()
-	root = types.TrieRootHash(root)
-	if root == types.EmptyRootHash {
-		// Empty state is requested as the target, nuke out
-		// the root node and leave all others as dangling.
-		rawdb.DeleteAccountTrieNode(batch, nil)
-	} else {
-		// Ensure the requested state is existent before any
-		// action is applied.
-		_, hash := rawdb.ReadAccountTrieNode(db.diskdb, nil)
-		if hash != root {
-			return fmt.Errorf("state is mismatched, local: %x, target: %x", hash, root)
-		}
+	// Prevent duplicated disable operation.
+	if db.waitSync {
+		log.Error("Reject duplicated disable operation")
+		return nil
 	}
-	// Mark the disk layer as stale before applying any mutation.
+	db.waitSync = true
+
+	// Mark the disk layer as stale to prevent access to persistent state.
 	db.tree.bottom().markStale()
 
+	// Write the initial sync flag to persist it across restarts.
+	rawdb.WriteSnapSyncStatusFlag(db.diskdb, rawdb.StateSyncRunning)
+	log.Info("Disabled trie database due to state sync")
+	return nil
+}
+
+// Enable activates the database and resets the state tree with the provided
+// persistent state root once the state sync is finished.
+func (db *Database) Enable(root common.Hash) error {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
+	// Short circuit if the database is in read only mode.
+	if db.readOnly {
+		return errDatabaseReadOnly
+	}
+	// Ensure the provided state root matches the stored one.
+	root = types.TrieRootHash(root)
+	_, stored := rawdb.ReadAccountTrieNode(db.diskdb, nil)
+	if stored != root {
+		return fmt.Errorf("state root mismatch: stored %x, synced %x", stored, root)
+	}
 	// Drop the stale state journal in persistent database and
 	// reset the persistent state id back to zero.
+	batch := db.diskdb.NewBatch()
 	rawdb.DeleteTrieJournal(batch)
 	rawdb.WritePersistentStateID(batch, 0)
 	if err := batch.Write(); err != nil {
@@ -282,8 +303,11 @@ func (db *Database) Reset(root common.Hash) error {
 	}
 	// Re-construct a new disk layer backed by persistent state
 	// with **empty clean cache and node buffer**.
-	dl := newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0))
-	db.tree.reset(dl)
+	db.tree.reset(newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0)))
+
+	// Re-enable the database as the final step.
+	db.waitSync = false
+	rawdb.WriteSnapSyncStatusFlag(db.diskdb, rawdb.StateSyncFinished)
 	log.Info("Rebuilt trie database", "root", root)
 	return nil
 }
@@ -296,7 +320,10 @@ func (db *Database) Recover(root common.Hash, loader triestate.TrieLoader) error
 	defer db.lock.Unlock()
 
 	// Short circuit if rollback operation is not supported.
-	if db.readOnly || db.freezer == nil {
+	if err := db.modifyAllowed(); err != nil {
+		return err
+	}
+	if db.freezer == nil {
 		return errors.New("state rollback is non-supported")
 	}
 	// Short circuit if the target state is not recoverable.
@@ -424,3 +451,15 @@ func (db *Database) SetBufferSize(size int) error {
 func (db *Database) Scheme() string {
 	return rawdb.PathScheme
 }
+
+// modifyAllowed returns an error if mutation is not allowed. This function
+// assumes the db.lock is already held.
+func (db *Database) modifyAllowed() error {
+	if db.readOnly {
+		return errDatabaseReadOnly
+	}
+	if db.waitSync {
+		return errDatabaseWaitSync
+	}
+	return nil
+}
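Every mutating entry point of the path database now funnels through modifyAllowed, so callers observe a typed error while the initial sync is running. A hypothetical in-package sketch of the guard's behavior, reusing the newTester harness from database_test.go and assuming that file's usual imports:

    // TestWaitSyncGuard is a hypothetical in-package test, not part of this patch.
    func TestWaitSyncGuard(t *testing.T) {
        tester := newTester(t)
        defer tester.release()

        if err := tester.db.Disable(); err != nil {
            t.Fatalf("disable: %v", err)
        }
        // While waitSync is set, mutations are rejected with errDatabaseWaitSync.
        if err := tester.db.Commit(common.Hash{}, false); !errors.Is(err, errDatabaseWaitSync) {
            t.Fatalf("want errDatabaseWaitSync, got %v", err)
        }
        // Enable with the stored root lifts the guard again.
        _, stored := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)
        if err := tester.db.Enable(stored); err != nil {
            t.Fatalf("enable: %v", err)
        }
    }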
diff --git a/trie/triedb/pathdb/database_test.go b/trie/triedb/pathdb/database_test.go
index 6d346d20e..912364f7f 100644
--- a/trie/triedb/pathdb/database_test.go
+++ b/trie/triedb/pathdb/database_test.go
@@ -439,38 +439,39 @@ func TestDatabaseRecoverable(t *testing.T) {
 	}
 }
 
-func TestReset(t *testing.T) {
-	var (
-		tester = newTester(t)
-		index  = tester.bottomIndex()
-	)
+func TestDisable(t *testing.T) {
+	tester := newTester(t)
 	defer tester.release()
 
-	// Reset database to unknown target, should reject it
-	if err := tester.db.Reset(testutil.RandomHash()); err == nil {
-		t.Fatal("Failed to reject invalid reset")
+	_, stored := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)
+	if err := tester.db.Disable(); err != nil {
+		t.Fatal("Failed to deactivate database")
 	}
-	// Reset database to state persisted in the disk
-	if err := tester.db.Reset(types.EmptyRootHash); err != nil {
-		t.Fatalf("Failed to reset database %v", err)
+	if err := tester.db.Enable(types.EmptyRootHash); err == nil {
+		t.Fatalf("Invalid activation should be rejected")
 	}
+	if err := tester.db.Enable(stored); err != nil {
+		t.Fatal("Failed to activate database")
+	}
+	// Ensure journal is deleted from disk
 	if blob := rawdb.ReadTrieJournal(tester.db.diskdb); len(blob) != 0 {
 		t.Fatal("Failed to clean journal")
 	}
 	// Ensure all trie histories are removed
-	for i := 0; i <= index; i++ {
-		_, err := readHistory(tester.db.freezer, uint64(i+1))
-		if err == nil {
-			t.Fatalf("Failed to clean state history, index %d", i+1)
-		}
+	n, err := tester.db.freezer.Ancients()
+	if err != nil {
+		t.Fatal("Failed to clean state history")
+	}
+	if n != 0 {
+		t.Fatal("Failed to clean state history")
 	}
 	// Verify layer tree structure, single disk layer is expected
 	if tester.db.tree.len() != 1 {
 		t.Fatalf("Extra layer kept %d", tester.db.tree.len())
 	}
-	if tester.db.tree.bottom().rootHash() != types.EmptyRootHash {
-		t.Fatalf("Root hash is not matched exp %x got %x", types.EmptyRootHash, tester.db.tree.bottom().rootHash())
+	if tester.db.tree.bottom().rootHash() != stored {
+		t.Fatalf("Root hash is not matched exp %x got %x", stored, tester.db.tree.bottom().rootHash())
 	}
 }
diff --git a/trie/triedb/pathdb/difflayer.go b/trie/triedb/pathdb/difflayer.go
index d25ac1c60..10567715d 100644
--- a/trie/triedb/pathdb/difflayer.go
+++ b/trie/triedb/pathdb/difflayer.go
@@ -114,7 +114,7 @@ func (dl *diffLayer) node(owner common.Hash, path []byte, hash common.Hash, dept
 		if n.Hash != hash {
 			dirtyFalseMeter.Mark(1)
 			log.Error("Unexpected trie node in diff layer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
-			return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path)
+			return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path, n.Blob)
 		}
 		dirtyHitMeter.Mark(1)
 		dirtyNodeHitDepthHist.Update(int64(depth))
diff --git a/trie/triedb/pathdb/disklayer.go b/trie/triedb/pathdb/disklayer.go
index 87718290f..d3b6419cc 100644
--- a/trie/triedb/pathdb/disklayer.go
+++ b/trie/triedb/pathdb/disklayer.go
@@ -150,7 +150,7 @@ func (dl *diskLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]b
 	if nHash != hash {
 		diskFalseMeter.Mark(1)
 		log.Error("Unexpected trie node in disk", "owner", owner, "path", path, "expect", hash, "got", nHash)
-		return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path)
+		return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path, nBlob)
 	}
 	if dl.cleans != nil && len(nBlob) > 0 {
 		dl.cleans.Set(key, nBlob)
diff --git a/trie/triedb/pathdb/errors.go b/trie/triedb/pathdb/errors.go
index f503a9c49..78ee4459f 100644
--- a/trie/triedb/pathdb/errors.go
+++ b/trie/triedb/pathdb/errors.go
@@ -21,12 +21,17 @@ import (
 	"fmt"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
 )
 
 var (
-	// errSnapshotReadOnly is returned if the database is opened in read only mode
-	// and mutation is requested.
-	errSnapshotReadOnly = errors.New("read only")
+	// errDatabaseReadOnly is returned if the database is opened in read only mode
+	// to prevent any mutation.
+	errDatabaseReadOnly = errors.New("read only")
+
+	// errDatabaseWaitSync is returned if the initial state sync is not completed
+	// yet and the database is disabled to prevent accessing state.
+	errDatabaseWaitSync = errors.New("waiting for sync")
 
 	// errSnapshotStale is returned from data accessors if the underlying
 	// layer had been invalidated due to the chain progressing forward far enough
@@ -46,6 +51,10 @@ var (
 	errUnexpectedNode = errors.New("unexpected node")
 )
 
-func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte) error {
-	return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x", errUnexpectedNode, loc, owner, path, expHash, gotHash)
+func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte, blob []byte) error {
+	blobHex := "nil"
+	if len(blob) > 0 {
+		blobHex = hexutil.Encode(blob)
+	}
+	return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x, blob: %s", errUnexpectedNode, loc, owner, path, expHash, gotHash, blobHex)
 }
diff --git a/trie/triedb/pathdb/journal.go b/trie/triedb/pathdb/journal.go
index ea90207f2..ac770763e 100644
--- a/trie/triedb/pathdb/journal.go
+++ b/trie/triedb/pathdb/journal.go
@@ -356,7 +356,7 @@ func (db *Database) Journal(root common.Hash) error {
 
 	// Short circuit if the database is in read only mode.
 	if db.readOnly {
-		return errSnapshotReadOnly
+		return errDatabaseReadOnly
 	}
 	// Firstly write out the metadata of journal
 	journal := new(bytes.Buffer)
diff --git a/trie/triedb/pathdb/nodebuffer.go b/trie/triedb/pathdb/nodebuffer.go
index 67de225b0..4a7d328b9 100644
--- a/trie/triedb/pathdb/nodebuffer.go
+++ b/trie/triedb/pathdb/nodebuffer.go
@@ -71,7 +71,7 @@ func (b *nodebuffer) node(owner common.Hash, path []byte, hash common.Hash) (*tr
 	if n.Hash != hash {
 		dirtyFalseMeter.Mark(1)
 		log.Error("Unexpected trie node in node buffer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
-		return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path)
+		return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path, n.Blob)
 	}
 	return n, nil
 }
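With the node blob threaded through, an unexpected-node report is now diagnosable offline. A standalone sketch reproducing the new error format with stand-in values, outside the pathdb package:

    package main

    import (
        "errors"
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/common/hexutil"
    )

    var errUnexpectedNode = errors.New("unexpected node")

    func main() {
        var (
            owner = common.Hash{}            // zero owner denotes the account trie
            path  = []byte{0x1, 0x2}         // node path in nibbles
            exp   = common.HexToHash("0xaa") // hash the lookup expected
            got   = common.HexToHash("0xbb") // hash of the blob actually found
            blob  = []byte{0xc0}             // offending node body
        )
        err := fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x, blob: %s",
            errUnexpectedNode, "disk", owner, path, exp, got, hexutil.Encode(blob))
        fmt.Println(err)
    }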