forked from cerc-io/plugeth
cmd/geth, eth, core: snapshot dump + unify with trie dump (#22795)
* cmd/geth, eth, core: snapshot dump + unify with trie dump
* cmd/evm: dump API fixes
* cmd/geth, core, eth: fix some remaining errors
* cmd/evm: dump - add limit, support address startkey, address review concerns
* cmd, core/state, eth: minor polishes, fix snap dump crash, unify format

Co-authored-by: Péter Szilágyi <peterke@gmail.com>
parent 1cca781a02
commit addd8824cf
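For orientation before the diff: the commit replaces the old boolean-parameter dump calls (excludeCode, excludeStorage, excludeMissingPreimages, start, maxResults) with a single state.DumpConfig. A minimal sketch of how a caller looks after the change, assuming an already-constructed *state.StateDB; the package and helper names (example, dumpExample) are illustrative only:

package example

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/state"
)

// dumpExample shows the DumpConfig-based call that replaces the old
// Dump(excludeCode, excludeStorage, excludeMissingPreimages) form.
func dumpExample(statedb *state.StateDB) {
    conf := &state.DumpConfig{
        SkipCode:          true,  // old excludeCode
        SkipStorage:       false, // old excludeStorage
        OnlyWithAddresses: true,  // old excludeMissingPreimages
        Max:               100,   // new: stop after 100 accounts (0 = no limit)
    }
    fmt.Println(string(statedb.Dump(conf)))
}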
@@ -212,16 +212,15 @@ func Main(ctx *cli.Context) error {
     // Iterate over all the tests, run them and aggregate the results
     // Run the test and aggregate the result
-    state, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer)
+    s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer)
     if err != nil {
         return err
     }
     body, _ := rlp.EncodeToBytes(txs)
     // Dump the excution result
     collector := make(Alloc)
-    state.DumpToCollector(collector, false, false, false, nil, -1)
+    s.DumpToCollector(collector, nil)
     return dispatchOutput(ctx, baseDir, result, collector, body)
 }

 // txWithKey is a helper-struct, to allow us to use the types.Transaction along with
@@ -303,7 +302,7 @@ func (g Alloc) OnAccount(addr common.Address, dumpAccount state.DumpAccount) {
         }
     }
     genesisAccount := core.GenesisAccount{
-        Code:    common.FromHex(dumpAccount.Code),
+        Code:    dumpAccount.Code,
         Storage: storage,
         Balance: balance,
         Nonce:   dumpAccount.Nonce,
@@ -270,7 +270,7 @@ func runCmd(ctx *cli.Context) error {
     if ctx.GlobalBool(DumpFlag.Name) {
         statedb.Commit(true)
         statedb.IntermediateRoot(true)
-        fmt.Println(string(statedb.Dump(false, false, true)))
+        fmt.Println(string(statedb.Dump(nil)))
     }

     if memProfilePath := ctx.GlobalString(MemProfileFlag.Name); memProfilePath != "" {
@@ -98,16 +98,16 @@ func stateTestCmd(ctx *cli.Context) error {
     for _, st := range test.Subtests() {
         // Run the test and aggregate the result
         result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
-        _, state, err := test.Run(st, cfg, false)
+        _, s, err := test.Run(st, cfg, false)
         // print state root for evmlab tracing
-        if ctx.GlobalBool(MachineFlag.Name) && state != nil {
-            fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
+        if ctx.GlobalBool(MachineFlag.Name) && s != nil {
+            fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", s.IntermediateRoot(false))
         }
         if err != nil {
             // Test failed, mark as so and dump any state to aid debugging
             result.Pass, result.Error = false, err.Error()
-            if ctx.GlobalBool(DumpFlag.Name) && state != nil {
-                dump := state.RawDump(false, false, true)
+            if ctx.GlobalBool(DumpFlag.Name) && s != nil {
+                dump := s.RawDump(nil)
                 result.State = &dump
             }
         }
@@ -18,6 +18,7 @@ package main

 import (
     "encoding/json"
+    "errors"
     "fmt"
     "os"
     "runtime"
@@ -27,12 +28,16 @@ import (

     "github.com/ethereum/go-ethereum/cmd/utils"
     "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/common/hexutil"
     "github.com/ethereum/go-ethereum/core"
     "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/core/state"
     "github.com/ethereum/go-ethereum/core/types"
+    "github.com/ethereum/go-ethereum/crypto"
+    "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/metrics"
+    "github.com/ethereum/go-ethereum/node"
     "gopkg.in/urfave/cli.v1"
 )
@@ -152,20 +157,21 @@ The export-preimages command export hash preimages to an RLP encoded stream`,
         Action:    utils.MigrateFlags(dump),
         Name:      "dump",
         Usage:     "Dump a specific block from storage",
-        ArgsUsage: "[<blockHash> | <blockNum>]...",
+        ArgsUsage: "[? <blockHash> | <blockNum>]",
         Flags: []cli.Flag{
             utils.DataDirFlag,
             utils.CacheFlag,
             utils.SyncModeFlag,
             utils.IterativeOutputFlag,
             utils.ExcludeCodeFlag,
             utils.ExcludeStorageFlag,
             utils.IncludeIncompletesFlag,
+            utils.StartKeyFlag,
+            utils.DumpLimitFlag,
         },
         Category: "BLOCKCHAIN COMMANDS",
         Description: `
-The arguments are interpreted as block numbers or hashes.
-Use "ethereum dump 0" to dump the genesis block.`,
+This command dumps out the state for a given block (or latest, if none provided).
+`,
     }
 )
@@ -373,47 +379,85 @@ func exportPreimages(ctx *cli.Context) error {
     return nil
 }

+func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
+    db := utils.MakeChainDatabase(ctx, stack, true)
+    var header *types.Header
+    if ctx.NArg() > 1 {
+        return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
+    }
+    if ctx.NArg() == 1 {
+        arg := ctx.Args().First()
+        if hashish(arg) {
+            hash := common.HexToHash(arg)
+            if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
+                header = rawdb.ReadHeader(db, hash, *number)
+            } else {
+                return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
+            }
+        } else {
+            number, err := strconv.Atoi(arg)
+            if err != nil {
+                return nil, nil, common.Hash{}, err
+            }
+            if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) {
+                header = rawdb.ReadHeader(db, hash, uint64(number))
+            } else {
+                return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
+            }
+        }
+    } else {
+        // Use latest
+        header = rawdb.ReadHeadHeader(db)
+    }
+    if header == nil {
+        return nil, nil, common.Hash{}, errors.New("no head block found")
+    }
+    startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
+    var start common.Hash
+    switch len(startArg) {
+    case 0: // common.Hash
+    case 32:
+        start = common.BytesToHash(startArg)
+    case 20:
+        start = crypto.Keccak256Hash(startArg)
+        log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
+    default:
+        return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
+    }
+    var conf = &state.DumpConfig{
+        SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
+        SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
+        OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
+        Start:             start.Bytes(),
+        Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
+    }
+    log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
+        "skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
+        "start", hexutil.Encode(conf.Start), "limit", conf.Max)
+    return conf, db, header.Root, nil
+}
+
 func dump(ctx *cli.Context) error {
     stack, _ := makeConfigNode(ctx)
     defer stack.Close()

-    db := utils.MakeChainDatabase(ctx, stack, true)
-    for _, arg := range ctx.Args() {
-        var header *types.Header
-        if hashish(arg) {
-            hash := common.HexToHash(arg)
-            number := rawdb.ReadHeaderNumber(db, hash)
-            if number != nil {
-                header = rawdb.ReadHeader(db, hash, *number)
-            }
-        } else {
-            number, _ := strconv.Atoi(arg)
-            hash := rawdb.ReadCanonicalHash(db, uint64(number))
-            if hash != (common.Hash{}) {
-                header = rawdb.ReadHeader(db, hash, uint64(number))
-            }
-        }
-        if header == nil {
-            fmt.Println("{}")
-            utils.Fatalf("block not found")
-        } else {
-            state, err := state.New(header.Root, state.NewDatabase(db), nil)
+    conf, db, root, err := parseDumpConfig(ctx, stack)
     if err != nil {
-        utils.Fatalf("could not create new state: %v", err)
+        return err
     }
+    state, err := state.New(root, state.NewDatabase(db), nil)
+    if err != nil {
+        return err
+    }
-    excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
-    excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
-    includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
     if ctx.Bool(utils.IterativeOutputFlag.Name) {
-        state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
+        state.IterativeDump(conf, json.NewEncoder(os.Stdout))
     } else {
-        if includeMissing {
-            fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
+        if conf.OnlyWithAddresses {
+            fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
                 " otherwise the accounts will overwrite each other in the resulting mapping.")
+            return fmt.Errorf("incompatible options")
         }
-        fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
-        }
-    }
+        fmt.Println(string(state.Dump(conf)))
     }
     return nil
 }
@@ -18,7 +18,9 @@ package main

 import (
     "bytes"
+    "encoding/json"
     "errors"
+    "os"
     "time"

     "github.com/ethereum/go-ethereum/cmd/utils"
@@ -142,6 +144,31 @@ verification. The default checking target is the HEAD state. It's basically iden
 to traverse-state, but the check granularity is smaller.

 It's also usable without snapshot enabled.
 `,
         },
+        {
+            Name:      "dump",
+            Usage:     "Dump a specific block from storage (same as 'geth dump' but using snapshots)",
+            ArgsUsage: "[? <blockHash> | <blockNum>]",
+            Action:    utils.MigrateFlags(dumpState),
+            Category:  "MISCELLANEOUS COMMANDS",
+            Flags: []cli.Flag{
+                utils.DataDirFlag,
+                utils.AncientFlag,
+                utils.RopstenFlag,
+                utils.RinkebyFlag,
+                utils.GoerliFlag,
+                utils.ExcludeCodeFlag,
+                utils.ExcludeStorageFlag,
+                utils.StartKeyFlag,
+                utils.DumpLimitFlag,
+            },
+            Description: `
+This command is semantically equivalent to 'geth dump', but uses the snapshots
+as the backend data source, making this command a lot faster.
+
+The argument is interpreted as block number or hash. If none is provided, the latest
+block is used.
+`,
+        },
     },
@@ -430,3 +457,73 @@ func parseRoot(input string) (common.Hash, error) {
     }
     return h, nil
 }
+
+func dumpState(ctx *cli.Context) error {
+    stack, _ := makeConfigNode(ctx)
+    defer stack.Close()
+
+    conf, db, root, err := parseDumpConfig(ctx, stack)
+    if err != nil {
+        return err
+    }
+    snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false)
+    if err != nil {
+        return err
+    }
+    accIt, err := snaptree.AccountIterator(root, common.BytesToHash(conf.Start))
+    if err != nil {
+        return err
+    }
+    defer accIt.Release()
+
+    log.Info("Snapshot dumping started", "root", root)
+    var (
+        start    = time.Now()
+        logged   = time.Now()
+        accounts uint64
+    )
+    enc := json.NewEncoder(os.Stdout)
+    enc.Encode(struct {
+        Root common.Hash `json:"root"`
+    }{root})
+    for accIt.Next() {
+        account, err := snapshot.FullAccount(accIt.Account())
+        if err != nil {
+            return err
+        }
+        da := &state.DumpAccount{
+            Balance:   account.Balance.String(),
+            Nonce:     account.Nonce,
+            Root:      account.Root,
+            CodeHash:  account.CodeHash,
+            SecureKey: accIt.Hash().Bytes(),
+        }
+        if !conf.SkipCode && !bytes.Equal(account.CodeHash, emptyCode) {
+            da.Code = rawdb.ReadCode(db, common.BytesToHash(account.CodeHash))
+        }
+        if !conf.SkipStorage {
+            da.Storage = make(map[common.Hash]string)
+
+            stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{})
+            if err != nil {
+                return err
+            }
+            for stIt.Next() {
+                da.Storage[stIt.Hash()] = common.Bytes2Hex(stIt.Slot())
+            }
+        }
+        enc.Encode(da)
+        accounts++
+        if time.Since(logged) > 8*time.Second {
+            log.Info("Snapshot dumping in progress", "at", accIt.Hash(), "accounts", accounts,
+                "elapsed", common.PrettyDuration(time.Since(start)))
+            logged = time.Now()
+        }
+        if conf.Max > 0 && accounts >= conf.Max {
+            break
+        }
+    }
+    log.Info("Snapshot dumping complete", "accounts", accounts,
+        "elapsed", common.PrettyDuration(time.Since(start)))
+    return nil
+}
@@ -184,7 +184,7 @@ var (
         Name:  "exitwhensynced",
         Usage: "Exits after block synchronisation completes",
     }
-    IterativeOutputFlag = cli.BoolFlag{
+    IterativeOutputFlag = cli.BoolTFlag{
         Name:  "iterative",
         Usage: "Print streaming JSON iteratively, delimited by newlines",
     }
@@ -200,6 +200,16 @@ var (
         Name:  "nocode",
         Usage: "Exclude contract code (save db lookups)",
     }
+    StartKeyFlag = cli.StringFlag{
+        Name:  "start",
+        Usage: "Start position. Either a hash or address",
+        Value: "0x0000000000000000000000000000000000000000000000000000000000000000",
+    }
+    DumpLimitFlag = cli.Uint64Flag{
+        Name:  "limit",
+        Usage: "Max number of elements (0 = no limit)",
+        Value: 0,
+    }
     defaultSyncMode = ethconfig.Defaults.SyncMode
     SyncModeFlag    = TextMarshalerFlag{
         Name: "syncmode",
@@ -19,6 +19,7 @@ package state
 import (
     "encoding/json"
     "fmt"
+    "time"

     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/common/hexutil"
@@ -27,6 +28,16 @@ import (
     "github.com/ethereum/go-ethereum/trie"
 )

+// DumpConfig is a set of options to control what portions of the state will be
+// iterated and collected.
+type DumpConfig struct {
+    SkipCode          bool
+    SkipStorage       bool
+    OnlyWithAddresses bool
+    Start             []byte
+    Max               uint64
+}
+
 // DumpCollector interface which the state trie calls during iteration
 type DumpCollector interface {
     // OnRoot is called with the state root
@@ -39,9 +50,9 @@ type DumpCollector interface {
 type DumpAccount struct {
     Balance string `json:"balance"`
     Nonce   uint64 `json:"nonce"`
-    Root     string `json:"root"`
-    CodeHash string `json:"codeHash"`
-    Code     string `json:"code,omitempty"`
+    Root      hexutil.Bytes `json:"root"`
+    CodeHash  hexutil.Bytes `json:"codeHash"`
+    Code      hexutil.Bytes `json:"code,omitempty"`
     Storage   map[common.Hash]string `json:"storage,omitempty"`
     Address   *common.Address        `json:"address,omitempty"` // Address only present in iterative (line-by-line) mode
     SecureKey hexutil.Bytes          `json:"key,omitempty"`     // If we don't have address, we can output the key
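Because DumpToCollector now takes a single *DumpConfig, any DumpCollector implementation is driven the same way. A hedged sketch of a third-party collector, assuming only the interface shape visible in this diff (OnRoot(common.Hash) and OnAccount(common.Address, DumpAccount)); the countingCollector and countAccounts names are hypothetical:

package example

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/state"
)

// countingCollector records the state root and counts the accounts it is fed,
// instead of serializing them.
type countingCollector struct {
    root     common.Hash
    accounts int
}

func (c *countingCollector) OnRoot(root common.Hash) { c.root = root }

func (c *countingCollector) OnAccount(addr common.Address, acc state.DumpAccount) {
    c.accounts++
}

// countAccounts walks the account trie without pulling code or storage.
func countAccounts(statedb *state.StateDB) {
    c := new(countingCollector)
    statedb.DumpToCollector(c, &state.DumpConfig{SkipCode: true, SkipStorage: true})
    fmt.Printf("root %x: %d accounts\n", c.root, c.accounts)
}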
@@ -111,12 +122,23 @@ func (d iterativeDump) OnRoot(root common.Hash) {
     }{root})
 }

-func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage, excludeMissingPreimages bool, start []byte, maxResults int) (nextKey []byte) {
-    missingPreimages := 0
+// DumpToCollector iterates the state according to the given options and inserts
+// the items into a collector for aggregation or serialization.
+func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []byte) {
+    // Sanitize the input to allow nil configs
+    if conf == nil {
+        conf = new(DumpConfig)
+    }
+    var (
+        missingPreimages int
+        accounts         uint64
+        start            = time.Now()
+        logged           = time.Now()
+    )
+    log.Info("Trie dumping started", "root", s.trie.Hash())
     c.OnRoot(s.trie.Hash())

-    var count int
-    it := trie.NewIterator(s.trie.NodeIterator(start))
+    it := trie.NewIterator(s.trie.NodeIterator(conf.Start))
     for it.Next() {
         var data Account
         if err := rlp.DecodeBytes(it.Value, &data); err != nil {
@@ -125,24 +147,25 @@ func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage,
         account := DumpAccount{
             Balance: data.Balance.String(),
             Nonce:   data.Nonce,
-            Root:     common.Bytes2Hex(data.Root[:]),
-            CodeHash: common.Bytes2Hex(data.CodeHash),
+            Root:      data.Root[:],
+            CodeHash:  data.CodeHash,
+            SecureKey: it.Key,
         }
         addrBytes := s.trie.GetKey(it.Key)
         if addrBytes == nil {
             // Preimage missing
             missingPreimages++
-            if excludeMissingPreimages {
+            if conf.OnlyWithAddresses {
                 continue
             }
-            account.SecureKey = it.Key
         }
         addr := common.BytesToAddress(addrBytes)
         obj := newObject(s, addr, data)
-        if !excludeCode {
-            account.Code = common.Bytes2Hex(obj.Code(s.db))
+        if !conf.SkipCode {
+            account.Code = obj.Code(s.db)
         }
-        if !excludeStorage {
+        if !conf.SkipStorage {
             account.Storage = make(map[common.Hash]string)
             storageIt := trie.NewIterator(obj.getTrie(s.db).NodeIterator(nil))
             for storageIt.Next() {
@@ -155,8 +178,13 @@ func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage,
             }
         }
         c.OnAccount(addr, account)
-        count++
-        if maxResults > 0 && count >= maxResults {
+        accounts++
+        if time.Since(logged) > 8*time.Second {
+            log.Info("Trie dumping in progress", "at", it.Key, "accounts", accounts,
+                "elapsed", common.PrettyDuration(time.Since(start)))
+            logged = time.Now()
+        }
+        if conf.Max > 0 && accounts >= conf.Max {
             if it.Next() {
                 nextKey = it.Key
             }
@@ -166,22 +194,24 @@ func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage,
     if missingPreimages > 0 {
         log.Warn("Dump incomplete due to missing preimages", "missing", missingPreimages)
     }
+    log.Info("Trie dumping complete", "accounts", accounts,
+        "elapsed", common.PrettyDuration(time.Since(start)))

     return nextKey
 }

 // RawDump returns the entire state an a single large object
-func (s *StateDB) RawDump(excludeCode, excludeStorage, excludeMissingPreimages bool) Dump {
+func (s *StateDB) RawDump(opts *DumpConfig) Dump {
     dump := &Dump{
         Accounts: make(map[common.Address]DumpAccount),
     }
-    s.DumpToCollector(dump, excludeCode, excludeStorage, excludeMissingPreimages, nil, 0)
+    s.DumpToCollector(dump, opts)
     return *dump
 }

 // Dump returns a JSON string representing the entire state as a single json-object
-func (s *StateDB) Dump(excludeCode, excludeStorage, excludeMissingPreimages bool) []byte {
-    dump := s.RawDump(excludeCode, excludeStorage, excludeMissingPreimages)
+func (s *StateDB) Dump(opts *DumpConfig) []byte {
+    dump := s.RawDump(opts)
     json, err := json.MarshalIndent(dump, "", " ")
     if err != nil {
         fmt.Println("Dump err", err)
@@ -190,15 +220,15 @@ func (s *StateDB) Dump(excludeCode, excludeStorage, excludeMissingPreimages bool
 }

 // IterativeDump dumps out accounts as json-objects, delimited by linebreaks on stdout
-func (s *StateDB) IterativeDump(excludeCode, excludeStorage, excludeMissingPreimages bool, output *json.Encoder) {
-    s.DumpToCollector(iterativeDump{output}, excludeCode, excludeStorage, excludeMissingPreimages, nil, 0)
+func (s *StateDB) IterativeDump(opts *DumpConfig, output *json.Encoder) {
+    s.DumpToCollector(iterativeDump{output}, opts)
 }

 // IteratorDump dumps out a batch of accounts starts with the given start key
-func (s *StateDB) IteratorDump(excludeCode, excludeStorage, excludeMissingPreimages bool, start []byte, maxResults int) IteratorDump {
+func (s *StateDB) IteratorDump(opts *DumpConfig) IteratorDump {
     iterator := &IteratorDump{
         Accounts: make(map[common.Address]DumpAccount),
     }
-    iterator.Next = s.DumpToCollector(iterator, excludeCode, excludeStorage, excludeMissingPreimages, start, maxResults)
+    iterator.Next = s.DumpToCollector(iterator, opts)
     return *iterator
 }
@@ -57,28 +57,31 @@ func TestDump(t *testing.T) {
     s.state.Commit(false)

     // check that DumpToCollector contains the state objects that are in trie
-    got := string(s.state.Dump(false, false, true))
+    got := string(s.state.Dump(nil))
     want := `{
     "root": "71edff0130dd2385947095001c73d9e28d862fc286fca2b922ca6f6f3cddfdd2",
     "accounts": {
         "0x0000000000000000000000000000000000000001": {
             "balance": "22",
             "nonce": 0,
-            "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
-            "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
+            "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+            "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
+            "key": "0x1468288056310c82aa4c01a7e12a10f8111a0560e72b700555479031b86c357d"
         },
         "0x0000000000000000000000000000000000000002": {
             "balance": "44",
             "nonce": 0,
-            "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
-            "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
+            "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+            "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
+            "key": "0xd52688a8f926c816ca1e079067caba944f158e764817b83fc43594370ca9cf62"
         },
         "0x0000000000000000000000000000000000000102": {
             "balance": "0",
             "nonce": 0,
-            "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
-            "codeHash": "87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3",
-            "code": "03030303030303"
+            "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+            "codeHash": "0x87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3",
+            "code": "0x03030303030303",
+            "key": "0xa17eacbc25cda025e81db9c5c62868822c73ce097cee2a63e33a2e41268358a1"
         }
     }
 }`
eth/api.go
@@ -264,12 +264,16 @@ func NewPublicDebugAPI(eth *Ethereum) *PublicDebugAPI {

 // DumpBlock retrieves the entire state of the database at a given block.
 func (api *PublicDebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) {
+    opts := &state.DumpConfig{
+        OnlyWithAddresses: true,
+        Max:               AccountRangeMaxResults, // Sanity limit over RPC
+    }
     if blockNr == rpc.PendingBlockNumber {
         // If we're dumping the pending state, we need to request
         // both the pending block as well as the pending state from
         // the miner and operate on those
         _, stateDb := api.eth.miner.Pending()
-        return stateDb.RawDump(false, false, true), nil
+        return stateDb.RawDump(opts), nil
     }
     var block *types.Block
     if blockNr == rpc.LatestBlockNumber {
@@ -284,7 +288,7 @@ func (api *PublicDebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error
     if err != nil {
         return state.Dump{}, err
     }
-    return stateDb.RawDump(false, false, true), nil
+    return stateDb.RawDump(opts), nil
 }

 // PrivateDebugAPI is the collection of Ethereum full node APIs exposed over
@@ -386,10 +390,17 @@ func (api *PublicDebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, sta
         return state.IteratorDump{}, errors.New("either block number or block hash must be specified")
     }

-    if maxResults > AccountRangeMaxResults || maxResults <= 0 {
-        maxResults = AccountRangeMaxResults
+    opts := &state.DumpConfig{
+        SkipCode:          nocode,
+        SkipStorage:       nostorage,
+        OnlyWithAddresses: !incompletes,
+        Start:             start,
+        Max:               uint64(maxResults),
     }
-    return stateDb.IteratorDump(nocode, nostorage, incompletes, start, maxResults), nil
+    if maxResults > AccountRangeMaxResults || maxResults <= 0 {
+        opts.Max = AccountRangeMaxResults
+    }
+    return stateDb.IteratorDump(opts), nil
 }

 // StorageRangeResult is the result of a debug_storageRangeAt API call.
@@ -34,7 +34,13 @@ import (
 var dumper = spew.ConfigState{Indent: " "}

 func accountRangeTest(t *testing.T, trie *state.Trie, statedb *state.StateDB, start common.Hash, requestedNum int, expectedNum int) state.IteratorDump {
-    result := statedb.IteratorDump(true, true, false, start.Bytes(), requestedNum)
+    result := statedb.IteratorDump(&state.DumpConfig{
+        SkipCode:          true,
+        SkipStorage:       true,
+        OnlyWithAddresses: false,
+        Start:             start.Bytes(),
+        Max:               uint64(requestedNum),
+    })

     if len(result.Accounts) != expectedNum {
         t.Fatalf("expected %d results, got %d", expectedNum, len(result.Accounts))
@@ -132,11 +138,16 @@ func TestEmptyAccountRange(t *testing.T) {

     var (
         statedb = state.NewDatabase(rawdb.NewMemoryDatabase())
-        state, _ = state.New(common.Hash{}, statedb, nil)
+        st, _   = state.New(common.Hash{}, statedb, nil)
     )
-    state.Commit(true)
-    state.IntermediateRoot(true)
-    results := state.IteratorDump(true, true, true, (common.Hash{}).Bytes(), AccountRangeMaxResults)
+    st.Commit(true)
+    st.IntermediateRoot(true)
+    results := st.IteratorDump(&state.DumpConfig{
+        SkipCode:          true,
+        SkipStorage:       true,
+        OnlyWithAddresses: true,
+        Max:               uint64(AccountRangeMaxResults),
+    })
     if bytes.Equal(results.Next, (common.Hash{}).Bytes()) {
         t.Fatalf("Empty results should not return a second page")
     }