cmd/geth: implement data import and export (#22931)

This PR adds two more database subcommands for exporting and importing data.
Two exporters are implemented, covering preimage and snapshot data respectively.
The import command is generic: it can take any data export and import it into leveldb.
The data format carries a 'magic' value for disambiguation and a version field for future compatibility; a sketch of the stream layout is given below.
rjl493456442 2021-11-02 18:31:45 +08:00 committed by GitHub
parent 551bd6e721
commit 2e8b58f076
7 changed files with 590 additions and 15 deletions
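To make the format concrete, here is a minimal sketch of a dump writer, assuming only the exportHeader struct and the (op, key, value) encoding loop from cmd/utils/export.go below; the standalone program, its output file name and the sample entry are illustrative and not part of the PR.

package main

import (
    "os"
    "time"

    "github.com/ethereum/go-ethereum/rlp"
)

// Mirrors the exportHeader type defined in cmd/utils/export.go.
type exportHeader struct {
    Magic    string // always "gethdbdump"
    Version  uint64 // currently 0; importers reject anything higher
    Kind     string // e.g. "preimage" or "snapshot"
    UnixTime uint64
}

func main() {
    // Hypothetical output file; a ".gz" suffix would additionally enable gzip.
    fh, err := os.Create("example.dump")
    if err != nil {
        panic(err)
    }
    defer fh.Close()

    // The first element of the stream is the header.
    rlp.Encode(fh, &exportHeader{
        Magic:    "gethdbdump",
        Version:  0,
        Kind:     "preimage",
        UnixTime: uint64(time.Now().Unix()),
    })
    // It is followed by any number of flat (op, key, value) triples,
    // where op 0 (OpBatchAdd) means "put" and op 1 (OpBatchDel) means "delete".
    rlp.Encode(fh, uint8(0))             // op
    rlp.Encode(fh, []byte("some-key"))   // raw database key
    rlp.Encode(fh, []byte("some-value")) // raw database value
}

On the import side, ImportLDBData decodes the header first, rejects an unknown magic or a higher version, and then replays the triples into the database in batches.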

@@ -140,7 +140,9 @@ be gzipped.`,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
`,
}
exportPreimagesCommand = cli.Command{
Action: utils.MigrateFlags(exportPreimages),
@@ -154,7 +156,9 @@ be gzipped.`,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.
It's deprecated, please use "geth db export" instead.
`,
}
dumpCommand = cli.Command{
Action: utils.MigrateFlags(dump),
@@ -368,7 +372,6 @@ func exportPreimages(ctx *cli.Context) error {
if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires an argument.")
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()

@@ -17,12 +17,16 @@
package main

import (
"bytes"
"errors"
"fmt"
"os"
"os/signal"
"path/filepath"
"sort"
"strconv"
"strings"
"syscall"
"time"

"github.com/ethereum/go-ethereum/cmd/utils"
@@ -63,6 +67,8 @@ Remove blockchain and state databases`,
dbPutCmd,
dbGetSlotsCmd,
dbDumpFreezerIndex,
dbImportCmd,
dbExportCmd,
},
}
dbInspectCmd = cli.Command{
@@ -188,6 +194,36 @@ WARNING: This is a low-level operation which may cause database corruption!`,
},
Description: "This command displays information about the freezer index.",
}
dbImportCmd = cli.Command{
Action: utils.MigrateFlags(importLDBdata),
Name: "import",
Usage: "Imports leveldb-data from an exported RLP dump.",
ArgsUsage: "<dumpfile> <start (optional)>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "The import command imports the specific chain data from an RLP encoded stream.",
}
dbExportCmd = cli.Command{
Action: utils.MigrateFlags(exportChaindata),
Name: "export",
Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
ArgsUsage: "<type> <dumpfile>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
}
)
func removeDB(ctx *cli.Context) error {
@@ -510,3 +546,133 @@ func parseHexOrString(str string) ([]byte, error) {
}
return b, err
}
func importLDBdata(ctx *cli.Context) error {
start := 0
switch ctx.NArg() {
case 1:
break
case 2:
s, err := strconv.Atoi(ctx.Args().Get(1))
if err != nil {
return fmt.Errorf("second arg must be an integer: %v", err)
}
start = s
default:
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
var (
fName = ctx.Args().Get(0)
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during ldb import, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, false)
return utils.ImportLDBData(db, fName, int64(start), stop)
}
type preimageIterator struct {
iter ethdb.Iterator
}
func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
for iter.iter.Next() {
key := iter.iter.Key()
if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.iter.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *preimageIterator) Release() {
iter.iter.Release()
}
type snapshotIterator struct {
init bool
account ethdb.Iterator
storage ethdb.Iterator
}
func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
if !iter.init {
iter.init = true
return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
}
for iter.account.Next() {
key := iter.account.Key()
if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.account.Value(), true
}
}
for iter.storage.Next() {
key := iter.storage.Key()
if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
return utils.OpBatchAdd, key, iter.storage.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *snapshotIterator) Release() {
iter.account.Release()
iter.storage.Release()
}
// chainExporters defines the export scheme for all exportable chain data.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
"preimage": func(db ethdb.Database) utils.ChainDataIterator {
iter := db.NewIterator(rawdb.PreimagePrefix, nil)
return &preimageIterator{iter: iter}
},
"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
return &snapshotIterator{account: account, storage: storage}
},
}
func exportChaindata(ctx *cli.Context) error {
if ctx.NArg() < 2 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
// Parse the required chain data type, make sure it's supported.
kind := ctx.Args().Get(0)
kind = strings.ToLower(strings.Trim(kind, " "))
exporter, ok := chainExporters[kind]
if !ok {
var kinds []string
for kind := range chainExporters {
kinds = append(kinds, kind)
}
return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
}
var (
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during db export, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, true)
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}

@@ -18,7 +18,9 @@
package utils

import (
"bufio"
"compress/gzip"
"errors"
"fmt"
"io"
"os"
@@ -270,6 +272,7 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
}
// ImportPreimages imports a batch of exported hash preimages into the database.
// It's part of the deprecated functionality and should be removed in the future.
func ImportPreimages(db ethdb.Database, fn string) error {
log.Info("Importing preimages", "file", fn)
@@ -280,7 +283,7 @@ func ImportPreimages(db ethdb.Database, fn string) error {
}
defer fh.Close()
var reader io.Reader = bufio.NewReader(fh)
if strings.HasSuffix(fn, ".gz") {
if reader, err = gzip.NewReader(reader); err != nil {
return err
@@ -288,7 +291,7 @@ func ImportPreimages(db ethdb.Database, fn string) error {
}
stream := rlp.NewStream(reader, 0)
// Import the preimages in batches to prevent disk thrashing
preimages := make(map[common.Hash][]byte)
for {
@@ -317,6 +320,7 @@ func ImportPreimages(db ethdb.Database, fn string) error {
// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
// It's part of the deprecated functionality and should be removed in the future.
func ExportPreimages(db ethdb.Database, fn string) error {
log.Info("Exporting preimages", "file", fn)
@@ -344,3 +348,207 @@ func ExportPreimages(db ethdb.Database, fn string) error {
log.Info("Exported preimages", "file", fn)
return nil
}
// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
// should be bumped.
// If the importer sees a higher version, it should reject the import.
type exportHeader struct {
Magic string // Always set to 'gethdbdump' for disambiguation
Version uint64
Kind string
UnixTime uint64
}
const exportMagic = "gethdbdump"
const (
OpBatchAdd = 0
OpBatchDel = 1
)
// ImportLDBData imports a batch of exported key-value data into the given database
func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error {
log.Info("Importing leveldb data", "file", f)
// Open the file handle and potentially unwrap the gzip stream
fh, err := os.Open(f)
if err != nil {
return err
}
defer fh.Close()
var reader io.Reader = bufio.NewReader(fh)
if strings.HasSuffix(f, ".gz") {
if reader, err = gzip.NewReader(reader); err != nil {
return err
}
}
stream := rlp.NewStream(reader, 0)
// Read the header
var header exportHeader
if err := stream.Decode(&header); err != nil {
return fmt.Errorf("could not decode header: %v", err)
}
if header.Magic != exportMagic {
return errors.New("incompatible data, wrong magic")
}
if header.Version != 0 {
return fmt.Errorf("incompatible version %d, (support only 0)", header.Version)
}
log.Info("Importing data", "file", f, "type", header.Kind, "data age",
common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))
// Import the data in batches to prevent disk thrashing
var (
count int64
start = time.Now()
logged = time.Now()
batch = db.NewBatch()
)
for {
// Read the next entry
var (
op byte
key, val []byte
)
if err := stream.Decode(&op); err != nil {
if err == io.EOF {
break
}
return err
}
if err := stream.Decode(&key); err != nil {
return err
}
if err := stream.Decode(&val); err != nil {
return err
}
if count < startIndex {
count++
continue
}
switch op {
case OpBatchDel:
batch.Delete(key)
case OpBatchAdd:
batch.Put(key, val)
default:
return fmt.Errorf("unknown op %d\n", op)
}
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return err
}
batch.Reset()
}
// Check interruption emitted by ctrl+c
if count%1000 == 0 {
select {
case <-interrupt:
if err := batch.Write(); err != nil {
return err
}
log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
return nil
default:
}
}
if count%1000 == 0 && time.Since(logged) > 8*time.Second {
log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
logged = time.Now()
}
count += 1
}
// Flush the last batch of data
if batch.ValueSize() > 0 {
if err := batch.Write(); err != nil {
return err
}
}
log.Info("Imported chain data", "file", f, "count", count,
"elapsed", common.PrettyDuration(time.Since(start)))
return nil
}
// ChainDataIterator is an interface that wraps all the necessary functions to
// iterate over the chain data being exported.
type ChainDataIterator interface {
// Next returns the operation, key and value of the next entry to export.
// When the end is reached, it returns (0, nil, nil, false).
Next() (byte, []byte, []byte, bool)
// Release releases associated resources. Release should always succeed and can
// be called multiple times without causing error.
Release()
}
// ExportChaindata exports the given data type into the specified file,
// truncating any data already present. If the file name has a '.gz' suffix,
// gzip compression is used.
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
log.Info("Exporting chain data", "file", fn, "kind", kind)
defer iter.Release()
// Open the file handle and potentially wrap with a gzip stream
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
if err != nil {
return err
}
defer fh.Close()
var writer io.Writer = fh
if strings.HasSuffix(fn, ".gz") {
writer = gzip.NewWriter(writer)
defer writer.(*gzip.Writer).Close()
}
// Write the header
if err := rlp.Encode(writer, &exportHeader{
Magic: exportMagic,
Version: 0,
Kind: kind,
UnixTime: uint64(time.Now().Unix()),
}); err != nil {
return err
}
// Extract data from source iterator and dump them out to file
var (
count int64
start = time.Now()
logged = time.Now()
)
for {
op, key, val, ok := iter.Next()
if !ok {
break
}
if err := rlp.Encode(writer, op); err != nil {
return err
}
if err := rlp.Encode(writer, key); err != nil {
return err
}
if err := rlp.Encode(writer, val); err != nil {
return err
}
if count%1000 == 0 {
// Check interruption emitted by ctrl+c
select {
case <-interrupt:
log.Info("Chain data exporting interrupted", "file", fn,
"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
return nil
default:
}
if time.Since(logged) > 8*time.Second {
log.Info("Exporting chain data", "file", fn, "kind", kind,
"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
logged = time.Now()
}
}
count++
}
log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
"elapsed", common.PrettyDuration(time.Since(start)))
return nil
}

cmd/utils/export_test.go (new file)

@@ -0,0 +1,198 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of go-ethereum.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/rlp"
)
// TestExport does basic sanity checks on the export/import functionality
func TestExport(t *testing.T) {
f := fmt.Sprintf("%v/tempdump", os.TempDir())
defer func() {
os.Remove(f)
}()
testExport(t, f)
}
func TestExportGzip(t *testing.T) {
f := fmt.Sprintf("%v/tempdump.gz", os.TempDir())
defer func() {
os.Remove(f)
}()
testExport(t, f)
}
type testIterator struct {
index int
}
func newTestIterator() *testIterator {
return &testIterator{index: -1}
}
func (iter *testIterator) Next() (byte, []byte, []byte, bool) {
if iter.index >= 999 {
return 0, nil, nil, false
}
iter.index += 1
if iter.index == 42 {
iter.index += 1
}
return OpBatchAdd, []byte(fmt.Sprintf("key-%04d", iter.index)),
[]byte(fmt.Sprintf("value %d", iter.index)), true
}
func (iter *testIterator) Release() {}
func testExport(t *testing.T, f string) {
err := ExportChaindata(f, "testdata", newTestIterator(), make(chan struct{}))
if err != nil {
t.Fatal(err)
}
db := rawdb.NewMemoryDatabase()
err = ImportLDBData(db, f, 5, make(chan struct{}))
if err != nil {
t.Fatal(err)
}
// verify
for i := 0; i < 1000; i++ {
v, err := db.Get([]byte(fmt.Sprintf("key-%04d", i)))
if (i < 5 || i == 42) && err == nil {
t.Fatalf("expected no element at idx %d, got '%v'", i, string(v))
}
if !(i < 5 || i == 42) {
if err != nil {
t.Fatalf("expected element idx %d: %v", i, err)
}
if have, want := string(v), fmt.Sprintf("value %d", i); have != want {
t.Fatalf("have %v, want %v", have, want)
}
}
}
v, err := db.Get([]byte(fmt.Sprintf("key-%04d", 1000)))
if err == nil {
t.Fatalf("expected no element at idx %d, got '%v'", 1000, string(v))
}
}
// TestDeletionExport tests if the deletion markers can be exported/imported correctly
func TestDeletionExport(t *testing.T) {
f := fmt.Sprintf("%v/tempdump", os.TempDir())
defer func() {
os.Remove(f)
}()
testDeletion(t, f)
}
// TestDeletionExportGzip tests if the deletion markers can be exported/imported
// correctly with gz compression.
func TestDeletionExportGzip(t *testing.T) {
f := fmt.Sprintf("%v/tempdump.gz", os.TempDir())
defer func() {
os.Remove(f)
}()
testDeletion(t, f)
}
type deletionIterator struct {
index int
}
func newDeletionIterator() *deletionIterator {
return &deletionIterator{index: -1}
}
func (iter *deletionIterator) Next() (byte, []byte, []byte, bool) {
if iter.index >= 999 {
return 0, nil, nil, false
}
iter.index += 1
if iter.index == 42 {
iter.index += 1
}
return OpBatchDel, []byte(fmt.Sprintf("key-%04d", iter.index)), nil, true
}
func (iter *deletionIterator) Release() {}
func testDeletion(t *testing.T, f string) {
err := ExportChaindata(f, "testdata", newDeletionIterator(), make(chan struct{}))
if err != nil {
t.Fatal(err)
}
db := rawdb.NewMemoryDatabase()
for i := 0; i < 1000; i++ {
db.Put([]byte(fmt.Sprintf("key-%04d", i)), []byte(fmt.Sprintf("value %d", i)))
}
err = ImportLDBData(db, f, 5, make(chan struct{}))
if err != nil {
t.Fatal(err)
}
for i := 0; i < 1000; i++ {
v, err := db.Get([]byte(fmt.Sprintf("key-%04d", i)))
if i < 5 || i == 42 {
if err != nil {
t.Fatalf("expected element at idx %d, got '%v'", i, err)
}
if have, want := string(v), fmt.Sprintf("value %d", i); have != want {
t.Fatalf("have %v, want %v", have, want)
}
}
if !(i < 5 || i == 42) {
if err == nil {
t.Fatalf("expected no element idx %d: %v", i, string(v))
}
}
}
}
// TestImportFutureFormat tests that we reject unsupported future versions.
func TestImportFutureFormat(t *testing.T) {
f := fmt.Sprintf("%v/tempdump-future", os.TempDir())
defer func() {
os.Remove(f)
}()
fh, err := os.OpenFile(f, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
if err != nil {
t.Fatal(err)
}
defer fh.Close()
if err := rlp.Encode(fh, &exportHeader{
Magic: exportMagic,
Version: 500,
Kind: "testdata",
UnixTime: uint64(time.Now().Unix()),
}); err != nil {
t.Fatal(err)
}
db2 := rawdb.NewMemoryDatabase()
err = ImportLDBData(db2, f, 0, make(chan struct{}))
if err == nil {
t.Fatal("Expected error, got none")
}
if !strings.HasPrefix(err.Error(), "incompatible version") {
t.Fatalf("wrong error: %v", err)
}
}

@@ -47,7 +47,7 @@ func DeleteSnapshotDisabled(db ethdb.KeyValueWriter) {
// ReadSnapshotRoot retrieves the root of the block whose state is contained in
// the persisted snapshot.
func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash {
data, _ := db.Get(SnapshotRootKey)
if len(data) != common.HashLength {
return common.Hash{}
}
@@ -57,7 +57,7 @@ func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash {
// WriteSnapshotRoot stores the root of the block whose state is contained in
// the persisted snapshot.
func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) {
if err := db.Put(SnapshotRootKey, root[:]); err != nil {
log.Crit("Failed to store snapshot root", "err", err)
}
}
@@ -67,7 +67,7 @@ func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) {
// be used during updates, so a crash or failure will mark the entire snapshot
// invalid.
func DeleteSnapshotRoot(db ethdb.KeyValueWriter) {
if err := db.Delete(SnapshotRootKey); err != nil {
log.Crit("Failed to remove snapshot root", "err", err)
}
}

@@ -371,7 +371,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
accountSnaps.Add(size)
case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
storageSnaps.Add(size)
case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength):
preimages.Add(size)
case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
metadata.Add(size)
@@ -393,7 +393,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
var accounted bool
for _, meta := range [][]byte{
databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey,
fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
uncleanShutdownKey, badBlockKey,
} {

@@ -48,8 +48,8 @@ var (
// snapshotDisabledKey flags that the snapshot should not be maintained due to initial sync.
snapshotDisabledKey = []byte("SnapshotDisabled")
// SnapshotRootKey tracks the hash of the last snapshot.
SnapshotRootKey = []byte("SnapshotRoot")
// snapshotJournalKey tracks the in-memory diff layers across restarts.
snapshotJournalKey = []byte("SnapshotJournal")
@@ -90,7 +90,7 @@ var (
SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value
CodePrefix = []byte("c") // CodePrefix + code hash -> account code
PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
configPrefix = []byte("ethereum-config-") // config prefix for the db
// Chain index prefixes (use `i` + single byte to avoid mixing data types).
@@ -207,9 +207,9 @@ func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
return key
}
// preimageKey = PreimagePrefix + hash
func preimageKey(hash common.Hash) []byte {
return append(PreimagePrefix, hash.Bytes()...)
}
// codeKey = CodePrefix + hash