diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index c9ab72b6d..d3086921b 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -95,6 +95,34 @@ Requires a first argument of the file to write to.
 Optional second and third arguments control the first and
 last block to write. In this mode, the file will be appended
 if already existing.`,
+	}
+	importPreimagesCommand = cli.Command{
+		Action:    utils.MigrateFlags(importPreimages),
+		Name:      "import-preimages",
+		Usage:     "Import the preimage database from an RLP stream",
+		ArgsUsage: "",
+		Flags: []cli.Flag{
+			utils.DataDirFlag,
+			utils.CacheFlag,
+			utils.LightModeFlag,
+		},
+		Category: "BLOCKCHAIN COMMANDS",
+		Description: `
+	The import-preimages command imports hash preimages from an RLP encoded stream.`,
+	}
+	exportPreimagesCommand = cli.Command{
+		Action:    utils.MigrateFlags(exportPreimages),
+		Name:      "export-preimages",
+		Usage:     "Export the preimage database into an RLP stream",
+		ArgsUsage: "",
+		Flags: []cli.Flag{
+			utils.DataDirFlag,
+			utils.CacheFlag,
+			utils.LightModeFlag,
+		},
+		Category: "BLOCKCHAIN COMMANDS",
+		Description: `
+The export-preimages command exports hash preimages to an RLP encoded stream.`,
 	}
 	copydbCommand = cli.Command{
 		Action:    utils.MigrateFlags(copyDb),
@@ -299,7 +327,39 @@ func exportChain(ctx *cli.Context) error {
 	if err != nil {
 		utils.Fatalf("Export error: %v\n", err)
 	}
-	fmt.Printf("Export done in %v", time.Since(start))
+	fmt.Printf("Export done in %v\n", time.Since(start))
+	return nil
+}
+
+// importPreimages imports preimage data from the specified file.
+func importPreimages(ctx *cli.Context) error {
+	if len(ctx.Args()) < 1 {
+		utils.Fatalf("This command requires an argument.")
+	}
+	stack := makeFullNode(ctx)
+	diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)
+
+	start := time.Now()
+	if err := utils.ImportPreimages(diskdb, ctx.Args().First()); err != nil {
+		utils.Fatalf("Import error: %v\n", err)
+	}
+	fmt.Printf("Import done in %v\n", time.Since(start))
+	return nil
+}
+
+// exportPreimages dumps the preimage data to the specified file as an RLP stream.
+func exportPreimages(ctx *cli.Context) error {
+	if len(ctx.Args()) < 1 {
+		utils.Fatalf("This command requires an argument.")
+	}
+	stack := makeFullNode(ctx)
+	diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)
+
+	start := time.Now()
+	if err := utils.ExportPreimages(diskdb, ctx.Args().First()); err != nil {
+		utils.Fatalf("Export error: %v\n", err)
+	}
+	fmt.Printf("Export done in %v\n", time.Since(start))
 	return nil
 }
 
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index f5a3fa941..061384d1b 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -155,6 +155,8 @@ func init() {
 		initCommand,
 		importCommand,
 		exportCommand,
+		importPreimagesCommand,
+		exportPreimagesCommand,
 		copydbCommand,
 		removedbCommand,
 		dumpCommand,
diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go
index 186d18d8f..c0af4c13e 100644
--- a/cmd/utils/cmd.go
+++ b/cmd/utils/cmd.go
@@ -27,8 +27,11 @@ import (
 	"strings"
 	"syscall"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/internal/debug"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
@@ -105,6 +108,8 @@ func ImportChain(chain *core.BlockChain, fn string) error {
 	}
 	log.Info("Importing blockchain", "file", fn)
+
+	// Open the file handle and potentially unwrap the gzip stream
 	fh, err := os.Open(fn)
 	if err != nil {
 		return err
 	}
@@ -180,8 +185,12 @@ func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block
 	return nil
 }
 
+// ExportChain exports a blockchain into the specified file, truncating any data
+// already present in the file.
 func ExportChain(blockchain *core.BlockChain, fn string) error {
 	log.Info("Exporting blockchain", "file", fn)
+
+	// Open the file handle and potentially wrap with a gzip stream
 	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
 	if err != nil {
 		return err
@@ -193,7 +202,7 @@ func ExportChain(blockchain *core.BlockChain, fn string) error {
 		writer = gzip.NewWriter(writer)
 		defer writer.(*gzip.Writer).Close()
 	}
-
+	// Iterate over the blocks and export them
 	if err := blockchain.Export(writer); err != nil {
 		return err
 	}
@@ -202,9 +211,12 @@ func ExportChain(blockchain *core.BlockChain, fn string) error {
 	return nil
 }
 
+// ExportAppendChain exports a blockchain into the specified file, appending to
+// the file if data already exists in it.
 func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
 	log.Info("Exporting blockchain", "file", fn)
-	// TODO verify mode perms
+
+	// Open the file handle and potentially wrap with a gzip stream
 	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
 	if err != nil {
 		return err
@@ -216,10 +228,86 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
 		writer = gzip.NewWriter(writer)
 		defer writer.(*gzip.Writer).Close()
 	}
-
+	// Iterate over the blocks and export them
 	if err := blockchain.ExportN(writer, first, last); err != nil {
 		return err
 	}
 	log.Info("Exported blockchain to", "file", fn)
 	return nil
 }
+
+// ImportPreimages imports a batch of exported hash preimages into the database.
+func ImportPreimages(db *ethdb.LDBDatabase, fn string) error {
+	log.Info("Importing preimages", "file", fn)
+
+	// Open the file handle and potentially unwrap the gzip stream
+	fh, err := os.Open(fn)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+
+	var reader io.Reader = fh
+	if strings.HasSuffix(fn, ".gz") {
+		if reader, err = gzip.NewReader(reader); err != nil {
+			return err
+		}
+	}
+	stream := rlp.NewStream(reader, 0)
+
+	// Import the preimages in batches to prevent disk thrashing
+	preimages := make(map[common.Hash][]byte)
+
+	for {
+		// Read the next entry and ensure it's not junk
+		var blob []byte
+
+		if err := stream.Decode(&blob); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+		// Accumulate the preimages and flush when enough have been gathered
+		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
+		if len(preimages) > 1024 {
+			if err := core.WritePreimages(db, 0, preimages); err != nil {
+				return err
+			}
+			preimages = make(map[common.Hash][]byte)
+		}
+	}
+	// Flush the last batch of preimage data
+	if len(preimages) > 0 {
+		return core.WritePreimages(db, 0, preimages)
+	}
+	return nil
+}
+
+// ExportPreimages exports all known hash preimages into the specified file,
+// truncating any data already present in the file.
+func ExportPreimages(db *ethdb.LDBDatabase, fn string) error {
+	log.Info("Exporting preimages", "file", fn)
+
+	// Open the file handle and potentially wrap with a gzip stream
+	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+
+	var writer io.Writer = fh
+	if strings.HasSuffix(fn, ".gz") {
+		writer = gzip.NewWriter(writer)
+		defer writer.(*gzip.Writer).Close()
+	}
+	// Iterate over the preimages and export them
+	it := db.NewIteratorWithPrefix([]byte("secure-key-"))
+	for it.Next() {
+		if err := rlp.Encode(writer, it.Value()); err != nil {
+			return err
+		}
+	}
+	log.Info("Exported preimages", "file", fn)
+	return nil
+}
diff --git a/ethdb/database.go b/ethdb/database.go
index 8c557e482..30ed37dc7 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -29,6 +29,7 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/filter"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
 var OpenFileLimit = 64
@@ -121,6 +122,11 @@ func (db *LDBDatabase) NewIterator() iterator.Iterator {
 	return db.db.NewIterator(nil, nil)
 }
 
+// NewIteratorWithPrefix returns an iterator over a subset of database content with a particular prefix.
+func (db *LDBDatabase) NewIteratorWithPrefix(prefix []byte) iterator.Iterator {
+	return db.db.NewIterator(util.BytesPrefix(prefix), nil)
+}
+
 func (db *LDBDatabase) Close() {
 	// Stop the metrics collection to avoid internal database races
 	db.quitLock.Lock()
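For reference (an illustration, not part of the diff above): the exported stream is just a flat sequence of RLP byte strings, one per preimage value, optionally gzip-wrapped when the file name ends in ".gz"; ImportPreimages re-keys each value by its Keccak-256 hash on the way back in. A minimal standalone Go sketch of that framing and round trip, assuming only that the go-ethereum rlp and crypto packages are importable (the sample values are hypothetical):

// roundtrip.go: standalone sketch of the preimage stream framing.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Write two sample preimages the way ExportPreimages does: each
	// database value becomes one RLP byte string in the stream.
	var buf bytes.Buffer
	for _, preimage := range [][]byte{[]byte("foo"), []byte("bar")} {
		if err := rlp.Encode(&buf, preimage); err != nil {
			panic(err)
		}
	}
	// Read them back the way ImportPreimages does: decode byte strings
	// until EOF and recompute the Keccak-256 key for each value.
	stream := rlp.NewStream(&buf, 0)
	for {
		var blob []byte
		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			panic(err)
		}
		fmt.Printf("%x -> %q\n", crypto.Keccak256Hash(blob), blob)
	}
}

The 1024-entry batching in ImportPreimages only bounds memory during a real import; the sketch skips it since everything fits in a single buffer.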