Merge pull request #16319 from rjl493456442/dump_preimages

cmd: implement preimage dump and import cmds
Péter Szilágyi 2018-03-26 14:45:01 +03:00 committed by GitHub
commit dd708c1636
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 160 additions and 4 deletions


@@ -95,6 +95,34 @@ Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing.`,
}
importPreimagesCommand = cli.Command{
Action: utils.MigrateFlags(importPreimages),
Name: "import-preimages",
Usage: "Import the preimage database from an RLP stream",
ArgsUsage: "<datafile>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.`,
}
exportPreimagesCommand = cli.Command{
Action: utils.MigrateFlags(exportPreimages),
Name: "export-preimages",
Usage: "Export the preimage database into an RLP stream",
ArgsUsage: "<dumpfile>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.CacheFlag,
utils.LightModeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.`,
}
copydbCommand = cli.Command{
Action: utils.MigrateFlags(copyDb),
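For context, here is a minimal standalone sketch (not part of this PR; the program skeleton and names are illustrative) of how a urfave/cli command like the two above is registered and dispatched:

package main

import (
	"fmt"
	"os"

	"gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{
		{
			Name:      "export-preimages",
			Usage:     "Export the preimage database into an RLP stream",
			ArgsUsage: "<dumpfile>",
			// Action receives the parsed CLI context; geth additionally
			// wraps its actions in utils.MigrateFlags to handle flag setup.
			Action: func(ctx *cli.Context) error {
				if len(ctx.Args()) < 1 {
					return fmt.Errorf("this command requires an argument")
				}
				fmt.Println("would export preimages to", ctx.Args().First())
				return nil
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}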
@@ -299,7 +327,39 @@ func exportChain(ctx *cli.Context) error {
if err != nil {
utils.Fatalf("Export error: %v\n", err)
}
fmt.Printf("Export done in %v\n", time.Since(start))
return nil
}
// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires an argument.")
}
stack := makeFullNode(ctx)
diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)
start := time.Now()
if err := utils.ImportPreimages(diskdb, ctx.Args().First()); err != nil {
utils.Fatalf("Export error: %v\n", err)
}
fmt.Printf("Export done in %v\n", time.Since(start))
return nil
}
// exportPreimages dumps the preimage data to the specified RLP file in a streaming fashion.
func exportPreimages(ctx *cli.Context) error {
if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires an argument.")
}
stack := makeFullNode(ctx)
diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)
start := time.Now()
if err := utils.ExportPreimages(diskdb, ctx.Args().First()); err != nil {
utils.Fatalf("Export error: %v\n", err)
}
fmt.Printf("Export done in %v\n", time.Since(start))
return nil
}
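As a usage sketch (filenames illustrative): run geth export-preimages preimages.rlp.gz on the source node, then geth import-preimages preimages.rlp.gz on the target. As the helpers below show, a .gz suffix transparently enables gzip compression on export and decompression on import.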


@@ -155,6 +155,8 @@ func init() {
initCommand,
importCommand,
exportCommand,
importPreimagesCommand,
exportPreimagesCommand,
copydbCommand,
removedbCommand,
dumpCommand,


@@ -27,8 +27,11 @@ import (
"strings"
"syscall"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
@@ -105,6 +108,8 @@ func ImportChain(chain *core.BlockChain, fn string) error {
}
log.Info("Importing blockchain", "file", fn)
// Open the file handle and potentially unwrap the gzip stream
fh, err := os.Open(fn)
if err != nil {
return err
@@ -180,8 +185,12 @@ func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block
return nil
}
// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
func ExportChain(blockchain *core.BlockChain, fn string) error {
log.Info("Exporting blockchain", "file", fn)
// Open the file handle and potentially wrap with a gzip stream
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
if err != nil {
return err
@@ -193,7 +202,7 @@ func ExportChain(blockchain *core.BlockChain, fn string) error {
writer = gzip.NewWriter(writer)
defer writer.(*gzip.Writer).Close()
}
// Iterate over the blocks and export them
if err := blockchain.Export(writer); err != nil {
return err
}
@@ -202,9 +211,12 @@ func ExportChain(blockchain *core.BlockChain, fn string) error {
return nil
}
// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
log.Info("Exporting blockchain", "file", fn)
// TODO verify mode perms
// Open the file handle and potentially wrap with a gzip stream
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
if err != nil {
return err
@@ -216,10 +228,86 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
writer = gzip.NewWriter(writer)
defer writer.(*gzip.Writer).Close()
}
// Iterate over the blocks and export them
if err := blockchain.ExportN(writer, first, last); err != nil {
return err
}
log.Info("Exported blockchain to", "file", fn)
return nil
}
// ImportPreimages imports a batch of exported hash preimages into the database.
func ImportPreimages(db *ethdb.LDBDatabase, fn string) error {
log.Info("Importing preimages", "file", fn)
// Open the file handle and potentially unwrap the gzip stream
fh, err := os.Open(fn)
if err != nil {
return err
}
defer fh.Close()
var reader io.Reader = fh
if strings.HasSuffix(fn, ".gz") {
if reader, err = gzip.NewReader(reader); err != nil {
return err
}
}
stream := rlp.NewStream(reader, 0)
// Import the preimages in batches to prevent disk thrashing
preimages := make(map[common.Hash][]byte)
for {
// Read the next entry and ensure it's not junk
var blob []byte
if err := stream.Decode(&blob); err != nil {
if err == io.EOF {
break
}
return err
}
// Accumulate the preimages and flush when enough have been gathered
preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
if len(preimages) > 1024 {
if err := core.WritePreimages(db, 0, preimages); err != nil {
return err
}
preimages = make(map[common.Hash][]byte)
}
}
// Flush the last batch of preimage data
if len(preimages) > 0 {
return core.WritePreimages(db, 0, preimages)
}
return nil
}
// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
func ExportPreimages(db *ethdb.LDBDatabase, fn string) error {
log.Info("Exporting preimages", "file", fn)
// Open the file handle and potentially wrap with a gzip stream
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
if err != nil {
return err
}
defer fh.Close()
var writer io.Writer = fh
if strings.HasSuffix(fn, ".gz") {
writer = gzip.NewWriter(writer)
defer writer.(*gzip.Writer).Close()
}
// Iterate over the preimages and export them
it := db.NewIteratorWithPrefix([]byte("secure-key-"))
for it.Next() {
if err := rlp.Encode(writer, it.Value()); err != nil {
return err
}
}
log.Info("Exported preimages", "file", fn)
return nil
}
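To make the stream format concrete, here is a self-contained round-trip sketch (not from the PR; it reuses the same go-ethereum rlp and crypto packages): each preimage is written as an individual RLP byte string, and on import each blob is re-keyed by its Keccak256 hash, exactly as ImportPreimages does.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Encode a few preimages back to back, mirroring ExportPreimages.
	var buf bytes.Buffer
	for _, p := range [][]byte{[]byte("foo"), []byte("bar")} {
		if err := rlp.Encode(&buf, p); err != nil {
			panic(err)
		}
	}
	// Decode the stream again, recomputing each hash key as ImportPreimages does.
	stream := rlp.NewStream(&buf, 0)
	for {
		var blob []byte
		if err := stream.Decode(&blob); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Printf("%x -> %q\n", crypto.Keccak256Hash(blob), blob)
	}
}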


@@ -29,6 +29,7 @@ import (
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
var OpenFileLimit = 64
@@ -121,6 +122,11 @@ func (db *LDBDatabase) NewIterator() iterator.Iterator {
return db.db.NewIterator(nil, nil)
}
// NewIteratorWithPrefix returns an iterator over a subset of database content with a particular prefix.
func (db *LDBDatabase) NewIteratorWithPrefix(prefix []byte) iterator.Iterator {
return db.db.NewIterator(util.BytesPrefix(prefix), nil)
}
func (db *LDBDatabase) Close() {
// Stop the metrics collection to avoid internal database races
db.quitLock.Lock()
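Finally, a standalone sketch of the mechanism NewIteratorWithPrefix wraps (database path and keys are illustrative): goleveldb's util.BytesPrefix turns a prefix into the half-open key range the iterator scans.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/example-ldb", nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Seed one matching and one non-matching key.
	db.Put([]byte("secure-key-abc"), []byte("preimage-a"), nil)
	db.Put([]byte("other-xyz"), []byte("ignored"), nil)

	// BytesPrefix builds the [prefix, prefix+1) range; only "secure-key-*" keys match.
	it := db.NewIterator(util.BytesPrefix([]byte("secure-key-")), nil)
	defer it.Release()
	for it.Next() {
		fmt.Printf("%s => %s\n", it.Key(), it.Value())
	}
	if err := it.Error(); err != nil {
		panic(err)
	}
}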