cmd/geth: add db commands stats, compact, put, get, delete (#22014)

This PR introduces:

- db.put to put a value into the database
- db.get to read a value from the database
- db.delete to delete a value from the database
- db.stats to check compaction info from the database
- db.compact to trigger a db compaction

It also moves the existing inspect command to db.inspect.
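
For orientation, the new subcommands are invoked roughly as follows (the key and value bytes here are made-up illustrations and must be hex-encoded, matching the ArgsUsage strings in dbcmd.go below; the usual global flags such as --datadir go before the db subcommand):

geth db stats
geth db compact
geth db inspect 0x1234
geth db get 0x1234
geth db put 0x1234 0xabcd
geth db delete 0x1234
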
Martin Holst Swende, 2021-02-23 11:27:32 +01:00 (committed by GitHub)
parent 3ecfdccd9a
commit c4a2b682ff
7 changed files with 401 additions and 152 deletions


@@ -20,7 +20,6 @@ import (
"encoding/json"
"fmt"
"os"
- "path/filepath"
"runtime"
"strconv"
"sync/atomic"
@@ -28,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/console/prompt"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
@@ -170,18 +168,6 @@ The export-preimages command export hash preimages to an RLP encoded stream`,
Category: "BLOCKCHAIN COMMANDS",
Description: `
The first argument must be the directory containing the blockchain to download from`,
- }
- removedbCommand = cli.Command{
- Action: utils.MigrateFlags(removeDB),
- Name: "removedb",
- Usage: "Remove blockchain and state databases",
- ArgsUsage: " ",
- Flags: []cli.Flag{
- utils.DataDirFlag,
- },
- Category: "BLOCKCHAIN COMMANDS",
- Description: `
- Remove blockchain and state databases`,
}
dumpCommand = cli.Command{
Action: utils.MigrateFlags(dump),
@@ -202,25 +188,6 @@ Remove blockchain and state databases`,
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
}
- inspectCommand = cli.Command{
- Action: utils.MigrateFlags(inspect),
- Name: "inspect",
- Usage: "Inspect the storage size for each type of data in the database",
- ArgsUsage: " ",
- Flags: []cli.Flag{
- utils.DataDirFlag,
- utils.AncientFlag,
- utils.CacheFlag,
- utils.MainnetFlag,
- utils.RopstenFlag,
- utils.RinkebyFlag,
- utils.GoerliFlag,
- utils.YoloV3Flag,
- utils.LegacyTestnetFlag,
- utils.SyncModeFlag,
- },
- Category: "BLOCKCHAIN COMMANDS",
- }
)

// initGenesis will initialise the given JSON format genesis file and writes it as
@@ -323,17 +290,7 @@ func importChain(ctx *cli.Context) error {
fmt.Printf("Import done in %v.\n\n", time.Since(start))
// Output pre-compaction stats mostly to see the import trashing
- stats, err := db.Stat("leveldb.stats")
- if err != nil {
- utils.Fatalf("Failed to read database stats: %v", err)
- }
- fmt.Println(stats)
- ioStats, err := db.Stat("leveldb.iostats")
- if err != nil {
- utils.Fatalf("Failed to read database iostats: %v", err)
- }
- fmt.Println(ioStats)
+ showLeveldbStats(db)
// Print the memory statistics used by the importing
mem := new(runtime.MemStats)
@@ -351,22 +308,12 @@ func importChain(ctx *cli.Context) error {
// Compact the entire database to more accurately measure disk io and print the stats
start = time.Now()
fmt.Println("Compacting entire database...")
- if err = db.Compact(nil, nil); err != nil {
+ if err := db.Compact(nil, nil); err != nil {
utils.Fatalf("Compaction failed: %v", err)
}
fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
- stats, err = db.Stat("leveldb.stats")
- if err != nil {
- utils.Fatalf("Failed to read database stats: %v", err)
- }
- fmt.Println(stats)
- ioStats, err = db.Stat("leveldb.iostats")
- if err != nil {
- utils.Fatalf("Failed to read database iostats: %v", err)
- }
- fmt.Println(ioStats)
+ showLeveldbStats(db)
return importErr
}
@@ -499,66 +446,6 @@ func copyDb(ctx *cli.Context) error {
return nil
}
- func removeDB(ctx *cli.Context) error {
- stack, config := makeConfigNode(ctx)
- // Remove the full node state database
- path := stack.ResolvePath("chaindata")
- if common.FileExist(path) {
- confirmAndRemoveDB(path, "full node state database")
- } else {
- log.Info("Full node state database missing", "path", path)
- }
- // Remove the full node ancient database
- path = config.Eth.DatabaseFreezer
- switch {
- case path == "":
- path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
- case !filepath.IsAbs(path):
- path = config.Node.ResolvePath(path)
- }
- if common.FileExist(path) {
- confirmAndRemoveDB(path, "full node ancient database")
- } else {
- log.Info("Full node ancient database missing", "path", path)
- }
- // Remove the light node database
- path = stack.ResolvePath("lightchaindata")
- if common.FileExist(path) {
- confirmAndRemoveDB(path, "light node database")
- } else {
- log.Info("Light node database missing", "path", path)
- }
- return nil
- }
- // confirmAndRemoveDB prompts the user for a last confirmation and removes the
- // folder if accepted.
- func confirmAndRemoveDB(database string, kind string) {
- confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
- switch {
- case err != nil:
- utils.Fatalf("%v", err)
- case !confirm:
- log.Info("Database deletion skipped", "path", database)
- default:
- start := time.Now()
- filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
- // If we're at the top level folder, recurse into
- if path == database {
- return nil
- }
- // Delete all the files, but not subfolders
- if !info.IsDir() {
- os.Remove(path)
- return nil
- }
- return filepath.SkipDir
- })
- log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
- }
- }
func dump(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
@@ -598,16 +485,6 @@ func dump(ctx *cli.Context) error {
return nil
}
- func inspect(ctx *cli.Context) error {
- node, _ := makeConfigNode(ctx)
- defer node.Close()
- _, chainDb := utils.MakeChain(ctx, node, true)
- defer chainDb.Close()
- return rawdb.InspectDatabase(chainDb)
- }
// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
_, err := strconv.Atoi(x)

cmd/geth/dbcmd.go (new file, 341 lines)

@@ -0,0 +1,341 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/console/prompt"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/leveldb"
"github.com/ethereum/go-ethereum/log"
"github.com/syndtr/goleveldb/leveldb/opt"
"gopkg.in/urfave/cli.v1"
)
var (
removedbCommand = cli.Command{
Action: utils.MigrateFlags(removeDB),
Name: "removedb",
Usage: "Remove blockchain and state databases",
ArgsUsage: "",
Flags: []cli.Flag{
utils.DataDirFlag,
},
Category: "DATABASE COMMANDS",
Description: `
Remove blockchain and state databases`,
}
dbCommand = cli.Command{
Name: "db",
Usage: "Low level database operations",
ArgsUsage: "",
Category: "DATABASE COMMANDS",
Subcommands: []cli.Command{
dbInspectCmd,
dbStatCmd,
dbCompactCmd,
dbGetCmd,
dbDeleteCmd,
dbPutCmd,
},
}
dbInspectCmd = cli.Command{
Action: utils.MigrateFlags(inspect),
Name: "inspect",
ArgsUsage: "<prefix> <start>",
Usage: "Inspect the storage size for each type of data in the database",
Description: `This command iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
}
dbStatCmd = cli.Command{
Action: dbStats,
Name: "stats",
Usage: "Print leveldb statistics",
}
dbCompactCmd = cli.Command{
Action: dbCompact,
Name: "compact",
Usage: "Compact leveldb database. WARNING: May take a very long time",
Description: `This command performs a database compaction.
WARNING: This operation may take a very long time to finish, and may cause database
corruption if it is aborted during execution!`,
}
dbGetCmd = cli.Command{
Action: dbGet,
Name: "get",
Usage: "Show the value of a database key",
ArgsUsage: "<hex-encoded key>",
Description: "This command looks up the specified database key from the database.",
}
dbDeleteCmd = cli.Command{
Action: dbDelete,
Name: "delete",
Usage: "Delete a database key (WARNING: may corrupt your database)",
ArgsUsage: "<hex-encoded key>",
Description: `This command deletes the specified database key from the database.
WARNING: This is a low-level operation which may cause database corruption!`,
}
dbPutCmd = cli.Command{
Action: dbPut,
Name: "put",
Usage: "Set the value of a database key (WARNING: may corrupt your database)",
ArgsUsage: "<hex-encoded key> <hex-encoded value>",
Description: `This command sets a given database key to the given value.
WARNING: This is a low-level operation which may cause database corruption!`,
}
)
func removeDB(ctx *cli.Context) error {
stack, config := makeConfigNode(ctx)
// Remove the full node state database
path := stack.ResolvePath("chaindata")
if common.FileExist(path) {
confirmAndRemoveDB(path, "full node state database")
} else {
log.Info("Full node state database missing", "path", path)
}
// Remove the full node ancient database
path = config.Eth.DatabaseFreezer
switch {
case path == "":
path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
case !filepath.IsAbs(path):
path = config.Node.ResolvePath(path)
}
if common.FileExist(path) {
confirmAndRemoveDB(path, "full node ancient database")
} else {
log.Info("Full node ancient database missing", "path", path)
}
// Remove the light node database
path = stack.ResolvePath("lightchaindata")
if common.FileExist(path) {
confirmAndRemoveDB(path, "light node database")
} else {
log.Info("Light node database missing", "path", path)
}
return nil
}
// confirmAndRemoveDB prompts the user for a last confirmation and removes the
// folder if accepted.
func confirmAndRemoveDB(database string, kind string) {
confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
switch {
case err != nil:
utils.Fatalf("%v", err)
case !confirm:
log.Info("Database deletion skipped", "path", database)
default:
start := time.Now()
filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
// If we're at the top level folder, recurse into
if path == database {
return nil
}
// Delete all the files, but not subfolders
if !info.IsDir() {
os.Remove(path)
return nil
}
return filepath.SkipDir
})
log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
}
}
func inspect(ctx *cli.Context) error {
var (
prefix []byte
start []byte
)
if ctx.NArg() > 2 {
return fmt.Errorf("Max 2 arguments: %v", ctx.Command.ArgsUsage)
}
if ctx.NArg() >= 1 {
if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
} else {
prefix = d
}
}
if ctx.NArg() >= 2 {
if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
return fmt.Errorf("failed to hex-decode 'start': %v", err)
} else {
start = d
}
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
_, chainDb := utils.MakeChain(ctx, stack, true)
defer chainDb.Close()
return rawdb.InspectDatabase(chainDb, prefix, start)
}
func showLeveldbStats(db ethdb.Stater) {
if stats, err := db.Stat("leveldb.stats"); err != nil {
log.Warn("Failed to read database stats", "error", err)
} else {
fmt.Println(stats)
}
if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
log.Warn("Failed to read database iostats", "error", err)
} else {
fmt.Println(ioStats)
}
}
func dbStats(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
path := stack.ResolvePath("chaindata")
db, err := leveldb.NewCustom(path, "", func(options *opt.Options) {
options.ReadOnly = true
})
if err != nil {
return err
}
showLeveldbStats(db)
err = db.Close()
if err != nil {
log.Info("Close err", "error", err)
}
return nil
}
func dbCompact(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
path := stack.ResolvePath("chaindata")
cache := ctx.GlobalInt(utils.CacheFlag.Name) * ctx.GlobalInt(utils.CacheDatabaseFlag.Name) / 100
db, err := leveldb.NewCustom(path, "", func(options *opt.Options) {
options.OpenFilesCacheCapacity = utils.MakeDatabaseHandles()
options.BlockCacheCapacity = cache / 2 * opt.MiB
options.WriteBuffer = cache / 4 * opt.MiB // Two of these are used internally
})
if err != nil {
return err
}
showLeveldbStats(db)
log.Info("Triggering compaction")
err = db.Compact(nil, nil)
if err != nil {
log.Info("Compact err", "error", err)
}
showLeveldbStats(db)
log.Info("Closing db")
err = db.Close()
if err != nil {
log.Info("Close err", "error", err)
}
log.Info("Exiting")
return err
}
// dbGet shows the value of a given database key
func dbGet(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
path := stack.ResolvePath("chaindata")
db, err := leveldb.NewCustom(path, "", func(options *opt.Options) {
options.ReadOnly = true
})
if err != nil {
return err
}
defer db.Close()
key, err := hexutil.Decode(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
data, err := db.Get(key)
if err != nil {
log.Info("Get operation failed", "error", err)
return err
}
fmt.Printf("key %#x:\n\t%#x\n", key, data)
return nil
}
// dbDelete deletes a key from the database
func dbDelete(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack)
defer db.Close()
key, err := hexutil.Decode(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
if err = db.Delete(key); err != nil {
log.Info("Delete operation returned an error", "error", err)
return err
}
return nil
}
// dbPut overwrites a value in the database
func dbPut(ctx *cli.Context) error {
if ctx.NArg() != 2 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack)
defer db.Close()
var (
key []byte
value []byte
data []byte
err error
)
key, err = hexutil.Decode(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
value, err = hexutil.Decode(ctx.Args().Get(1))
if err != nil {
log.Info("Could not decode the value", "error", err)
return err
}
data, err = db.Get(key)
if err == nil {
fmt.Printf("Previous value:\n%#x\n", data)
}
return db.Put(key, value)
}


@@ -238,7 +238,6 @@ func init() {
removedbCommand,
dumpCommand,
dumpGenesisCommand,
- inspectCommand,
// See accountcmd.go:
accountCommand,
walletCommand,
@@ -254,6 +253,8 @@ func init() {
licenseCommand,
// See config.go
dumpConfigCommand,
+ // see dbcmd.go
+ dbCommand,
// See cmd/utils/flags_legacy.go
utils.ShowDeprecated,
// See snapshot.go


@@ -1073,9 +1073,9 @@ func setLes(ctx *cli.Context, cfg *ethconfig.Config) {
}
}

- // makeDatabaseHandles raises out the number of allowed file handles per process
+ // MakeDatabaseHandles raises out the number of allowed file handles per process
// for Geth and returns half of the allowance to assign to the database.
- func makeDatabaseHandles() int {
+ func MakeDatabaseHandles() int {
limit, err := fdlimit.Maximum()
if err != nil {
Fatalf("Failed to retrieve file descriptor allowance: %v", err)
@@ -1546,7 +1546,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) {
cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
}
- cfg.DatabaseHandles = makeDatabaseHandles()
+ cfg.DatabaseHandles = MakeDatabaseHandles()
if ctx.GlobalIsSet(AncientFlag.Name) {
cfg.DatabaseFreezer = ctx.GlobalString(AncientFlag.Name)
}
@@ -1821,7 +1821,7 @@ func SplitTagsFlag(tagsFlag string) map[string]string {
func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
var (
cache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
- handles = makeDatabaseHandles()
+ handles = MakeDatabaseHandles()
err error
chainDb ethdb.Database


@@ -270,8 +270,8 @@ func (s *stat) Count() string {
// InspectDatabase traverses the entire database and checks the size
// of all different categories of data.
- func InspectDatabase(db ethdb.Database) error {
- it := db.NewIterator(nil, nil)
+ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
+ it := db.NewIterator(keyPrefix, keyStart)
defer it.Release()
var (
@@ -307,8 +307,9 @@ func InspectDatabase(db ethdb.Database) error {
bloomTrieNodes stat
// Meta- and unaccounted data
metadata stat
unaccounted stat
+ shutdownInfo stat
// Totals
total common.StorageSize
@@ -359,6 +360,8 @@ func InspectDatabase(db ethdb.Database) error {
bytes.HasPrefix(key, []byte("bltIndex-")) ||
bytes.HasPrefix(key, []byte("bltRoot-")): // Bloomtrie sub
bloomTrieNodes.Add(size)
+ case bytes.Equal(key, uncleanShutdownKey):
+ shutdownInfo.Add(size)
default:
var accounted bool
for _, meta := range [][]byte{
@@ -413,6 +416,7 @@ func InspectDatabase(db ethdb.Database) error {
{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
+ {"Key-Value store", "Shutdown metadata", shutdownInfo.Size(), shutdownInfo.Count()},
{"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()},
{"Ancient store", "Bodies", ancientBodiesSize.String(), ancients.String()},
{"Ancient store", "Receipt lists", ancientReceiptsSize.String(), ancients.String()},

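A small sketch of calling the widened API above from other tooling, assuming the caller already holds an open ethdb.Database (for instance from utils.MakeChainDatabase, as the new dbcmd.go does); the package name, helper name and the "h" prefix are illustrative only:

// Package dbinspect is a hypothetical example package.
package dbinspect

import (
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb"
)

// InspectHeaderFamily limits the inspection report to keys under the "h"
// prefix, starting at the beginning of that range (nil start key).
func InspectHeaderFamily(db ethdb.Database) error {
    return rawdb.InspectDatabase(db, []byte("h"), nil)
}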

@@ -84,24 +84,36 @@ type Database struct {
// New returns a wrapped LevelDB object. The namespace is the prefix that the
// metrics reporting should use for surfacing internal stats.
func New(file string, cache int, handles int, namespace string) (*Database, error) {
- // Ensure we have some minimal caching and file guarantees
- if cache < minCache {
- cache = minCache
- }
- if handles < minHandles {
- handles = minHandles
- }
+ return NewCustom(file, namespace, func(options *opt.Options) {
+ // Ensure we have some minimal caching and file guarantees
+ if cache < minCache {
+ cache = minCache
+ }
+ if handles < minHandles {
+ handles = minHandles
+ }
+ // Set default options
+ options.OpenFilesCacheCapacity = handles
+ options.BlockCacheCapacity = cache / 2 * opt.MiB
+ options.WriteBuffer = cache / 4 * opt.MiB // Two of these are used internally
+ })
+ }
+
+ // NewCustom returns a wrapped LevelDB object. The namespace is the prefix that the
+ // metrics reporting should use for surfacing internal stats.
+ // The customize function allows the caller to modify the leveldb options.
+ func NewCustom(file string, namespace string, customize func(options *opt.Options)) (*Database, error) {
+ options := configureOptions(customize)
logger := log.New("database", file)
- logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles)
+ usedCache := options.GetBlockCacheCapacity() + options.GetWriteBuffer()*2
+ logCtx := []interface{}{"cache", common.StorageSize(usedCache), "handles", options.GetOpenFilesCacheCapacity()}
+ if options.ReadOnly {
+ logCtx = append(logCtx, "readonly", "true")
+ }
+ logger.Info("Allocated cache and file handles", logCtx...)
// Open the db and recover any potential corruptions
- db, err := leveldb.OpenFile(file, &opt.Options{
- OpenFilesCacheCapacity: handles,
- BlockCacheCapacity: cache / 2 * opt.MiB,
- WriteBuffer: cache / 4 * opt.MiB, // Two of these are used internally
- Filter: filter.NewBloomFilter(10),
- DisableSeeksCompaction: true,
- })
+ db, err := leveldb.OpenFile(file, options)
if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
db, err = leveldb.RecoverFile(file, nil)
}
@@ -133,6 +145,20 @@ func New(file string, cache int, handles int, namespace string) (*Database, error) {
return ldb, nil
}
+ // configureOptions sets some default options, then runs the provided setter.
+ func configureOptions(customizeFn func(*opt.Options)) *opt.Options {
+ // Set default options
+ options := &opt.Options{
+ Filter: filter.NewBloomFilter(10),
+ DisableSeeksCompaction: true,
+ }
+ // Allow caller to make custom modifications to the options
+ if customizeFn != nil {
+ customizeFn(options)
+ }
+ return options
+ }
// Close stops the metrics collection, flushes any pending data to disk and closes
// all io accesses to the underlying key-value store.
func (db *Database) Close() error {

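For reference, a minimal sketch of the new NewCustom entry point, mirroring what the db stats and db get commands do: open an existing LevelDB directory read-only, with no metrics namespace. The path below is a placeholder, not a real default.

package main

import (
    "fmt"
    "log"

    "github.com/ethereum/go-ethereum/ethdb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
    // Illustrative path; point this at a real chaindata directory.
    db, err := leveldb.NewCustom("/path/to/geth/chaindata", "", func(options *opt.Options) {
        options.ReadOnly = true // defaults (bloom filter, no seeks-compaction) come from configureOptions
    })
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // The same leveldb-internal counters that the db stats command prints.
    if stats, err := db.Stat("leveldb.stats"); err == nil {
        fmt.Println(stats)
    }
}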

@@ -25,7 +25,7 @@ import (
)
var (
- CommandHelpTemplate = `{{.cmd.Name}}{{if .cmd.Subcommands}} command{{end}}{{if .cmd.Flags}} [command options]{{end}} [arguments...]
+ CommandHelpTemplate = `{{.cmd.Name}}{{if .cmd.Subcommands}} command{{end}}{{if .cmd.Flags}} [command options]{{end}} {{.cmd.ArgsUsage}}
{{if .cmd.Description}}{{.cmd.Description}}
{{end}}{{if .cmd.Subcommands}}
SUBCOMMANDS:
@@ -36,7 +36,7 @@ SUBCOMMANDS:
{{end}}
{{end}}{{end}}`
- OriginCommandHelpTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
+ OriginCommandHelpTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} {{.ArgsUsage}}
{{if .Description}}{{.Description}}
{{end}}{{if .Subcommands}}
SUBCOMMANDS: