2015-07-07 00:54:22 +00:00
|
|
|
// Copyright 2015 The go-ethereum Authors
|
|
|
|
// This file is part of go-ethereum.
|
|
|
|
//
|
|
|
|
// go-ethereum is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// go-ethereum is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
2015-07-22 16:48:40 +00:00
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2015-07-07 00:54:22 +00:00
|
|
|
// GNU General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU General Public License
|
2015-07-22 16:48:40 +00:00
|
|
|
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
2015-07-07 00:54:22 +00:00
|
|
|
|
2015-05-27 11:43:49 +00:00
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2016-10-21 08:40:00 +00:00
|
|
|
"runtime"
|
2015-05-27 11:43:49 +00:00
|
|
|
"strconv"
|
2016-10-21 08:40:00 +00:00
|
|
|
"sync/atomic"
|
2015-05-27 11:43:49 +00:00
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
|
|
|
"github.com/ethereum/go-ethereum/common"
|
2016-05-06 09:40:23 +00:00
|
|
|
"github.com/ethereum/go-ethereum/console"
|
2015-05-27 11:43:49 +00:00
|
|
|
"github.com/ethereum/go-ethereum/core"
|
|
|
|
"github.com/ethereum/go-ethereum/core/state"
|
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
2015-09-14 07:35:57 +00:00
|
|
|
"github.com/ethereum/go-ethereum/ethdb"
|
2015-05-27 11:43:49 +00:00
|
|
|
"github.com/ethereum/go-ethereum/logger/glog"
|
2016-10-18 11:44:41 +00:00
|
|
|
"github.com/ethereum/go-ethereum/trie"
|
2016-10-18 10:45:16 +00:00
|
|
|
"github.com/syndtr/goleveldb/leveldb/util"
|
2016-06-09 09:44:42 +00:00
|
|
|
"gopkg.in/urfave/cli.v1"
|
2015-05-27 11:43:49 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// Blockchain maintenance commands wired into the geth CLI. Each entry binds a
// subcommand name to its Action function defined further down in this file.
var (
	// importCommand imports a previously exported chain file into the database.
	importCommand = cli.Command{
		Action:    importChain,
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename>",
		Category:  "BLOCKCHAIN COMMANDS",
		Description: `
TODO: Please write this
`,
	}
	// exportCommand writes the chain (optionally a block range) to a file.
	exportCommand = cli.Command{
		Action:    exportChain,
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Category:  "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing.
`,
	}
	// upgradedbCommand migrates the chain database to the current version
	// by exporting, wiping and reimporting the chain.
	upgradedbCommand = cli.Command{
		Action:    upgradeDB,
		Name:      "upgradedb",
		Usage:     "Upgrade chainblock database",
		ArgsUsage: " ",
		Category:  "BLOCKCHAIN COMMANDS",
		Description: `
TODO: Please write this
`,
	}
	// removedbCommand deletes the chain database after user confirmation.
	removedbCommand = cli.Command{
		Action:    removeDB,
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Category:  "BLOCKCHAIN COMMANDS",
		Description: `
TODO: Please write this
`,
	}
	// dumpCommand prints the state dump of one or more blocks as JSON.
	dumpCommand = cli.Command{
		Action:    dump,
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Category:  "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.
`,
	}
)
|
|
|
|
|
2016-06-10 08:23:00 +00:00
|
|
|
func importChain(ctx *cli.Context) error {
|
2015-05-27 11:43:49 +00:00
|
|
|
if len(ctx.Args()) != 1 {
|
|
|
|
utils.Fatalf("This command requires an argument.")
|
|
|
|
}
|
2016-08-23 08:36:40 +00:00
|
|
|
if ctx.GlobalBool(utils.TestNetFlag.Name) {
|
|
|
|
state.StartingNonce = 1048576 // (2**20)
|
|
|
|
}
|
2016-08-18 11:28:17 +00:00
|
|
|
stack := makeFullNode(ctx)
|
|
|
|
chain, chainDb := utils.MakeChain(ctx, stack)
|
2016-10-18 10:45:16 +00:00
|
|
|
defer chainDb.Close()
|
|
|
|
|
2016-10-21 08:40:00 +00:00
|
|
|
// Start periodically gathering memory profiles
|
|
|
|
var peakMemAlloc, peakMemSys uint64
|
|
|
|
go func() {
|
|
|
|
stats := new(runtime.MemStats)
|
|
|
|
for {
|
|
|
|
runtime.ReadMemStats(stats)
|
|
|
|
if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
|
|
|
|
atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
|
|
|
|
}
|
|
|
|
if atomic.LoadUint64(&peakMemSys) < stats.Sys {
|
|
|
|
atomic.StoreUint64(&peakMemSys, stats.Sys)
|
|
|
|
}
|
|
|
|
time.Sleep(5 * time.Second)
|
|
|
|
}
|
|
|
|
}()
|
2016-10-18 10:45:16 +00:00
|
|
|
// Import the chain
|
2015-05-27 11:43:49 +00:00
|
|
|
start := time.Now()
|
2016-10-18 10:45:16 +00:00
|
|
|
if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
|
2015-05-27 14:02:08 +00:00
|
|
|
utils.Fatalf("Import error: %v", err)
|
|
|
|
}
|
2016-10-21 08:40:00 +00:00
|
|
|
fmt.Printf("Import done in %v.\n\n", time.Since(start))
|
2016-10-18 10:45:16 +00:00
|
|
|
|
2016-10-21 08:40:00 +00:00
|
|
|
// Output pre-compaction stats mostly to see the import trashing
|
|
|
|
db := chainDb.(*ethdb.LDBDatabase)
|
2016-10-18 11:44:41 +00:00
|
|
|
|
2016-10-21 08:40:00 +00:00
|
|
|
stats, err := db.LDB().GetProperty("leveldb.stats")
|
|
|
|
if err != nil {
|
|
|
|
utils.Fatalf("Failed to read database stats: %v", err)
|
|
|
|
}
|
|
|
|
fmt.Println(stats)
|
|
|
|
fmt.Printf("Trie cache misses: %d\n", trie.CacheMisses())
|
|
|
|
fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())
|
2016-10-18 10:45:16 +00:00
|
|
|
|
2016-10-21 08:40:00 +00:00
|
|
|
// Print the memory statistics used by the importing
|
|
|
|
mem := new(runtime.MemStats)
|
|
|
|
runtime.ReadMemStats(mem)
|
|
|
|
|
|
|
|
fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
|
|
|
|
fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
|
|
|
|
fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000)
|
|
|
|
fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs))
|
|
|
|
|
|
|
|
// Compact the entire database to more accurately measure disk io and print the stats
|
|
|
|
start = time.Now()
|
|
|
|
fmt.Println("Compacting entire database...")
|
|
|
|
if err = db.LDB().CompactRange(util.Range{}); err != nil {
|
|
|
|
utils.Fatalf("Compaction failed: %v", err)
|
2016-10-18 10:45:16 +00:00
|
|
|
}
|
2016-10-21 08:40:00 +00:00
|
|
|
fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
|
|
|
|
|
|
|
|
stats, err = db.LDB().GetProperty("leveldb.stats")
|
|
|
|
if err != nil {
|
|
|
|
utils.Fatalf("Failed to read database stats: %v", err)
|
|
|
|
}
|
|
|
|
fmt.Println(stats)
|
|
|
|
|
2016-06-10 08:23:00 +00:00
|
|
|
return nil
|
2015-05-27 11:43:49 +00:00
|
|
|
}
|
|
|
|
|
2016-06-10 08:23:00 +00:00
|
|
|
func exportChain(ctx *cli.Context) error {
|
2015-06-06 13:50:23 +00:00
|
|
|
if len(ctx.Args()) < 1 {
|
2015-05-27 11:43:49 +00:00
|
|
|
utils.Fatalf("This command requires an argument.")
|
|
|
|
}
|
2016-08-18 11:28:17 +00:00
|
|
|
stack := makeFullNode(ctx)
|
|
|
|
chain, _ := utils.MakeChain(ctx, stack)
|
2015-05-27 11:43:49 +00:00
|
|
|
start := time.Now()
|
2015-06-06 04:02:32 +00:00
|
|
|
|
|
|
|
var err error
|
2015-06-06 14:04:13 +00:00
|
|
|
fp := ctx.Args().First()
|
2015-06-06 04:02:32 +00:00
|
|
|
if len(ctx.Args()) < 3 {
|
2015-06-06 14:04:13 +00:00
|
|
|
err = utils.ExportChain(chain, fp)
|
2015-06-06 04:02:32 +00:00
|
|
|
} else {
|
|
|
|
// This can be improved to allow for numbers larger than 9223372036854775807
|
|
|
|
first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
|
|
|
|
last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
|
|
|
|
if ferr != nil || lerr != nil {
|
2015-06-06 14:04:13 +00:00
|
|
|
utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
|
2015-06-06 04:02:32 +00:00
|
|
|
}
|
2015-06-06 14:04:13 +00:00
|
|
|
if first < 0 || last < 0 {
|
|
|
|
utils.Fatalf("Export error: block number must be greater than 0\n")
|
|
|
|
}
|
|
|
|
err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
|
2015-06-06 04:02:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
2015-05-27 11:43:49 +00:00
|
|
|
utils.Fatalf("Export error: %v\n", err)
|
|
|
|
}
|
|
|
|
fmt.Printf("Export done in %v", time.Since(start))
|
2016-06-10 08:23:00 +00:00
|
|
|
return nil
|
2015-05-27 11:43:49 +00:00
|
|
|
}
|
|
|
|
|
2016-06-10 08:23:00 +00:00
|
|
|
func removeDB(ctx *cli.Context) error {
|
2016-08-18 11:28:17 +00:00
|
|
|
stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
|
2016-01-13 18:35:48 +00:00
|
|
|
dbdir := stack.ResolvePath(utils.ChainDbName(ctx))
|
2016-08-18 11:28:17 +00:00
|
|
|
if !common.FileExist(dbdir) {
|
|
|
|
fmt.Println(dbdir, "does not exist")
|
|
|
|
return nil
|
2015-05-27 11:43:49 +00:00
|
|
|
}
|
|
|
|
|
2016-08-18 11:28:17 +00:00
|
|
|
fmt.Println(dbdir)
|
|
|
|
confirm, err := console.Stdin.PromptConfirm("Remove this database?")
|
|
|
|
switch {
|
|
|
|
case err != nil:
|
|
|
|
utils.Fatalf("%v", err)
|
|
|
|
case !confirm:
|
|
|
|
fmt.Println("Operation aborted")
|
|
|
|
default:
|
|
|
|
fmt.Println("Removing...")
|
2015-05-27 11:43:49 +00:00
|
|
|
start := time.Now()
|
2016-08-18 11:28:17 +00:00
|
|
|
os.RemoveAll(dbdir)
|
2015-05-27 11:43:49 +00:00
|
|
|
fmt.Printf("Removed in %v\n", time.Since(start))
|
|
|
|
}
|
2016-06-10 08:23:00 +00:00
|
|
|
return nil
|
2015-05-27 11:43:49 +00:00
|
|
|
}
|
|
|
|
|
2016-06-10 08:23:00 +00:00
|
|
|
// upgradeDB migrates the chain database to the current core.BlockChainVersion
// by exporting the chain to a timestamped backup file, wiping the database
// directory and reimporting the backup. The sequence is order-sensitive: the
// database is closed before removal and reopened for the reimport. On import
// failure the backup file is kept so the user can retry with the import
// command; on success it is deleted.
func upgradeDB(ctx *cli.Context) error {
	glog.Infoln("Upgrading blockchain database")

	stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
	chain, chainDb := utils.MakeChain(ctx, stack)
	bcVersion := core.GetBlockChainVersion(chainDb)
	if bcVersion == 0 {
		// A zero stored version means a fresh database; treat it as current.
		bcVersion = core.BlockChainVersion
	}

	// Export the current chain.
	filename := fmt.Sprintf("blockchain_%d_%s.chain", bcVersion, time.Now().Format("20060102_150405"))
	exportFile := filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), filename)
	if err := utils.ExportChain(chain, exportFile); err != nil {
		utils.Fatalf("Unable to export chain for reimport %s", err)
	}
	// Close before deleting the on-disk directory, then wipe it so the
	// reimport starts from an empty database.
	chainDb.Close()
	if dir := dbDirectory(chainDb); dir != "" {
		os.RemoveAll(dir)
	}

	// Import the chain file.
	chain, chainDb = utils.MakeChain(ctx, stack)
	core.WriteBlockChainVersion(chainDb, core.BlockChainVersion)
	err := utils.ImportChain(chain, exportFile)
	chainDb.Close()
	if err != nil {
		utils.Fatalf("Import error %v (a backup is made in %s, use the import command to import it)", err, exportFile)
	} else {
		os.Remove(exportFile)
		glog.Infoln("Import finished")
	}
	return nil
}
|
|
|
|
|
2016-08-18 11:28:17 +00:00
|
|
|
func dbDirectory(db ethdb.Database) string {
|
|
|
|
ldb, ok := db.(*ethdb.LDBDatabase)
|
|
|
|
if !ok {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
return ldb.Path()
|
|
|
|
}
|
|
|
|
|
2016-06-10 08:23:00 +00:00
|
|
|
func dump(ctx *cli.Context) error {
|
2016-08-18 11:28:17 +00:00
|
|
|
stack := makeFullNode(ctx)
|
|
|
|
chain, chainDb := utils.MakeChain(ctx, stack)
|
2015-05-27 11:43:49 +00:00
|
|
|
for _, arg := range ctx.Args() {
|
|
|
|
var block *types.Block
|
|
|
|
if hashish(arg) {
|
2016-04-05 13:22:04 +00:00
|
|
|
block = chain.GetBlockByHash(common.HexToHash(arg))
|
2015-05-27 11:43:49 +00:00
|
|
|
} else {
|
|
|
|
num, _ := strconv.Atoi(arg)
|
|
|
|
block = chain.GetBlockByNumber(uint64(num))
|
|
|
|
}
|
|
|
|
if block == nil {
|
|
|
|
fmt.Println("{}")
|
|
|
|
utils.Fatalf("block not found")
|
|
|
|
} else {
|
2015-10-06 14:35:55 +00:00
|
|
|
state, err := state.New(block.Root(), chainDb)
|
|
|
|
if err != nil {
|
|
|
|
utils.Fatalf("could not create new state: %v", err)
|
|
|
|
}
|
2015-05-27 11:43:49 +00:00
|
|
|
fmt.Printf("%s\n", state.Dump())
|
|
|
|
}
|
|
|
|
}
|
2015-08-06 17:57:39 +00:00
|
|
|
chainDb.Close()
|
2016-06-10 08:23:00 +00:00
|
|
|
return nil
|
2015-05-27 11:43:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// hashish returns true for strings that look like hashes.
|
|
|
|
// hashish reports whether x looks like a hash rather than a decimal block
// number, i.e. whether it fails to parse as an integer.
func hashish(x string) bool {
	if _, err := strconv.Atoi(x); err == nil {
		return false
	}
	return true
}
|
|
|
|
|
2015-09-14 07:35:57 +00:00
|
|
|
func closeAll(dbs ...ethdb.Database) {
|
2015-05-27 11:43:49 +00:00
|
|
|
for _, db := range dbs {
|
|
|
|
db.Close()
|
|
|
|
}
|
|
|
|
}
|