// Copyright 2021 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main

import (
	"bytes"
	"fmt"
	"os"
	"os/signal"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"syscall"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/console/prompt"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/olekukonko/tablewriter"
	"gopkg.in/urfave/cli.v1"
)
var (
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: "",
		Flags:     utils.DatabasePathFlags,
		Category:  "DATABASE COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dbCommand = cli.Command{
		Name:      "db",
		Usage:     "Low level database operations",
		ArgsUsage: "",
		Category:  "DATABASE COMMANDS",
		Subcommands: []cli.Command{
			dbInspectCmd,
			dbStatCmd,
			dbCompactCmd,
			dbGetCmd,
			dbDeleteCmd,
			dbPutCmd,
			dbGetSlotsCmd,
			dbDumpFreezerIndex,
			dbImportCmd,
			dbExportCmd,
			dbMetadataCmd,
			dbMigrateFreezerCmd,
			dbCheckStateContentCmd,
		},
	}
	dbInspectCmd = cli.Command{
		Action:    utils.MigrateFlags(inspect),
		Name:      "inspect",
		ArgsUsage: "<prefix> <start>",
		Flags: utils.GroupFlags([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Usage:       "Inspect the storage size for each type of data in the database",
		Description: `This command iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
	}
	dbCheckStateContentCmd = cli.Command{
		Action:    utils.MigrateFlags(checkStateContent),
		Name:      "check-state-content",
		ArgsUsage: "<start (optional)>",
		Flags:     utils.GroupFlags(utils.NetworkFlags, utils.DatabasePathFlags),
		Usage:     "Verify that state data is cryptographically correct",
		Description: `This command iterates the entire database for 32-byte keys, looking for rlp-encoded trie nodes.
For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates
a data corruption.`,
	}
	dbStatCmd = cli.Command{
		Action: utils.MigrateFlags(dbStats),
		Name:   "stats",
		Usage:  "Print leveldb statistics",
		Flags: utils.GroupFlags([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
	}
	dbCompactCmd = cli.Command{
		Action: utils.MigrateFlags(dbCompact),
		Name:   "compact",
		Usage:  "Compact leveldb database. WARNING: May take a very long time",
		Flags: utils.GroupFlags([]cli.Flag{
			utils.SyncModeFlag,
			utils.CacheFlag,
			utils.CacheDatabaseFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: `This command performs a database compaction.
WARNING: This operation may take a very long time to finish, and may cause database
corruption if it is aborted during execution!`,
	}
	dbGetCmd = cli.Command{
		Action:    utils.MigrateFlags(dbGet),
		Name:      "get",
		Usage:     "Show the value of a database key",
		ArgsUsage: "<hex-encoded key>",
		Flags: utils.GroupFlags([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "This command looks up the specified database key from the database.",
	}
	dbDeleteCmd = cli.Command{
		Action:    utils.MigrateFlags(dbDelete),
		Name:      "delete",
		Usage:     "Delete a database key (WARNING: may corrupt your database)",
		ArgsUsage: "<hex-encoded key>",
		Flags: utils.GroupFlags([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: `This command deletes the specified database key from the database.
WARNING: This is a low-level operation which may cause database corruption!`,
	}
	dbPutCmd = cli.Command{
		Action:    utils.MigrateFlags(dbPut),
		Name:      "put",
		Usage:     "Set the value of a database key (WARNING: may corrupt your database)",
		ArgsUsage: "<hex-encoded key> <hex-encoded value>",
		Flags: utils.GroupFlags([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: `This command sets a given database key to the given value.
WARNING: This is a low-level operation which may cause database corruption!`,
	}
	dbGetSlotsCmd = cli.Command{
		Action:    utils.MigrateFlags(dbDumpTrie),
		Name:      "dumptrie",
		Usage:     "Show the storage key/values of a given storage trie",
		ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
		Flags: utils.GroupFlags([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "This command dumps out the key/value slots of the storage trie with the given root hash.",
	}
	dbDumpFreezerIndex = cli.Command{
		Action:    utils.MigrateFlags(freezerInspect),
		Name:      "freezer-index",
		Usage:     "Dump out the index of a given freezer type",
		ArgsUsage: "<type> <start (int)> <end (int)>",
		Flags: utils.GroupFlags([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "This command displays information about the freezer index.",
	}
	dbImportCmd = cli.Command{
		Action:    utils.MigrateFlags(importLDBdata),
		Name:      "import",
		Usage:     "Imports leveldb-data from an exported RLP dump.",
		ArgsUsage: "<dumpfile> <start (optional)>",
		Flags: utils.GroupFlags([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "The import command imports the specific chain data from an RLP encoded stream.",
	}
	dbExportCmd = cli.Command{
		Action:    utils.MigrateFlags(exportChaindata),
		Name:      "export",
		Usage:     "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
		ArgsUsage: "<type> <dumpfile>",
		Flags: utils.GroupFlags([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
	}
	dbMetadataCmd = cli.Command{
		Action: utils.MigrateFlags(showMetaData),
		Name:   "metadata",
		Usage:  "Shows metadata about the chain status.",
		Flags: utils.GroupFlags([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "Shows metadata about the chain status.",
	}
	dbMigrateFreezerCmd = cli.Command{
		Action:    utils.MigrateFlags(freezerMigrate),
		Name:      "freezer-migrate",
		Usage:     "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
		ArgsUsage: "",
		Flags: utils.GroupFlags([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
WARNING: please back-up the receipt files in your ancients before running this command.`,
	}
)
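// removeDB prompts the user for confirmation and then deletes the full node
// state database, the ancient (freezer) database and the light node database,
// if they exist.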
func removeDB(ctx *cli.Context) error {
	stack, config := makeConfigNode(ctx)

	// Remove the full node state database
	path := stack.ResolvePath("chaindata")
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "full node state database")
	} else {
		log.Info("Full node state database missing", "path", path)
	}
	// Remove the full node ancient database
	path = config.Eth.DatabaseFreezer
	switch {
	case path == "":
		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
	case !filepath.IsAbs(path):
		path = config.Node.ResolvePath(path)
	}
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "full node ancient database")
	} else {
		log.Info("Full node ancient database missing", "path", path)
	}
	// Remove the light node database
	path = stack.ResolvePath("lightchaindata")
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "light node database")
	} else {
		log.Info("Light node database missing", "path", path)
	}
	return nil
}
// confirmAndRemoveDB prompts the user for a last confirmation and removes the
// folder if accepted.
func confirmAndRemoveDB(database string, kind string) {
	confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
	switch {
	case err != nil:
		utils.Fatalf("%v", err)
	case !confirm:
		log.Info("Database deletion skipped", "path", database)
	default:
		start := time.Now()
		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
			// If we're at the top level folder, recurse into it
			if path == database {
				return nil
			}
			// Delete all the files, but not subfolders
			if !info.IsDir() {
				os.Remove(path)
				return nil
			}
			return filepath.SkipDir
		})
		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
	}
}
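// inspect iterates the chain database and prints a per-category breakdown of
// storage usage, optionally restricted to keys with the given prefix and
// starting position.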
func inspect(ctx *cli.Context) error {
	var (
		prefix []byte
		start  []byte
	)
	if ctx.NArg() > 2 {
		return fmt.Errorf("max 2 arguments: %v", ctx.Command.ArgsUsage)
	}
	if ctx.NArg() >= 1 {
		if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
			return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
		} else {
			prefix = d
		}
	}
	if ctx.NArg() >= 2 {
		if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
			return fmt.Errorf("failed to hex-decode 'start': %v", err)
		} else {
			start = d
		}
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()

	return rawdb.InspectDatabase(db, prefix, start)
}
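// checkStateContent scans all 32-byte keys in the database and verifies that
// each stored value hashes back to its key, reporting every mismatch as a
// state corruption.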
func checkStateContent(ctx *cli.Context) error {
	var (
		prefix []byte
		start  []byte
	)
	if ctx.NArg() > 1 {
		return fmt.Errorf("max 1 argument: %v", ctx.Command.ArgsUsage)
	}
	if ctx.NArg() > 0 {
		if d, err := hexutil.Decode(ctx.Args().First()); err != nil {
			return fmt.Errorf("failed to hex-decode 'start': %v", err)
		} else {
			start = d
		}
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()
	var (
		it        = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
		hasher    = crypto.NewKeccakState()
		got       = make([]byte, 32)
		errs      int
		count     int
		startTime = time.Now()
		lastLog   = time.Now()
	)
	for it.Next() {
		count++
		k := it.Key()
		v := it.Value()
		hasher.Reset()
		hasher.Write(v)
		hasher.Read(got)
		if !bytes.Equal(k, got) {
			errs++
			fmt.Printf("Error at 0x%x\n", k)
			fmt.Printf(" Hash: 0x%x\n", got)
			fmt.Printf(" Data: 0x%x\n", v)
		}
		if time.Since(lastLog) > 8*time.Second {
			log.Info("Iterating the database", "at", fmt.Sprintf("%#x", k), "elapsed", common.PrettyDuration(time.Since(startTime)))
			lastLog = time.Now()
		}
	}
	if err := it.Error(); err != nil {
		return err
	}
	log.Info("Iterated the state content", "errors", errs, "items", count)
	return nil
}
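// showLeveldbStats prints the leveldb internal statistics and I/O statistics
// of the given database.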
func showLeveldbStats(db ethdb.KeyValueStater) {
	if stats, err := db.Stat("leveldb.stats"); err != nil {
		log.Warn("Failed to read database stats", "error", err)
	} else {
		fmt.Println(stats)
	}
	if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
		log.Warn("Failed to read database iostats", "error", err)
	} else {
		fmt.Println(ioStats)
	}
}
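// dbStats opens the chain database read-only and prints its leveldb statistics.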
func dbStats(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()

	showLeveldbStats(db)
	return nil
}
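// dbCompact runs a full compaction on the chain database, printing the leveldb
// statistics before and after the operation.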
func dbCompact(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()

	log.Info("Stats before compaction")
	showLeveldbStats(db)

	log.Info("Triggering compaction")
	if err := db.Compact(nil, nil); err != nil {
		log.Info("Compact err", "error", err)
		return err
	}
	log.Info("Stats after compaction")
	showLeveldbStats(db)
	return nil
}
// dbGet shows the value of a given database key
func dbGet(ctx *cli.Context) error {
	if ctx.NArg() != 1 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()

	key, err := common.ParseHexOrString(ctx.Args().Get(0))
	if err != nil {
		log.Info("Could not decode the key", "error", err)
		return err
	}

	data, err := db.Get(key)
	if err != nil {
		log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
		return err
	}
	fmt.Printf("key %#x: %#x\n", key, data)
	return nil
}
// dbDelete deletes a key from the database
func dbDelete(ctx *cli.Context) error {
	if ctx.NArg() != 1 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()

	key, err := common.ParseHexOrString(ctx.Args().Get(0))
	if err != nil {
		log.Info("Could not decode the key", "error", err)
		return err
	}
	data, err := db.Get(key)
	if err == nil {
		fmt.Printf("Previous value: %#x\n", data)
	}
	if err = db.Delete(key); err != nil {
		log.Info("Delete operation returned an error", "key", fmt.Sprintf("%#x", key), "error", err)
		return err
	}
	return nil
}
// dbPut overwrites a value in the database
func dbPut(ctx *cli.Context) error {
	if ctx.NArg() != 2 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()

	var (
		key   []byte
		value []byte
		data  []byte
		err   error
	)
	key, err = common.ParseHexOrString(ctx.Args().Get(0))
	if err != nil {
		log.Info("Could not decode the key", "error", err)
		return err
	}
	value, err = hexutil.Decode(ctx.Args().Get(1))
	if err != nil {
		log.Info("Could not decode the value", "error", err)
		return err
	}
	data, err = db.Get(key)
	if err == nil {
		fmt.Printf("Previous value: %#x\n", data)
	}
	return db.Put(key, value)
}
// dbDumpTrie shows the key-value slots of a given storage trie
func dbDumpTrie(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()
	var (
		root  []byte
		start []byte
		max   = int64(-1)
		err   error
	)
	if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
		log.Info("Could not decode the root", "error", err)
		return err
	}
	stRoot := common.BytesToHash(root)
	if ctx.NArg() >= 2 {
		if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
			log.Info("Could not decode the seek position", "error", err)
			return err
		}
	}
	if ctx.NArg() >= 3 {
		if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
			log.Info("Could not decode the max count", "error", err)
			return err
		}
	}
	theTrie, err := trie.New(stRoot, trie.NewDatabase(db))
	if err != nil {
		return err
	}
	var count int64
	it := trie.NewIterator(theTrie.NodeIterator(start))
	for it.Next() {
		if max > 0 && count == max {
			fmt.Printf("Exiting after %d values\n", count)
			break
		}
		fmt.Printf("  %d. key %#x: %#x\n", count, it.Key, it.Value)
		count++
	}
	return it.Err
}
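// freezerInspect dumps the index entries of the given freezer table between
// the provided start and end offsets.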
func freezerInspect(ctx *cli.Context) error {
	var (
		start, end    int64
		disableSnappy bool
		err           error
	)
	if ctx.NArg() < 3 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	kind := ctx.Args().Get(0)
	if noSnap, ok := rawdb.FreezerNoSnappy[kind]; !ok {
		var options []string
		for opt := range rawdb.FreezerNoSnappy {
			options = append(options, opt)
		}
		sort.Strings(options)
		return fmt.Errorf("could not read freezer-type '%v'. Available options: %v", kind, options)
	} else {
		disableSnappy = noSnap
	}
	if start, err = strconv.ParseInt(ctx.Args().Get(1), 10, 64); err != nil {
		log.Info("Could not read start-param", "error", err)
		return err
	}
	if end, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
		log.Info("Could not read count param", "error", err)
		return err
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
	log.Info("Opening freezer", "location", path, "name", kind)
	if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil {
		return err
	} else {
		f.DumpIndex(start, end)
	}
	return nil
}
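// importLDBdata imports key/value pairs from an exported RLP dump into the
// chain database, optionally skipping ahead to the given start position, and
// stops at the next batch on SIGINT/SIGTERM.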
func importLDBdata(ctx *cli.Context) error {
	start := 0
	switch ctx.NArg() {
	case 1:
		break
	case 2:
		s, err := strconv.Atoi(ctx.Args().Get(1))
		if err != nil {
			return fmt.Errorf("second arg must be an integer: %v", err)
		}
		start = s
	default:
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	var (
		fName     = ctx.Args().Get(0)
		stack, _  = makeConfigNode(ctx)
		interrupt = make(chan os.Signal, 1)
		stop      = make(chan struct{})
	)
	defer stack.Close()
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during ldb import, stopping at next batch")
		}
		close(stop)
	}()
	db := utils.MakeChainDatabase(ctx, stack, false)
	return utils.ImportLDBData(db, fName, int64(start), stop)
}
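// preimageIterator wraps a database iterator and yields only well-formed
// preimage entries for export.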
type preimageIterator struct {
	iter ethdb.Iterator
}

func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
	for iter.iter.Next() {
		key := iter.iter.Key()
		if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
			return utils.OpBatchAdd, key, iter.iter.Value(), true
		}
	}
	return 0, nil, nil, false
}

func (iter *preimageIterator) Release() {
	iter.iter.Release()
}
type snapshotIterator struct {
	init    bool
	account ethdb.Iterator
	storage ethdb.Iterator
}

func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
	if !iter.init {
		iter.init = true
		return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
	}
	for iter.account.Next() {
		key := iter.account.Key()
		if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
			return utils.OpBatchAdd, key, iter.account.Value(), true
		}
	}
	for iter.storage.Next() {
		key := iter.storage.Key()
		if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
			return utils.OpBatchAdd, key, iter.storage.Value(), true
		}
	}
	return 0, nil, nil, false
}

func (iter *snapshotIterator) Release() {
	iter.account.Release()
	iter.storage.Release()
}
// chainExporters defines the export scheme for all exportable chain data.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
	"preimage": func(db ethdb.Database) utils.ChainDataIterator {
		iter := db.NewIterator(rawdb.PreimagePrefix, nil)
		return &preimageIterator{iter: iter}
	},
	"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
		account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
		storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
		return &snapshotIterator{account: account, storage: storage}
	},
}
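// exportChaindata writes the chosen category of chain data ("preimage" or
// "snapshot") into an RLP dump file, stopping at the next batch on
// SIGINT/SIGTERM.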
func exportChaindata(ctx *cli.Context) error {
	if ctx.NArg() < 2 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	// Parse the required chain data type, make sure it's supported.
	kind := ctx.Args().Get(0)
	kind = strings.ToLower(strings.Trim(kind, " "))
	exporter, ok := chainExporters[kind]
	if !ok {
		var kinds []string
		for kind := range chainExporters {
			kinds = append(kinds, kind)
		}
		return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
	}
	var (
		stack, _  = makeConfigNode(ctx)
		interrupt = make(chan os.Signal, 1)
		stop      = make(chan struct{})
	)
	defer stack.Close()
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during db export, stopping at next batch")
		}
		close(stop)
	}()
	db := utils.MakeChainDatabase(ctx, stack, true)
	return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}
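// showMetaData prints a table of chain metadata (head block, head header, sync
// and snapshot markers, freezer item count) read from the database.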
func showMetaData(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	db := utils.MakeChainDatabase(ctx, stack, true)
	ancients, err := db.Ancients()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
	}
	pp := func(val *uint64) string {
		if val == nil {
			return "<nil>"
		}
		return fmt.Sprintf("%d (0x%x)", *val, *val)
	}
	data := [][]string{
		{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
		{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
		{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
		{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
	if b := rawdb.ReadHeadBlock(db); b != nil {
		data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
		data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
		data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
	}
	if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
		data = append(data, []string{"SkeletonSyncStatus", string(b)})
	}
	if h := rawdb.ReadHeadHeader(db); h != nil {
		data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
		data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
		data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
	}
	data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
		{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
		{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
		{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
		{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
		{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
		{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
		{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
		{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
	}...)
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Field", "Value"})
	table.AppendBulk(data)
	table.Render()
	return nil
}
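// freezerMigrate converts any legacy-format receipts found in the freezer to
// the current storage format.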
func freezerMigrate(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()

	// Check first block for legacy receipt format
	numAncients, err := db.Ancients()
	if err != nil {
		return err
	}
	if numAncients < 1 {
		log.Info("No receipts in freezer to migrate")
		return nil
	}

	isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
	if err != nil {
		return err
	}
	if !isFirstLegacy {
		log.Info("No legacy receipts to migrate")
		return nil
	}

	log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
	start := time.Now()
	if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
		return err
	}
	if err := db.Close(); err != nil {
		return err
	}
	log.Info("Migration finished", "duration", time.Since(start))
	return nil
}
// dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
// non-empty receipt and checks its format. The index of this first non-empty element is
// the second return parameter.
func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
	// Check first block for legacy receipt format
	numAncients, err := db.Ancients()
	if err != nil {
		return false, 0, err
	}
	if numAncients < 1 {
		return false, 0, nil
	}
	if firstIdx >= numAncients {
		return false, firstIdx, nil
	}
	var (
		legacy       bool
		blob         []byte
		emptyRLPList = []byte{192}
	)
	// Find first block with non-empty receipt, only if
	// the index is not already provided.
	if firstIdx == 0 {
		for i := uint64(0); i < numAncients; i++ {
			blob, err = db.Ancient("receipts", i)
			if err != nil {
				return false, 0, err
			}
			if len(blob) == 0 {
				continue
			}
			if !bytes.Equal(blob, emptyRLPList) {
				firstIdx = i
				break
			}
		}
	}
	// Is first non-empty receipt legacy?
	first, err := db.Ancient("receipts", firstIdx)
	if err != nil {
		return false, 0, err
	}
	legacy, err = types.IsLegacyStoredReceipts(first)
	return legacy, firstIdx, err
}