// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-ethereum commands.
package utils

import (
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"os/signal"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/internal/debug"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/rlp"

	"gopkg.in/urfave/cli.v1"
)

const (
	importBatchSize = 2500
)

// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	if runtime.GOOS == "windows" {
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	} else {
		outf, _ := os.Stdout.Stat()
		errf, _ := os.Stderr.Stat()
		if outf != nil && errf != nil && os.SameFile(outf, errf) {
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}
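
// A minimal, hypothetical usage sketch for Fatalf as a command would call it
// (loadConfig and the utils import path are assumptions for illustration):
//
//	cfg, err := loadConfig(path) // hypothetical helper, not part of this package
//	if err != nil {
//		utils.Fatalf("Failed to load configuration: %v", err)
//	}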

// StartNode boots up the given protocol stack and registers a signal handler
// that shuts the node down on SIGINT/SIGTERM, panicking if the user keeps
// interrupting after the shutdown has begun. If a minimum free disk space is
// configured, it also starts a background disk-space monitor.
func StartNode(ctx *cli.Context, stack *node.Node) {
	if err := stack.Start(); err != nil {
		Fatalf("Error starting protocol stack: %v", err)
	}
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(sigc)

		minFreeDiskSpace := ethconfig.Defaults.TrieDirtyCache
		if ctx.GlobalIsSet(MinFreeDiskSpaceFlag.Name) {
			minFreeDiskSpace = ctx.GlobalInt(MinFreeDiskSpaceFlag.Name)
		} else if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
			minFreeDiskSpace = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
		}
		if minFreeDiskSpace > 0 {
			go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
		}

		<-sigc
		log.Info("Got interrupt, shutting down...")
		go stack.Close()
		for i := 10; i > 0; i-- {
			<-sigc
			if i > 1 {
				log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
			}
		}
		debug.Exit() // ensure trace and CPU profile data is flushed.
		debug.LoudPanic("boom")
	}()
}
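
// A hedged sketch of how a urfave/cli command action might use StartNode;
// makeFullNode is an assumed helper returning a configured *node.Node and is
// not defined in this file:
//
//	func runGeth(ctx *cli.Context) error {
//		stack := makeFullNode(ctx) // hypothetical node constructor
//		defer stack.Close()
//
//		utils.StartNode(ctx, stack) // registers the interrupt handler above
//		stack.Wait()                // block until the node is shut down
//		return nil
//	}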

// monitorFreeDiskSpace polls the free disk space available to the given path
// once a minute and asks the node to shut down (via sigc) if it falls below
// the critical threshold, warning beforehand when it drops under twice that.
func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) {
	for {
		freeSpace, err := getFreeDiskSpace(path)
		if err != nil {
			log.Warn("Failed to get free disk space", "path", path, "err", err)
			break
		}
		if freeSpace < freeDiskSpaceCritical {
			log.Error("Low disk space. Gracefully shutting down Geth to prevent database corruption.", "available", common.StorageSize(freeSpace))
			sigc <- syscall.SIGTERM
			break
		} else if freeSpace < 2*freeDiskSpaceCritical {
			log.Warn("Disk space is running low. Geth will shutdown if disk space runs below critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical))
		}
		time.Sleep(60 * time.Second)
	}
}
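
// Worked example of the shutdown threshold wired up in StartNode above (the
// flag values are purely illustrative): with CacheFlag set to 4096 and
// CacheGCFlag to 25, minFreeDiskSpace = 4096 * 25 / 100 = 1024 (MB), so
// monitorFreeDiskSpace receives 1024 * 1024 * 1024 bytes as its critical
// level, warns once free space drops under twice that amount, and requests a
// graceful shutdown once the instance directory's free space falls below it.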

// ImportChain imports a blockchain export file (RLP encoded, optionally gzip
// compressed) into the given chain, inserting the blocks in batches of
// importBatchSize.
func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	log.Info("Importing blockchain", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Run the actual import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// don't import the genesis block
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if _, err := chain.InsertChain(missing); err != nil {
			return fmt.Errorf("invalid block %d: %v", n, err)
		}
	}
	return nil
}
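
// A hedged usage sketch for ImportChain, assuming the caller already holds an
// initialized *core.BlockChain (chain construction is omitted and hypothetical):
//
//	if err := utils.ImportChain(chain, "export.rlp.gz"); err != nil {
//		utils.Fatalf("Import error: %v", err)
//	}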

// missingBlocks returns the blocks from the given slice starting at the first
// one that is not yet fully present in the local chain: below the current head
// only the block itself must exist, at or above the head the state must be
// available as well.
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// If we're behind the chain head, only check the block; state is available at head
		if head.NumberU64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// If we're above the chain head, state availability is a must
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
		}
	}
	return nil
}

// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
func ExportChain(blockchain *core.BlockChain, fn string) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.Export(writer); err != nil {
		return err
	}
	log.Info("Exported blockchain", "file", fn)

	return nil
}
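
// A hedged usage sketch for ExportChain; the ".gz" suffix is what enables the
// transparent gzip compression handled above:
//
//	if err := utils.ExportChain(chain, "export.rlp.gz"); err != nil {
//		utils.Fatalf("Export error: %v", err)
//	}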

// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.ExportN(writer, first, last); err != nil {
		return err
	}
	log.Info("Exported blockchain to", "file", fn)
	return nil
}
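
// A hedged sketch for ExportAppendChain, appending an illustrative block range
// to an existing dump file (the block numbers are made up):
//
//	if err := utils.ExportAppendChain(chain, "export.rlp", 1000000, 1999999); err != nil {
//		utils.Fatalf("Export error: %v", err)
//	}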

// ImportPreimages imports a batch of exported hash preimages into the database.
func ImportPreimages(db ethdb.Database, fn string) error {
	log.Info("Importing preimages", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Import the preimages in batches to prevent disk thrashing
	preimages := make(map[common.Hash][]byte)

	for {
		// Read the next entry and ensure it's not junk
		var blob []byte

		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Accumulate the preimages and flush when enough were gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
			rawdb.WritePreimages(db, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch of preimage data
	if len(preimages) > 0 {
		rawdb.WritePreimages(db, preimages)
	}
	return nil
}

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
func ExportPreimages(db ethdb.Database, fn string) error {
	log.Info("Exporting preimages", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the preimages and export them
	it := db.NewIterator([]byte("secure-key-"), nil)
	defer it.Release()

	for it.Next() {
		if err := rlp.Encode(writer, it.Value()); err != nil {
			return err
		}
	}
	log.Info("Exported preimages", "file", fn)
	return nil
}
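
// A hedged round-trip sketch for the preimage helpers above, assuming an open
// ethdb.Database handle named db (obtaining it is out of scope here):
//
//	if err := utils.ExportPreimages(db, "preimages.rlp.gz"); err != nil {
//		utils.Fatalf("Preimage export error: %v", err)
//	}
//	if err := utils.ImportPreimages(db, "preimages.rlp.gz"); err != nil {
//		utils.Fatalf("Preimage import error: %v", err)
//	}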