Merge pull request #49 from openrelayxyz/feature/v.1.10.21-merge

Feature/v.1.10.21 merge
AusIV 2022-07-27 12:48:01 -05:00 committed by GitHub
commit 2ef96afe2a
380 changed files with 6941 additions and 6267 deletions

View File

@ -19,10 +19,25 @@ linters:
- govet - govet
- ineffassign - ineffassign
- misspell - misspell
# - staticcheck
- unconvert - unconvert
# - unused
- varcheck - varcheck
- typecheck
- unused
- staticcheck
- bidichk
- durationcheck
- exportloopref
- gosec
- whitespace
# - structcheck # lots of false positives
# - errcheck #lot of false positives
# - contextcheck
# - errchkjson # lots of false positives
# - errorlint # this check crashes
# - exhaustive # silly check
# - makezero # false positives
# - nilerr # several intentional
linters-settings: linters-settings:
gofmt: gofmt:
@ -30,21 +45,29 @@ linters-settings:
goconst: goconst:
min-len: 3 # minimum length of string constant min-len: 3 # minimum length of string constant
min-occurrences: 6 # minimum number of occurrences min-occurrences: 6 # minimum number of occurrences
gosec:
excludes:
- G404 # Use of weak random number generator - lots of FP
- G107 # Potential http request -- those are intentional
- G306 # G306: Expect WriteFile permissions to be 0600 or less
issues: issues:
exclude-rules: exclude-rules:
- path: crypto/blake2b/ - path: crypto/bn256/cloudflare/optate.go
linters:
- deadcode
- path: crypto/bn256/cloudflare
linters:
- deadcode
- path: p2p/discv5/
linters:
- deadcode
- path: core/vm/instructions_test.go
linters:
- goconst
- path: cmd/faucet/
linters: linters:
- deadcode - deadcode
- staticcheck
- path: internal/build/pgp.go
text: 'SA1019: package golang.org/x/crypto/openpgp is deprecated'
- path: core/vm/contracts.go
text: 'SA1019: package golang.org/x/crypto/ripemd160 is deprecated'
- path: accounts/usbwallet/trezor.go
text: 'SA1019: package github.com/golang/protobuf/proto is deprecated'
- path: accounts/usbwallet/trezor/
text: 'SA1019: package github.com/golang/protobuf/proto is deprecated'
exclude:
- 'SA1019: event.TypeMux is deprecated: use Feed'
- 'SA1019: strings.Title is deprecated'
- 'SA1019: strings.Title has been deprecated since Go 1.18 and an alternative has been available since Go 1.0: The rule Title uses for word boundaries does not handle Unicode punctuation properly. Use golang.org/x/text/cases instead.'
- 'SA1029: should not use built-in type string as key for value'
- 'G306: Expect WriteFile permissions to be 0600 or less'

View File

@ -42,7 +42,7 @@ directory.
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. | | `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. | | `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). | | `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://eth.wiki/en/fundamentals/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | | `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. | | `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |
## Running `geth` ## Running `geth`
@ -188,7 +188,7 @@ accessible from the outside.
As a developer, sooner rather than later you'll want to start interacting with `geth` and the As a developer, sooner rather than later you'll want to start interacting with `geth` and the
Ethereum network via your own programs and not manually through the console. To aid Ethereum network via your own programs and not manually through the console. To aid
this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://eth.wiki/json-rpc/API) this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://ethereum.github.io/execution-apis/api-documentation/)
and [`geth` specific APIs](https://geth.ethereum.org/docs/rpc/server)). and [`geth` specific APIs](https://geth.ethereum.org/docs/rpc/server)).
These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based
platforms, and named pipes on Windows). platforms, and named pipes on Windows).
@ -211,7 +211,7 @@ HTTP based JSON-RPC API options:
* `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`) * `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`)
* `--ws.origins` Origins from which to accept websockets requests * `--ws.origins` Origins from which to accept websockets requests
* `--ipcdisable` Disable the IPC-RPC server * `--ipcdisable` Disable the IPC-RPC server
* `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,shh,txpool,web3`) * `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,txpool,web3`)
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it) * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
You'll need to use your own programming environments' capabilities (libraries, tools, etc) to You'll need to use your own programming environments' capabilities (libraries, tools, etc) to
@ -297,7 +297,7 @@ $ bootnode --genkey=boot.key
$ bootnode --nodekey=boot.key $ bootnode --nodekey=boot.key
``` ```
With the bootnode online, it will display an [`enode` URL](https://eth.wiki/en/fundamentals/enode-url-format) With the bootnode online, it will display an [`enode` URL](https://ethereum.org/en/developers/docs/networking-layer/network-addresses/#enode)
that other nodes can use to connect to it and exchange peer information. Make sure to that other nodes can use to connect to it and exchange peer information. Make sure to
replace the displayed IP address information (most probably `[::]`) with your externally replace the displayed IP address information (most probably `[::]`) with your externally
accessible IP to get the actual `enode` URL. accessible IP to get the actual `enode` URL.
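Tying the JSON-RPC section above together, a minimal sketch of querying such an endpoint from Go with go-ethereum's `ethclient`, assuming a local node serving HTTP-RPC on the default port:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Assumes a local geth started with --http (default endpoint http://localhost:8545).
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// A nil block number asks for the latest header.
	head, err := client.HeaderByNumber(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("latest block:", head.Number)
}
```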

View File

@ -164,7 +164,7 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
case "constructor": case "constructor":
abi.Constructor = NewMethod("", "", Constructor, field.StateMutability, field.Constant, field.Payable, field.Inputs, nil) abi.Constructor = NewMethod("", "", Constructor, field.StateMutability, field.Constant, field.Payable, field.Inputs, nil)
case "function": case "function":
name := overloadedName(field.Name, func(s string) bool { _, ok := abi.Methods[s]; return ok }) name := ResolveNameConflict(field.Name, func(s string) bool { _, ok := abi.Methods[s]; return ok })
abi.Methods[name] = NewMethod(name, field.Name, Function, field.StateMutability, field.Constant, field.Payable, field.Inputs, field.Outputs) abi.Methods[name] = NewMethod(name, field.Name, Function, field.StateMutability, field.Constant, field.Payable, field.Inputs, field.Outputs)
case "fallback": case "fallback":
// New introduced function type in v0.6.0, check more detail // New introduced function type in v0.6.0, check more detail
@ -184,9 +184,11 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
} }
abi.Receive = NewMethod("", "", Receive, field.StateMutability, field.Constant, field.Payable, nil, nil) abi.Receive = NewMethod("", "", Receive, field.StateMutability, field.Constant, field.Payable, nil, nil)
case "event": case "event":
name := overloadedName(field.Name, func(s string) bool { _, ok := abi.Events[s]; return ok }) name := ResolveNameConflict(field.Name, func(s string) bool { _, ok := abi.Events[s]; return ok })
abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs) abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs)
case "error": case "error":
// Errors cannot be overloaded or overridden but are inherited,
// no need to resolve the name conflict here.
abi.Errors[field.Name] = NewError(field.Name, field.Inputs) abi.Errors[field.Name] = NewError(field.Name, field.Inputs)
default: default:
return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name) return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name)
@ -251,20 +253,3 @@ func UnpackRevert(data []byte) (string, error) {
} }
return unpacked[0].(string), nil return unpacked[0].(string), nil
} }
// overloadedName returns the next available name for a given thing.
// Needed since solidity allows for overloading.
//
// e.g. if the abi contains Methods send, send1
// overloadedName would return send2 for input send.
//
// overloadedName works for methods, events and errors.
func overloadedName(rawName string, isAvail func(string) bool) string {
name := rawName
ok := isAvail(name)
for idx := 0; ok; idx++ {
name = fmt.Sprintf("%s%d", rawName, idx)
ok = isAvail(name)
}
return name
}

View File

@ -1038,9 +1038,7 @@ func TestABI_EventById(t *testing.T) {
} }
if event == nil { if event == nil {
t.Errorf("We should find a event for topic %s, test #%d", topicID.Hex(), testnum) t.Errorf("We should find a event for topic %s, test #%d", topicID.Hex(), testnum)
} } else if event.ID != topicID {
if event.ID != topicID {
t.Errorf("Event id %s does not match topic %s, test #%d", event.ID.Hex(), topicID.Hex(), testnum) t.Errorf("Event id %s does not match topic %s, test #%d", event.ID.Hex(), topicID.Hex(), testnum)
} }

View File

@ -18,6 +18,7 @@ package abi
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"reflect" "reflect"
"strings" "strings"
@ -79,7 +80,7 @@ func (arguments Arguments) isTuple() bool {
func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) { func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
if len(data) == 0 { if len(data) == 0 {
if len(arguments.NonIndexed()) != 0 { if len(arguments.NonIndexed()) != 0 {
return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected") return nil, errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
} }
return make([]interface{}, 0), nil return make([]interface{}, 0), nil
} }
@ -90,11 +91,11 @@ func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error { func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error {
// Make sure map is not nil // Make sure map is not nil
if v == nil { if v == nil {
return fmt.Errorf("abi: cannot unpack into a nil map") return errors.New("abi: cannot unpack into a nil map")
} }
if len(data) == 0 { if len(data) == 0 {
if len(arguments.NonIndexed()) != 0 { if len(arguments.NonIndexed()) != 0 {
return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected") return errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
} }
return nil // Nothing to unmarshal, return return nil // Nothing to unmarshal, return
} }
@ -116,7 +117,7 @@ func (arguments Arguments) Copy(v interface{}, values []interface{}) error {
} }
if len(values) == 0 { if len(values) == 0 {
if len(arguments.NonIndexed()) != 0 { if len(arguments.NonIndexed()) != 0 {
return fmt.Errorf("abi: attempting to copy no values while arguments are expected") return errors.New("abi: attempting to copy no values while arguments are expected")
} }
return nil // Nothing to copy, return return nil // Nothing to copy, return
} }

View File

@ -66,6 +66,7 @@ type SimulatedBackend struct {
mu sync.Mutex mu sync.Mutex
pendingBlock *types.Block // Currently pending block that will be imported on request pendingBlock *types.Block // Currently pending block that will be imported on request
pendingState *state.StateDB // Currently pending state that will be the active on request pendingState *state.StateDB // Currently pending state that will be the active on request
pendingReceipts types.Receipts // Currently receipts for the pending block
events *filters.EventSystem // Event system for filtering log events live events *filters.EventSystem // Event system for filtering log events live
@ -84,8 +85,8 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis
database: database, database: database,
blockchain: blockchain, blockchain: blockchain,
config: genesis.Config, config: genesis.Config,
events: filters.NewEventSystem(&filterBackend{database, blockchain}, false),
} }
backend.events = filters.NewEventSystem(&filterBackend{database, blockchain, backend}, false)
backend.rollback(blockchain.CurrentBlock()) backend.rollback(blockchain.CurrentBlock())
return backend return backend
} }
@ -105,16 +106,20 @@ func (b *SimulatedBackend) Close() error {
// Commit imports all the pending transactions as a single block and starts a // Commit imports all the pending transactions as a single block and starts a
// fresh new state. // fresh new state.
func (b *SimulatedBackend) Commit() { func (b *SimulatedBackend) Commit() common.Hash {
b.mu.Lock() b.mu.Lock()
defer b.mu.Unlock() defer b.mu.Unlock()
if _, err := b.blockchain.InsertChain([]*types.Block{b.pendingBlock}); err != nil { if _, err := b.blockchain.InsertChain([]*types.Block{b.pendingBlock}); err != nil {
panic(err) // This cannot happen unless the simulator is wrong, fail in that case panic(err) // This cannot happen unless the simulator is wrong, fail in that case
} }
blockHash := b.pendingBlock.Hash()
// Using the last inserted block here makes it possible to build on a side // Using the last inserted block here makes it possible to build on a side
// chain after a fork. // chain after a fork.
b.rollback(b.pendingBlock) b.rollback(b.pendingBlock)
return blockHash
} }
// Rollback aborts all pending transactions, reverting to the last committed state. // Rollback aborts all pending transactions, reverting to the last committed state.
@ -662,7 +667,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
return fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce) return fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce)
} }
// Include tx in chain // Include tx in chain
blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) { blocks, receipts := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
for _, tx := range b.pendingBlock.Transactions() { for _, tx := range b.pendingBlock.Transactions() {
block.AddTxWithChain(b.blockchain, tx) block.AddTxWithChain(b.blockchain, tx)
} }
@ -672,6 +677,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
b.pendingBlock = blocks[0] b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil) b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
b.pendingReceipts = receipts[0]
return nil return nil
} }
@ -683,7 +689,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter
var filter *filters.Filter var filter *filters.Filter
if query.BlockHash != nil { if query.BlockHash != nil {
// Block filter requested, construct a single-shot filter // Block filter requested, construct a single-shot filter
filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics) filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain, b}, *query.BlockHash, query.Addresses, query.Topics)
} else { } else {
// Initialize unset filter boundaries to run from genesis to chain head // Initialize unset filter boundaries to run from genesis to chain head
from := int64(0) from := int64(0)
@ -695,7 +701,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter
to = query.ToBlock.Int64() to = query.ToBlock.Int64()
} }
// Construct the range filter // Construct the range filter
filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics) filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain, b}, from, to, query.Addresses, query.Topics)
} }
// Run the filter and return all the logs // Run the filter and return all the logs
logs, err := filter.Logs(ctx) logs, err := filter.Logs(ctx)
@ -818,6 +824,7 @@ func (m callMsg) AccessList() types.AccessList { return m.CallMsg.AccessList }
type filterBackend struct { type filterBackend struct {
db ethdb.Database db ethdb.Database
bc *core.BlockChain bc *core.BlockChain
backend *SimulatedBackend
} }
func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db } func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db }
@ -834,6 +841,10 @@ func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*t
return fb.bc.GetHeaderByHash(hash), nil return fb.bc.GetHeaderByHash(hash), nil
} }
func (fb *filterBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
return fb.backend.pendingBlock, fb.backend.pendingReceipts
}
func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
number := rawdb.ReadHeaderNumber(fb.db, hash) number := rawdb.ReadHeaderNumber(fb.db, hash)
if number == nil { if number == nil {
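The `Commit` signature change above is the user-visible part of this file's diff; a minimal sketch of how a caller might use the returned hash together with `Fork`, assuming a throwaway simulated backend:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	addr := crypto.PubkeyToAddress(key.PublicKey)
	sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000000000000)}}, 8000000)
	defer sim.Close()

	// Commit now returns the hash of the block it just imported.
	hash := sim.Commit()
	block, err := sim.BlockByHash(context.Background(), hash)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("committed block", block.NumberU64(), "with hash", hash)

	// The returned hash can later be handed to Fork to continue on a side chain.
	if err := sim.Fork(context.Background(), hash); err != nil {
		log.Fatal(err)
	}
}
```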

View File

@ -655,8 +655,7 @@ func TestHeaderByNumber(t *testing.T) {
} }
if latestBlockHeader == nil { if latestBlockHeader == nil {
t.Errorf("received a nil block header") t.Errorf("received a nil block header")
} } else if latestBlockHeader.Number.Uint64() != uint64(0) {
if latestBlockHeader.Number.Uint64() != uint64(0) {
t.Errorf("expected block header number 0, instead got %v", latestBlockHeader.Number.Uint64()) t.Errorf("expected block header number 0, instead got %v", latestBlockHeader.Number.Uint64())
} }
@ -1336,3 +1335,42 @@ func TestForkResendTx(t *testing.T) {
t.Errorf("TX included in wrong block: %d", h) t.Errorf("TX included in wrong block: %d", h)
} }
} }
func TestCommitReturnValue(t *testing.T) {
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
sim := simTestBackend(testAddr)
defer sim.Close()
startBlockHeight := sim.blockchain.CurrentBlock().NumberU64()
// Test if Commit returns the correct block hash
h1 := sim.Commit()
if h1 != sim.blockchain.CurrentBlock().Hash() {
t.Error("Commit did not return the hash of the last block.")
}
// Create a block in the original chain (containing a transaction to force different block hashes)
head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough
gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))
_tx := types.NewTransaction(0, testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil)
tx, _ := types.SignTx(_tx, types.HomesteadSigner{}, testKey)
sim.SendTransaction(context.Background(), tx)
h2 := sim.Commit()
// Create another block in the original chain
sim.Commit()
// Fork at the first bock
if err := sim.Fork(context.Background(), h1); err != nil {
t.Errorf("forking: %v", err)
}
// Test if Commit returns the correct block hash after the reorg
h2fork := sim.Commit()
if h2 == h2fork {
t.Error("The block in the fork and the original block are the same block!")
}
if sim.blockchain.GetHeader(h2fork, startBlockHeight+2) == nil {
t.Error("Could not retrieve the just created block (side-chain)")
}
}

View File

@ -115,7 +115,6 @@ func (mc *mockPendingCaller) PendingCallContract(ctx context.Context, call ether
} }
func TestPassingBlockNumber(t *testing.T) { func TestPassingBlockNumber(t *testing.T) {
mc := &mockPendingCaller{ mc := &mockPendingCaller{
mockCaller: &mockCaller{ mockCaller: &mockCaller{
codeAtBytes: []byte{1, 2, 3}, codeAtBytes: []byte{1, 2, 3},

View File

@ -43,6 +43,43 @@ const (
LangObjC LangObjC
) )
func isKeyWord(arg string) bool {
switch arg {
case "break":
case "case":
case "chan":
case "const":
case "continue":
case "default":
case "defer":
case "else":
case "fallthrough":
case "for":
case "func":
case "go":
case "goto":
case "if":
case "import":
case "interface":
case "iota":
case "map":
case "make":
case "new":
case "package":
case "range":
case "return":
case "select":
case "struct":
case "switch":
case "type":
case "var":
default:
return false
}
return true
}
// Bind generates a Go wrapper around a contract ABI. This wrapper isn't meant // Bind generates a Go wrapper around a contract ABI. This wrapper isn't meant
// to be used as is in client code, but rather as an intermediate struct which // to be used as is in client code, but rather as an intermediate struct which
// enforces compile time type safety and naming convention opposed to having to // enforces compile time type safety and naming convention opposed to having to
@ -99,6 +136,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
// Normalize the method for capital cases and non-anonymous inputs/outputs // Normalize the method for capital cases and non-anonymous inputs/outputs
normalized := original normalized := original
normalizedName := methodNormalizer[lang](alias(aliases, original.Name)) normalizedName := methodNormalizer[lang](alias(aliases, original.Name))
// Ensure there is no duplicated identifier // Ensure there is no duplicated identifier
var identifiers = callIdentifiers var identifiers = callIdentifiers
if !original.IsConstant() { if !original.IsConstant() {
@ -108,11 +146,12 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
return "", fmt.Errorf("duplicated identifier \"%s\"(normalized \"%s\"), use --alias for renaming", original.Name, normalizedName) return "", fmt.Errorf("duplicated identifier \"%s\"(normalized \"%s\"), use --alias for renaming", original.Name, normalizedName)
} }
identifiers[normalizedName] = true identifiers[normalizedName] = true
normalized.Name = normalizedName normalized.Name = normalizedName
normalized.Inputs = make([]abi.Argument, len(original.Inputs)) normalized.Inputs = make([]abi.Argument, len(original.Inputs))
copy(normalized.Inputs, original.Inputs) copy(normalized.Inputs, original.Inputs)
for j, input := range normalized.Inputs { for j, input := range normalized.Inputs {
if input.Name == "" { if input.Name == "" || isKeyWord(input.Name) {
normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j) normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
} }
if hasStruct(input.Type) { if hasStruct(input.Type) {
@ -152,12 +191,22 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
eventIdentifiers[normalizedName] = true eventIdentifiers[normalizedName] = true
normalized.Name = normalizedName normalized.Name = normalizedName
used := make(map[string]bool)
normalized.Inputs = make([]abi.Argument, len(original.Inputs)) normalized.Inputs = make([]abi.Argument, len(original.Inputs))
copy(normalized.Inputs, original.Inputs) copy(normalized.Inputs, original.Inputs)
for j, input := range normalized.Inputs { for j, input := range normalized.Inputs {
if input.Name == "" { if input.Name == "" || isKeyWord(input.Name) {
normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j) normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
} }
// Event is a bit special, we need to define event struct in binding,
// ensure there is no camel-case-style name conflict.
for index := 0; ; index++ {
if !used[capitalise(normalized.Inputs[j].Name)] {
used[capitalise(normalized.Inputs[j].Name)] = true
break
}
normalized.Inputs[j].Name = fmt.Sprintf("%s%d", normalized.Inputs[j].Name, index)
}
if hasStruct(input.Type) { if hasStruct(input.Type) {
bindStructType[lang](input.Type, structs) bindStructType[lang](input.Type, structs)
} }
@ -432,15 +481,22 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
if s, exist := structs[id]; exist { if s, exist := structs[id]; exist {
return s.Name return s.Name
} }
var fields []*tmplField var (
names = make(map[string]bool)
fields []*tmplField
)
for i, elem := range kind.TupleElems { for i, elem := range kind.TupleElems {
field := bindStructTypeGo(*elem, structs) name := capitalise(kind.TupleRawNames[i])
fields = append(fields, &tmplField{Type: field, Name: capitalise(kind.TupleRawNames[i]), SolKind: *elem}) name = abi.ResolveNameConflict(name, func(s string) bool { return names[s] })
names[name] = true
fields = append(fields, &tmplField{Type: bindStructTypeGo(*elem, structs), Name: name, SolKind: *elem})
} }
name := kind.TupleRawName name := kind.TupleRawName
if name == "" { if name == "" {
name = fmt.Sprintf("Struct%d", len(structs)) name = fmt.Sprintf("Struct%d", len(structs))
} }
name = capitalise(name)
structs[id] = &tmplStruct{ structs[id] = &tmplStruct{
Name: name, Name: name,
Fields: fields, Fields: fields,

View File

@ -1954,6 +1954,91 @@ var bindTests = []struct {
} }
`, `,
}, },
{
name: `NameConflict`,
contract: `
// SPDX-License-Identifier: GPL-3.0
pragma solidity >=0.4.22 <0.9.0;
contract oracle {
struct request {
bytes data;
bytes _data;
}
event log (int msg, int _msg);
function addRequest(request memory req) public pure {}
function getRequest() pure public returns (request memory) {
return request("", "");
}
}
`,
bytecode: []string{"0x608060405234801561001057600080fd5b5061042b806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063c2bb515f1461003b578063cce7b04814610059575b600080fd5b610043610075565b60405161005091906101af565b60405180910390f35b610073600480360381019061006e91906103ac565b6100b5565b005b61007d6100b8565b604051806040016040528060405180602001604052806000815250815260200160405180602001604052806000815250815250905090565b50565b604051806040016040528060608152602001606081525090565b600081519050919050565b600082825260208201905092915050565b60005b8381101561010c5780820151818401526020810190506100f1565b8381111561011b576000848401525b50505050565b6000601f19601f8301169050919050565b600061013d826100d2565b61014781856100dd565b93506101578185602086016100ee565b61016081610121565b840191505092915050565b600060408301600083015184820360008601526101888282610132565b915050602083015184820360208601526101a28282610132565b9150508091505092915050565b600060208201905081810360008301526101c9818461016b565b905092915050565b6000604051905090565b600080fd5b600080fd5b600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b61022282610121565b810181811067ffffffffffffffff82111715610241576102406101ea565b5b80604052505050565b60006102546101d1565b90506102608282610219565b919050565b600080fd5b600080fd5b600080fd5b600067ffffffffffffffff82111561028f5761028e6101ea565b5b61029882610121565b9050602081019050919050565b82818337600083830152505050565b60006102c76102c284610274565b61024a565b9050828152602081018484840111156102e3576102e261026f565b5b6102ee8482856102a5565b509392505050565b600082601f83011261030b5761030a61026a565b5b813561031b8482602086016102b4565b91505092915050565b60006040828403121561033a576103396101e5565b5b610344604061024a565b9050600082013567ffffffffffffffff81111561036457610363610265565b5b610370848285016102f6565b600083015250602082013567ffffffffffffffff81111561039457610393610265565b5b6103a0848285016102f6565b60208301525092915050565b6000602082840312156103c2576103c16101db565b5b600082013567ffffffffffffffff8111156103e0576103df6101e0565b5b6103ec84828501610324565b9150509291505056fea264697066735822122033bca1606af9b6aeba1673f98c52003cec19338539fb44b86690ce82c51483b564736f6c634300080e0033"},
abi: []string{`[ { "anonymous": false, "inputs": [ { "indexed": false, "internalType": "int256", "name": "msg", "type": "int256" }, { "indexed": false, "internalType": "int256", "name": "_msg", "type": "int256" } ], "name": "log", "type": "event" }, { "inputs": [ { "components": [ { "internalType": "bytes", "name": "data", "type": "bytes" }, { "internalType": "bytes", "name": "_data", "type": "bytes" } ], "internalType": "struct oracle.request", "name": "req", "type": "tuple" } ], "name": "addRequest", "outputs": [], "stateMutability": "pure", "type": "function" }, { "inputs": [], "name": "getRequest", "outputs": [ { "components": [ { "internalType": "bytes", "name": "data", "type": "bytes" }, { "internalType": "bytes", "name": "_data", "type": "bytes" } ], "internalType": "struct oracle.request", "name": "", "type": "tuple" } ], "stateMutability": "pure", "type": "function" } ]`},
imports: `
"math/big"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
`,
tester: `
var (
key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
)
defer sim.Close()
_, tx, _, err := DeployNameConflict(user, sim)
if err != nil {
t.Fatalf("DeployNameConflict() got err %v; want nil err", err)
}
sim.Commit()
if _, err = bind.WaitDeployed(nil, sim, tx); err != nil {
t.Logf("Deployment tx: %+v", tx)
t.Errorf("bind.WaitDeployed(nil, %T, <deployment tx>) got err %v; want nil err", sim, err)
}
`,
},
{
name: "RangeKeyword",
contract: `
// SPDX-License-Identifier: GPL-3.0
pragma solidity >=0.4.22 <0.9.0;
contract keywordcontract {
function functionWithKeywordParameter(range uint256) public pure {}
}
`,
bytecode: []string{"0x608060405234801561001057600080fd5b5060dc8061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063527a119f14602d575b600080fd5b60436004803603810190603f9190605b565b6045565b005b50565b6000813590506055816092565b92915050565b600060208284031215606e57606d608d565b5b6000607a848285016048565b91505092915050565b6000819050919050565b600080fd5b6099816083565b811460a357600080fd5b5056fea2646970667358221220d4f4525e2615516394055d369fb17df41c359e5e962734f27fd683ea81fd9db164736f6c63430008070033"},
abi: []string{`[{"inputs":[{"internalType":"uint256","name":"range","type":"uint256"}],"name":"functionWithKeywordParameter","outputs":[],"stateMutability":"pure","type":"function"}]`},
imports: `
"math/big"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
`,
tester: `
var (
key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
)
_, tx, _, err := DeployRangeKeyword(user, sim)
if err != nil {
t.Fatalf("error deploying contract: %v", err)
}
sim.Commit()
if _, err = bind.WaitDeployed(nil, sim, tx); err != nil {
t.Errorf("error deploying the contract: %v", err)
}
`,
},
} }
// Tests that packages generated by the binder can be successfully compiled and // Tests that packages generated by the binder can be successfully compiled and

View File

@ -30,11 +30,13 @@ type Error struct {
Name string Name string
Inputs Arguments Inputs Arguments
str string str string
// Sig contains the string signature according to the ABI spec. // Sig contains the string signature according to the ABI spec.
// e.g. event foo(uint32 a, int b) = "foo(uint32,int256)" // e.g. error foo(uint32 a, int b) = "foo(uint32,int256)"
// Please note that "int" is substitute for its canonical representation "int256" // Please note that "int" is substitute for its canonical representation "int256"
Sig string Sig string
// ID returns the canonical representation of the event's signature used by the
// ID returns the canonical representation of the error's signature used by the
// abi definition to identify event names and types. // abi definition to identify event names and types.
ID common.Hash ID common.Hash
} }

View File

@ -73,7 +73,6 @@ func typeCheck(t Type, value reflect.Value) error {
} else { } else {
return nil return nil
} }
} }
// typeErr returns a formatted type casting error. // typeErr returns a formatted type casting error.

View File

@ -29,24 +29,27 @@ import (
// don't get the signature canonical representation as the first LOG topic. // don't get the signature canonical representation as the first LOG topic.
type Event struct { type Event struct {
// Name is the event name used for internal representation. It's derived from // Name is the event name used for internal representation. It's derived from
// the raw name and a suffix will be added in the case of a event overload. // the raw name and a suffix will be added in the case of event overloading.
// //
// e.g. // e.g.
// These are two events that have the same name: // These are two events that have the same name:
// * foo(int,int) // * foo(int,int)
// * foo(uint,uint) // * foo(uint,uint)
// The event name of the first one wll be resolved as foo while the second one // The event name of the first one will be resolved as foo while the second one
// will be resolved as foo0. // will be resolved as foo0.
Name string Name string
// RawName is the raw event name parsed from ABI. // RawName is the raw event name parsed from ABI.
RawName string RawName string
Anonymous bool Anonymous bool
Inputs Arguments Inputs Arguments
str string str string
// Sig contains the string signature according to the ABI spec. // Sig contains the string signature according to the ABI spec.
// e.g. event foo(uint32 a, int b) = "foo(uint32,int256)" // e.g. event foo(uint32 a, int b) = "foo(uint32,int256)"
// Please note that "int" is substitute for its canonical representation "int256" // Please note that "int" is substitute for its canonical representation "int256"
Sig string Sig string
// ID returns the canonical representation of the event's signature used by the // ID returns the canonical representation of the event's signature used by the
// abi definition to identify event names and types. // abi definition to identify event names and types.
ID common.Hash ID common.Hash

View File

@ -161,7 +161,6 @@ func TestEventMultiValueWithArrayUnpack(t *testing.T) {
} }
func TestEventTupleUnpack(t *testing.T) { func TestEventTupleUnpack(t *testing.T) {
type EventTransfer struct { type EventTransfer struct {
Value *big.Int Value *big.Int
} }

View File

@ -220,7 +220,6 @@ func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[stri
// second round ~~~ // second round ~~~
for _, argName := range argNames { for _, argName := range argNames {
structFieldName := ToCamelCase(argName) structFieldName := ToCamelCase(argName)
if structFieldName == "" { if structFieldName == "" {

View File

@ -166,7 +166,7 @@ func ParseSelector(unescapedSelector string) (SelectorMarshaling, error) {
return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': unexpected string '%s'", unescapedSelector, rest) return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': unexpected string '%s'", unescapedSelector, rest)
} }
// Reassemble the fake ABI and constuct the JSON // Reassemble the fake ABI and construct the JSON
fakeArgs, err := assembleArgs(args) fakeArgs, err := assembleArgs(args)
if err != nil { if err != nil {
return SelectorMarshaling{}, fmt.Errorf("failed to parse selector: %v", err) return SelectorMarshaling{}, fmt.Errorf("failed to parse selector: %v", err)

View File

@ -163,22 +163,26 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
elems []*Type elems []*Type
names []string names []string
expression string // canonical parameter expression expression string // canonical parameter expression
used = make(map[string]bool)
) )
expression += "(" expression += "("
overloadedNames := make(map[string]string)
for idx, c := range components { for idx, c := range components {
cType, err := NewType(c.Type, c.InternalType, c.Components) cType, err := NewType(c.Type, c.InternalType, c.Components)
if err != nil { if err != nil {
return Type{}, err return Type{}, err
} }
fieldName, err := overloadedArgName(c.Name, overloadedNames) name := ToCamelCase(c.Name)
if name == "" {
return Type{}, errors.New("abi: purely anonymous or underscored field is not supported")
}
fieldName := ResolveNameConflict(name, func(s string) bool { return used[s] })
if err != nil { if err != nil {
return Type{}, err return Type{}, err
} }
used[fieldName] = true
if !isValidFieldName(fieldName) { if !isValidFieldName(fieldName) {
return Type{}, fmt.Errorf("field %d has invalid name", idx) return Type{}, fmt.Errorf("field %d has invalid name", idx)
} }
overloadedNames[fieldName] = fieldName
fields = append(fields, reflect.StructField{ fields = append(fields, reflect.StructField{
Name: fieldName, // reflect.StructOf will panic for any exported field. Name: fieldName, // reflect.StructOf will panic for any exported field.
Type: cType.GetType(), Type: cType.GetType(),
@ -255,20 +259,6 @@ func (t Type) GetType() reflect.Type {
} }
} }
func overloadedArgName(rawName string, names map[string]string) (string, error) {
fieldName := ToCamelCase(rawName)
if fieldName == "" {
return "", errors.New("abi: purely anonymous or underscored field is not supported")
}
// Handle overloaded fieldNames
_, ok := names[fieldName]
for idx := 0; ok; idx++ {
fieldName = fmt.Sprintf("%s%d", ToCamelCase(rawName), idx)
_, ok = names[fieldName]
}
return fieldName, nil
}
// String implements Stringer. // String implements Stringer.
func (t Type) String() (out string) { func (t Type) String() (out string) {
return t.stringKind return t.stringKind
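A minimal sketch of the behaviour the new `used` map in `NewType` is meant to guarantee, assuming the exported `abi.NewType` and `abi.ArgumentMarshaling` APIs: tuple components whose names collide after camel-casing get a numeric suffix instead of erroring out or overwriting each other.

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// "data" and "_data" both camel-case to "Data"; the second component is
	// expected to be disambiguated as "Data0" via ResolveNameConflict.
	typ, err := abi.NewType("tuple", "struct oracle.request", []abi.ArgumentMarshaling{
		{Name: "data", Type: "bytes"},
		{Name: "_data", Type: "bytes"},
	})
	if err != nil {
		log.Fatal(err)
	}
	goType := typ.GetType()
	for i := 0; i < goType.NumField(); i++ {
		fmt.Println(goType.Field(i).Name) // Data, Data0
	}
}
```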

View File

@ -115,7 +115,6 @@ func ReadFixedBytes(t Type, word []byte) (interface{}, error) {
reflect.Copy(array, reflect.ValueOf(word[0:t.Size])) reflect.Copy(array, reflect.ValueOf(word[0:t.Size]))
return array.Interface(), nil return array.Interface(), nil
} }
// forEachUnpack iteratively unpack elements. // forEachUnpack iteratively unpack elements.
@ -255,7 +254,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
// lengthPrefixPointsTo interprets a 32 byte slice as an offset and then determines which indices to look to decode the type. // lengthPrefixPointsTo interprets a 32 byte slice as an offset and then determines which indices to look to decode the type.
func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) { func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) {
bigOffsetEnd := big.NewInt(0).SetBytes(output[index : index+32]) bigOffsetEnd := new(big.Int).SetBytes(output[index : index+32])
bigOffsetEnd.Add(bigOffsetEnd, common.Big32) bigOffsetEnd.Add(bigOffsetEnd, common.Big32)
outputLength := big.NewInt(int64(len(output))) outputLength := big.NewInt(int64(len(output)))
@ -268,11 +267,9 @@ func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err
} }
offsetEnd := int(bigOffsetEnd.Uint64()) offsetEnd := int(bigOffsetEnd.Uint64())
lengthBig := big.NewInt(0).SetBytes(output[offsetEnd-32 : offsetEnd]) lengthBig := new(big.Int).SetBytes(output[offsetEnd-32 : offsetEnd])
totalSize := big.NewInt(0) totalSize := new(big.Int).Add(bigOffsetEnd, lengthBig)
totalSize.Add(totalSize, bigOffsetEnd)
totalSize.Add(totalSize, lengthBig)
if totalSize.BitLen() > 63 { if totalSize.BitLen() > 63 {
return 0, 0, fmt.Errorf("abi: length larger than int64: %v", totalSize) return 0, 0, fmt.Errorf("abi: length larger than int64: %v", totalSize)
} }
@ -287,7 +284,7 @@ func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err
// tuplePointsTo resolves the location reference for dynamic tuple. // tuplePointsTo resolves the location reference for dynamic tuple.
func tuplePointsTo(index int, output []byte) (start int, err error) { func tuplePointsTo(index int, output []byte) (start int, err error) {
offset := big.NewInt(0).SetBytes(output[index : index+32]) offset := new(big.Int).SetBytes(output[index : index+32])
outputLen := big.NewInt(int64(len(output))) outputLen := big.NewInt(int64(len(output)))
if offset.Cmp(outputLen) > 0 { if offset.Cmp(outputLen) > 0 {

View File

@ -424,7 +424,7 @@ func TestMultiReturnWithStringArray(t *testing.T) {
} }
buff := new(bytes.Buffer) buff := new(bytes.Buffer)
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000005c1b78ea0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000001a055690d9db80000000000000000000000000000ab1257528b3782fb40d7ed5f72e624b744dffb2f00000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001048656c6c6f2c20457468657265756d2100000000000000000000000000000000")) buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000005c1b78ea0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000001a055690d9db80000000000000000000000000000ab1257528b3782fb40d7ed5f72e624b744dffb2f00000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001048656c6c6f2c20457468657265756d2100000000000000000000000000000000"))
temp, _ := big.NewInt(0).SetString("30000000000000000000", 10) temp, _ := new(big.Int).SetString("30000000000000000000", 10)
ret1, ret1Exp := new([3]*big.Int), [3]*big.Int{big.NewInt(1545304298), big.NewInt(6), temp} ret1, ret1Exp := new([3]*big.Int), [3]*big.Int{big.NewInt(1545304298), big.NewInt(6), temp}
ret2, ret2Exp := new(common.Address), common.HexToAddress("ab1257528b3782fb40d7ed5f72e624b744dffb2f") ret2, ret2Exp := new(common.Address), common.HexToAddress("ab1257528b3782fb40d7ed5f72e624b744dffb2f")
ret3, ret3Exp := new([2]string), [2]string{"Ethereum", "Hello, Ethereum!"} ret3, ret3Exp := new([2]string), [2]string{"Ethereum", "Hello, Ethereum!"}

accounts/abi/utils.go (new file, 41 lines)
View File

@ -0,0 +1,41 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import "fmt"
// ResolveNameConflict returns the next available name for a given thing.
// This helper can be used for lots of purposes:
//
// - In solidity function overloading is supported, this function can fix
// the name conflicts of overloaded functions.
// - In golang binding generation, the parameter(in function, event, error,
// and struct definition) name will be converted to camelcase style which
// may eventually lead to name conflicts.
//
// Name conflicts are mostly resolved by adding number suffix.
// e.g. if the abi contains Methods send, send1
// ResolveNameConflict would return send2 for input send.
func ResolveNameConflict(rawName string, used func(string) bool) string {
name := rawName
ok := used(name)
for idx := 0; ok; idx++ {
name = fmt.Sprintf("%s%d", rawName, idx)
ok = used(name)
}
return name
}
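A minimal sketch of the helper in action, both indirectly through `abi.JSON` (which now relies on it for overloaded Solidity names, per the abi.go hunk earlier) and via a direct call; the `send` definitions are illustrative only:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Two Solidity overloads of "send": the second is registered under "send0".
	def := `[{"type":"function","name":"send","inputs":[{"name":"a","type":"uint256"}]},
	         {"type":"function","name":"send","inputs":[{"name":"a","type":"address"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		log.Fatal(err)
	}
	for name := range parsed.Methods {
		fmt.Println(name) // "send" and "send0" (map iteration order is random)
	}

	// The helper can also be called directly.
	taken := map[string]bool{"send": true}
	fmt.Println(abi.ResolveNameConflict("send", func(s string) bool { return taken[s] })) // send0
}
```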

View File

@ -152,10 +152,6 @@ func (api *ExternalSigner) SelfDerive(bases []accounts.DerivationPath, chain eth
log.Error("operation SelfDerive not supported on external signers") log.Error("operation SelfDerive not supported on external signers")
} }
func (api *ExternalSigner) signHash(account accounts.Account, hash []byte) ([]byte, error) {
return []byte{}, fmt.Errorf("operation not supported on external signers")
}
// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed // SignData signs keccak256(data). The mimetype parameter describes the type of data being signed
func (api *ExternalSigner) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) { func (api *ExternalSigner) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) {
var res hexutil.Bytes var res hexutil.Bytes

View File

@ -383,7 +383,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
time.Sleep(1000 * time.Millisecond) time.Sleep(1000 * time.Millisecond)
// Now replace file contents with crap // Now replace file contents with crap
if err := os.WriteFile(file, []byte("foo"), 0644); err != nil { if err := os.WriteFile(file, []byte("foo"), 0600); err != nil {
t.Fatal(err) t.Fatal(err)
return return
} }

View File

@ -377,7 +377,6 @@ func TestImportExport(t *testing.T) {
if _, err = ks2.Import(json, "new", "new"); err == nil { if _, err = ks2.Import(json, "new", "new"); err == nil {
t.Errorf("importing a key twice succeeded") t.Errorf("importing a key twice succeeded")
} }
} }
// TestImportRace tests the keystore on races. // TestImportRace tests the keystore on races.
@ -402,7 +401,6 @@ func TestImportRace(t *testing.T) {
if _, err := ks2.Import(json, "new", "new"); err != nil { if _, err := ks2.Import(json, "new", "new"); err != nil {
atomic.AddUint32(&atom, 1) atomic.AddUint32(&atom, 1)
} }
}() }()
} }
wg.Wait() wg.Wait()

View File

@ -138,7 +138,6 @@ func (ks keyStorePassphrase) JoinPath(filename string) string {
// Encryptdata encrypts the data given as 'data' with the password 'auth'. // Encryptdata encrypts the data given as 'data' with the password 'auth'.
func EncryptDataV3(data, auth []byte, scryptN, scryptP int) (CryptoJSON, error) { func EncryptDataV3(data, auth []byte, scryptN, scryptP int) (CryptoJSON, error) {
salt := make([]byte, 32) salt := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, salt); err != nil { if _, err := io.ReadFull(rand.Reader, salt); err != nil {
panic("reading from crypto/rand failed: " + err.Error()) panic("reading from crypto/rand failed: " + err.Error())
@ -341,7 +340,6 @@ func getKDFKey(cryptoJSON CryptoJSON, auth string) ([]byte, error) {
r := ensureInt(cryptoJSON.KDFParams["r"]) r := ensureInt(cryptoJSON.KDFParams["r"])
p := ensureInt(cryptoJSON.KDFParams["p"]) p := ensureInt(cryptoJSON.KDFParams["p"])
return scrypt.Key(authArray, salt, n, r, p, dkLen) return scrypt.Key(authArray, salt, n, r, p, dkLen)
} else if cryptoJSON.KDF == "pbkdf2" { } else if cryptoJSON.KDF == "pbkdf2" {
c := ensureInt(cryptoJSON.KDFParams["c"]) c := ensureInt(cryptoJSON.KDFParams["c"])
prf := cryptoJSON.KDFParams["prf"].(string) prf := cryptoJSON.KDFParams["prf"].(string)

View File

@ -52,7 +52,7 @@ func TestKeyEncryptDecrypt(t *testing.T) {
t.Errorf("test %d: key address mismatch: have %x, want %x", i, key.Address, address) t.Errorf("test %d: key address mismatch: have %x, want %x", i, key.Address, address)
} }
// Recrypt with a new password and start over // Recrypt with a new password and start over
password += "new data appended" password += "new data appended" // nolint: gosec
if keyjson, err = EncryptKey(key, password, veryLightScryptN, veryLightScryptP); err != nil { if keyjson, err = EncryptKey(key, password, veryLightScryptN, veryLightScryptP); err != nil {
t.Errorf("test %d: failed to recrypt key %v", i, err) t.Errorf("test %d: failed to recrypt key %v", i, err)
} }

View File

@ -178,7 +178,7 @@ func (s *SecureChannelSession) mutuallyAuthenticate() error {
return err return err
} }
if response.Sw1 != 0x90 || response.Sw2 != 0x00 { if response.Sw1 != 0x90 || response.Sw2 != 0x00 {
return fmt.Errorf("got unexpected response from MUTUALLY_AUTHENTICATE: 0x%x%x", response.Sw1, response.Sw2) return fmt.Errorf("got unexpected response from MUTUALLY_AUTHENTICATE: %#x%x", response.Sw1, response.Sw2)
} }
if len(response.Data) != scSecretLength { if len(response.Data) != scSecretLength {
@ -261,7 +261,7 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
rapdu.deserialize(plainData) rapdu.deserialize(plainData)
if rapdu.Sw1 != sw1Ok { if rapdu.Sw1 != sw1Ok {
return nil, fmt.Errorf("unexpected response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", cla, ins, rapdu.Sw1, rapdu.Sw2) return nil, fmt.Errorf("unexpected response status Cla=%#x, Ins=%#x, Sw=%#x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
} }
return rapdu, nil return rapdu, nil

View File

@ -167,7 +167,7 @@ func transmit(card *pcsc.Card, command *commandAPDU) (*responseAPDU, error) {
} }
if response.Sw1 != sw1Ok { if response.Sw1 != sw1Ok {
return nil, fmt.Errorf("unexpected insecure response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", command.Cla, command.Ins, response.Sw1, response.Sw2) return nil, fmt.Errorf("unexpected insecure response status Cla=%#x, Ins=%#x, Sw=%#x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
} }
return response, nil return response, nil
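The `%#x` substitutions in the two hunks above are cosmetic; a minimal sketch with made-up status bytes showing that both spellings print the same thing:

```go
package main

import "fmt"

func main() {
	sw1, sw2 := byte(0x69), byte(0x85) // example status words, not real card output
	fmt.Printf("0x%x%x\n", sw1, sw2)   // 0x6985
	fmt.Printf("%#x%x\n", sw1, sw2)    // 0x6985 -- %#x already emits the "0x" prefix
}
```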

View File

@ -32,9 +32,10 @@ func TestURLParsing(t *testing.T) {
t.Errorf("expected: %v, got: %v", "ethereum.org", url.Path) t.Errorf("expected: %v, got: %v", "ethereum.org", url.Path)
} }
_, err = parseURL("ethereum.org") for _, u := range []string{"ethereum.org", ""} {
if err == nil { if _, err = parseURL(u); err == nil {
t.Error("expected err, got: nil") t.Errorf("input %v, expected err, got: nil", u)
}
} }
} }

View File

@ -526,7 +526,6 @@ func (w *wallet) signHash(account accounts.Account, hash []byte) ([]byte, error)
// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed // SignData signs keccak256(data). The mimetype parameter describes the type of data being signed
func (w *wallet) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) { func (w *wallet) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) {
// Unless we are doing 712 signing, simply dispatch to signHash // Unless we are doing 712 signing, simply dispatch to signHash
if !(mimeType == accounts.MimetypeTypedData && len(data) == 66 && data[0] == 0x19 && data[1] == 0x01) { if !(mimeType == accounts.MimetypeTypedData && len(data) == 66 && data[0] == 0x19 && data[1] == 0x01) {
return w.signHash(account, crypto.Keccak256(data)) return w.signHash(account, crypto.Keccak256(data))

View File

@ -1,58 +1,38 @@
# This file contains sha256 checksums of optional build dependencies. # This file contains sha256 checksums of optional build dependencies.
efd43e0f1402e083b73a03d444b7b6576bb4c539ac46208b63a916b69aca4088 go1.18.1.src.tar.gz 4525aa6b0e3cecb57845f4060a7075aafc9ab752bb7b6b4cf8a212d43078e1e4 go1.18.4.src.tar.gz
3703e9a0db1000f18c0c7b524f3d378aac71219b4715a6a4c5683eb639f41a4d go1.18.1.darwin-amd64.tar.gz 315e1a2b21a827c68da1b7f492b5dcbe81d8df8a79ebe50922df9588893f87f0 go1.18.4.darwin-amd64.tar.gz
6d5641a06edba8cd6d425fb0adad06bad80e2afe0fa91b4aa0e5aed1bc78f58e go1.18.1.darwin-arm64.tar.gz 04eed623d5143ffa44965b618b509e0beccccfd3a4a1bfebc0cdbcf906046769 go1.18.4.darwin-arm64.tar.gz
b9a9063d4265d8ccc046c9b314194d6eadc47e56d0d637db81e98e68aad45035 go1.18.1.freebsd-386.tar.gz e5244fdcd6b6eaf785dbd8c6e02b4804a4d00409e7edecc63cd59fc8f37c34c5 go1.18.4.freebsd-386.tar.gz
2bc1c138d645e37dbbc63517dd1cf1bf33fc4cb95f442a6384df0418b5134e9f go1.18.1.freebsd-amd64.tar.gz fb00f8aaffcc80e0a2bd39db1d8e8e21ef0a691c564f7b7601383dd6adad4042 go1.18.4.freebsd-amd64.tar.gz
9a8df5dde9058f08ac01ecfaae42534610db398e487138788c01da26a0d41ff9 go1.18.1.linux-386.tar.gz 418232d905e18ece6cb13c4884bb1c68963d7d3b4d889671b3e5be8bd4059862 go1.18.4.linux-386.tar.gz
b3b815f47ababac13810fc6021eb73d65478e0b2db4b09d348eefad9581a2334 go1.18.1.linux-amd64.tar.gz c9b099b68d93f5c5c8a8844a89f8db07eaa58270e3a1e01804f17f4cf8df02f5 go1.18.4.linux-amd64.tar.gz
56a91851c97fb4697077abbca38860f735c32b38993ff79b088dac46e4735633 go1.18.1.linux-arm64.tar.gz 35014d92b50d97da41dade965df7ebeb9a715da600206aa59ce1b2d05527421f go1.18.4.linux-arm64.tar.gz
9edc01c8e7db64e9ceeffc8258359e027812886ceca3444e83c4eb96ddb068ee go1.18.1.linux-armv6l.tar.gz 7dfeab572e49638b0f3d9901457f0622c27b73301c2b99db9f5e9568ff40460c go1.18.4.linux-armv6l.tar.gz
33db623d1eecf362fe365107c12efc90eff0b9609e0b3345e258388019cb552a go1.18.1.linux-ppc64le.tar.gz f80acc4dc054ddc89ccc4869664e331bf16e0ac6e07830e94554162e66f66961 go1.18.4.linux-ppc64le.tar.gz
5d9301324148ed4dbfaa0800da43a843ffd65c834ee73fcf087255697c925f74 go1.18.1.linux-s390x.tar.gz 7e932f36e8f347feea2e706dcd32c1a464b1e5767ab2928ae460a37a975fe4a3 go1.18.4.linux-s390x.tar.gz
49ae65551acbfaa57b52fbefa0350b2072512ae3103b8cf1a919a02626dbc743 go1.18.1.windows-386.zip 6343010a13ab783e553786b3cc3b4d63080128f61cf1e963505139c71ca66a0d go1.18.4.windows-386.zip
c30bc3f1f7314a953fe208bd9cd5e24bd9403392a6c556ced3677f9f70f71fe1 go1.18.1.windows-amd64.zip dfb93c517e050ba0cfc066802b38a8e7cda2ef666efd634859356b33f543cc49 go1.18.4.windows-amd64.zip
2c4a8265030eac37f906634f5c13c22c3d0ea725f2488e1bca005c6b981653d7 go1.18.1.windows-arm64.zip 7d0d7b73592019d276f2bd44ee3cda0d8bd99356fdbf04fdb40c263518108ae4 go1.18.4.windows-arm64.zip
03c181fc1bb29ea3e73cbb23399c43b081063833a7cf7554b94e5a98308df53e golangci-lint-1.45.2-linux-riscv64.deb 658078aaaf7608693f37c4cf1380b2af418ab8b2d23fdb33e7e2d4339328590e golangci-lint-1.46.2-darwin-amd64.tar.gz
08a50bbbf451ede6d5354179eb3e14a5634e156dfa92cb9a2606f855a637e35b golangci-lint-1.45.2-linux-ppc64le.rpm 81f9b4afd62ec5e612ef8bc3b1d612a88b56ff289874831845cdad394427385f golangci-lint-1.46.2-darwin-arm64.tar.gz
0d12f6ec1296b5a70e392aa88cd2295cceef266165eb7028e675f455515dd1c9 golangci-lint-1.45.2-linux-armv7.deb 943486e703e62ec55ecd90caeb22bcd39f8cc3962a93eec18c06b7bae12cb46f golangci-lint-1.46.2-freebsd-386.tar.gz
10f2846e2e50e4ea8ae426ee62dcd2227b23adddd8e991aa3c065927ac948735 golangci-lint-1.45.2-linux-ppc64le.deb a75dd9ba7e08e8315c411697171db5375c0f6a1ece9e6fbeb9e9a4386822e17d golangci-lint-1.46.2-freebsd-amd64.tar.gz
1463049b744871168095e3e8f687247d6040eeb895955b869889ea151e0603ab golangci-lint-1.45.2-linux-arm64.tar.gz 83eedca1af72e8be055a1235177eb1b33524fbf08bec5730df2e6c3efade2b23 golangci-lint-1.46.2-freebsd-armv6.tar.gz
15720f9c4c6f9324af695f081dc189adc7751b255759e78d7b2df1d7e9192533 golangci-lint-1.45.2-linux-amd64.deb 513d276c490de6f82baa01f9346d8d78b385f2ae97608f42f05d1f0f1314cd54 golangci-lint-1.46.2-freebsd-armv7.tar.gz
166d922e4d3cfe3d47786c590154a9c8ea689dff0aa92b73d2f5fc74fc570c29 golangci-lint-1.45.2-linux-arm64.rpm 461a60016d516c69d406dc3e2d4957b722dbe684b7085dfac4802d0f84409e27 golangci-lint-1.46.2-linux-386.tar.gz
1a3754c69f7cc19ab89cbdcc2550da4cf9abb3120383c6b3bd440c1ec22da2e6 golangci-lint-1.45.2-freebsd-386.tar.gz 242cd4f2d6ac0556e315192e8555784d13da5d1874e51304711570769c4f2b9b golangci-lint-1.46.2-linux-amd64.tar.gz
1dec0aa46d4f0d241863b573f70129bdf1de9c595cf51172a840a588a4cd9fc5 golangci-lint-1.45.2-windows-amd64.zip ff5448ada2b3982581984d64b0dec614dba0a3ea4cab2d6a343c77927fc89f7e golangci-lint-1.46.2-linux-arm64.tar.gz
3198453806517c1ad988229f5e758ef850e671203f46d6905509df5bdf4dc24b golangci-lint-1.45.2-freebsd-armv7.tar.gz 177f5210ef04aee282bfbc6ec519d36af5fb7d2b2c8d3f4ea5e59fdba71b0a27 golangci-lint-1.46.2-linux-armv6.tar.gz
46a3cd1749d7b98adc2dc01510ddbe21abe42689c8a53fb0e81662713629f215 golangci-lint-1.45.2-linux-386.deb 10dd512a36ee978a1009edbca3ba3af410f0fda8df4d85f0e4793a24213870cc golangci-lint-1.46.2-linux-armv7.tar.gz
4e28bfb593d464b9e160f2acd5b71993836a183270bf8299b78ad31f7a168c0d golangci-lint-1.45.2-linux-arm64.deb 67779fa517c688c9db1090c3c456117d95c6b92979c623fe8cce8fb84251f21e golangci-lint-1.46.2-linux-mips64.tar.gz
5157a58c8f9ab85c33af2e46f0d7c57a3b1e8953b81d61130e292e09f545cfab golangci-lint-1.45.2-linux-mips64le.tar.gz c085f0f57bdccbb2c902a41b72ce210a3dfff16ca856789374745ab52004b6ee golangci-lint-1.46.2-linux-mips64le.tar.gz
518cd027644129fbf8ec4f02bd6f9ad7278aae826f92b63c80d4d0819ddde49a golangci-lint-1.45.2-linux-armv6.rpm abecef6421499248e58ed75d2938bc12b4b1f98b057f25060680b77bb51a881e golangci-lint-1.46.2-linux-ppc64le.tar.gz
595ad6c6dade4c064351bc309f411703e457f8ffbb7a1806b3d8ee713333427f golangci-lint-1.45.2-linux-amd64.tar.gz 134843a8f5c5c182c11979ea75f5866945d54757b2a04f3e5e04a0cf4fbf3a39 golangci-lint-1.46.2-linux-riscv64.tar.gz
6994d6c80f0730751090986184a3481b4be2e6b6e84416238a2b857910045a4f golangci-lint-1.45.2-windows-arm64.zip 9fe21a9476567aafe7a2e1a926b9641a39f920d4c0ea8eda9d968bc6136337f9 golangci-lint-1.46.2-linux-s390x.tar.gz
6c81652fc340118811b487f713c441fc6f527800bf5fd11b8929d08124efa015 golangci-lint-1.45.2-linux-armv7.tar.gz b48a421ec12a43f8fc8f977b9cf7d4a1ea1c4b97f803a238de7d3ce4ab23a84b golangci-lint-1.46.2-windows-386.zip
726cb045559b7518bafdd3459de70a0647c087eb1b4634627a4b2e95b1258580 golangci-lint-1.45.2-freebsd-amd64.tar.gz 604acc1378a566abb0eac799362f3a37b7fcb5fa2268aeb2d5d954c829367301 golangci-lint-1.46.2-windows-amd64.zip
77df3774cdfda49b956d4a0e676da9a9b883f496ee37293c530770fef6b1d24e golangci-lint-1.45.2-linux-mips64.deb 927def10db073da9687594072e6a3d9c891f67fa897105a2cfd715e018e7386c golangci-lint-1.46.2-windows-arm64.zip
7a9840f279a7d5d405bb434e101c2290964b3729630ac2add29280b962b7b9a5 golangci-lint-1.45.2-windows-armv6.zip 729b76ed1d8b4e2612e38772b211503cb940e00a137bbaace1aa066f7c943737 golangci-lint-1.46.2-windows-armv6.zip
7d4bf9a5d80ec467aaaf66e78dbdcab567bbc6ba8151334c714eee58766aae32 golangci-lint-1.45.2-windows-armv7.zip ea27c86d91e0b245ecbcfbf6cdb4ac0522d4bc6dca56bba02ea1bc77ad2917ac golangci-lint-1.46.2-windows-armv7.zip
7e5f8821d39bb11d273b0841b34355f56bd5a45a2d5179f0d09e614e0efc0482 golangci-lint-1.45.2-linux-s390x.rpm
828de1bde796b23d8656b17a8885fbd879ef612795d62d1e4618126b419728b5 golangci-lint-1.45.2-linux-mips64.rpm
879a52107a797678a03c175cc7cf441411a14a01f66dc87f70bdfa304a4129a6 golangci-lint-1.45.2-windows-386.zip
87b6c7e3a3769f7d9abeb3bb82119b3c91e3c975300f6834fdeef8b2e37c98ff golangci-lint-1.45.2-linux-amd64.rpm
8b605c6d686c8af53ecc4ef39544541eeb1644d34cc10f9ffc5087808210c4ff golangci-lint-1.45.2-linux-s390x.deb
9427dbf51d0ac6f73a0f992838bd40c817470cc5bf6c8e2e2bea6fac46d7af6e golangci-lint-1.45.2-linux-ppc64le.tar.gz
995e509e895ca6a64ffc7395ac884d5961bdec98423cb896b17f345a9b4a19cf golangci-lint-1.45.2-darwin-amd64.tar.gz
a3f36278f2ea5516341e9071a2df6e65df272be80230b5406a12b72c6d425bee golangci-lint-1.45.2-linux-armv7.rpm
a5e12c50c23e87ac1deffc872f92ae85427b1198604969399805ae47cfe43f08 golangci-lint-1.45.2-linux-riscv64.tar.gz
aa8fa1be0729dbc2fbc4e01e82027097613eee74bd686ebef20f860b01fff8b3 golangci-lint-1.45.2-freebsd-armv6.tar.gz
c2b9669decc1b638cf2ee9060571af4e255f6dfcbb225c293e3a7ee4bb2c7217 golangci-lint-1.45.2-darwin-arm64.tar.gz
dfa8bdaf0387aec1cd5c1aa8857f67b2bbdfc2e42efce540c8fb9bbe3e8af302 golangci-lint-1.45.2-linux-armv6.tar.gz
eb8b8539dd017eee5c131ea9b875893ab2cebeeca41e8c6624907fb02224d643 golangci-lint-1.45.2-linux-386.rpm
ed6c7e17a857f30d715c5302fa250d95936936b277024bffea201187a257d7a7 golangci-lint-1.45.2-linux-armv6.deb
ef4d0154ace4001f01b288baeb118176242efb4fd163e178763e3213b77ef30b golangci-lint-1.45.2-linux-mips64le.deb
ef7002a2229f5ff5ba201a715fcf877664ea88decbe58e69d163293913024955 golangci-lint-1.45.2-linux-s390x.tar.gz
f13ecbd09228632e6bbe91a8324bd675c406eed22eb6d2c1e8192eed9ec4f914 golangci-lint-1.45.2-linux-386.tar.gz
f4cd9cfb09252f51699407277512263cae8409b665dd764f55a34738d0e89edc golangci-lint-1.45.2-linux-riscv64.rpm
fb1945dc59d37c9d14bf0a4aea11ea8651fa0e1d582ea80c4c44d0a536c08893 golangci-lint-1.45.2-linux-mips64.tar.gz
fe542c22738010f453c735a3c410decfd3784d1bd394b395c298ee298fc4c606 golangci-lint-1.45.2-linux-mips64le.rpm


@ -149,7 +149,7 @@ var (
// This is the version of go that will be downloaded by // This is the version of go that will be downloaded by
// //
// go run ci.go install -dlgo // go run ci.go install -dlgo
dlgoVersion = "1.18.1" dlgoVersion = "1.18.4"
) )
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@ -224,6 +224,9 @@ func doInstall(cmdline []string) {
gobuild.Args = append(gobuild.Args, "-p", "1") gobuild.Args = append(gobuild.Args, "-p", "1")
} }
// Disable CLI markdown doc generation in release builds.
gobuild.Args = append(gobuild.Args, "-tags", "urfave_cli_no_docs")
// We use -trimpath to avoid leaking local paths into the built executables. // We use -trimpath to avoid leaking local paths into the built executables.
gobuild.Args = append(gobuild.Args, "-trimpath") gobuild.Args = append(gobuild.Args, "-trimpath")
@ -333,7 +336,7 @@ func doLint(cmdline []string) {
// downloadLinter downloads and unpacks golangci-lint. // downloadLinter downloads and unpacks golangci-lint.
func downloadLinter(cachedir string) string { func downloadLinter(cachedir string) string {
const version = "1.45.2" const version = "1.46.2"
csdb := build.MustLoadChecksums("build/checksums.txt") csdb := build.MustLoadChecksums("build/checksums.txt")
arch := runtime.GOARCH arch := runtime.GOARCH
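The checksum list above is what ci.go's MustLoadChecksums/downloadLinter path verifies downloaded toolchains and linters against. As a rough illustration only (the real logic lives in internal/build, and the file and archive names here are just examples), a sha256 comparison against build/checksums.txt could look like this in Go:

package main

import (
	"bufio"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// verifyChecksum hashes the archive on disk and compares it against the
// "<sha256>  <name>" entries in the checksum list. Comment lines start with '#'.
func verifyChecksum(listPath, archive string) error {
	f, err := os.Open(archive)
	if err != nil {
		return err
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return err
	}
	want := hex.EncodeToString(h.Sum(nil))

	list, err := os.Open(listPath)
	if err != nil {
		return err
	}
	defer list.Close()

	scanner := bufio.NewScanner(list)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) == 2 && fields[1] == archive {
			if fields[0] == want {
				return nil // hash matches the recorded checksum
			}
			return fmt.Errorf("checksum mismatch for %s", archive)
		}
	}
	if err := scanner.Err(); err != nil {
		return err
	}
	return fmt.Errorf("%s not listed in %s", archive, listPath)
}

func main() {
	// Hypothetical invocation: assumes the archive sits in the working directory.
	if err := verifyChecksum("build/checksums.txt", "go1.18.4.src.tar.gz"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}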


@ -0,0 +1,16 @@
_geth_bash_autocomplete() {
if [[ "${COMP_WORDS[0]}" != "source" ]]; then
local cur opts base
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
if [[ "$cur" == "-"* ]]; then
opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} ${cur} --generate-bash-completion )
else
opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )
fi
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
fi
}
complete -o bashdefault -o default -o nospace -F _geth_bash_autocomplete geth


@ -0,0 +1,18 @@
_geth_zsh_autocomplete() {
local -a opts
local cur
cur=${words[-1]}
if [[ "$cur" == "-"* ]]; then
opts=("${(@f)$(${words[@]:0:#words[@]-1} ${cur} --generate-bash-completion)}")
else
opts=("${(@f)$(${words[@]:0:#words[@]-1} --generate-bash-completion)}")
fi
if [[ "${opts[1]}" != "" ]]; then
_describe 'values' opts
else
_files
fi
}
compdef _geth_zsh_autocomplete geth


@ -1 +1,5 @@
build/bin/{{.BinaryName}} usr/bin build/bin/{{.BinaryName}} usr/bin
{{- if eq .BinaryName "geth" }}
build/deb/ethereum/completions/bash/geth etc/bash_completion.d
build/deb/ethereum/completions/zsh/_geth usr/share/zsh/vendor-completions
{{end -}}


@ -30,7 +30,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
@ -39,42 +39,44 @@ var (
gitDate = "" gitDate = ""
app *cli.App app *cli.App
)
var (
// Flags needed by abigen // Flags needed by abigen
abiFlag = cli.StringFlag{ abiFlag = &cli.StringFlag{
Name: "abi", Name: "abi",
Usage: "Path to the Ethereum contract ABI json to bind, - for STDIN", Usage: "Path to the Ethereum contract ABI json to bind, - for STDIN",
} }
binFlag = cli.StringFlag{ binFlag = &cli.StringFlag{
Name: "bin", Name: "bin",
Usage: "Path to the Ethereum contract bytecode (generate deploy method)", Usage: "Path to the Ethereum contract bytecode (generate deploy method)",
} }
typeFlag = cli.StringFlag{ typeFlag = &cli.StringFlag{
Name: "type", Name: "type",
Usage: "Struct name for the binding (default = package name)", Usage: "Struct name for the binding (default = package name)",
} }
jsonFlag = cli.StringFlag{ jsonFlag = &cli.StringFlag{
Name: "combined-json", Name: "combined-json",
Usage: "Path to the combined-json file generated by compiler", Usage: "Path to the combined-json file generated by compiler, - for STDIN",
} }
excFlag = cli.StringFlag{ excFlag = &cli.StringFlag{
Name: "exc", Name: "exc",
Usage: "Comma separated types to exclude from binding", Usage: "Comma separated types to exclude from binding",
} }
pkgFlag = cli.StringFlag{ pkgFlag = &cli.StringFlag{
Name: "pkg", Name: "pkg",
Usage: "Package name to generate the binding into", Usage: "Package name to generate the binding into",
} }
outFlag = cli.StringFlag{ outFlag = &cli.StringFlag{
Name: "out", Name: "out",
Usage: "Output file for the generated binding (default = stdout)", Usage: "Output file for the generated binding (default = stdout)",
} }
langFlag = cli.StringFlag{ langFlag = &cli.StringFlag{
Name: "lang", Name: "lang",
Usage: "Destination language for the bindings (go, java, objc)", Usage: "Destination language for the bindings (go, java, objc)",
Value: "go", Value: "go",
} }
aliasFlag = cli.StringFlag{ aliasFlag = &cli.StringFlag{
Name: "alias", Name: "alias",
Usage: "Comma separated aliases for function and event renaming, e.g. original1=alias1, original2=alias2", Usage: "Comma separated aliases for function and event renaming, e.g. original1=alias1, original2=alias2",
} }
@ -82,6 +84,7 @@ var (
func init() { func init() {
app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool") app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool")
app.Name = "abigen"
app.Flags = []cli.Flag{ app.Flags = []cli.Flag{
abiFlag, abiFlag,
binFlag, binFlag,
@ -93,17 +96,17 @@ func init() {
langFlag, langFlag,
aliasFlag, aliasFlag,
} }
app.Action = utils.MigrateFlags(abigen) app.Action = abigen
cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate
} }
func abigen(c *cli.Context) error { func abigen(c *cli.Context) error {
utils.CheckExclusive(c, abiFlag, jsonFlag) // Only one source can be selected. utils.CheckExclusive(c, abiFlag, jsonFlag) // Only one source can be selected.
if c.GlobalString(pkgFlag.Name) == "" {
if c.String(pkgFlag.Name) == "" {
utils.Fatalf("No destination package specified (--pkg)") utils.Fatalf("No destination package specified (--pkg)")
} }
var lang bind.Lang var lang bind.Lang
switch c.GlobalString(langFlag.Name) { switch c.String(langFlag.Name) {
case "go": case "go":
lang = bind.LangGo lang = bind.LangGo
case "java": case "java":
@ -112,7 +115,7 @@ func abigen(c *cli.Context) error {
lang = bind.LangObjC lang = bind.LangObjC
utils.Fatalf("Objc binding generation is uncompleted") utils.Fatalf("Objc binding generation is uncompleted")
default: default:
utils.Fatalf("Unsupported destination language \"%s\" (--lang)", c.GlobalString(langFlag.Name)) utils.Fatalf("Unsupported destination language \"%s\" (--lang)", c.String(langFlag.Name))
} }
// If the entire solidity code was specified, build and bind based on that // If the entire solidity code was specified, build and bind based on that
var ( var (
@ -123,13 +126,13 @@ func abigen(c *cli.Context) error {
libs = make(map[string]string) libs = make(map[string]string)
aliases = make(map[string]string) aliases = make(map[string]string)
) )
if c.GlobalString(abiFlag.Name) != "" { if c.String(abiFlag.Name) != "" {
// Load up the ABI, optional bytecode and type name from the parameters // Load up the ABI, optional bytecode and type name from the parameters
var ( var (
abi []byte abi []byte
err error err error
) )
input := c.GlobalString(abiFlag.Name) input := c.String(abiFlag.Name)
if input == "-" { if input == "-" {
abi, err = io.ReadAll(os.Stdin) abi, err = io.ReadAll(os.Stdin)
} else { } else {
@ -141,7 +144,7 @@ func abigen(c *cli.Context) error {
abis = append(abis, string(abi)) abis = append(abis, string(abi))
var bin []byte var bin []byte
if binFile := c.GlobalString(binFlag.Name); binFile != "" { if binFile := c.String(binFlag.Name); binFile != "" {
if bin, err = os.ReadFile(binFile); err != nil { if bin, err = os.ReadFile(binFile); err != nil {
utils.Fatalf("Failed to read input bytecode: %v", err) utils.Fatalf("Failed to read input bytecode: %v", err)
} }
@ -151,23 +154,32 @@ func abigen(c *cli.Context) error {
} }
bins = append(bins, string(bin)) bins = append(bins, string(bin))
kind := c.GlobalString(typeFlag.Name) kind := c.String(typeFlag.Name)
if kind == "" { if kind == "" {
kind = c.GlobalString(pkgFlag.Name) kind = c.String(pkgFlag.Name)
} }
types = append(types, kind) types = append(types, kind)
} else { } else {
// Generate the list of types to exclude from binding // Generate the list of types to exclude from binding
exclude := make(map[string]bool) exclude := make(map[string]bool)
for _, kind := range strings.Split(c.GlobalString(excFlag.Name), ",") { for _, kind := range strings.Split(c.String(excFlag.Name), ",") {
exclude[strings.ToLower(kind)] = true exclude[strings.ToLower(kind)] = true
} }
var contracts map[string]*compiler.Contract var contracts map[string]*compiler.Contract
if c.GlobalIsSet(jsonFlag.Name) { if c.IsSet(jsonFlag.Name) {
jsonOutput, err := os.ReadFile(c.GlobalString(jsonFlag.Name)) var (
input = c.String(jsonFlag.Name)
jsonOutput []byte
err error
)
if input == "-" {
jsonOutput, err = io.ReadAll(os.Stdin)
} else {
jsonOutput, err = os.ReadFile(input)
}
if err != nil { if err != nil {
utils.Fatalf("Failed to read combined-json from compiler: %v", err) utils.Fatalf("Failed to read combined-json: %v", err)
} }
contracts, err = compiler.ParseCombinedJSON(jsonOutput, "", "", "", "") contracts, err = compiler.ParseCombinedJSON(jsonOutput, "", "", "", "")
if err != nil { if err != nil {
@ -189,33 +201,37 @@ func abigen(c *cli.Context) error {
nameParts := strings.Split(name, ":") nameParts := strings.Split(name, ":")
types = append(types, nameParts[len(nameParts)-1]) types = append(types, nameParts[len(nameParts)-1])
libPattern := crypto.Keccak256Hash([]byte(name)).String()[2:36] // Derive the library placeholder which is a 34 character prefix of the
// hex encoding of the keccak256 hash of the fully qualified library name.
// Note that the fully qualified library name is the path of its source
// file and the library name separated by ":".
libPattern := crypto.Keccak256Hash([]byte(name)).String()[2:36] // the first 2 chars are 0x
libs[libPattern] = nameParts[len(nameParts)-1] libs[libPattern] = nameParts[len(nameParts)-1]
} }
} }
// Extract all aliases from the flags // Extract all aliases from the flags
if c.GlobalIsSet(aliasFlag.Name) { if c.IsSet(aliasFlag.Name) {
// We support multi-versions for aliasing // We support multi-versions for aliasing
// e.g. // e.g.
// foo=bar,foo2=bar2 // foo=bar,foo2=bar2
// foo:bar,foo2:bar2 // foo:bar,foo2:bar2
re := regexp.MustCompile(`(?:(\w+)[:=](\w+))`) re := regexp.MustCompile(`(?:(\w+)[:=](\w+))`)
submatches := re.FindAllStringSubmatch(c.GlobalString(aliasFlag.Name), -1) submatches := re.FindAllStringSubmatch(c.String(aliasFlag.Name), -1)
for _, match := range submatches { for _, match := range submatches {
aliases[match[1]] = match[2] aliases[match[1]] = match[2]
} }
} }
// Generate the contract binding // Generate the contract binding
code, err := bind.Bind(types, abis, bins, sigs, c.GlobalString(pkgFlag.Name), lang, libs, aliases) code, err := bind.Bind(types, abis, bins, sigs, c.String(pkgFlag.Name), lang, libs, aliases)
if err != nil { if err != nil {
utils.Fatalf("Failed to generate ABI binding: %v", err) utils.Fatalf("Failed to generate ABI binding: %v", err)
} }
// Either flush it out to a file or display on the standard output // Either flush it out to a file or display on the standard output
if !c.GlobalIsSet(outFlag.Name) { if !c.IsSet(outFlag.Name) {
fmt.Printf("%s\n", code) fmt.Printf("%s\n", code)
return nil return nil
} }
if err := os.WriteFile(c.GlobalString(outFlag.Name), []byte(code), 0600); err != nil { if err := os.WriteFile(c.String(outFlag.Name), []byte(code), 0600); err != nil {
utils.Fatalf("Failed to write ABI binding: %v", err) utils.Fatalf("Failed to write ABI binding: %v", err)
} }
return nil return nil


@ -28,12 +28,12 @@ import (
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
// newClient creates a client with specified remote URL. // newClient creates a client with specified remote URL.
func newClient(ctx *cli.Context) *ethclient.Client { func newClient(ctx *cli.Context) *ethclient.Client {
client, err := ethclient.Dial(ctx.GlobalString(nodeURLFlag.Name)) client, err := ethclient.Dial(ctx.String(nodeURLFlag.Name))
if err != nil { if err != nil {
utils.Fatalf("Failed to connect to Ethereum node: %v", err) utils.Fatalf("Failed to connect to Ethereum node: %v", err)
} }
@ -64,9 +64,9 @@ func getContractAddr(client *rpc.Client) common.Address {
func getCheckpoint(ctx *cli.Context, client *rpc.Client) *params.TrustedCheckpoint { func getCheckpoint(ctx *cli.Context, client *rpc.Client) *params.TrustedCheckpoint {
var checkpoint *params.TrustedCheckpoint var checkpoint *params.TrustedCheckpoint
if ctx.GlobalIsSet(indexFlag.Name) { if ctx.IsSet(indexFlag.Name) {
var result [3]string var result [3]string
index := uint64(ctx.GlobalInt64(indexFlag.Name)) index := uint64(ctx.Int64(indexFlag.Name))
if err := client.Call(&result, "les_getCheckpoint", index); err != nil { if err := client.Call(&result, "les_getCheckpoint", index); err != nil {
utils.Fatalf("Failed to get local checkpoint %v, please ensure the les API is exposed", err) utils.Fatalf("Failed to get local checkpoint %v, please ensure the les API is exposed", err)
} }


@ -36,10 +36,10 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var commandDeploy = cli.Command{ var commandDeploy = &cli.Command{
Name: "deploy", Name: "deploy",
Usage: "Deploy a new checkpoint oracle contract", Usage: "Deploy a new checkpoint oracle contract",
Flags: []cli.Flag{ Flags: []cli.Flag{
@ -49,10 +49,10 @@ var commandDeploy = cli.Command{
signersFlag, signersFlag,
thresholdFlag, thresholdFlag,
}, },
Action: utils.MigrateFlags(deploy), Action: deploy,
} }
var commandSign = cli.Command{ var commandSign = &cli.Command{
Name: "sign", Name: "sign",
Usage: "Sign the checkpoint with the specified key", Usage: "Sign the checkpoint with the specified key",
Flags: []cli.Flag{ Flags: []cli.Flag{
@ -63,10 +63,10 @@ var commandSign = cli.Command{
hashFlag, hashFlag,
oracleFlag, oracleFlag,
}, },
Action: utils.MigrateFlags(sign), Action: sign,
} }
var commandPublish = cli.Command{ var commandPublish = &cli.Command{
Name: "publish", Name: "publish",
Usage: "Publish a checkpoint into the oracle", Usage: "Publish a checkpoint into the oracle",
Flags: []cli.Flag{ Flags: []cli.Flag{
@ -76,7 +76,7 @@ var commandPublish = cli.Command{
indexFlag, indexFlag,
signaturesFlag, signaturesFlag,
}, },
Action: utils.MigrateFlags(publish), Action: publish,
} }
// deploy deploys the checkpoint registrar contract. // deploy deploys the checkpoint registrar contract.
@ -132,7 +132,7 @@ func sign(ctx *cli.Context) error {
node *rpc.Client node *rpc.Client
oracle *checkpointoracle.CheckpointOracle oracle *checkpointoracle.CheckpointOracle
) )
if !ctx.GlobalIsSet(nodeURLFlag.Name) { if !ctx.IsSet(nodeURLFlag.Name) {
// Offline mode signing // Offline mode signing
offline = true offline = true
if !ctx.IsSet(hashFlag.Name) { if !ctx.IsSet(hashFlag.Name) {
@ -151,7 +151,7 @@ func sign(ctx *cli.Context) error {
address = common.HexToAddress(ctx.String(oracleFlag.Name)) address = common.HexToAddress(ctx.String(oracleFlag.Name))
} else { } else {
// Interactive mode signing, retrieve the data from the remote node // Interactive mode signing, retrieve the data from the remote node
node = newRPCClient(ctx.GlobalString(nodeURLFlag.Name)) node = newRPCClient(ctx.String(nodeURLFlag.Name))
checkpoint := getCheckpoint(ctx, node) checkpoint := getCheckpoint(ctx, node)
chash, cindex, address = checkpoint.Hash(), checkpoint.SectionIndex, getContractAddr(node) chash, cindex, address = checkpoint.Hash(), checkpoint.SectionIndex, getContractAddr(node)
@ -265,7 +265,7 @@ func publish(ctx *cli.Context) error {
} }
// Retrieve the checkpoint we want to sign to sort the signatures // Retrieve the checkpoint we want to sign to sort the signatures
var ( var (
client = newRPCClient(ctx.GlobalString(nodeURLFlag.Name)) client = newRPCClient(ctx.String(nodeURLFlag.Name))
addr, oracle = newContract(client) addr, oracle = newContract(client)
checkpoint = getCheckpoint(ctx, client) checkpoint = getCheckpoint(ctx, client)
sighash = sighash(checkpoint.SectionIndex, addr, checkpoint.Hash()) sighash = sighash(checkpoint.SectionIndex, addr, checkpoint.Hash())


@ -25,20 +25,20 @@ import (
"github.com/ethereum/go-ethereum/common/fdlimit" "github.com/ethereum/go-ethereum/common/fdlimit"
"github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
// Git SHA1 commit hash of the release (set via linker flags) // Git SHA1 commit hash of the release (set via linker flags)
gitCommit = "" gitCommit = ""
gitDate = "" gitDate = ""
)
var app *cli.App app *cli.App
)
func init() { func init() {
app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool") app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool")
app.Commands = []cli.Command{ app.Commands = []*cli.Command{
commandStatus, commandStatus,
commandDeploy, commandDeploy,
commandSign, commandSign,
@ -48,46 +48,45 @@ func init() {
oracleFlag, oracleFlag,
nodeURLFlag, nodeURLFlag,
} }
cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate
} }
// Commonly used command line flags. // Commonly used command line flags.
var ( var (
indexFlag = cli.Int64Flag{ indexFlag = &cli.Int64Flag{
Name: "index", Name: "index",
Usage: "Checkpoint index (query latest from remote node if not specified)", Usage: "Checkpoint index (query latest from remote node if not specified)",
} }
hashFlag = cli.StringFlag{ hashFlag = &cli.StringFlag{
Name: "hash", Name: "hash",
Usage: "Checkpoint hash (query latest from remote node if not specified)", Usage: "Checkpoint hash (query latest from remote node if not specified)",
} }
oracleFlag = cli.StringFlag{ oracleFlag = &cli.StringFlag{
Name: "oracle", Name: "oracle",
Usage: "Checkpoint oracle address (query from remote node if not specified)", Usage: "Checkpoint oracle address (query from remote node if not specified)",
} }
thresholdFlag = cli.Int64Flag{ thresholdFlag = &cli.Int64Flag{
Name: "threshold", Name: "threshold",
Usage: "Minimal number of signatures required to approve a checkpoint", Usage: "Minimal number of signatures required to approve a checkpoint",
} }
nodeURLFlag = cli.StringFlag{ nodeURLFlag = &cli.StringFlag{
Name: "rpc", Name: "rpc",
Value: "http://localhost:8545", Value: "http://localhost:8545",
Usage: "The rpc endpoint of a local or remote geth node", Usage: "The rpc endpoint of a local or remote geth node",
} }
clefURLFlag = cli.StringFlag{ clefURLFlag = &cli.StringFlag{
Name: "clef", Name: "clef",
Value: "http://localhost:8550", Value: "http://localhost:8550",
Usage: "The rpc endpoint of clef", Usage: "The rpc endpoint of clef",
} }
signerFlag = cli.StringFlag{ signerFlag = &cli.StringFlag{
Name: "signer", Name: "signer",
Usage: "Signer address for clef signing", Usage: "Signer address for clef signing",
} }
signersFlag = cli.StringFlag{ signersFlag = &cli.StringFlag{
Name: "signers", Name: "signers",
Usage: "Comma separated accounts of trusted checkpoint signers", Usage: "Comma separated accounts of trusted checkpoint signers",
} }
signaturesFlag = cli.StringFlag{ signaturesFlag = &cli.StringFlag{
Name: "signatures", Name: "signatures",
Usage: "Comma separated checkpoint signatures to submit", Usage: "Comma separated checkpoint signatures to submit",
} }
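These flag and command declarations show the core of the gopkg.in/urfave/cli.v1 to github.com/urfave/cli/v2 migration running through this commit: flags and commands become pointers, actions take *cli.Context and return an error without the utils.MigrateFlags wrapper, and values are read with ctx.String/ctx.IsSet instead of the removed Global* accessors. A small self-contained sketch of the v2 pattern (names here are illustrative, not taken from this repository):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

var nodeURLFlag = &cli.StringFlag{
	Name:  "rpc",
	Value: "http://localhost:8545",
	Usage: "RPC endpoint of a geth node",
}

var statusCommand = &cli.Command{
	Name:  "status",
	Usage: "Print the configured endpoint",
	Flags: []cli.Flag{nodeURLFlag},
	Action: func(ctx *cli.Context) error { // v2 actions return an error directly
		if !ctx.IsSet(nodeURLFlag.Name) {
			fmt.Println("using default endpoint")
		}
		fmt.Println("rpc =", ctx.String(nodeURLFlag.Name))
		return nil
	},
}

func main() {
	app := &cli.App{
		Name:     "demo",
		Commands: []*cli.Command{statusCommand}, // commands are pointers in v2
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}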


@ -19,24 +19,23 @@ package main
import ( import (
"fmt" "fmt"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var commandStatus = cli.Command{ var commandStatus = &cli.Command{
Name: "status", Name: "status",
Usage: "Fetches the signers and checkpoint status of the oracle contract", Usage: "Fetches the signers and checkpoint status of the oracle contract",
Flags: []cli.Flag{ Flags: []cli.Flag{
nodeURLFlag, nodeURLFlag,
}, },
Action: utils.MigrateFlags(status), Action: status,
} }
// status fetches the admin list of specified registrar contract. // status fetches the admin list of specified registrar contract.
func status(ctx *cli.Context) error { func status(ctx *cli.Context) error {
// Create a wrapper around the checkpoint oracle contract // Create a wrapper around the checkpoint oracle contract
addr, oracle := newContract(newRPCClient(ctx.GlobalString(nodeURLFlag.Name))) addr, oracle := newContract(newRPCClient(ctx.String(nodeURLFlag.Name)))
fmt.Printf("Oracle => %s\n", addr.Hex()) fmt.Printf("Oracle => %s\n", addr.Hex())
fmt.Println() fmt.Println()


@ -30,7 +30,6 @@ import (
"os/signal" "os/signal"
"path/filepath" "path/filepath"
"runtime" "runtime"
"sort"
"strings" "strings"
"time" "time"
@ -55,7 +54,7 @@ import (
"github.com/ethereum/go-ethereum/signer/storage" "github.com/ethereum/go-ethereum/signer/storage"
"github.com/mattn/go-colorable" "github.com/mattn/go-colorable"
"github.com/mattn/go-isatty" "github.com/mattn/go-isatty"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
const legalWarning = ` const legalWarning = `
@ -73,70 +72,70 @@ PURPOSE. See the GNU General Public License for more details.
` `
var ( var (
logLevelFlag = cli.IntFlag{ logLevelFlag = &cli.IntFlag{
Name: "loglevel", Name: "loglevel",
Value: 4, Value: 4,
Usage: "log level to emit to the screen", Usage: "log level to emit to the screen",
} }
advancedMode = cli.BoolFlag{ advancedMode = &cli.BoolFlag{
Name: "advanced", Name: "advanced",
Usage: "If enabled, issues warnings instead of rejections for suspicious requests. Default off", Usage: "If enabled, issues warnings instead of rejections for suspicious requests. Default off",
} }
acceptFlag = cli.BoolFlag{ acceptFlag = &cli.BoolFlag{
Name: "suppress-bootwarn", Name: "suppress-bootwarn",
Usage: "If set, does not show the warning during boot", Usage: "If set, does not show the warning during boot",
} }
keystoreFlag = cli.StringFlag{ keystoreFlag = &cli.StringFlag{
Name: "keystore", Name: "keystore",
Value: filepath.Join(node.DefaultDataDir(), "keystore"), Value: filepath.Join(node.DefaultDataDir(), "keystore"),
Usage: "Directory for the keystore", Usage: "Directory for the keystore",
} }
configdirFlag = cli.StringFlag{ configdirFlag = &cli.StringFlag{
Name: "configdir", Name: "configdir",
Value: DefaultConfigDir(), Value: DefaultConfigDir(),
Usage: "Directory for Clef configuration", Usage: "Directory for Clef configuration",
} }
chainIdFlag = cli.Int64Flag{ chainIdFlag = &cli.Int64Flag{
Name: "chainid", Name: "chainid",
Value: params.MainnetChainConfig.ChainID.Int64(), Value: params.MainnetChainConfig.ChainID.Int64(),
Usage: "Chain id to use for signing (1=mainnet, 3=Ropsten, 4=Rinkeby, 5=Goerli)", Usage: "Chain id to use for signing (1=mainnet, 3=Ropsten, 4=Rinkeby, 5=Goerli)",
} }
rpcPortFlag = cli.IntFlag{ rpcPortFlag = &cli.IntFlag{
Name: "http.port", Name: "http.port",
Usage: "HTTP-RPC server listening port", Usage: "HTTP-RPC server listening port",
Value: node.DefaultHTTPPort + 5, Value: node.DefaultHTTPPort + 5,
Category: flags.APICategory,
} }
signerSecretFlag = cli.StringFlag{ signerSecretFlag = &cli.StringFlag{
Name: "signersecret", Name: "signersecret",
Usage: "A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash", Usage: "A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash",
} }
customDBFlag = cli.StringFlag{ customDBFlag = &cli.StringFlag{
Name: "4bytedb-custom", Name: "4bytedb-custom",
Usage: "File used for writing new 4byte-identifiers submitted via API", Usage: "File used for writing new 4byte-identifiers submitted via API",
Value: "./4byte-custom.json", Value: "./4byte-custom.json",
} }
auditLogFlag = cli.StringFlag{ auditLogFlag = &cli.StringFlag{
Name: "auditlog", Name: "auditlog",
Usage: "File used to emit audit logs. Set to \"\" to disable", Usage: "File used to emit audit logs. Set to \"\" to disable",
Value: "audit.log", Value: "audit.log",
} }
ruleFlag = cli.StringFlag{ ruleFlag = &cli.StringFlag{
Name: "rules", Name: "rules",
Usage: "Path to the rule file to auto-authorize requests with", Usage: "Path to the rule file to auto-authorize requests with",
} }
stdiouiFlag = cli.BoolFlag{ stdiouiFlag = &cli.BoolFlag{
Name: "stdio-ui", Name: "stdio-ui",
Usage: "Use STDIN/STDOUT as a channel for an external UI. " + Usage: "Use STDIN/STDOUT as a channel for an external UI. " +
"This means that an STDIN/STDOUT is used for RPC-communication with a e.g. a graphical user " + "This means that an STDIN/STDOUT is used for RPC-communication with a e.g. a graphical user " +
"interface, and can be used when Clef is started by an external process.", "interface, and can be used when Clef is started by an external process.",
} }
testFlag = cli.BoolFlag{ testFlag = &cli.BoolFlag{
Name: "stdio-ui-test", Name: "stdio-ui-test",
Usage: "Mechanism to test interface between Clef and UI. Requires 'stdio-ui'.", Usage: "Mechanism to test interface between Clef and UI. Requires 'stdio-ui'.",
} }
app = cli.NewApp() initCommand = &cli.Command{
initCommand = cli.Command{ Action: initializeSecrets,
Action: utils.MigrateFlags(initializeSecrets),
Name: "init", Name: "init",
Usage: "Initialize the signer, generate secret storage", Usage: "Initialize the signer, generate secret storage",
ArgsUsage: "", ArgsUsage: "",
@ -148,8 +147,8 @@ var (
The init command generates a master seed which Clef can use to store credentials and data needed for The init command generates a master seed which Clef can use to store credentials and data needed for
the rule-engine to work.`, the rule-engine to work.`,
} }
attestCommand = cli.Command{ attestCommand = &cli.Command{
Action: utils.MigrateFlags(attestFile), Action: attestFile,
Name: "attest", Name: "attest",
Usage: "Attest that a js-file is to be used", Usage: "Attest that a js-file is to be used",
ArgsUsage: "<sha256sum>", ArgsUsage: "<sha256sum>",
@ -165,8 +164,8 @@ incoming requests.
Whenever you make an edit to the rule file, you need to use attestation to tell Whenever you make an edit to the rule file, you need to use attestation to tell
Clef that the file is 'safe' to execute.`, Clef that the file is 'safe' to execute.`,
} }
setCredentialCommand = cli.Command{ setCredentialCommand = &cli.Command{
Action: utils.MigrateFlags(setCredential), Action: setCredential,
Name: "setpw", Name: "setpw",
Usage: "Store a credential for a keystore file", Usage: "Store a credential for a keystore file",
ArgsUsage: "<address>", ArgsUsage: "<address>",
@ -178,8 +177,8 @@ Clef that the file is 'safe' to execute.`,
Description: ` Description: `
The setpw command stores a password for a given address (keyfile). The setpw command stores a password for a given address (keyfile).
`} `}
delCredentialCommand = cli.Command{ delCredentialCommand = &cli.Command{
Action: utils.MigrateFlags(removeCredential), Action: removeCredential,
Name: "delpw", Name: "delpw",
Usage: "Remove a credential for a keystore file", Usage: "Remove a credential for a keystore file",
ArgsUsage: "<address>", ArgsUsage: "<address>",
@ -191,8 +190,8 @@ The setpw command stores a password for a given address (keyfile).
Description: ` Description: `
The delpw command removes a password for a given address (keyfile). The delpw command removes a password for a given address (keyfile).
`} `}
newAccountCommand = cli.Command{ newAccountCommand = &cli.Command{
Action: utils.MigrateFlags(newAccount), Action: newAccount,
Name: "newaccount", Name: "newaccount",
Usage: "Create a new account", Usage: "Create a new account",
ArgsUsage: "", ArgsUsage: "",
@ -207,7 +206,7 @@ The newaccount command creates a new keystore-backed account. It is a convenienc
which can be used in lieu of an external UI.`, which can be used in lieu of an external UI.`,
} }
gendocCommand = cli.Command{ gendocCommand = &cli.Command{
Action: GenDoc, Action: GenDoc,
Name: "gendoc", Name: "gendoc",
Usage: "Generate documentation about json-rpc format", Usage: "Generate documentation about json-rpc format",
@ -216,39 +215,16 @@ The gendoc generates example structures of the json-rpc communication types.
`} `}
) )
// AppHelpFlagGroups is the application flags, grouped by functionality. var (
var AppHelpFlagGroups = []flags.FlagGroup{ // Git SHA1 commit hash of the release (set via linker flags)
{ gitCommit = ""
Name: "FLAGS", gitDate = ""
Flags: []cli.Flag{
logLevelFlag, app = flags.NewApp(gitCommit, gitDate, "Manage Ethereum account operations")
keystoreFlag, )
configdirFlag,
chainIdFlag,
utils.LightKDFFlag,
utils.NoUSBFlag,
utils.SmartCardDaemonPathFlag,
utils.HTTPListenAddrFlag,
utils.HTTPVirtualHostsFlag,
utils.IPCDisabledFlag,
utils.IPCPathFlag,
utils.HTTPEnabledFlag,
rpcPortFlag,
signerSecretFlag,
customDBFlag,
auditLogFlag,
ruleFlag,
stdiouiFlag,
testFlag,
advancedMode,
acceptFlag,
},
},
}
func init() { func init() {
app.Name = "Clef" app.Name = "Clef"
app.Usage = "Manage Ethereum account operations"
app.Flags = []cli.Flag{ app.Flags = []cli.Flag{
logLevelFlag, logLevelFlag,
keystoreFlag, keystoreFlag,
@ -273,46 +249,12 @@ func init() {
acceptFlag, acceptFlag,
} }
app.Action = signer app.Action = signer
app.Commands = []cli.Command{initCommand, app.Commands = []*cli.Command{initCommand,
attestCommand, attestCommand,
setCredentialCommand, setCredentialCommand,
delCredentialCommand, delCredentialCommand,
newAccountCommand, newAccountCommand,
gendocCommand} gendocCommand,
cli.CommandHelpTemplate = flags.CommandHelpTemplate
// Override the default app help template
cli.AppHelpTemplate = flags.ClefAppHelpTemplate
// Override the default app help printer, but only for the global app help
originalHelpPrinter := cli.HelpPrinter
cli.HelpPrinter = func(w io.Writer, tmpl string, data interface{}) {
if tmpl == flags.ClefAppHelpTemplate {
// Render out custom usage screen
originalHelpPrinter(w, tmpl, flags.HelpData{App: data, FlagGroups: AppHelpFlagGroups})
} else if tmpl == flags.CommandHelpTemplate {
// Iterate over all command specific flags and categorize them
categorized := make(map[string][]cli.Flag)
for _, flag := range data.(cli.Command).Flags {
if _, ok := categorized[flag.String()]; !ok {
categorized[flags.FlagCategory(flag, AppHelpFlagGroups)] = append(categorized[flags.FlagCategory(flag, AppHelpFlagGroups)], flag)
}
}
// sort to get a stable ordering
sorted := make([]flags.FlagGroup, 0, len(categorized))
for cat, flgs := range categorized {
sorted = append(sorted, flags.FlagGroup{Name: cat, Flags: flgs})
}
sort.Sort(flags.ByCategory(sorted))
// add sorted array to data and render with default printer
originalHelpPrinter(w, tmpl, map[string]interface{}{
"cmd": data,
"categorizedFlags": sorted,
})
} else {
originalHelpPrinter(w, tmpl, data)
}
} }
} }
@ -329,7 +271,7 @@ func initializeSecrets(c *cli.Context) error {
return err return err
} }
// Ensure the master key does not yet exist, we're not willing to overwrite // Ensure the master key does not yet exist, we're not willing to overwrite
configDir := c.GlobalString(configdirFlag.Name) configDir := c.String(configdirFlag.Name)
if err := os.Mkdir(configDir, 0700); err != nil && !os.IsExist(err) { if err := os.Mkdir(configDir, 0700); err != nil && !os.IsExist(err) {
return err return err
} }
@ -347,7 +289,7 @@ func initializeSecrets(c *cli.Context) error {
return fmt.Errorf("failed to read enough random") return fmt.Errorf("failed to read enough random")
} }
n, p := keystore.StandardScryptN, keystore.StandardScryptP n, p := keystore.StandardScryptN, keystore.StandardScryptP
if c.GlobalBool(utils.LightKDFFlag.Name) { if c.Bool(utils.LightKDFFlag.Name) {
n, p = keystore.LightScryptN, keystore.LightScryptP n, p = keystore.LightScryptN, keystore.LightScryptP
} }
text := "The master seed of clef will be locked with a password.\nPlease specify a password. Do not forget this password!" text := "The master seed of clef will be locked with a password.\nPlease specify a password. Do not forget this password!"
@ -390,8 +332,9 @@ You should treat 'masterseed.json' with utmost secrecy and make a backup of it!
`) `)
return nil return nil
} }
func attestFile(ctx *cli.Context) error { func attestFile(ctx *cli.Context) error {
if len(ctx.Args()) < 1 { if ctx.NArg() < 1 {
utils.Fatalf("This command requires an argument.") utils.Fatalf("This command requires an argument.")
} }
if err := initialize(ctx); err != nil { if err := initialize(ctx); err != nil {
@ -402,7 +345,7 @@ func attestFile(ctx *cli.Context) error {
if err != nil { if err != nil {
utils.Fatalf(err.Error()) utils.Fatalf(err.Error())
} }
configDir := ctx.GlobalString(configdirFlag.Name) configDir := ctx.String(configdirFlag.Name)
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10])) vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10]))
confKey := crypto.Keccak256([]byte("config"), stretchedKey) confKey := crypto.Keccak256([]byte("config"), stretchedKey)
@ -415,7 +358,7 @@ func attestFile(ctx *cli.Context) error {
} }
func setCredential(ctx *cli.Context) error { func setCredential(ctx *cli.Context) error {
if len(ctx.Args()) < 1 { if ctx.NArg() < 1 {
utils.Fatalf("This command requires an address to be passed as an argument") utils.Fatalf("This command requires an address to be passed as an argument")
} }
if err := initialize(ctx); err != nil { if err := initialize(ctx); err != nil {
@ -433,7 +376,7 @@ func setCredential(ctx *cli.Context) error {
if err != nil { if err != nil {
utils.Fatalf(err.Error()) utils.Fatalf(err.Error())
} }
configDir := ctx.GlobalString(configdirFlag.Name) configDir := ctx.String(configdirFlag.Name)
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10])) vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10]))
pwkey := crypto.Keccak256([]byte("credentials"), stretchedKey) pwkey := crypto.Keccak256([]byte("credentials"), stretchedKey)
@ -445,7 +388,7 @@ func setCredential(ctx *cli.Context) error {
} }
func removeCredential(ctx *cli.Context) error { func removeCredential(ctx *cli.Context) error {
if len(ctx.Args()) < 1 { if ctx.NArg() < 1 {
utils.Fatalf("This command requires an address to be passed as an argument") utils.Fatalf("This command requires an address to be passed as an argument")
} }
if err := initialize(ctx); err != nil { if err := initialize(ctx); err != nil {
@ -461,7 +404,7 @@ func removeCredential(ctx *cli.Context) error {
if err != nil { if err != nil {
utils.Fatalf(err.Error()) utils.Fatalf(err.Error())
} }
configDir := ctx.GlobalString(configdirFlag.Name) configDir := ctx.String(configdirFlag.Name)
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10])) vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10]))
pwkey := crypto.Keccak256([]byte("credentials"), stretchedKey) pwkey := crypto.Keccak256([]byte("credentials"), stretchedKey)
@ -481,8 +424,8 @@ func newAccount(c *cli.Context) error {
var ( var (
ui = core.NewCommandlineUI() ui = core.NewCommandlineUI()
pwStorage storage.Storage = &storage.NoStorage{} pwStorage storage.Storage = &storage.NoStorage{}
ksLoc = c.GlobalString(keystoreFlag.Name) ksLoc = c.String(keystoreFlag.Name)
lightKdf = c.GlobalBool(utils.LightKDFFlag.Name) lightKdf = c.Bool(utils.LightKDFFlag.Name)
) )
log.Info("Starting clef", "keystore", ksLoc, "light-kdf", lightKdf) log.Info("Starting clef", "keystore", ksLoc, "light-kdf", lightKdf)
am := core.StartClefAccountManager(ksLoc, true, lightKdf, "") am := core.StartClefAccountManager(ksLoc, true, lightKdf, "")
@ -500,13 +443,13 @@ func newAccount(c *cli.Context) error {
func initialize(c *cli.Context) error { func initialize(c *cli.Context) error {
// Set up the logger to print everything // Set up the logger to print everything
logOutput := os.Stdout logOutput := os.Stdout
if c.GlobalBool(stdiouiFlag.Name) { if c.Bool(stdiouiFlag.Name) {
logOutput = os.Stderr logOutput = os.Stderr
// If using the stdioui, we can't do the 'confirm'-flow // If using the stdioui, we can't do the 'confirm'-flow
if !c.GlobalBool(acceptFlag.Name) { if !c.Bool(acceptFlag.Name) {
fmt.Fprint(logOutput, legalWarning) fmt.Fprint(logOutput, legalWarning)
} }
} else if !c.GlobalBool(acceptFlag.Name) { } else if !c.Bool(acceptFlag.Name) {
if !confirm(legalWarning) { if !confirm(legalWarning) {
return fmt.Errorf("aborted by user") return fmt.Errorf("aborted by user")
} }
@ -545,8 +488,8 @@ func ipcEndpoint(ipcPath, datadir string) string {
func signer(c *cli.Context) error { func signer(c *cli.Context) error {
// If we have some unrecognized command, bail out // If we have some unrecognized command, bail out
if args := c.Args(); len(args) > 0 { if c.NArg() > 0 {
return fmt.Errorf("invalid command: %q", args[0]) return fmt.Errorf("invalid command: %q", c.Args().First())
} }
if err := initialize(c); err != nil { if err := initialize(c); err != nil {
return err return err
@ -554,7 +497,7 @@ func signer(c *cli.Context) error {
var ( var (
ui core.UIClientAPI ui core.UIClientAPI
) )
if c.GlobalBool(stdiouiFlag.Name) { if c.Bool(stdiouiFlag.Name) {
log.Info("Using stdin/stdout as UI-channel") log.Info("Using stdin/stdout as UI-channel")
ui = core.NewStdIOUI() ui = core.NewStdIOUI()
} else { } else {
@ -562,7 +505,7 @@ func signer(c *cli.Context) error {
ui = core.NewCommandlineUI() ui = core.NewCommandlineUI()
} }
// 4bytedb data // 4bytedb data
fourByteLocal := c.GlobalString(customDBFlag.Name) fourByteLocal := c.String(customDBFlag.Name)
db, err := fourbyte.NewWithFile(fourByteLocal) db, err := fourbyte.NewWithFile(fourByteLocal)
if err != nil { if err != nil {
utils.Fatalf(err.Error()) utils.Fatalf(err.Error())
@ -574,7 +517,7 @@ func signer(c *cli.Context) error {
api core.ExternalAPI api core.ExternalAPI
pwStorage storage.Storage = &storage.NoStorage{} pwStorage storage.Storage = &storage.NoStorage{}
) )
configDir := c.GlobalString(configdirFlag.Name) configDir := c.String(configdirFlag.Name)
if stretchedKey, err := readMasterKey(c, ui); err != nil { if stretchedKey, err := readMasterKey(c, ui); err != nil {
log.Warn("Failed to open master, rules disabled", "err", err) log.Warn("Failed to open master, rules disabled", "err", err)
} else { } else {
@ -591,7 +534,7 @@ func signer(c *cli.Context) error {
configStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "config.json"), confkey) configStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "config.json"), confkey)
// Do we have a rule-file? // Do we have a rule-file?
if ruleFile := c.GlobalString(ruleFlag.Name); ruleFile != "" { if ruleFile := c.String(ruleFlag.Name); ruleFile != "" {
ruleJS, err := os.ReadFile(ruleFile) ruleJS, err := os.ReadFile(ruleFile)
if err != nil { if err != nil {
log.Warn("Could not load rules, disabling", "file", ruleFile, "err", err) log.Warn("Could not load rules, disabling", "file", ruleFile, "err", err)
@ -615,12 +558,12 @@ func signer(c *cli.Context) error {
} }
} }
var ( var (
chainId = c.GlobalInt64(chainIdFlag.Name) chainId = c.Int64(chainIdFlag.Name)
ksLoc = c.GlobalString(keystoreFlag.Name) ksLoc = c.String(keystoreFlag.Name)
lightKdf = c.GlobalBool(utils.LightKDFFlag.Name) lightKdf = c.Bool(utils.LightKDFFlag.Name)
advanced = c.GlobalBool(advancedMode.Name) advanced = c.Bool(advancedMode.Name)
nousb = c.GlobalBool(utils.NoUSBFlag.Name) nousb = c.Bool(utils.NoUSBFlag.Name)
scpath = c.GlobalString(utils.SmartCardDaemonPathFlag.Name) scpath = c.String(utils.SmartCardDaemonPathFlag.Name)
) )
log.Info("Starting signer", "chainid", chainId, "keystore", ksLoc, log.Info("Starting signer", "chainid", chainId, "keystore", ksLoc,
"light-kdf", lightKdf, "advanced", advanced) "light-kdf", lightKdf, "advanced", advanced)
@ -632,7 +575,7 @@ func signer(c *cli.Context) error {
ui.RegisterUIServer(core.NewUIServerAPI(apiImpl)) ui.RegisterUIServer(core.NewUIServerAPI(apiImpl))
api = apiImpl api = apiImpl
// Audit logging // Audit logging
if logfile := c.GlobalString(auditLogFlag.Name); logfile != "" { if logfile := c.String(auditLogFlag.Name); logfile != "" {
api, err = core.NewAuditLogger(logfile, api) api, err = core.NewAuditLogger(logfile, api)
if err != nil { if err != nil {
utils.Fatalf(err.Error()) utils.Fatalf(err.Error())
@ -647,16 +590,15 @@ func signer(c *cli.Context) error {
rpcAPI := []rpc.API{ rpcAPI := []rpc.API{
{ {
Namespace: "account", Namespace: "account",
Public: true,
Service: api, Service: api,
Version: "1.0"}, },
} }
if c.GlobalBool(utils.HTTPEnabledFlag.Name) { if c.Bool(utils.HTTPEnabledFlag.Name) {
vhosts := utils.SplitAndTrim(c.GlobalString(utils.HTTPVirtualHostsFlag.Name)) vhosts := utils.SplitAndTrim(c.String(utils.HTTPVirtualHostsFlag.Name))
cors := utils.SplitAndTrim(c.GlobalString(utils.HTTPCORSDomainFlag.Name)) cors := utils.SplitAndTrim(c.String(utils.HTTPCORSDomainFlag.Name))
srv := rpc.NewServer() srv := rpc.NewServer()
err := node.RegisterApis(rpcAPI, []string{"account"}, srv, false) err := node.RegisterApis(rpcAPI, []string{"account"}, srv)
if err != nil { if err != nil {
utils.Fatalf("Could not register API: %w", err) utils.Fatalf("Could not register API: %w", err)
} }
@ -666,7 +608,7 @@ func signer(c *cli.Context) error {
port := c.Int(rpcPortFlag.Name) port := c.Int(rpcPortFlag.Name)
// start http server // start http server
httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.HTTPListenAddrFlag.Name), port) httpEndpoint := fmt.Sprintf("%s:%d", c.String(utils.HTTPListenAddrFlag.Name), port)
httpServer, addr, err := node.StartHTTPEndpoint(httpEndpoint, rpc.DefaultHTTPTimeouts, handler) httpServer, addr, err := node.StartHTTPEndpoint(httpEndpoint, rpc.DefaultHTTPTimeouts, handler)
if err != nil { if err != nil {
utils.Fatalf("Could not start RPC api: %v", err) utils.Fatalf("Could not start RPC api: %v", err)
@ -680,8 +622,8 @@ func signer(c *cli.Context) error {
log.Info("HTTP endpoint closed", "url", extapiURL) log.Info("HTTP endpoint closed", "url", extapiURL)
}() }()
} }
if !c.GlobalBool(utils.IPCDisabledFlag.Name) { if !c.Bool(utils.IPCDisabledFlag.Name) {
givenPath := c.GlobalString(utils.IPCPathFlag.Name) givenPath := c.String(utils.IPCPathFlag.Name)
ipcapiURL = ipcEndpoint(filepath.Join(givenPath, "clef.ipc"), configDir) ipcapiURL = ipcEndpoint(filepath.Join(givenPath, "clef.ipc"), configDir)
listener, _, err := rpc.StartIPCEndpoint(ipcapiURL, rpcAPI) listener, _, err := rpc.StartIPCEndpoint(ipcapiURL, rpcAPI)
if err != nil { if err != nil {
@ -694,7 +636,7 @@ func signer(c *cli.Context) error {
}() }()
} }
if c.GlobalBool(testFlag.Name) { if c.Bool(testFlag.Name) {
log.Info("Performing UI test") log.Info("Performing UI test")
go testExternalUI(apiImpl) go testExternalUI(apiImpl)
} }
@ -720,7 +662,7 @@ func signer(c *cli.Context) error {
// persistence requirements. // persistence requirements.
func DefaultConfigDir() string { func DefaultConfigDir() string {
// Try to place the data folder in the user's home dir // Try to place the data folder in the user's home dir
home := utils.HomeDir() home := flags.HomeDir()
if home != "" { if home != "" {
if runtime.GOOS == "darwin" { if runtime.GOOS == "darwin" {
return filepath.Join(home, "Library", "Signer") return filepath.Join(home, "Library", "Signer")
@ -740,10 +682,10 @@ func DefaultConfigDir() string {
func readMasterKey(ctx *cli.Context, ui core.UIClientAPI) ([]byte, error) { func readMasterKey(ctx *cli.Context, ui core.UIClientAPI) ([]byte, error) {
var ( var (
file string file string
configDir = ctx.GlobalString(configdirFlag.Name) configDir = ctx.String(configdirFlag.Name)
) )
if ctx.GlobalIsSet(signerSecretFlag.Name) { if ctx.IsSet(signerSecretFlag.Name) {
file = ctx.GlobalString(signerSecretFlag.Name) file = ctx.String(signerSecretFlag.Name)
} else { } else {
file = filepath.Join(configDir, "masterseed.json") file = filepath.Join(configDir, "masterseed.json")
} }
@ -817,7 +759,6 @@ func confirm(text string) bool {
} }
func testExternalUI(api *core.SignerAPI) { func testExternalUI(api *core.SignerAPI) {
ctx := context.WithValue(context.Background(), "remote", "clef binary") ctx := context.WithValue(context.Background(), "remote", "clef binary")
ctx = context.WithValue(ctx, "scheme", "in-proc") ctx = context.WithValue(ctx, "scheme", "in-proc")
ctx = context.WithValue(ctx, "local", "main") ctx = context.WithValue(ctx, "local", "main")
@ -917,7 +858,6 @@ func testExternalUI(api *core.SignerAPI) {
expectDeny("signdata - text", err) expectDeny("signdata - text", err)
} }
{ // Sign transaction { // Sign transaction
api.UI.ShowInfo("Please reject next transaction") api.UI.ShowInfo("Please reject next transaction")
time.Sleep(delay) time.Sleep(delay)
data := hexutil.Bytes([]byte{}) data := hexutil.Bytes([]byte{})
@ -960,7 +900,6 @@ func testExternalUI(api *core.SignerAPI) {
} }
result := fmt.Sprintf("Tests completed. %d errors:\n%s\n", len(errs), strings.Join(errs, "\n")) result := fmt.Sprintf("Tests completed. %d errors:\n%s\n", len(errs), strings.Join(errs, "\n"))
api.UI.ShowInfo(result) api.UI.ShowInfo(result)
} }
type encryptedSeedStorage struct { type encryptedSeedStorage struct {
@ -996,8 +935,7 @@ func decryptSeed(keyjson []byte, auth string) ([]byte, error) {
} }
// GenDoc outputs examples of all structures used in json-rpc communication // GenDoc outputs examples of all structures used in json-rpc communication
func GenDoc(ctx *cli.Context) { func GenDoc(ctx *cli.Context) error {
var ( var (
a = common.HexToAddress("0xdeadbeef000000000000000000000000deadbeef") a = common.HexToAddress("0xdeadbeef000000000000000000000000deadbeef")
b = common.HexToAddress("0x1111111122222222222233333333334444444444") b = common.HexToAddress("0x1111111122222222222233333333334444444444")
@ -1107,7 +1045,6 @@ func GenDoc(ctx *cli.Context) {
var tx types.Transaction var tx types.Transaction
tx.UnmarshalBinary(rlpdata) tx.UnmarshalBinary(rlpdata)
add("OnApproved - SignTransactionResult", desc, &ethapi.SignTransactionResult{Raw: rlpdata, Tx: &tx}) add("OnApproved - SignTransactionResult", desc, &ethapi.SignTransactionResult{Raw: rlpdata, Tx: &tx})
} }
{ // User input { // User input
add("UserInputRequest", "Sent when clef needs the user to provide data. If 'password' is true, the input field should be treated accordingly (echo-free)", add("UserInputRequest", "Sent when clef needs the user to provide data. If 'password' is true, the input field should be treated accordingly (echo-free)",
@ -1146,4 +1083,5 @@ These data types are defined in the channel between clef and the UI`)
for _, elem := range output { for _, elem := range output {
fmt.Println(elem) fmt.Println(elem)
} }
return nil
} }


@ -25,17 +25,18 @@ import (
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test" "github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
discv4Command = cli.Command{ discv4Command = &cli.Command{
Name: "discv4", Name: "discv4",
Usage: "Node Discovery v4 tools", Usage: "Node Discovery v4 tools",
Subcommands: []cli.Command{ Subcommands: []*cli.Command{
discv4PingCommand, discv4PingCommand,
discv4RequestRecordCommand, discv4RequestRecordCommand,
discv4ResolveCommand, discv4ResolveCommand,
@ -44,39 +45,41 @@ var (
discv4TestCommand, discv4TestCommand,
}, },
} }
discv4PingCommand = cli.Command{ discv4PingCommand = &cli.Command{
Name: "ping", Name: "ping",
Usage: "Sends ping to a node", Usage: "Sends ping to a node",
Action: discv4Ping, Action: discv4Ping,
ArgsUsage: "<node>", ArgsUsage: "<node>",
Flags: v4NodeFlags,
} }
discv4RequestRecordCommand = cli.Command{ discv4RequestRecordCommand = &cli.Command{
Name: "requestenr", Name: "requestenr",
Usage: "Requests a node record using EIP-868 enrRequest", Usage: "Requests a node record using EIP-868 enrRequest",
Action: discv4RequestRecord, Action: discv4RequestRecord,
ArgsUsage: "<node>", ArgsUsage: "<node>",
Flags: v4NodeFlags,
} }
discv4ResolveCommand = cli.Command{ discv4ResolveCommand = &cli.Command{
Name: "resolve", Name: "resolve",
Usage: "Finds a node in the DHT", Usage: "Finds a node in the DHT",
Action: discv4Resolve, Action: discv4Resolve,
ArgsUsage: "<node>", ArgsUsage: "<node>",
Flags: []cli.Flag{bootnodesFlag}, Flags: v4NodeFlags,
} }
discv4ResolveJSONCommand = cli.Command{ discv4ResolveJSONCommand = &cli.Command{
Name: "resolve-json", Name: "resolve-json",
Usage: "Re-resolves nodes in a nodes.json file", Usage: "Re-resolves nodes in a nodes.json file",
Action: discv4ResolveJSON, Action: discv4ResolveJSON,
Flags: []cli.Flag{bootnodesFlag}, Flags: v4NodeFlags,
ArgsUsage: "<nodes.json file>", ArgsUsage: "<nodes.json file>",
} }
discv4CrawlCommand = cli.Command{ discv4CrawlCommand = &cli.Command{
Name: "crawl", Name: "crawl",
Usage: "Updates a nodes.json file with random nodes found in the DHT", Usage: "Updates a nodes.json file with random nodes found in the DHT",
Action: discv4Crawl, Action: discv4Crawl,
Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag}, Flags: flags.Merge(v4NodeFlags, []cli.Flag{crawlTimeoutFlag}),
} }
discv4TestCommand = cli.Command{ discv4TestCommand = &cli.Command{
Name: "test", Name: "test",
Usage: "Runs tests against a node", Usage: "Runs tests against a node",
Action: discv4Test, Action: discv4Test,
@ -91,34 +94,41 @@ var (
) )
var ( var (
bootnodesFlag = cli.StringFlag{ bootnodesFlag = &cli.StringFlag{
Name: "bootnodes", Name: "bootnodes",
Usage: "Comma separated nodes used for bootstrapping", Usage: "Comma separated nodes used for bootstrapping",
} }
nodekeyFlag = cli.StringFlag{ nodekeyFlag = &cli.StringFlag{
Name: "nodekey", Name: "nodekey",
Usage: "Hex-encoded node key", Usage: "Hex-encoded node key",
} }
nodedbFlag = cli.StringFlag{ nodedbFlag = &cli.StringFlag{
Name: "nodedb", Name: "nodedb",
Usage: "Nodes database location", Usage: "Nodes database location",
} }
listenAddrFlag = cli.StringFlag{ listenAddrFlag = &cli.StringFlag{
Name: "addr", Name: "addr",
Usage: "Listening address", Usage: "Listening address",
} }
crawlTimeoutFlag = cli.DurationFlag{ crawlTimeoutFlag = &cli.DurationFlag{
Name: "timeout", Name: "timeout",
Usage: "Time limit for the crawl.", Usage: "Time limit for the crawl.",
Value: 30 * time.Minute, Value: 30 * time.Minute,
} }
remoteEnodeFlag = cli.StringFlag{ remoteEnodeFlag = &cli.StringFlag{
Name: "remote", Name: "remote",
Usage: "Enode of the remote node under test", Usage: "Enode of the remote node under test",
EnvVar: "REMOTE_ENODE", EnvVars: []string{"REMOTE_ENODE"},
} }
) )
var v4NodeFlags = []cli.Flag{
bootnodesFlag,
nodekeyFlag,
nodedbFlag,
listenAddrFlag,
}
func discv4Ping(ctx *cli.Context) error { func discv4Ping(ctx *cli.Context) error {
n := getNodeArg(ctx) n := getNodeArg(ctx)
disc := startV4(ctx) disc := startV4(ctx)
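
The changes above follow the urfave/cli v1-to-v2 migration pattern that repeats throughout this commit: commands and flags become pointer values (&cli.Command, []*cli.Command, &cli.StringFlag), EnvVar becomes the EnvVars string slice, and the per-command flag lists are collected into the shared v4NodeFlags slice (combined with extra flags via the internal flags.Merge helper). As a rough sketch of that v2 wiring, with made-up command and flag names rather than anything taken from this diff:

// Illustrative sketch only (not part of the diff): a minimal urfave/cli/v2
// command layout mirroring the pattern above. Command and flag names here
// are invented for the example.
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

var (
	// Shared flag list, analogous to v4NodeFlags above.
	commonFlags = []cli.Flag{
		&cli.StringFlag{
			Name:    "bootnodes",
			Usage:   "Comma separated nodes used for bootstrapping",
			EnvVars: []string{"EXAMPLE_BOOTNODES"}, // v2 uses EnvVars, not EnvVar
		},
	}

	pingCommand = &cli.Command{
		Name:  "ping",
		Usage: "Sends ping to a node",
		Flags: commonFlags,
		Action: func(ctx *cli.Context) error {
			fmt.Println("bootnodes:", ctx.String("bootnodes"))
			return nil
		},
	}
)

func main() {
	app := &cli.App{
		Name:     "example",
		Commands: []*cli.Command{pingCommand},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}

Keeping the shared flags in one slice, as v4NodeFlags does above, keeps the per-command Flags lists short and consistent across subcommands.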

View File

@ -23,14 +23,14 @@ import (
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v5test" "github.com/ethereum/go-ethereum/cmd/devp2p/internal/v5test"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
discv5Command = cli.Command{ discv5Command = &cli.Command{
Name: "discv5", Name: "discv5",
Usage: "Node Discovery v5 tools", Usage: "Node Discovery v5 tools",
Subcommands: []cli.Command{ Subcommands: []*cli.Command{
discv5PingCommand, discv5PingCommand,
discv5ResolveCommand, discv5ResolveCommand,
discv5CrawlCommand, discv5CrawlCommand,
@ -38,24 +38,24 @@ var (
discv5ListenCommand, discv5ListenCommand,
}, },
} }
discv5PingCommand = cli.Command{ discv5PingCommand = &cli.Command{
Name: "ping", Name: "ping",
Usage: "Sends ping to a node", Usage: "Sends ping to a node",
Action: discv5Ping, Action: discv5Ping,
} }
discv5ResolveCommand = cli.Command{ discv5ResolveCommand = &cli.Command{
Name: "resolve", Name: "resolve",
Usage: "Finds a node in the DHT", Usage: "Finds a node in the DHT",
Action: discv5Resolve, Action: discv5Resolve,
Flags: []cli.Flag{bootnodesFlag}, Flags: []cli.Flag{bootnodesFlag},
} }
discv5CrawlCommand = cli.Command{ discv5CrawlCommand = &cli.Command{
Name: "crawl", Name: "crawl",
Usage: "Updates a nodes.json file with random nodes found in the DHT", Usage: "Updates a nodes.json file with random nodes found in the DHT",
Action: discv5Crawl, Action: discv5Crawl,
Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag}, Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag},
} }
discv5TestCommand = cli.Command{ discv5TestCommand = &cli.Command{
Name: "test", Name: "test",
Usage: "Runs protocol tests against a node", Usage: "Runs protocol tests against a node",
Action: discv5Test, Action: discv5Test,
@ -66,7 +66,7 @@ var (
testListen2Flag, testListen2Flag,
}, },
} }
discv5ListenCommand = cli.Command{ discv5ListenCommand = &cli.Command{
Name: "listen", Name: "listen",
Usage: "Runs a node", Usage: "Runs a node",
Action: discv5Listen, Action: discv5Listen,

View File

@ -24,16 +24,16 @@ import (
"github.com/cloudflare/cloudflare-go" "github.com/cloudflare/cloudflare-go"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/ethereum/go-ethereum/p2p/dnsdisc"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
cloudflareTokenFlag = cli.StringFlag{ cloudflareTokenFlag = &cli.StringFlag{
Name: "token", Name: "token",
Usage: "CloudFlare API token", Usage: "CloudFlare API token",
EnvVar: "CLOUDFLARE_API_TOKEN", EnvVars: []string{"CLOUDFLARE_API_TOKEN"},
} }
cloudflareZoneIDFlag = cli.StringFlag{ cloudflareZoneIDFlag = &cli.StringFlag{
Name: "zoneid", Name: "zoneid",
Usage: "CloudFlare Zone ID (optional)", Usage: "CloudFlare Zone ID (optional)",
} }
@ -134,7 +134,6 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
ttl := rootTTL ttl := rootTTL
if path != name { if path != name {
ttl = treeNodeTTLCloudflare // Max TTL permitted by Cloudflare ttl = treeNodeTTLCloudflare // Max TTL permitted by Cloudflare
} }
record := cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl} record := cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl}
_, err = c.CreateDNSRecord(context.Background(), c.zoneID, record) _, err = c.CreateDNSRecord(context.Background(), c.zoneID, record)

View File

@ -32,7 +32,7 @@ import (
"github.com/aws/aws-sdk-go-v2/service/route53/types" "github.com/aws/aws-sdk-go-v2/service/route53/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/ethereum/go-ethereum/p2p/dnsdisc"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
const ( const (
@ -45,21 +45,21 @@ const (
) )
var ( var (
route53AccessKeyFlag = cli.StringFlag{ route53AccessKeyFlag = &cli.StringFlag{
Name: "access-key-id", Name: "access-key-id",
Usage: "AWS Access Key ID", Usage: "AWS Access Key ID",
EnvVar: "AWS_ACCESS_KEY_ID", EnvVars: []string{"AWS_ACCESS_KEY_ID"},
} }
route53AccessSecretFlag = cli.StringFlag{ route53AccessSecretFlag = &cli.StringFlag{
Name: "access-key-secret", Name: "access-key-secret",
Usage: "AWS Access Key Secret", Usage: "AWS Access Key Secret",
EnvVar: "AWS_SECRET_ACCESS_KEY", EnvVars: []string{"AWS_SECRET_ACCESS_KEY"},
} }
route53ZoneIDFlag = cli.StringFlag{ route53ZoneIDFlag = &cli.StringFlag{
Name: "zone-id", Name: "zone-id",
Usage: "Route53 Zone ID", Usage: "Route53 Zone ID",
} }
route53RegionFlag = cli.StringFlag{ route53RegionFlag = &cli.StringFlag{
Name: "aws-region", Name: "aws-region",
Usage: "AWS Region", Usage: "AWS Region",
Value: "eu-central-1", Value: "eu-central-1",

View File

@ -29,14 +29,14 @@ import (
"github.com/ethereum/go-ethereum/console/prompt" "github.com/ethereum/go-ethereum/console/prompt"
"github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/ethereum/go-ethereum/p2p/dnsdisc"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
dnsCommand = cli.Command{ dnsCommand = &cli.Command{
Name: "dns", Name: "dns",
Usage: "DNS Discovery Commands", Usage: "DNS Discovery Commands",
Subcommands: []cli.Command{ Subcommands: []*cli.Command{
dnsSyncCommand, dnsSyncCommand,
dnsSignCommand, dnsSignCommand,
dnsTXTCommand, dnsTXTCommand,
@ -45,34 +45,34 @@ var (
dnsRoute53NukeCommand, dnsRoute53NukeCommand,
}, },
} }
dnsSyncCommand = cli.Command{ dnsSyncCommand = &cli.Command{
Name: "sync", Name: "sync",
Usage: "Download a DNS discovery tree", Usage: "Download a DNS discovery tree",
ArgsUsage: "<url> [ <directory> ]", ArgsUsage: "<url> [ <directory> ]",
Action: dnsSync, Action: dnsSync,
Flags: []cli.Flag{dnsTimeoutFlag}, Flags: []cli.Flag{dnsTimeoutFlag},
} }
dnsSignCommand = cli.Command{ dnsSignCommand = &cli.Command{
Name: "sign", Name: "sign",
Usage: "Sign a DNS discovery tree", Usage: "Sign a DNS discovery tree",
ArgsUsage: "<tree-directory> <key-file>", ArgsUsage: "<tree-directory> <key-file>",
Action: dnsSign, Action: dnsSign,
Flags: []cli.Flag{dnsDomainFlag, dnsSeqFlag}, Flags: []cli.Flag{dnsDomainFlag, dnsSeqFlag},
} }
dnsTXTCommand = cli.Command{ dnsTXTCommand = &cli.Command{
Name: "to-txt", Name: "to-txt",
Usage: "Create a DNS TXT records for a discovery tree", Usage: "Create a DNS TXT records for a discovery tree",
ArgsUsage: "<tree-directory> <output-file>", ArgsUsage: "<tree-directory> <output-file>",
Action: dnsToTXT, Action: dnsToTXT,
} }
dnsCloudflareCommand = cli.Command{ dnsCloudflareCommand = &cli.Command{
Name: "to-cloudflare", Name: "to-cloudflare",
Usage: "Deploy DNS TXT records to CloudFlare", Usage: "Deploy DNS TXT records to CloudFlare",
ArgsUsage: "<tree-directory>", ArgsUsage: "<tree-directory>",
Action: dnsToCloudflare, Action: dnsToCloudflare,
Flags: []cli.Flag{cloudflareTokenFlag, cloudflareZoneIDFlag}, Flags: []cli.Flag{cloudflareTokenFlag, cloudflareZoneIDFlag},
} }
dnsRoute53Command = cli.Command{ dnsRoute53Command = &cli.Command{
Name: "to-route53", Name: "to-route53",
Usage: "Deploy DNS TXT records to Amazon Route53", Usage: "Deploy DNS TXT records to Amazon Route53",
ArgsUsage: "<tree-directory>", ArgsUsage: "<tree-directory>",
@ -84,7 +84,7 @@ var (
route53RegionFlag, route53RegionFlag,
}, },
} }
dnsRoute53NukeCommand = cli.Command{ dnsRoute53NukeCommand = &cli.Command{
Name: "nuke-route53", Name: "nuke-route53",
Usage: "Deletes DNS TXT records of a subdomain on Amazon Route53", Usage: "Deletes DNS TXT records of a subdomain on Amazon Route53",
ArgsUsage: "<domain>", ArgsUsage: "<domain>",
@ -99,15 +99,15 @@ var (
) )
var ( var (
dnsTimeoutFlag = cli.DurationFlag{ dnsTimeoutFlag = &cli.DurationFlag{
Name: "timeout", Name: "timeout",
Usage: "Timeout for DNS lookups", Usage: "Timeout for DNS lookups",
} }
dnsDomainFlag = cli.StringFlag{ dnsDomainFlag = &cli.StringFlag{
Name: "domain", Name: "domain",
Usage: "Domain name of the tree", Usage: "Domain name of the tree",
} }
dnsSeqFlag = cli.UintFlag{ dnsSeqFlag = &cli.UintFlag{
Name: "seq", Name: "seq",
Usage: "New sequence number of the tree", Usage: "New sequence number of the tree",
} }

View File

@ -30,12 +30,12 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var fileFlag = cli.StringFlag{Name: "file"} var fileFlag = &cli.StringFlag{Name: "file"}
var enrdumpCommand = cli.Command{ var enrdumpCommand = &cli.Command{
Name: "enrdump", Name: "enrdump",
Usage: "Pretty-prints node records", Usage: "Pretty-prints node records",
Action: enrdump, Action: enrdump,
@ -62,7 +62,7 @@ func enrdump(ctx *cli.Context) error {
} }
source = string(b) source = string(b)
} else if ctx.NArg() == 1 { } else if ctx.NArg() == 1 {
source = ctx.Args()[0] source = ctx.Args().First()
} else { } else {
return fmt.Errorf("need record as argument") return fmt.Errorf("need record as argument")
} }

View File

@ -47,7 +47,7 @@ func (c *Chain) Len() int {
// TD calculates the total difficulty of the chain at the // TD calculates the total difficulty of the chain at the
// chain head. // chain head.
func (c *Chain) TD() *big.Int { func (c *Chain) TD() *big.Int {
sum := big.NewInt(0) sum := new(big.Int)
for _, block := range c.blocks[:c.Len()] { for _, block := range c.blocks[:c.Len()] {
sum.Add(sum, block.Difficulty()) sum.Add(sum, block.Difficulty())
} }
@ -57,7 +57,7 @@ func (c *Chain) TD() *big.Int {
// TotalDifficultyAt calculates the total difficulty of the chain // TotalDifficultyAt calculates the total difficulty of the chain
// at the given block height. // at the given block height.
func (c *Chain) TotalDifficultyAt(height int) *big.Int { func (c *Chain) TotalDifficultyAt(height int) *big.Int {
sum := big.NewInt(0) sum := new(big.Int)
if height >= c.Len() { if height >= c.Len() {
return sum return sum
} }
@ -119,7 +119,6 @@ func (c *Chain) GetHeaders(req GetBlockHeaders) (BlockHeaders, error) {
for i := 1; i < int(req.Amount); i++ { for i := 1; i < int(req.Amount); i++ {
blockNumber -= (1 - req.Skip) blockNumber -= (1 - req.Skip)
headers[i] = c.blocks[blockNumber].Header() headers[i] = c.blocks[blockNumber].Header()
} }
return headers, nil return headers, nil
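
The only change to TD and TotalDifficultyAt is swapping big.NewInt(0) for new(big.Int); since the zero value of big.Int is already 0, both forms give an equivalent accumulator and the total-difficulty sum behaves the same. A tiny check of that equivalence:

// Illustrative only: big.NewInt(0) and new(big.Int) both start at zero,
// so either works as the accumulator for a running total-difficulty sum.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	diffs := []*big.Int{big.NewInt(2), big.NewInt(3), big.NewInt(5)}

	sum := new(big.Int) // zero value of big.Int is 0
	for _, d := range diffs {
		sum.Add(sum, d)
	}
	fmt.Println(sum) // 10
}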

View File

@ -104,6 +104,7 @@ func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
// Max bytes: 0. Expect to deliver one account. // Max bytes: 0. Expect to deliver one account.
{0, root, zero, ffHash, 1, firstKey, firstKey}, {0, root, zero, ffHash, 1, firstKey, firstKey},
} { } {
tc := tc
if err := s.snapGetAccountRange(t, &tc); err != nil { if err := s.snapGetAccountRange(t, &tc); err != nil {
t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\nfailed: %v", i, tc.root, tc.origin, tc.limit, tc.nBytes, err) t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\nfailed: %v", i, tc.root, tc.origin, tc.limit, tc.nBytes, err)
} }
@ -194,6 +195,7 @@ func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
expSlots: 2, expSlots: 2,
}, },
} { } {
tc := tc
if err := s.snapGetStorageRanges(t, &tc); err != nil { if err := s.snapGetStorageRanges(t, &tc); err != nil {
t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\n #accounts: %d\nfailed: %v", t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\n #accounts: %d\nfailed: %v",
i, tc.root, tc.origin, tc.limit, tc.nBytes, len(tc.accounts), err) i, tc.root, tc.origin, tc.limit, tc.nBytes, len(tc.accounts), err)
@ -291,6 +293,7 @@ func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
expHashes: 4, expHashes: 4,
}, },
} { } {
tc := tc
if err := s.snapGetByteCodes(t, &tc); err != nil { if err := s.snapGetByteCodes(t, &tc); err != nil {
t.Errorf("test %d \n bytes: %d\n #hashes: %d\nfailed: %v", i, tc.nBytes, len(tc.hashes), err) t.Errorf("test %d \n bytes: %d\n #hashes: %d\nfailed: %v", i, tc.nBytes, len(tc.hashes), err)
} }
@ -347,7 +350,6 @@ func hexToCompact(hex []byte) []byte {
// TestSnapTrieNodes various forms of GetTrieNodes requests. // TestSnapTrieNodes various forms of GetTrieNodes requests.
func (s *Suite) TestSnapTrieNodes(t *utesting.T) { func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
key := common.FromHex("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a") key := common.FromHex("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
// helper function to iterate the key, and generate the compact-encoded // helper function to iterate the key, and generate the compact-encoded
// trie paths along the way. // trie paths along the way.
@ -436,6 +438,7 @@ func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
}, },
}, },
} { } {
tc := tc
if err := s.snapGetTrieNodes(t, &tc); err != nil { if err := s.snapGetTrieNodes(t, &tc); err != nil {
t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err) t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err)
} }
@ -492,10 +495,10 @@ func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error {
} }
if len(hashes) > 0 { if len(hashes) > 0 {
if exp, got := tc.expFirst, res.Accounts[0].Hash; exp != got { if exp, got := tc.expFirst, res.Accounts[0].Hash; exp != got {
return fmt.Errorf("expected first account 0x%x, got 0x%x", exp, got) return fmt.Errorf("expected first account %#x, got %#x", exp, got)
} }
if exp, got := tc.expLast, res.Accounts[len(res.Accounts)-1].Hash; exp != got { if exp, got := tc.expLast, res.Accounts[len(res.Accounts)-1].Hash; exp != got {
return fmt.Errorf("expected last account 0x%x, got 0x%x", exp, got) return fmt.Errorf("expected last account %#x, got %#x", exp, got)
} }
} }
// Reconstruct a partial trie from the response and verify it // Reconstruct a partial trie from the response and verify it
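
The tc := tc lines added to these table-driven loops copy the loop variable before &tc is passed along; in Go versions before 1.22 a range loop reuses a single variable, so any pointer that outlives an iteration would otherwise observe whichever test case came last, and the copy also keeps loop-variable linters quiet. A small illustration of the aliasing pitfall (assuming a pre-1.22 toolchain):

// Illustrative only: why the `tc := tc` copy matters when &tc escapes the
// loop body (Go < 1.22 reuses one loop variable per range statement).
package main

import "fmt"

type testCase struct{ nBytes uint64 }

func main() {
	cases := []testCase{{100}, {200}, {300}}

	var bad, good []*testCase
	for _, tc := range cases {
		bad = append(bad, &tc) // all three pointers alias the same variable
	}
	for _, tc := range cases {
		tc := tc                 // fresh copy per iteration
		good = append(good, &tc) // each pointer refers to its own copy
	}

	fmt.Println(bad[0].nBytes, bad[1].nBytes, bad[2].nBytes)    // 300 300 300 (pre-1.22)
	fmt.Println(good[0].nBytes, good[1].nBytes, good[2].nBytes) // 100 200 300
}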

View File

@ -315,7 +315,6 @@ func (c *Conn) ReadSnap(id uint64) (Message, error) {
return nil, fmt.Errorf("could not rlp decode message: %v", err) return nil, fmt.Errorf("could not rlp decode message: %v", err)
} }
return snpMsg.(Message), nil return snpMsg.(Message), nil
} }
return nil, fmt.Errorf("request timed out") return nil, fmt.Errorf("request timed out")
} }

View File

@ -62,8 +62,6 @@ type conn struct {
log logger log logger
codec *v5wire.Codec codec *v5wire.Codec
lastRequest v5wire.Packet
lastChallenge *v5wire.Whoareyou
idCounter uint32 idCounter uint32
} }

View File

@ -22,25 +22,25 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
keyCommand = cli.Command{ keyCommand = &cli.Command{
Name: "key", Name: "key",
Usage: "Operations on node keys", Usage: "Operations on node keys",
Subcommands: []cli.Command{ Subcommands: []*cli.Command{
keyGenerateCommand, keyGenerateCommand,
keyToNodeCommand, keyToNodeCommand,
}, },
} }
keyGenerateCommand = cli.Command{ keyGenerateCommand = &cli.Command{
Name: "generate", Name: "generate",
Usage: "Generates node key files", Usage: "Generates node key files",
ArgsUsage: "keyfile", ArgsUsage: "keyfile",
Action: genkey, Action: genkey,
} }
keyToNodeCommand = cli.Command{ keyToNodeCommand = &cli.Command{
Name: "to-enode", Name: "to-enode",
Usage: "Creates an enode URL from a node key file", Usage: "Creates an enode URL from a node key file",
ArgsUsage: "keyfile", ArgsUsage: "keyfile",
@ -50,17 +50,17 @@ var (
) )
var ( var (
hostFlag = cli.StringFlag{ hostFlag = &cli.StringFlag{
Name: "ip", Name: "ip",
Usage: "IP address of the node", Usage: "IP address of the node",
Value: "127.0.0.1", Value: "127.0.0.1",
} }
tcpPortFlag = cli.IntFlag{ tcpPortFlag = &cli.IntFlag{
Name: "tcp", Name: "tcp",
Usage: "TCP port of the node", Usage: "TCP port of the node",
Value: 30303, Value: 30303,
} }
udpPortFlag = cli.IntFlag{ udpPortFlag = &cli.IntFlag{
Name: "udp", Name: "udp",
Usage: "UDP port of the node", Usage: "UDP port of the node",
Value: 30303, Value: 30303,

View File

@ -20,12 +20,12 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"sort"
"github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
@ -45,6 +45,7 @@ func init() {
// Set up the CLI app. // Set up the CLI app.
app.Flags = append(app.Flags, debug.Flags...) app.Flags = append(app.Flags, debug.Flags...)
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
flags.MigrateGlobalFlags(ctx)
return debug.Setup(ctx) return debug.Setup(ctx)
} }
app.After = func(ctx *cli.Context) error { app.After = func(ctx *cli.Context) error {
@ -56,7 +57,7 @@ func init() {
os.Exit(1) os.Exit(1)
} }
// Add subcommands. // Add subcommands.
app.Commands = []cli.Command{ app.Commands = []*cli.Command{
enrdumpCommand, enrdumpCommand,
keyCommand, keyCommand,
discv4Command, discv4Command,
@ -73,10 +74,17 @@ func main() {
// commandHasFlag returns true if the current command supports the given flag. // commandHasFlag returns true if the current command supports the given flag.
func commandHasFlag(ctx *cli.Context, flag cli.Flag) bool { func commandHasFlag(ctx *cli.Context, flag cli.Flag) bool {
flags := ctx.FlagNames() names := flag.Names()
sort.Strings(flags) set := make(map[string]struct{}, len(names))
i := sort.SearchStrings(flags, flag.GetName()) for _, name := range names {
return i != len(flags) && flags[i] == flag.GetName() set[name] = struct{}{}
}
for _, fn := range ctx.FlagNames() {
if _, ok := set[fn]; ok {
return true
}
}
return false
} }
// getNodeArg handles the common case of a single node descriptor argument. // getNodeArg handles the common case of a single node descriptor argument.
@ -84,7 +92,7 @@ func getNodeArg(ctx *cli.Context) *enode.Node {
if ctx.NArg() < 1 { if ctx.NArg() < 1 {
exit("missing node as command-line argument") exit("missing node as command-line argument")
} }
n, err := parseNode(ctx.Args()[0]) n, err := parseNode(ctx.Args().First())
if err != nil { if err != nil {
exit(err) exit(err)
} }
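
commandHasFlag is rewritten because cli/v2 drops flag.GetName(); a v2 flag instead reports every spelling it answers to (its name plus aliases) through Names(), so the new code builds a set of those names and tests the context's flag names for membership. A small sketch of that matching, with made-up flags:

// Illustrative only: set-based matching of a flag's Names() against a list
// of known flag names, as in the rewritten commandHasFlag. Names invented.
package main

import (
	"fmt"

	"github.com/urfave/cli/v2"
)

func hasFlag(known []string, flag cli.Flag) bool {
	set := make(map[string]struct{}, len(flag.Names()))
	for _, name := range flag.Names() {
		set[name] = struct{}{}
	}
	for _, name := range known {
		if _, ok := set[name]; ok {
			return true
		}
	}
	return false
}

func main() {
	verbosity := &cli.IntFlag{Name: "verbosity", Aliases: []string{"v"}}
	fmt.Println(hasFlag([]string{"v", "vmodule"}, verbosity)) // true
	fmt.Println(hasFlag([]string{"metrics"}, verbosity))      // false
}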

View File

@ -29,25 +29,25 @@ import (
"github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
nodesetCommand = cli.Command{ nodesetCommand = &cli.Command{
Name: "nodeset", Name: "nodeset",
Usage: "Node set tools", Usage: "Node set tools",
Subcommands: []cli.Command{ Subcommands: []*cli.Command{
nodesetInfoCommand, nodesetInfoCommand,
nodesetFilterCommand, nodesetFilterCommand,
}, },
} }
nodesetInfoCommand = cli.Command{ nodesetInfoCommand = &cli.Command{
Name: "info", Name: "info",
Usage: "Shows statistics about a node set", Usage: "Shows statistics about a node set",
Action: nodesetInfo, Action: nodesetInfo,
ArgsUsage: "<nodes.json>", ArgsUsage: "<nodes.json>",
} }
nodesetFilterCommand = cli.Command{ nodesetFilterCommand = &cli.Command{
Name: "filter", Name: "filter",
Usage: "Filters a node set", Usage: "Filters a node set",
Action: nodesetFilter, Action: nodesetFilter,

View File

@ -26,25 +26,25 @@ import (
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/rlpx" "github.com/ethereum/go-ethereum/p2p/rlpx"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
rlpxCommand = cli.Command{ rlpxCommand = &cli.Command{
Name: "rlpx", Name: "rlpx",
Usage: "RLPx Commands", Usage: "RLPx Commands",
Subcommands: []cli.Command{ Subcommands: []*cli.Command{
rlpxPingCommand, rlpxPingCommand,
rlpxEthTestCommand, rlpxEthTestCommand,
rlpxSnapTestCommand, rlpxSnapTestCommand,
}, },
} }
rlpxPingCommand = cli.Command{ rlpxPingCommand = &cli.Command{
Name: "ping", Name: "ping",
Usage: "ping <node>", Usage: "ping <node>",
Action: rlpxPing, Action: rlpxPing,
} }
rlpxEthTestCommand = cli.Command{ rlpxEthTestCommand = &cli.Command{
Name: "eth-test", Name: "eth-test",
Usage: "Runs tests against a node", Usage: "Runs tests against a node",
ArgsUsage: "<node> <chain.rlp> <genesis.json>", ArgsUsage: "<node> <chain.rlp> <genesis.json>",
@ -54,7 +54,7 @@ var (
testTAPFlag, testTAPFlag,
}, },
} }
rlpxSnapTestCommand = cli.Command{ rlpxSnapTestCommand = &cli.Command{
Name: "snap-test", Name: "snap-test",
Usage: "Runs tests against a node", Usage: "Runs tests against a node",
ArgsUsage: "<node> <chain.rlp> <genesis.json>", ArgsUsage: "<node> <chain.rlp> <genesis.json>",
@ -106,7 +106,7 @@ func rlpxEthTest(ctx *cli.Context) error {
if ctx.NArg() < 3 { if ctx.NArg() < 3 {
exit("missing path to chain.rlp as command-line argument") exit("missing path to chain.rlp as command-line argument")
} }
suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2]) suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args().Get(1), ctx.Args().Get(2))
if err != nil { if err != nil {
exit(err) exit(err)
} }
@ -123,7 +123,7 @@ func rlpxSnapTest(ctx *cli.Context) error {
if ctx.NArg() < 3 { if ctx.NArg() < 3 {
exit("missing path to chain.rlp as command-line argument") exit("missing path to chain.rlp as command-line argument")
} }
suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2]) suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args().Get(1), ctx.Args().Get(2))
if err != nil { if err != nil {
exit(err) exit(err)
} }

View File

@ -22,25 +22,25 @@ import (
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test" "github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test"
"github.com/ethereum/go-ethereum/internal/utesting" "github.com/ethereum/go-ethereum/internal/utesting"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
testPatternFlag = cli.StringFlag{ testPatternFlag = &cli.StringFlag{
Name: "run", Name: "run",
Usage: "Pattern of test suite(s) to run", Usage: "Pattern of test suite(s) to run",
} }
testTAPFlag = cli.BoolFlag{ testTAPFlag = &cli.BoolFlag{
Name: "tap", Name: "tap",
Usage: "Output TAP", Usage: "Output TAP",
} }
// These two are specific to the discovery tests. // These two are specific to the discovery tests.
testListen1Flag = cli.StringFlag{ testListen1Flag = &cli.StringFlag{
Name: "listen1", Name: "listen1",
Usage: "IP address of the first tester", Usage: "IP address of the first tester",
Value: v4test.Listen1, Value: v4test.Listen1,
} }
testListen2Flag = cli.StringFlag{ testListen2Flag = &cli.StringFlag{
Name: "listen2", Name: "listen2",
Usage: "IP address of the second tester", Usage: "IP address of the second tester",
Value: v4test.Listen2, Value: v4test.Listen2,
@ -53,7 +53,7 @@ func runTests(ctx *cli.Context, tests []utesting.Test) error {
tests = utesting.MatchTests(tests, ctx.String(testPatternFlag.Name)) tests = utesting.MatchTests(tests, ctx.String(testPatternFlag.Name))
} }
// Disable logging unless explicitly enabled. // Disable logging unless explicitly enabled.
if !ctx.GlobalIsSet("verbosity") && !ctx.GlobalIsSet("vmodule") { if !ctx.IsSet("verbosity") && !ctx.IsSet("vmodule") {
log.Root().SetHandler(log.DiscardHandler()) log.Root().SetHandler(log.DiscardHandler())
} }
// Run the tests. // Run the tests.

View File

@ -23,15 +23,15 @@ import (
"github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var newPassphraseFlag = cli.StringFlag{ var newPassphraseFlag = &cli.StringFlag{
Name: "newpasswordfile", Name: "newpasswordfile",
Usage: "the file that contains the new password for the keyfile", Usage: "the file that contains the new password for the keyfile",
} }
var commandChangePassphrase = cli.Command{ var commandChangePassphrase = &cli.Command{
Name: "changepassword", Name: "changepassword",
Usage: "change the password on a keyfile", Usage: "change the password on a keyfile",
ArgsUsage: "<keyfile>", ArgsUsage: "<keyfile>",

View File

@ -26,7 +26,7 @@ import (
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/google/uuid" "github.com/google/uuid"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
type outputGenerate struct { type outputGenerate struct {
@ -35,17 +35,17 @@ type outputGenerate struct {
} }
var ( var (
privateKeyFlag = cli.StringFlag{ privateKeyFlag = &cli.StringFlag{
Name: "privatekey", Name: "privatekey",
Usage: "file containing a raw private key to encrypt", Usage: "file containing a raw private key to encrypt",
} }
lightKDFFlag = cli.BoolFlag{ lightKDFFlag = &cli.BoolFlag{
Name: "lightkdf", Name: "lightkdf",
Usage: "use less secure scrypt parameters", Usage: "use less secure scrypt parameters",
} }
) )
var commandGenerate = cli.Command{ var commandGenerate = &cli.Command{
Name: "generate", Name: "generate",
Usage: "generate new keyfile", Usage: "generate new keyfile",
ArgsUsage: "[ <keyfile> ]", ArgsUsage: "[ <keyfile> ]",

View File

@ -24,7 +24,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
type outputInspect struct { type outputInspect struct {
@ -34,13 +34,13 @@ type outputInspect struct {
} }
var ( var (
privateFlag = cli.BoolFlag{ privateFlag = &cli.BoolFlag{
Name: "private", Name: "private",
Usage: "include the private key in the output", Usage: "include the private key in the output",
} }
) )
var commandInspect = cli.Command{ var commandInspect = &cli.Command{
Name: "inspect", Name: "inspect",
Usage: "inspect a keyfile", Usage: "inspect a keyfile",
ArgsUsage: "<keyfile>", ArgsUsage: "<keyfile>",

View File

@ -21,7 +21,7 @@ import (
"os" "os"
"github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/internal/flags"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
const ( const (
@ -36,23 +36,22 @@ var app *cli.App
func init() { func init() {
app = flags.NewApp(gitCommit, gitDate, "an Ethereum key manager") app = flags.NewApp(gitCommit, gitDate, "an Ethereum key manager")
app.Commands = []cli.Command{ app.Commands = []*cli.Command{
commandGenerate, commandGenerate,
commandInspect, commandInspect,
commandChangePassphrase, commandChangePassphrase,
commandSignMessage, commandSignMessage,
commandVerifyMessage, commandVerifyMessage,
} }
cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate
} }
// Commonly used command line flags. // Commonly used command line flags.
var ( var (
passphraseFlag = cli.StringFlag{ passphraseFlag = &cli.StringFlag{
Name: "passwordfile", Name: "passwordfile",
Usage: "the file that contains the password for the keyfile", Usage: "the file that contains the password for the keyfile",
} }
jsonFlag = cli.BoolFlag{ jsonFlag = &cli.BoolFlag{
Name: "json", Name: "json",
Usage: "output JSON instead of human-readable format", Usage: "output JSON instead of human-readable format",
} }

View File

@ -21,23 +21,24 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
type outputSign struct { type outputSign struct {
Signature string Signature string
} }
var msgfileFlag = cli.StringFlag{ var msgfileFlag = &cli.StringFlag{
Name: "msgfile", Name: "msgfile",
Usage: "file containing the message to sign/verify", Usage: "file containing the message to sign/verify",
} }
var commandSignMessage = cli.Command{ var commandSignMessage = &cli.Command{
Name: "signmessage", Name: "signmessage",
Usage: "sign a message", Usage: "sign a message",
ArgsUsage: "<keyfile> <message>", ArgsUsage: "<keyfile> <message>",
@ -68,7 +69,7 @@ To sign a message contained in a file, use the --msgfile flag.
utils.Fatalf("Error decrypting key: %v", err) utils.Fatalf("Error decrypting key: %v", err)
} }
signature, err := crypto.Sign(signHash(message), key.PrivateKey) signature, err := crypto.Sign(accounts.TextHash(message), key.PrivateKey)
if err != nil { if err != nil {
utils.Fatalf("Failed to sign message: %v", err) utils.Fatalf("Failed to sign message: %v", err)
} }
@ -88,7 +89,7 @@ type outputVerify struct {
RecoveredPublicKey string RecoveredPublicKey string
} }
var commandVerifyMessage = cli.Command{ var commandVerifyMessage = &cli.Command{
Name: "verifymessage", Name: "verifymessage",
Usage: "verify the signature of a signed message", Usage: "verify the signature of a signed message",
ArgsUsage: "<address> <signature> <message>", ArgsUsage: "<address> <signature> <message>",
@ -113,7 +114,7 @@ It is possible to refer to a file containing the message.`,
utils.Fatalf("Signature encoding is not hexadecimal: %v", err) utils.Fatalf("Signature encoding is not hexadecimal: %v", err)
} }
recoveredPubkey, err := crypto.SigToPub(signHash(message), signature) recoveredPubkey, err := crypto.SigToPub(accounts.TextHash(message), signature)
if err != nil || recoveredPubkey == nil { if err != nil || recoveredPubkey == nil {
utils.Fatalf("Signature verification failed: %v", err) utils.Fatalf("Signature verification failed: %v", err)
} }
@ -143,7 +144,7 @@ It is possible to refer to a file containing the message.`,
func getMessage(ctx *cli.Context, msgarg int) []byte { func getMessage(ctx *cli.Context, msgarg int) []byte {
if file := ctx.String(msgfileFlag.Name); file != "" { if file := ctx.String(msgfileFlag.Name); file != "" {
if len(ctx.Args()) > msgarg { if ctx.NArg() > msgarg {
utils.Fatalf("Can't use --msgfile and message argument at the same time.") utils.Fatalf("Can't use --msgfile and message argument at the same time.")
} }
msg, err := os.ReadFile(file) msg, err := os.ReadFile(file)
@ -151,9 +152,9 @@ func getMessage(ctx *cli.Context, msgarg int) []byte {
utils.Fatalf("Can't read message file: %v", err) utils.Fatalf("Can't read message file: %v", err)
} }
return msg return msg
} else if len(ctx.Args()) == msgarg+1 { } else if ctx.NArg() == msgarg+1 {
return []byte(ctx.Args().Get(msgarg)) return []byte(ctx.Args().Get(msgarg))
} }
utils.Fatalf("Invalid number of arguments: want %d, got %d", msgarg+1, len(ctx.Args())) utils.Fatalf("Invalid number of arguments: want %d, got %d", msgarg+1, ctx.NArg())
return nil return nil
} }
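
getMessage switches from len(ctx.Args()) and index-style access to ctx.NArg() and ctx.Args().Get(...), because in cli/v2 Args() returns the cli.Args interface rather than a string slice. A minimal sketch of the v2 argument accessors:

// Illustrative only: in cli/v2, ctx.Args() is the cli.Args interface rather
// than a []string, so argument access goes through NArg/First/Get.
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Action: func(ctx *cli.Context) error {
			// v1: len(ctx.Args()), ctx.Args()[0]
			// v2:
			fmt.Println("argc:", ctx.NArg())
			if ctx.NArg() > 0 {
				fmt.Println("first:", ctx.Args().First())
				fmt.Println("second:", ctx.Args().Get(1)) // "" if absent
			}
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}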

View File

@ -23,8 +23,7 @@ import (
"strings" "strings"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/crypto" "github.com/urfave/cli/v2"
"gopkg.in/urfave/cli.v1"
) )
// getPassphrase obtains a passphrase given by the user. It first checks the // getPassphrase obtains a passphrase given by the user. It first checks the
@ -46,18 +45,6 @@ func getPassphrase(ctx *cli.Context, confirmation bool) string {
return utils.GetPassPhrase("", confirmation) return utils.GetPassPhrase("", confirmation)
} }
// signHash is a helper function that calculates a hash for the given message
// that can be safely used to calculate a signature from.
//
// The hash is calculated as
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
//
// This gives context to the signed message and prevents signing of transactions.
func signHash(data []byte) []byte {
msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
return crypto.Keccak256([]byte(msg))
}
// mustPrintJSON prints the JSON encoding of the given object and // mustPrintJSON prints the JSON encoding of the given object and
// exits the program with an error message when the marshaling fails. // exits the program with an error message when the marshaling fails.
func mustPrintJSON(jsonObject interface{}) { func mustPrintJSON(jsonObject interface{}) {
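
The deleted signHash helper computed the EIP-191 personal-message digest, keccak256("\x19Ethereum Signed Message:\n" + len(message) + message); signmessage and verifymessage now rely on accounts.TextHash, which is expected to produce the same digest. A short sketch comparing the two, assuming crypto.Keccak256 and accounts.TextHash behave as described:

// Illustrative only: the prefix hash the removed signHash computed, compared
// against accounts.TextHash (assumed to implement the same EIP-191 scheme).
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	msg := []byte("hello")

	prefixed := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(msg), msg)
	manual := crypto.Keccak256([]byte(prefixed))

	fmt.Printf("manual:   %x\n", manual)
	fmt.Printf("TextHash: %x\n", accounts.TextHash(msg))
	fmt.Println("equal:", bytes.Equal(manual, accounts.TextHash(msg)))
}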

View File

@ -23,10 +23,10 @@ import (
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler" "github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var compileCommand = cli.Command{ var compileCommand = &cli.Command{
Action: compileCmd, Action: compileCmd,
Name: "compile", Name: "compile",
Usage: "compiles easm source to evm binary", Usage: "compiles easm source to evm binary",
@ -34,7 +34,7 @@ var compileCommand = cli.Command{
} }
func compileCmd(ctx *cli.Context) error { func compileCmd(ctx *cli.Context) error {
debug := ctx.GlobalBool(DebugFlag.Name) debug := ctx.Bool(DebugFlag.Name)
if len(ctx.Args().First()) == 0 { if len(ctx.Args().First()) == 0 {
return errors.New("filename required") return errors.New("filename required")

View File

@ -23,10 +23,10 @@ import (
"strings" "strings"
"github.com/ethereum/go-ethereum/core/asm" "github.com/ethereum/go-ethereum/core/asm"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var disasmCommand = cli.Command{ var disasmCommand = &cli.Command{
Action: disasmCmd, Action: disasmCmd,
Name: "disasm", Name: "disasm",
Usage: "disassembles evm binary", Usage: "disassembles evm binary",
@ -43,8 +43,8 @@ func disasmCmd(ctx *cli.Context) error {
return err return err
} }
in = string(input) in = string(input)
case ctx.GlobalIsSet(InputFlag.Name): case ctx.IsSet(InputFlag.Name):
in = ctx.GlobalString(InputFlag.Name) in = ctx.String(InputFlag.Name)
default: default:
return errors.New("missing filename or --input value") return errors.New("missing filename or --input value")
} }

View File

@ -33,7 +33,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
//go:generate go run github.com/fjl/gencodec -type header -field-override headerMarshaling -out gen_header.go //go:generate go run github.com/fjl/gencodec -type header -field-override headerMarshaling -out gen_header.go

View File

@ -100,7 +100,6 @@ type rejectedTx struct {
func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
txs types.Transactions, miningReward int64, txs types.Transactions, miningReward int64,
getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error)) (*state.StateDB, *ExecutionResult, error) { getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error)) (*state.StateDB, *ExecutionResult, error) {
// Capture errors for BLOCKHASH operation, if we haven't been supplied the // Capture errors for BLOCKHASH operation, if we haven't been supplied the
// required blockhashes // required blockhashes
var hashError error var hashError error
@ -241,7 +240,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
minerReward.Add(minerReward, perOmmer) minerReward.Add(minerReward, perOmmer)
// Add (8-delta)/8 // Add (8-delta)/8
reward := big.NewInt(8) reward := big.NewInt(8)
reward.Sub(reward, big.NewInt(0).SetUint64(ommer.Delta)) reward.Sub(reward, new(big.Int).SetUint64(ommer.Delta))
reward.Mul(reward, blockReward) reward.Mul(reward, blockReward)
reward.Div(reward, big.NewInt(8)) reward.Div(reward, big.NewInt(8))
statedb.AddBalance(ommer.Address, reward) statedb.AddBalance(ommer.Address, reward)

View File

@ -22,45 +22,47 @@ import (
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/tests" "github.com/ethereum/go-ethereum/tests"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
TraceFlag = cli.BoolFlag{ TraceFlag = &cli.BoolFlag{
Name: "trace", Name: "trace",
Usage: "Output full trace logs to files <txhash>.jsonl", Usage: "Output full trace logs to files <txhash>.jsonl",
} }
TraceDisableMemoryFlag = cli.BoolTFlag{ TraceDisableMemoryFlag = &cli.BoolFlag{
Name: "trace.nomemory", Name: "trace.nomemory",
Value: true,
Usage: "Disable full memory dump in traces (deprecated)", Usage: "Disable full memory dump in traces (deprecated)",
} }
TraceEnableMemoryFlag = cli.BoolFlag{ TraceEnableMemoryFlag = &cli.BoolFlag{
Name: "trace.memory", Name: "trace.memory",
Usage: "Enable full memory dump in traces", Usage: "Enable full memory dump in traces",
} }
TraceDisableStackFlag = cli.BoolFlag{ TraceDisableStackFlag = &cli.BoolFlag{
Name: "trace.nostack", Name: "trace.nostack",
Usage: "Disable stack output in traces", Usage: "Disable stack output in traces",
} }
TraceDisableReturnDataFlag = cli.BoolTFlag{ TraceDisableReturnDataFlag = &cli.BoolFlag{
Name: "trace.noreturndata", Name: "trace.noreturndata",
Value: true,
Usage: "Disable return data output in traces (deprecated)", Usage: "Disable return data output in traces (deprecated)",
} }
TraceEnableReturnDataFlag = cli.BoolFlag{ TraceEnableReturnDataFlag = &cli.BoolFlag{
Name: "trace.returndata", Name: "trace.returndata",
Usage: "Enable return data output in traces", Usage: "Enable return data output in traces",
} }
OutputBasedir = cli.StringFlag{ OutputBasedir = &cli.StringFlag{
Name: "output.basedir", Name: "output.basedir",
Usage: "Specifies where output files are placed. Will be created if it does not exist.", Usage: "Specifies where output files are placed. Will be created if it does not exist.",
Value: "", Value: "",
} }
OutputBodyFlag = cli.StringFlag{ OutputBodyFlag = &cli.StringFlag{
Name: "output.body", Name: "output.body",
Usage: "If set, the RLP of the transactions (block body) will be written to this file.", Usage: "If set, the RLP of the transactions (block body) will be written to this file.",
Value: "", Value: "",
} }
OutputAllocFlag = cli.StringFlag{ OutputAllocFlag = &cli.StringFlag{
Name: "output.alloc", Name: "output.alloc",
Usage: "Determines where to put the `alloc` of the post-state.\n" + Usage: "Determines where to put the `alloc` of the post-state.\n" +
"\t`stdout` - into the stdout output\n" + "\t`stdout` - into the stdout output\n" +
@ -68,7 +70,7 @@ var (
"\t<file> - into the file <file> ", "\t<file> - into the file <file> ",
Value: "alloc.json", Value: "alloc.json",
} }
OutputResultFlag = cli.StringFlag{ OutputResultFlag = &cli.StringFlag{
Name: "output.result", Name: "output.result",
Usage: "Determines where to put the `result` (stateroot, txroot etc) of the post-state.\n" + Usage: "Determines where to put the `result` (stateroot, txroot etc) of the post-state.\n" +
"\t`stdout` - into the stdout output\n" + "\t`stdout` - into the stdout output\n" +
@ -76,7 +78,7 @@ var (
"\t<file> - into the file <file> ", "\t<file> - into the file <file> ",
Value: "result.json", Value: "result.json",
} }
OutputBlockFlag = cli.StringFlag{ OutputBlockFlag = &cli.StringFlag{
Name: "output.block", Name: "output.block",
Usage: "Determines where to put the `block` after building.\n" + Usage: "Determines where to put the `block` after building.\n" +
"\t`stdout` - into the stdout output\n" + "\t`stdout` - into the stdout output\n" +
@ -84,65 +86,65 @@ var (
"\t<file> - into the file <file> ", "\t<file> - into the file <file> ",
Value: "block.json", Value: "block.json",
} }
InputAllocFlag = cli.StringFlag{ InputAllocFlag = &cli.StringFlag{
Name: "input.alloc", Name: "input.alloc",
Usage: "`stdin` or file name of where to find the prestate alloc to use.", Usage: "`stdin` or file name of where to find the prestate alloc to use.",
Value: "alloc.json", Value: "alloc.json",
} }
InputEnvFlag = cli.StringFlag{ InputEnvFlag = &cli.StringFlag{
Name: "input.env", Name: "input.env",
Usage: "`stdin` or file name of where to find the prestate env to use.", Usage: "`stdin` or file name of where to find the prestate env to use.",
Value: "env.json", Value: "env.json",
} }
InputTxsFlag = cli.StringFlag{ InputTxsFlag = &cli.StringFlag{
Name: "input.txs", Name: "input.txs",
Usage: "`stdin` or file name of where to find the transactions to apply. " + Usage: "`stdin` or file name of where to find the transactions to apply. " +
"If the file extension is '.rlp', then the data is interpreted as an RLP list of signed transactions." + "If the file extension is '.rlp', then the data is interpreted as an RLP list of signed transactions." +
"The '.rlp' format is identical to the output.body format.", "The '.rlp' format is identical to the output.body format.",
Value: "txs.json", Value: "txs.json",
} }
InputHeaderFlag = cli.StringFlag{ InputHeaderFlag = &cli.StringFlag{
Name: "input.header", Name: "input.header",
Usage: "`stdin` or file name of where to find the block header to use.", Usage: "`stdin` or file name of where to find the block header to use.",
Value: "header.json", Value: "header.json",
} }
InputOmmersFlag = cli.StringFlag{ InputOmmersFlag = &cli.StringFlag{
Name: "input.ommers", Name: "input.ommers",
Usage: "`stdin` or file name of where to find the list of ommer header RLPs to use.", Usage: "`stdin` or file name of where to find the list of ommer header RLPs to use.",
} }
InputTxsRlpFlag = cli.StringFlag{ InputTxsRlpFlag = &cli.StringFlag{
Name: "input.txs", Name: "input.txs",
Usage: "`stdin` or file name of where to find the transactions list in RLP form.", Usage: "`stdin` or file name of where to find the transactions list in RLP form.",
Value: "txs.rlp", Value: "txs.rlp",
} }
SealCliqueFlag = cli.StringFlag{ SealCliqueFlag = &cli.StringFlag{
Name: "seal.clique", Name: "seal.clique",
Usage: "Seal block with Clique. `stdin` or file name of where to find the Clique sealing data.", Usage: "Seal block with Clique. `stdin` or file name of where to find the Clique sealing data.",
} }
SealEthashFlag = cli.BoolFlag{ SealEthashFlag = &cli.BoolFlag{
Name: "seal.ethash", Name: "seal.ethash",
Usage: "Seal block with ethash.", Usage: "Seal block with ethash.",
} }
SealEthashDirFlag = cli.StringFlag{ SealEthashDirFlag = &cli.StringFlag{
Name: "seal.ethash.dir", Name: "seal.ethash.dir",
Usage: "Path to ethash DAG. If none exists, a new DAG will be generated.", Usage: "Path to ethash DAG. If none exists, a new DAG will be generated.",
} }
SealEthashModeFlag = cli.StringFlag{ SealEthashModeFlag = &cli.StringFlag{
Name: "seal.ethash.mode", Name: "seal.ethash.mode",
Usage: "Defines the type and amount of PoW verification an ethash engine makes.", Usage: "Defines the type and amount of PoW verification an ethash engine makes.",
Value: "normal", Value: "normal",
} }
RewardFlag = cli.Int64Flag{ RewardFlag = &cli.Int64Flag{
Name: "state.reward", Name: "state.reward",
Usage: "Mining reward. Set to -1 to disable", Usage: "Mining reward. Set to -1 to disable",
Value: 0, Value: 0,
} }
ChainIDFlag = cli.Int64Flag{ ChainIDFlag = &cli.Int64Flag{
Name: "state.chainid", Name: "state.chainid",
Usage: "ChainID to use", Usage: "ChainID to use",
Value: 1, Value: 1,
} }
ForknameFlag = cli.StringFlag{ ForknameFlag = &cli.StringFlag{
Name: "state.fork", Name: "state.fork",
Usage: fmt.Sprintf("Name of ruleset to use."+ Usage: fmt.Sprintf("Name of ruleset to use."+
"\n\tAvailable forknames:"+ "\n\tAvailable forknames:"+
@ -152,9 +154,9 @@ var (
"\n\tSyntax <forkname>(+ExtraEip)", "\n\tSyntax <forkname>(+ExtraEip)",
strings.Join(tests.AvailableForks(), "\n\t "), strings.Join(tests.AvailableForks(), "\n\t "),
strings.Join(vm.ActivateableEips(), ", ")), strings.Join(vm.ActivateableEips(), ", ")),
Value: "ArrowGlacier", Value: "GrayGlacier",
} }
VerbosityFlag = cli.IntFlag{ VerbosityFlag = &cli.IntFlag{
Name: "verbosity", Name: "verbosity",
Usage: "sets the verbosity level", Usage: "sets the verbosity level",
Value: 3, Value: 3,
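
cli/v2 removes BoolTFlag (a boolean defaulting to true), so the deprecated trace.nomemory and trace.noreturndata switches become ordinary BoolFlags with an explicit Value: true, and the default fork name moves from ArrowGlacier to GrayGlacier. A minimal example of a default-true boolean in v2, with a made-up flag name:

// Illustrative only: in urfave/cli/v2 a default-true boolean is a BoolFlag
// with Value: true (v1's BoolTFlag no longer exists). Flag name invented.
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.BoolFlag{
				Name:  "nomemory",
				Value: true, // defaults to true unless --nomemory=false is given
				Usage: "disable memory output",
			},
		},
		Action: func(ctx *cli.Context) error {
			fmt.Println("nomemory:", ctx.Bool("nomemory"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}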

View File

@ -32,7 +32,7 @@ import (
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/tests" "github.com/ethereum/go-ethereum/tests"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
type result struct { type result struct {

View File

@ -38,7 +38,7 @@ import (
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/tests" "github.com/ethereum/go-ethereum/tests"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
const ( const (

View File

@ -21,7 +21,7 @@ import (
"fmt" "fmt"
"os" "os"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
// readFile reads the json-data in the provided path and marshals into dest. // readFile reads the json-data in the provided path and marshals into dest.

View File

@ -23,115 +23,118 @@ import (
"os" "os"
"github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool" "github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/internal/flags"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var gitCommit = "" // Git SHA1 commit hash of the release (set via linker flags) var (
var gitDate = "" gitCommit = "" // Git SHA1 commit hash of the release (set via linker flags)
gitDate = ""
app = flags.NewApp(gitCommit, gitDate, "the evm command line interface")
)
var ( var (
app = flags.NewApp(gitCommit, gitDate, "the evm command line interface") DebugFlag = &cli.BoolFlag{
DebugFlag = cli.BoolFlag{
Name: "debug", Name: "debug",
Usage: "output full trace logs", Usage: "output full trace logs",
} }
MemProfileFlag = cli.StringFlag{ MemProfileFlag = &cli.StringFlag{
Name: "memprofile", Name: "memprofile",
Usage: "creates a memory profile at the given path", Usage: "creates a memory profile at the given path",
} }
CPUProfileFlag = cli.StringFlag{ CPUProfileFlag = &cli.StringFlag{
Name: "cpuprofile", Name: "cpuprofile",
Usage: "creates a CPU profile at the given path", Usage: "creates a CPU profile at the given path",
} }
StatDumpFlag = cli.BoolFlag{ StatDumpFlag = &cli.BoolFlag{
Name: "statdump", Name: "statdump",
Usage: "displays stack and heap memory information", Usage: "displays stack and heap memory information",
} }
CodeFlag = cli.StringFlag{ CodeFlag = &cli.StringFlag{
Name: "code", Name: "code",
Usage: "EVM code", Usage: "EVM code",
} }
CodeFileFlag = cli.StringFlag{ CodeFileFlag = &cli.StringFlag{
Name: "codefile", Name: "codefile",
Usage: "File containing EVM code. If '-' is specified, code is read from stdin ", Usage: "File containing EVM code. If '-' is specified, code is read from stdin ",
} }
GasFlag = cli.Uint64Flag{ GasFlag = &cli.Uint64Flag{
Name: "gas", Name: "gas",
Usage: "gas limit for the evm", Usage: "gas limit for the evm",
Value: 10000000000, Value: 10000000000,
} }
PriceFlag = utils.BigFlag{ PriceFlag = &flags.BigFlag{
Name: "price", Name: "price",
Usage: "price set for the evm", Usage: "price set for the evm",
Value: new(big.Int), Value: new(big.Int),
} }
ValueFlag = utils.BigFlag{ ValueFlag = &flags.BigFlag{
Name: "value", Name: "value",
Usage: "value set for the evm", Usage: "value set for the evm",
Value: new(big.Int), Value: new(big.Int),
} }
DumpFlag = cli.BoolFlag{ DumpFlag = &cli.BoolFlag{
Name: "dump", Name: "dump",
Usage: "dumps the state after the run", Usage: "dumps the state after the run",
} }
InputFlag = cli.StringFlag{ InputFlag = &cli.StringFlag{
Name: "input", Name: "input",
Usage: "input for the EVM", Usage: "input for the EVM",
} }
InputFileFlag = cli.StringFlag{ InputFileFlag = &cli.StringFlag{
Name: "inputfile", Name: "inputfile",
Usage: "file containing input for the EVM", Usage: "file containing input for the EVM",
} }
VerbosityFlag = cli.IntFlag{ VerbosityFlag = &cli.IntFlag{
Name: "verbosity", Name: "verbosity",
Usage: "sets the verbosity level", Usage: "sets the verbosity level",
} }
BenchFlag = cli.BoolFlag{ BenchFlag = &cli.BoolFlag{
Name: "bench", Name: "bench",
Usage: "benchmark the execution", Usage: "benchmark the execution",
} }
CreateFlag = cli.BoolFlag{ CreateFlag = &cli.BoolFlag{
Name: "create", Name: "create",
Usage: "indicates the action should be create rather than call", Usage: "indicates the action should be create rather than call",
} }
GenesisFlag = cli.StringFlag{ GenesisFlag = &cli.StringFlag{
Name: "prestate", Name: "prestate",
Usage: "JSON file with prestate (genesis) config", Usage: "JSON file with prestate (genesis) config",
} }
MachineFlag = cli.BoolFlag{ MachineFlag = &cli.BoolFlag{
Name: "json", Name: "json",
Usage: "output trace logs in machine readable format (json)", Usage: "output trace logs in machine readable format (json)",
} }
SenderFlag = cli.StringFlag{ SenderFlag = &cli.StringFlag{
Name: "sender", Name: "sender",
Usage: "The transaction origin", Usage: "The transaction origin",
} }
ReceiverFlag = cli.StringFlag{ ReceiverFlag = &cli.StringFlag{
Name: "receiver", Name: "receiver",
Usage: "The transaction receiver (execution context)", Usage: "The transaction receiver (execution context)",
} }
DisableMemoryFlag = cli.BoolTFlag{ DisableMemoryFlag = &cli.BoolFlag{
Name: "nomemory", Name: "nomemory",
Value: true,
Usage: "disable memory output", Usage: "disable memory output",
} }
DisableStackFlag = cli.BoolFlag{ DisableStackFlag = &cli.BoolFlag{
Name: "nostack", Name: "nostack",
Usage: "disable stack output", Usage: "disable stack output",
} }
DisableStorageFlag = cli.BoolFlag{ DisableStorageFlag = &cli.BoolFlag{
Name: "nostorage", Name: "nostorage",
Usage: "disable storage output", Usage: "disable storage output",
} }
DisableReturnDataFlag = cli.BoolTFlag{ DisableReturnDataFlag = &cli.BoolFlag{
Name: "noreturndata", Name: "noreturndata",
Value: true,
Usage: "enable return data output", Usage: "enable return data output",
} }
) )
var stateTransitionCommand = cli.Command{ var stateTransitionCommand = &cli.Command{
Name: "transition", Name: "transition",
Aliases: []string{"t8n"}, Aliases: []string{"t8n"},
Usage: "executes a full state transition", Usage: "executes a full state transition",
@ -156,7 +159,8 @@ var stateTransitionCommand = cli.Command{
t8ntool.VerbosityFlag, t8ntool.VerbosityFlag,
}, },
} }
var transactionCommand = cli.Command{
var transactionCommand = &cli.Command{
Name: "transaction", Name: "transaction",
Aliases: []string{"t9n"}, Aliases: []string{"t9n"},
Usage: "performs transaction validation", Usage: "performs transaction validation",
@ -169,7 +173,7 @@ var transactionCommand = cli.Command{
}, },
} }
var blockBuilderCommand = cli.Command{ var blockBuilderCommand = &cli.Command{
Name: "block-builder", Name: "block-builder",
Aliases: []string{"b11r"}, Aliases: []string{"b11r"},
Usage: "builds a block", Usage: "builds a block",
@ -214,7 +218,7 @@ func init() {
DisableStorageFlag, DisableStorageFlag,
DisableReturnDataFlag, DisableReturnDataFlag,
} }
app.Commands = []cli.Command{ app.Commands = []*cli.Command{
compileCommand, compileCommand,
disasmCommand, disasmCommand,
runCommand, runCommand,
@ -223,7 +227,6 @@ func init() {
transactionCommand, transactionCommand,
blockBuilderCommand, blockBuilderCommand,
} }
cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate
} }
func main() { func main() {
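
PriceFlag and ValueFlag move from utils.BigFlag to flags.BigFlag (read later via flags.GlobalBig in runner.go); the flags.BigFlag implementation itself is not part of this diff. Purely as a stand-in sketch, a big.Int-valued flag can be layered on cli/v2's GenericFlag, whose Value only needs Set and String methods:

// Illustrative stand-in only: this is NOT go-ethereum's flags.BigFlag, just a
// sketch of a big.Int flag built on cli/v2's GenericFlag mechanism.
package main

import (
	"errors"
	"fmt"
	"log"
	"math/big"
	"os"

	"github.com/urfave/cli/v2"
)

// bigValue implements cli.Generic (Set/String) around a big.Int.
type bigValue big.Int

func (b *bigValue) Set(s string) error {
	v, ok := new(big.Int).SetString(s, 10)
	if !ok {
		return errors.New("invalid integer: " + s)
	}
	*b = bigValue(*v)
	return nil
}

func (b *bigValue) String() string { return (*big.Int)(b).String() }

func main() {
	price := new(bigValue)
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.GenericFlag{Name: "price", Usage: "price set for the evm", Value: price},
		},
		Action: func(ctx *cli.Context) error {
			fmt.Println("price:", (*big.Int)(price))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}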

View File

@ -37,12 +37,13 @@ import (
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/core/vm/runtime" "github.com/ethereum/go-ethereum/core/vm/runtime"
"github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var runCommand = cli.Command{ var runCommand = &cli.Command{
Action: runCmd, Action: runCmd,
Name: "run", Name: "run",
Usage: "run arbitrary evm binary", Usage: "run arbitrary evm binary",
@ -106,14 +107,14 @@ func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []by
func runCmd(ctx *cli.Context) error { func runCmd(ctx *cli.Context) error {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name))) glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger) log.Root().SetHandler(glogger)
logconfig := &logger.Config{ logconfig := &logger.Config{
EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name), EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
DisableStack: ctx.GlobalBool(DisableStackFlag.Name), DisableStack: ctx.Bool(DisableStackFlag.Name),
DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name), DisableStorage: ctx.Bool(DisableStorageFlag.Name),
EnableReturnData: !ctx.GlobalBool(DisableReturnDataFlag.Name), EnableReturnData: !ctx.Bool(DisableReturnDataFlag.Name),
Debug: ctx.GlobalBool(DebugFlag.Name), Debug: ctx.Bool(DebugFlag.Name),
} }
var ( var (
@ -125,16 +126,16 @@ func runCmd(ctx *cli.Context) error {
receiver = common.BytesToAddress([]byte("receiver")) receiver = common.BytesToAddress([]byte("receiver"))
genesisConfig *core.Genesis genesisConfig *core.Genesis
) )
if ctx.GlobalBool(MachineFlag.Name) { if ctx.Bool(MachineFlag.Name) {
tracer = logger.NewJSONLogger(logconfig, os.Stdout) tracer = logger.NewJSONLogger(logconfig, os.Stdout)
} else if ctx.GlobalBool(DebugFlag.Name) { } else if ctx.Bool(DebugFlag.Name) {
debugLogger = logger.NewStructLogger(logconfig) debugLogger = logger.NewStructLogger(logconfig)
tracer = debugLogger tracer = debugLogger
} else { } else {
debugLogger = logger.NewStructLogger(logconfig) debugLogger = logger.NewStructLogger(logconfig)
} }
if ctx.GlobalString(GenesisFlag.Name) != "" { if ctx.String(GenesisFlag.Name) != "" {
gen := readGenesis(ctx.GlobalString(GenesisFlag.Name)) gen := readGenesis(ctx.String(GenesisFlag.Name))
genesisConfig = gen genesisConfig = gen
db := rawdb.NewMemoryDatabase() db := rawdb.NewMemoryDatabase()
genesis := gen.ToBlock(db) genesis := gen.ToBlock(db)
@ -144,18 +145,18 @@ func runCmd(ctx *cli.Context) error {
statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
genesisConfig = new(core.Genesis) genesisConfig = new(core.Genesis)
} }
if ctx.GlobalString(SenderFlag.Name) != "" { if ctx.String(SenderFlag.Name) != "" {
sender = common.HexToAddress(ctx.GlobalString(SenderFlag.Name)) sender = common.HexToAddress(ctx.String(SenderFlag.Name))
} }
statedb.CreateAccount(sender) statedb.CreateAccount(sender)
if ctx.GlobalString(ReceiverFlag.Name) != "" { if ctx.String(ReceiverFlag.Name) != "" {
receiver = common.HexToAddress(ctx.GlobalString(ReceiverFlag.Name)) receiver = common.HexToAddress(ctx.String(ReceiverFlag.Name))
} }
var code []byte var code []byte
codeFileFlag := ctx.GlobalString(CodeFileFlag.Name) codeFileFlag := ctx.String(CodeFileFlag.Name)
codeFlag := ctx.GlobalString(CodeFlag.Name) codeFlag := ctx.String(CodeFlag.Name)
// The '--code' or '--codefile' flag overrides code in state // The '--code' or '--codefile' flag overrides code in state
if codeFileFlag != "" || codeFlag != "" { if codeFileFlag != "" || codeFlag != "" {
@ -197,7 +198,7 @@ func runCmd(ctx *cli.Context) error {
} }
code = common.Hex2Bytes(bin) code = common.Hex2Bytes(bin)
} }
initialGas := ctx.GlobalUint64(GasFlag.Name) initialGas := ctx.Uint64(GasFlag.Name)
if genesisConfig.GasLimit != 0 { if genesisConfig.GasLimit != 0 {
initialGas = genesisConfig.GasLimit initialGas = genesisConfig.GasLimit
} }
@ -205,19 +206,19 @@ func runCmd(ctx *cli.Context) error {
Origin: sender, Origin: sender,
State: statedb, State: statedb,
GasLimit: initialGas, GasLimit: initialGas,
GasPrice: utils.GlobalBig(ctx, PriceFlag.Name), GasPrice: flags.GlobalBig(ctx, PriceFlag.Name),
Value: utils.GlobalBig(ctx, ValueFlag.Name), Value: flags.GlobalBig(ctx, ValueFlag.Name),
Difficulty: genesisConfig.Difficulty, Difficulty: genesisConfig.Difficulty,
Time: new(big.Int).SetUint64(genesisConfig.Timestamp), Time: new(big.Int).SetUint64(genesisConfig.Timestamp),
Coinbase: genesisConfig.Coinbase, Coinbase: genesisConfig.Coinbase,
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number), BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
EVMConfig: vm.Config{ EVMConfig: vm.Config{
Tracer: tracer, Tracer: tracer,
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name), Debug: ctx.Bool(DebugFlag.Name) || ctx.Bool(MachineFlag.Name),
}, },
} }
if cpuProfilePath := ctx.GlobalString(CPUProfileFlag.Name); cpuProfilePath != "" { if cpuProfilePath := ctx.String(CPUProfileFlag.Name); cpuProfilePath != "" {
f, err := os.Create(cpuProfilePath) f, err := os.Create(cpuProfilePath)
if err != nil { if err != nil {
fmt.Println("could not create CPU profile: ", err) fmt.Println("could not create CPU profile: ", err)
@ -237,14 +238,14 @@ func runCmd(ctx *cli.Context) error {
} }
var hexInput []byte var hexInput []byte
if inputFileFlag := ctx.GlobalString(InputFileFlag.Name); inputFileFlag != "" { if inputFileFlag := ctx.String(InputFileFlag.Name); inputFileFlag != "" {
var err error var err error
if hexInput, err = os.ReadFile(inputFileFlag); err != nil { if hexInput, err = os.ReadFile(inputFileFlag); err != nil {
fmt.Printf("could not load input from file: %v\n", err) fmt.Printf("could not load input from file: %v\n", err)
os.Exit(1) os.Exit(1)
} }
} else { } else {
hexInput = []byte(ctx.GlobalString(InputFlag.Name)) hexInput = []byte(ctx.String(InputFlag.Name))
} }
hexInput = bytes.TrimSpace(hexInput) hexInput = bytes.TrimSpace(hexInput)
if len(hexInput)%2 != 0 { if len(hexInput)%2 != 0 {
@ -254,7 +255,7 @@ func runCmd(ctx *cli.Context) error {
input := common.FromHex(string(hexInput)) input := common.FromHex(string(hexInput))
var execFunc func() ([]byte, uint64, error) var execFunc func() ([]byte, uint64, error)
if ctx.GlobalBool(CreateFlag.Name) { if ctx.Bool(CreateFlag.Name) {
input = append(code, input...) input = append(code, input...)
execFunc = func() ([]byte, uint64, error) { execFunc = func() ([]byte, uint64, error) {
output, _, gasLeft, err := runtime.Create(input, &runtimeConfig) output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
@ -269,16 +270,16 @@ func runCmd(ctx *cli.Context) error {
} }
} }
bench := ctx.GlobalBool(BenchFlag.Name) bench := ctx.Bool(BenchFlag.Name)
output, leftOverGas, stats, err := timedExec(bench, execFunc) output, leftOverGas, stats, err := timedExec(bench, execFunc)
if ctx.GlobalBool(DumpFlag.Name) { if ctx.Bool(DumpFlag.Name) {
statedb.Commit(true) statedb.Commit(true)
statedb.IntermediateRoot(true) statedb.IntermediateRoot(true)
fmt.Println(string(statedb.Dump(nil))) fmt.Println(string(statedb.Dump(nil)))
} }
if memProfilePath := ctx.GlobalString(MemProfileFlag.Name); memProfilePath != "" { if memProfilePath := ctx.String(MemProfileFlag.Name); memProfilePath != "" {
f, err := os.Create(memProfilePath) f, err := os.Create(memProfilePath)
if err != nil { if err != nil {
fmt.Println("could not create memory profile: ", err) fmt.Println("could not create memory profile: ", err)
@ -291,7 +292,7 @@ func runCmd(ctx *cli.Context) error {
f.Close() f.Close()
} }
if ctx.GlobalBool(DebugFlag.Name) { if ctx.Bool(DebugFlag.Name) {
if debugLogger != nil { if debugLogger != nil {
fmt.Fprintln(os.Stderr, "#### TRACE ####") fmt.Fprintln(os.Stderr, "#### TRACE ####")
logger.WriteTrace(os.Stderr, debugLogger.StructLogs()) logger.WriteTrace(os.Stderr, debugLogger.StructLogs())
@ -300,7 +301,7 @@ func runCmd(ctx *cli.Context) error {
logger.WriteLogs(os.Stderr, statedb.Logs()) logger.WriteLogs(os.Stderr, statedb.Logs())
} }
if bench || ctx.GlobalBool(StatDumpFlag.Name) { if bench || ctx.Bool(StatDumpFlag.Name) {
fmt.Fprintf(os.Stderr, `EVM gas used: %d fmt.Fprintf(os.Stderr, `EVM gas used: %d
execution time: %v execution time: %v
allocations: %d allocations: %d
@ -308,7 +309,7 @@ allocated bytes: %d
`, initialGas-leftOverGas, stats.time, stats.allocs, stats.bytesAllocated) `, initialGas-leftOverGas, stats.time, stats.allocs, stats.bytesAllocated)
} }
if tracer == nil { if tracer == nil {
fmt.Printf("0x%x\n", output) fmt.Printf("%#x\n", output)
if err != nil { if err != nil {
fmt.Printf(" error: %v\n", err) fmt.Printf(" error: %v\n", err)
} }
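Note on the flag-access rewrites above (and the same pattern in the hunks that follow): the CLI dependency moves from gopkg.in/urfave/cli.v1 to github.com/urfave/cli/v2, where commands and flags are declared as pointers and the Global* accessors are gone. A minimal, hypothetical sketch of the v2 style, not taken from this change:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

// Hypothetical one-command app showing the cli/v2 conventions used in this diff:
// commands and flags are declared as pointers, and values are read with plain
// ctx.Bool / ctx.String instead of the removed GlobalBool / GlobalString accessors.
func main() {
	runCmd := &cli.Command{
		Name:  "run",
		Usage: "demo command",
		Flags: []cli.Flag{
			&cli.BoolFlag{Name: "debug", Usage: "enable tracing"},
		},
		Action: func(ctx *cli.Context) error {
			fmt.Println("debug enabled:", ctx.Bool("debug"))
			fmt.Println("first argument:", ctx.Args().First())
			return nil
		},
	}
	app := &cli.App{Name: "demo", Commands: []*cli.Command{runCmd}}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```

In cli/v2 a plain ctx lookup also resolves flags set on parent contexts, which is why the utils.MigrateFlags wrappers can be dropped from the geth command definitions later in this diff.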
@ -28,10 +28,10 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/tests" "github.com/ethereum/go-ethereum/tests"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var stateTestCommand = cli.Command{ var stateTestCommand = &cli.Command{
Action: stateTestCmd, Action: stateTestCmd,
Name: "statetest", Name: "statetest",
Usage: "executes the given state tests", Usage: "executes the given state tests",
@ -54,25 +54,25 @@ func stateTestCmd(ctx *cli.Context) error {
} }
// Configure the go-ethereum logger // Configure the go-ethereum logger
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name))) glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger) log.Root().SetHandler(glogger)
// Configure the EVM logger // Configure the EVM logger
config := &logger.Config{ config := &logger.Config{
EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name), EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
DisableStack: ctx.GlobalBool(DisableStackFlag.Name), DisableStack: ctx.Bool(DisableStackFlag.Name),
DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name), DisableStorage: ctx.Bool(DisableStorageFlag.Name),
EnableReturnData: !ctx.GlobalBool(DisableReturnDataFlag.Name), EnableReturnData: !ctx.Bool(DisableReturnDataFlag.Name),
} }
var ( var (
tracer vm.EVMLogger tracer vm.EVMLogger
debugger *logger.StructLogger debugger *logger.StructLogger
) )
switch { switch {
case ctx.GlobalBool(MachineFlag.Name): case ctx.Bool(MachineFlag.Name):
tracer = logger.NewJSONLogger(config, os.Stderr) tracer = logger.NewJSONLogger(config, os.Stderr)
case ctx.GlobalBool(DebugFlag.Name): case ctx.Bool(DebugFlag.Name):
debugger = logger.NewStructLogger(config) debugger = logger.NewStructLogger(config)
tracer = debugger tracer = debugger
@ -91,7 +91,7 @@ func stateTestCmd(ctx *cli.Context) error {
// Iterate over all the tests, run them and aggregate the results // Iterate over all the tests, run them and aggregate the results
cfg := vm.Config{ cfg := vm.Config{
Tracer: tracer, Tracer: tracer,
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name), Debug: ctx.Bool(DebugFlag.Name) || ctx.Bool(MachineFlag.Name),
} }
results := make([]StatetestResult, 0, len(tests)) results := make([]StatetestResult, 0, len(tests))
for key, test := range tests { for key, test := range tests {
@ -100,13 +100,13 @@ func stateTestCmd(ctx *cli.Context) error {
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true} result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
_, s, err := test.Run(st, cfg, false) _, s, err := test.Run(st, cfg, false)
// print state root for evmlab tracing // print state root for evmlab tracing
if ctx.GlobalBool(MachineFlag.Name) && s != nil { if ctx.Bool(MachineFlag.Name) && s != nil {
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", s.IntermediateRoot(false)) fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", s.IntermediateRoot(false))
} }
if err != nil { if err != nil {
// Test failed, mark as so and dump any state to aid debugging // Test failed, mark as so and dump any state to aid debugging
result.Pass, result.Error = false, err.Error() result.Pass, result.Error = false, err.Error()
if ctx.GlobalBool(DumpFlag.Name) && s != nil { if ctx.Bool(DumpFlag.Name) && s != nil {
dump := s.RawDump(nil) dump := s.RawDump(nil)
result.State = &dump result.State = &dump
} }
@ -115,7 +115,7 @@ func stateTestCmd(ctx *cli.Context) error {
results = append(results, *result) results = append(results, *result)
// Print any structured logs collected // Print any structured logs collected
if ctx.GlobalBool(DebugFlag.Name) { if ctx.Bool(DebugFlag.Name) {
if debugger != nil { if debugger != nil {
fmt.Fprintln(os.Stderr, "#### TRACE ####") fmt.Fprintln(os.Stderr, "#### TRACE ####")
logger.WriteTrace(os.Stderr, debugger.StructLogs()) logger.WriteTrace(os.Stderr, debugger.StructLogs())
@ -211,6 +211,14 @@ func TestT8n(t *testing.T) {
output: t8nOutput{result: true}, output: t8nOutput{result: true},
expOut: "exp_arrowglacier.json", expOut: "exp_arrowglacier.json",
}, },
{ // Difficulty calculation on gray glacier
base: "./testdata/19",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "GrayGlacier", "",
},
output: t8nOutput{result: true},
expOut: "exp_grayglacier.json",
},
{ // Sign unprotected (pre-EIP155) transaction { // Sign unprotected (pre-EIP155) transaction
base: "./testdata/23", base: "./testdata/23",
input: t8nInput{ input: t8nInput{
@ -236,7 +244,6 @@ func TestT8n(t *testing.T) {
expExitCode: 3, expExitCode: 3,
}, },
} { } {
args := []string{"t8n"} args := []string{"t8n"}
args = append(args, tc.output.get()...) args = append(args, tc.output.get()...)
args = append(args, tc.input.get(tc.base)...) args = append(args, tc.input.get(tc.base)...)
@ -347,7 +354,6 @@ func TestT9n(t *testing.T) {
expExitCode: t8ntool.ErrorIO, expExitCode: t8ntool.ErrorIO,
}, },
} { } {
args := []string{"t9n"} args := []string{"t9n"}
args = append(args, tc.input.get(tc.base)...) args = append(args, tc.input.get(tc.base)...)
@ -467,7 +473,6 @@ func TestB11r(t *testing.T) {
expOut: "exp.json", expOut: "exp.json",
}, },
} { } {
args := []string{"b11r"} args := []string{"b11r"}
args = append(args, tc.input.get(tc.base)...) args = append(args, tc.input.get(tc.base)...)
@ -0,0 +1,12 @@
{
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"currentDifficulty": "0x2000000004000",
"gasUsed": "0x0"
}
}
@ -1,9 +1,9 @@
## Difficulty calculation ## Difficulty calculation
This test shows how the `evm t8n` can be used to calculate the (ethash) difficulty, if none is provided by the caller, This test shows how the `evm t8n` can be used to calculate the (ethash) difficulty, if none is provided by the caller,
this time on `ArrowGlacier` (Eip 4345). this time on `GrayGlacier` (Eip 5133).
Calculating it (with an empty set of txs) using `ArrowGlacier` rules (and no provided unclehash for the parent block): Calculating it (with an empty set of txs) using `GrayGlacier` rules (and no provided unclehash for the parent block):
``` ```
[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.json --output.result=stdout --state.fork=ArrowGlacier [user@work evm]$ ./evm t8n --input.alloc=./testdata/19/alloc.json --input.txs=./testdata/19/txs.json --input.env=./testdata/19/env.json --output.result=stdout --state.fork=GrayGlacier
``` ```
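For orientation, the same calculation can be sketched directly in Go. This is only an illustration, assuming the exported ethash.CalcDifficulty helper and params.MainnetChainConfig; the parent-header numbers below are invented, whereas the real test reads them from ./testdata/19/env.json:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Made-up parent header values; `evm t8n` reads the real ones from env.json.
	// The block number is chosen to be past the mainnet GrayGlacier activation.
	parent := &types.Header{
		Number:     big.NewInt(15_100_000),
		Time:       1_655_000_000,
		Difficulty: big.NewInt(0x2000000000000),
		UncleHash:  types.EmptyUncleHash,
	}
	// params.MainnetChainConfig has GrayGlacierBlock set, so the EIP-5133
	// difficulty-bomb delay is applied for the child block.
	childTime := parent.Time + 13
	fmt.Println("child difficulty:", ethash.CalcDifficulty(params.MainnetChainConfig, childTime, parent))
}
```

GrayGlacier (EIP-5133) only pushes back the difficulty bomb relative to ArrowGlacier; the adjustment formula itself is unchanged, which is why this README mirrors the earlier ArrowGlacier test (testdata/14).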
@ -10,9 +10,10 @@ The `faucet` is a single binary app (everything included) with all configuration
First thing's first, the `faucet` needs to connect to an Ethereum network, for which it needs the necessary genesis and network infos. Each of the following flags must be set: First thing's first, the `faucet` needs to connect to an Ethereum network, for which it needs the necessary genesis and network infos. Each of the following flags must be set:
- `-genesis` is a path to a file containin the network `genesis.json`. or using: - `-genesis` is a path to a file containing the network `genesis.json`. or using:
- `-goerli` with the faucet with Görli network config - `-goerli` with the faucet with Görli network config
- `-rinkeby` with the faucet with Rinkeby network config - `-rinkeby` with the faucet with Rinkeby network config
- `-sepolia` with the faucet with Sepolia network config
- `-network` is the devp2p network id used during connection - `-network` is the devp2p network id used during connection
- `-bootnodes` is a list of `enode://` ids to join the network through - `-bootnodes` is a list of `enode://` ids to join the network through
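As a side note, a minimal Go sketch of which built-in genesis each network shortcut resolves to, mirroring the getGenesis switch shown further down in this diff (the helper and main function here are invented for illustration):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/core"
)

// genesisFor is an illustrative helper (not the faucet's actual code) showing
// which built-in genesis spec each network shortcut flag resolves to.
func genesisFor(goerli, rinkeby, sepolia bool) (*core.Genesis, error) {
	switch {
	case goerli:
		return core.DefaultGoerliGenesisBlock(), nil
	case rinkeby:
		return core.DefaultRinkebyGenesisBlock(), nil
	case sepolia:
		return core.DefaultSepoliaGenesisBlock(), nil
	default:
		return nil, errors.New("no network selected, pass -genesis instead")
	}
}

func main() {
	genesis, err := genesisFor(false, false, true) // as if the faucet were started with -sepolia
	if err != nil {
		panic(err)
	}
	fmt.Println("chain id:", genesis.Config.ChainID)
}
```

Started with -sepolia, the faucet therefore uses the Sepolia genesis (chain ID 11155111) and still needs matching -network and -bootnodes values to actually join the network.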
@ -86,6 +86,7 @@ var (
goerliFlag = flag.Bool("goerli", false, "Initializes the faucet with Görli network config") goerliFlag = flag.Bool("goerli", false, "Initializes the faucet with Görli network config")
rinkebyFlag = flag.Bool("rinkeby", false, "Initializes the faucet with Rinkeby network config") rinkebyFlag = flag.Bool("rinkeby", false, "Initializes the faucet with Rinkeby network config")
sepoliaFlag = flag.Bool("sepolia", false, "Initializes the faucet with Sepolia network config")
) )
var ( var (
@ -143,7 +144,7 @@ func main() {
log.Crit("Failed to render the faucet template", "err", err) log.Crit("Failed to render the faucet template", "err", err)
} }
// Load and parse the genesis block requested by the user // Load and parse the genesis block requested by the user
genesis, err := getGenesis(*genesisFlag, *goerliFlag, *rinkebyFlag) genesis, err := getGenesis(*genesisFlag, *goerliFlag, *rinkebyFlag, *sepoliaFlag)
if err != nil { if err != nil {
log.Crit("Failed to parse genesis config", "err", err) log.Crit("Failed to parse genesis config", "err", err)
} }
@ -860,7 +861,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body))) address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
if address == (common.Address{}) { if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser //lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("No Ethereum address found to fund") return "", "", common.Address{}, errors.New("No Ethereum address found to fund. Please check the post URL and verify that it can be viewed publicly.")
} }
var avatar string var avatar string
if parts = regexp.MustCompile(`src="([^"]+fbcdn\.net[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 { if parts = regexp.MustCompile(`src="([^"]+fbcdn\.net[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 {
@ -882,7 +883,7 @@ func authNoAuth(url string) (string, string, common.Address, error) {
} }
// getGenesis returns a genesis based on input args // getGenesis returns a genesis based on input args
func getGenesis(genesisFlag string, goerliFlag bool, rinkebyFlag bool) (*core.Genesis, error) { func getGenesis(genesisFlag string, goerliFlag bool, rinkebyFlag bool, sepoliaFlag bool) (*core.Genesis, error) {
switch { switch {
case genesisFlag != "": case genesisFlag != "":
var genesis core.Genesis var genesis core.Genesis
@ -892,6 +893,8 @@ func getGenesis(genesisFlag string, goerliFlag bool, rinkebyFlag bool) (*core.Ge
return core.DefaultGoerliGenesisBlock(), nil return core.DefaultGoerliGenesisBlock(), nil
case rinkebyFlag: case rinkebyFlag:
return core.DefaultRinkebyGenesisBlock(), nil return core.DefaultRinkebyGenesisBlock(), nil
case sepoliaFlag:
return core.DefaultSepoliaGenesisBlock(), nil
default: default:
return nil, fmt.Errorf("no genesis flag provided") return nil, fmt.Errorf("no genesis flag provided")
} }
@ -25,29 +25,27 @@ import (
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
walletCommand = cli.Command{ walletCommand = &cli.Command{
Name: "wallet", Name: "wallet",
Usage: "Manage Ethereum presale wallets", Usage: "Manage Ethereum presale wallets",
ArgsUsage: "", ArgsUsage: "",
Category: "ACCOUNT COMMANDS",
Description: ` Description: `
geth wallet import /path/to/my/presale.wallet geth wallet import /path/to/my/presale.wallet
will prompt for your password and imports your ether presale account. will prompt for your password and imports your ether presale account.
It can be used non-interactively with the --password option taking a It can be used non-interactively with the --password option taking a
passwordfile as argument containing the wallet password in plaintext.`, passwordfile as argument containing the wallet password in plaintext.`,
Subcommands: []cli.Command{ Subcommands: []*cli.Command{
{ {
Name: "import", Name: "import",
Usage: "Import Ethereum presale wallet", Usage: "Import Ethereum presale wallet",
ArgsUsage: "<keyFile>", ArgsUsage: "<keyFile>",
Action: utils.MigrateFlags(importWallet), Action: importWallet,
Category: "ACCOUNT COMMANDS",
Flags: []cli.Flag{ Flags: []cli.Flag{
utils.DataDirFlag, utils.DataDirFlag,
utils.KeyStoreDirFlag, utils.KeyStoreDirFlag,
@ -64,10 +62,9 @@ passwordfile as argument containing the wallet password in plaintext.`,
}, },
} }
accountCommand = cli.Command{ accountCommand = &cli.Command{
Name: "account", Name: "account",
Usage: "Manage accounts", Usage: "Manage accounts",
Category: "ACCOUNT COMMANDS",
Description: ` Description: `
Manage accounts, list all existing accounts, import a private key into a new Manage accounts, list all existing accounts, import a private key into a new
@ -88,11 +85,11 @@ It is safe to transfer the entire directory or the individual keys therein
between ethereum nodes by simply copying. between ethereum nodes by simply copying.
Make sure you backup your keys regularly.`, Make sure you backup your keys regularly.`,
Subcommands: []cli.Command{ Subcommands: []*cli.Command{
{ {
Name: "list", Name: "list",
Usage: "Print summary of existing accounts", Usage: "Print summary of existing accounts",
Action: utils.MigrateFlags(accountList), Action: accountList,
Flags: []cli.Flag{ Flags: []cli.Flag{
utils.DataDirFlag, utils.DataDirFlag,
utils.KeyStoreDirFlag, utils.KeyStoreDirFlag,
@ -103,7 +100,7 @@ Print a short summary of all accounts`,
{ {
Name: "new", Name: "new",
Usage: "Create a new account", Usage: "Create a new account",
Action: utils.MigrateFlags(accountCreate), Action: accountCreate,
Flags: []cli.Flag{ Flags: []cli.Flag{
utils.DataDirFlag, utils.DataDirFlag,
utils.KeyStoreDirFlag, utils.KeyStoreDirFlag,
@ -128,7 +125,7 @@ password to file or expose in any other way.
{ {
Name: "update", Name: "update",
Usage: "Update an existing account", Usage: "Update an existing account",
Action: utils.MigrateFlags(accountUpdate), Action: accountUpdate,
ArgsUsage: "<address>", ArgsUsage: "<address>",
Flags: []cli.Flag{ Flags: []cli.Flag{
utils.DataDirFlag, utils.DataDirFlag,
@ -157,7 +154,7 @@ changing your password is only possible interactively.
{ {
Name: "import", Name: "import",
Usage: "Import a private key into a new account", Usage: "Import a private key into a new account",
Action: utils.MigrateFlags(accountImport), Action: accountImport,
Flags: []cli.Flag{ Flags: []cli.Flag{
utils.DataDirFlag, utils.DataDirFlag,
utils.KeyStoreDirFlag, utils.KeyStoreDirFlag,
@ -239,14 +236,15 @@ func ambiguousAddrRecovery(ks *keystore.KeyStore, err *keystore.AmbiguousAddrErr
} }
fmt.Println("Testing your password against all of them...") fmt.Println("Testing your password against all of them...")
var match *accounts.Account var match *accounts.Account
for _, a := range err.Matches { for i, a := range err.Matches {
if err := ks.Unlock(a, auth); err == nil { if e := ks.Unlock(a, auth); e == nil {
match = &a match = &err.Matches[i]
break break
} }
} }
if match == nil { if match == nil {
utils.Fatalf("None of the listed files could be unlocked.") utils.Fatalf("None of the listed files could be unlocked.")
return accounts.Account{}
} }
fmt.Printf("Your password unlocked %s\n", match.URL) fmt.Printf("Your password unlocked %s\n", match.URL)
fmt.Println("In order to avoid this warning, you need to remove the following duplicate key files:") fmt.Println("In order to avoid this warning, you need to remove the following duplicate key files:")
@ -262,7 +260,7 @@ func ambiguousAddrRecovery(ks *keystore.KeyStore, err *keystore.AmbiguousAddrErr
func accountCreate(ctx *cli.Context) error { func accountCreate(ctx *cli.Context) error {
cfg := gethConfig{Node: defaultNodeConfig()} cfg := gethConfig{Node: defaultNodeConfig()}
// Load config file. // Load config file.
if file := ctx.GlobalString(configFileFlag.Name); file != "" { if file := ctx.String(configFileFlag.Name); file != "" {
if err := loadConfig(file, &cfg); err != nil { if err := loadConfig(file, &cfg); err != nil {
utils.Fatalf("%v", err) utils.Fatalf("%v", err)
} }
@ -299,13 +297,13 @@ func accountCreate(ctx *cli.Context) error {
// accountUpdate transitions an account from a previous format to the current // accountUpdate transitions an account from a previous format to the current
// one, also providing the possibility to change the pass-phrase. // one, also providing the possibility to change the pass-phrase.
func accountUpdate(ctx *cli.Context) error { func accountUpdate(ctx *cli.Context) error {
if len(ctx.Args()) == 0 { if ctx.Args().Len() == 0 {
utils.Fatalf("No accounts specified to update") utils.Fatalf("No accounts specified to update")
} }
stack, _ := makeConfigNode(ctx) stack, _ := makeConfigNode(ctx)
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
for _, addr := range ctx.Args() { for _, addr := range ctx.Args().Slice() {
account, oldPassword := unlockAccount(ks, addr, 0, nil) account, oldPassword := unlockAccount(ks, addr, 0, nil)
newPassword := utils.GetPassPhraseWithList("Please give a new password. Do not forget this password.", true, 0, nil) newPassword := utils.GetPassPhraseWithList("Please give a new password. Do not forget this password.", true, 0, nil)
if err := ks.Update(account, oldPassword, newPassword); err != nil { if err := ks.Update(account, oldPassword, newPassword); err != nil {
@ -316,10 +314,10 @@ func accountUpdate(ctx *cli.Context) error {
} }
func importWallet(ctx *cli.Context) error { func importWallet(ctx *cli.Context) error {
keyfile := ctx.Args().First() if ctx.Args().Len() != 1 {
if len(keyfile) == 0 { utils.Fatalf("keyfile must be given as the only argument")
utils.Fatalf("keyfile must be given as argument")
} }
keyfile := ctx.Args().First()
keyJSON, err := os.ReadFile(keyfile) keyJSON, err := os.ReadFile(keyfile)
if err != nil { if err != nil {
utils.Fatalf("Could not read wallet file: %v", err) utils.Fatalf("Could not read wallet file: %v", err)
@ -338,10 +336,10 @@ func importWallet(ctx *cli.Context) error {
} }
func accountImport(ctx *cli.Context) error { func accountImport(ctx *cli.Context) error {
keyfile := ctx.Args().First() if ctx.Args().Len() != 1 {
if len(keyfile) == 0 { utils.Fatalf("keyfile must be given as the only argument")
utils.Fatalf("keyfile must be given as argument")
} }
keyfile := ctx.Args().First()
key, err := crypto.LoadECDSA(keyfile) key, err := crypto.LoadECDSA(keyfile)
if err != nil { if err != nil {
utils.Fatalf("Failed to load the private key: %v", err) utils.Fatalf("Failed to load the private key: %v", err)
@ -49,20 +49,27 @@ func TestAccountListEmpty(t *testing.T) {
func TestAccountList(t *testing.T) { func TestAccountList(t *testing.T) {
datadir := tmpDatadirWithKeystore(t) datadir := tmpDatadirWithKeystore(t)
geth := runGeth(t, "account", "list", "--datadir", datadir) var want = `
defer geth.ExpectExit()
if runtime.GOOS == "windows" {
geth.Expect(`
Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}\keystore\UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
Account #1: {f466859ead1932d743d622cb74fc058882e8648a} keystore://{{.Datadir}}\keystore\aaa
Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}\keystore\zzz
`)
} else {
geth.Expect(`
Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8 Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
Account #1: {f466859ead1932d743d622cb74fc058882e8648a} keystore://{{.Datadir}}/keystore/aaa Account #1: {f466859ead1932d743d622cb74fc058882e8648a} keystore://{{.Datadir}}/keystore/aaa
Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}/keystore/zzz Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}/keystore/zzz
`) `
if runtime.GOOS == "windows" {
want = `
Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}\keystore\UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
Account #1: {f466859ead1932d743d622cb74fc058882e8648a} keystore://{{.Datadir}}\keystore\aaa
Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}\keystore\zzz
`
}
{
geth := runGeth(t, "account", "list", "--datadir", datadir)
geth.Expect(want)
geth.ExpectExit()
}
{
geth := runGeth(t, "--datadir", datadir, "account", "list")
geth.Expect(want)
geth.ExpectExit()
} }
} }
@ -110,6 +117,20 @@ func TestAccountImport(t *testing.T) {
} }
} }
func TestAccountHelp(t *testing.T) {
geth := runGeth(t, "account", "-h")
geth.WaitExit()
if have, want := geth.ExitStatus(), 0; have != want {
t.Errorf("exit error, have %d want %d", have, want)
}
geth = runGeth(t, "account", "import", "-h")
geth.WaitExit()
if have, want := geth.ExitStatus(), 0; have != want {
t.Errorf("exit error, have %d want %d", have, want)
}
}
func importAccountWithExpect(t *testing.T, key string, expected string) { func importAccountWithExpect(t *testing.T, key string, expected string) {
dir := t.TempDir() dir := t.TempDir()
keyfile := filepath.Join(dir, "key.prv") keyfile := filepath.Join(dir, "key.prv")
@ -120,7 +141,7 @@ func importAccountWithExpect(t *testing.T, key string, expected string) {
if err := os.WriteFile(passwordFile, []byte("foobar"), 0600); err != nil { if err := os.WriteFile(passwordFile, []byte("foobar"), 0600); err != nil {
t.Error(err) t.Error(err)
} }
geth := runGeth(t, "--lightkdf", "account", "import", keyfile, "-password", passwordFile) geth := runGeth(t, "--lightkdf", "account", "import", "-password", passwordFile, keyfile)
defer geth.ExpectExit() defer geth.ExpectExit()
geth.Expect(expected) geth.Expect(expected)
} }
@ -180,11 +201,12 @@ Fatal: could not decrypt key with given password
func TestUnlockFlag(t *testing.T) { func TestUnlockFlag(t *testing.T) {
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "js", "testdata/empty.js") "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "console", "--exec", "loadScript('testdata/empty.js')")
geth.Expect(` geth.Expect(`
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
!! Unsupported terminal, password will be echoed. !! Unsupported terminal, password will be echoed.
Password: {{.InputLine "foobar"}} Password: {{.InputLine "foobar"}}
undefined
`) `)
geth.ExpectExit() geth.ExpectExit()
@ -201,7 +223,7 @@ Password: {{.InputLine "foobar"}}
func TestUnlockFlagWrongPassword(t *testing.T) { func TestUnlockFlagWrongPassword(t *testing.T) {
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "js", "testdata/empty.js") "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "console", "--exec", "loadScript('testdata/empty.js')")
defer geth.ExpectExit() defer geth.ExpectExit()
geth.Expect(` geth.Expect(`
@ -219,7 +241,7 @@ Fatal: Failed to unlock account f466859ead1932d743d622cb74fc058882e8648a (could
// https://github.com/ethereum/go-ethereum/issues/1785 // https://github.com/ethereum/go-ethereum/issues/1785
func TestUnlockFlagMultiIndex(t *testing.T) { func TestUnlockFlagMultiIndex(t *testing.T) {
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--unlock", "0,2", "js", "testdata/empty.js") "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--unlock", "0,2", "console", "--exec", "loadScript('testdata/empty.js')")
geth.Expect(` geth.Expect(`
Unlocking account 0 | Attempt 1/3 Unlocking account 0 | Attempt 1/3
@ -227,6 +249,7 @@ Unlocking account 0 | Attempt 1/3
Password: {{.InputLine "foobar"}} Password: {{.InputLine "foobar"}}
Unlocking account 2 | Attempt 1/3 Unlocking account 2 | Attempt 1/3
Password: {{.InputLine "foobar"}} Password: {{.InputLine "foobar"}}
undefined
`) `)
geth.ExpectExit() geth.ExpectExit()
@ -244,8 +267,11 @@ Password: {{.InputLine "foobar"}}
func TestUnlockFlagPasswordFile(t *testing.T) { func TestUnlockFlagPasswordFile(t *testing.T) {
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password", "testdata/passwords.txt", "--unlock", "0,2", "js", "testdata/empty.js") "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password", "testdata/passwords.txt", "--unlock", "0,2", "console", "--exec", "loadScript('testdata/empty.js')")
geth.Expect(`
undefined
`)
geth.ExpectExit() geth.ExpectExit()
wantMessages := []string{ wantMessages := []string{
@ -275,7 +301,7 @@ func TestUnlockFlagAmbiguous(t *testing.T) {
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore", "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore",
store, "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", store, "--unlock", "f466859ead1932d743d622cb74fc058882e8648a",
"js", "testdata/empty.js") "console", "--exec", "loadScript('testdata/empty.js')")
defer geth.ExpectExit() defer geth.ExpectExit()
// Helper for the expect template, returns absolute keystore path. // Helper for the expect template, returns absolute keystore path.
@ -294,6 +320,7 @@ Testing your password against all of them...
Your password unlocked keystore://{{keypath "1"}} Your password unlocked keystore://{{keypath "1"}}
In order to avoid this warning, you need to remove the following duplicate key files: In order to avoid this warning, you need to remove the following duplicate key files:
keystore://{{keypath "2"}} keystore://{{keypath "2"}}
undefined
`) `)
geth.ExpectExit() geth.ExpectExit()
@ -35,20 +35,20 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
initCommand = cli.Command{ initCommand = &cli.Command{
Action: utils.MigrateFlags(initGenesis), Action: initGenesis,
Name: "init", Name: "init",
Usage: "Bootstrap and initialize a new genesis block", Usage: "Bootstrap and initialize a new genesis block",
ArgsUsage: "<genesisPath>", ArgsUsage: "<genesisPath>",
Flags: utils.DatabasePathFlags, Flags: utils.DatabasePathFlags,
Category: "BLOCKCHAIN COMMANDS",
Description: ` Description: `
The init command initializes a new genesis block and definition for the network. The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be This is a destructive action and changes the network in which you will be
@ -56,22 +56,21 @@ participating.
It expects the genesis file as argument.`, It expects the genesis file as argument.`,
} }
dumpGenesisCommand = cli.Command{ dumpGenesisCommand = &cli.Command{
Action: utils.MigrateFlags(dumpGenesis), Action: dumpGenesis,
Name: "dumpgenesis", Name: "dumpgenesis",
Usage: "Dumps genesis block JSON configuration to stdout", Usage: "Dumps genesis block JSON configuration to stdout",
ArgsUsage: "", ArgsUsage: "",
Flags: utils.NetworkFlags, Flags: utils.NetworkFlags,
Category: "BLOCKCHAIN COMMANDS",
Description: ` Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`, The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
} }
importCommand = cli.Command{ importCommand = &cli.Command{
Action: utils.MigrateFlags(importChain), Action: importChain,
Name: "import", Name: "import",
Usage: "Import a blockchain file", Usage: "Import a blockchain file",
ArgsUsage: "<filename> (<filename 2> ... <filename N>) ", ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
Flags: append([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.CacheFlag, utils.CacheFlag,
utils.SyncModeFlag, utils.SyncModeFlag,
utils.GCModeFlag, utils.GCModeFlag,
@ -93,8 +92,7 @@ The dumpgenesis command dumps the genesis block configuration in JSON format to
utils.MetricsInfluxDBBucketFlag, utils.MetricsInfluxDBBucketFlag,
utils.MetricsInfluxDBOrganizationFlag, utils.MetricsInfluxDBOrganizationFlag,
utils.TxLookupLimitFlag, utils.TxLookupLimitFlag,
}, utils.DatabasePathFlags...), }, utils.DatabasePathFlags),
Category: "BLOCKCHAIN COMMANDS",
Description: ` Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used. with several RLP-encoded blocks, or several files can be used.
@ -102,16 +100,15 @@ with several RLP-encoded blocks, or several files can be used.
If only one file is used, import error will result in failure. If several files are used, If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`, processing will proceed even if an individual RLP-file import failure occurs.`,
} }
exportCommand = cli.Command{ exportCommand = &cli.Command{
Action: utils.MigrateFlags(exportChain), Action: exportChain,
Name: "export", Name: "export",
Usage: "Export blockchain into file", Usage: "Export blockchain into file",
ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]", ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
Flags: append([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.CacheFlag, utils.CacheFlag,
utils.SyncModeFlag, utils.SyncModeFlag,
}, utils.DatabasePathFlags...), }, utils.DatabasePathFlags),
Category: "BLOCKCHAIN COMMANDS",
Description: ` Description: `
Requires a first argument of the file to write to. Requires a first argument of the file to write to.
Optional second and third arguments control the first and Optional second and third arguments control the first and
@ -119,42 +116,40 @@ last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will if already existing. If the file ends with .gz, the output will
be gzipped.`, be gzipped.`,
} }
importPreimagesCommand = cli.Command{ importPreimagesCommand = &cli.Command{
Action: utils.MigrateFlags(importPreimages), Action: importPreimages,
Name: "import-preimages", Name: "import-preimages",
Usage: "Import the preimage database from an RLP stream", Usage: "Import the preimage database from an RLP stream",
ArgsUsage: "<datafile>", ArgsUsage: "<datafile>",
Flags: append([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.CacheFlag, utils.CacheFlag,
utils.SyncModeFlag, utils.SyncModeFlag,
}, utils.DatabasePathFlags...), }, utils.DatabasePathFlags),
Category: "BLOCKCHAIN COMMANDS",
Description: ` Description: `
The import-preimages command imports hash preimages from an RLP encoded stream. The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead. It's deprecated, please use "geth db import" instead.
`, `,
} }
exportPreimagesCommand = cli.Command{ exportPreimagesCommand = &cli.Command{
Action: utils.MigrateFlags(exportPreimages), Action: exportPreimages,
Name: "export-preimages", Name: "export-preimages",
Usage: "Export the preimage database into an RLP stream", Usage: "Export the preimage database into an RLP stream",
ArgsUsage: "<dumpfile>", ArgsUsage: "<dumpfile>",
Flags: append([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.CacheFlag, utils.CacheFlag,
utils.SyncModeFlag, utils.SyncModeFlag,
}, utils.DatabasePathFlags...), }, utils.DatabasePathFlags),
Category: "BLOCKCHAIN COMMANDS",
Description: ` Description: `
The export-preimages command exports hash preimages to an RLP encoded stream. The export-preimages command exports hash preimages to an RLP encoded stream.
It's deprecated, please use "geth db export" instead. It's deprecated, please use "geth db export" instead.
`, `,
} }
dumpCommand = cli.Command{ dumpCommand = &cli.Command{
Action: utils.MigrateFlags(dump), Action: dump,
Name: "dump", Name: "dump",
Usage: "Dump a specific block from storage", Usage: "Dump a specific block from storage",
ArgsUsage: "[? <blockHash> | <blockNum>]", ArgsUsage: "[? <blockHash> | <blockNum>]",
Flags: append([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.CacheFlag, utils.CacheFlag,
utils.IterativeOutputFlag, utils.IterativeOutputFlag,
utils.ExcludeCodeFlag, utils.ExcludeCodeFlag,
@ -162,8 +157,7 @@ It's deprecated, please use "geth db export" instead.
utils.IncludeIncompletesFlag, utils.IncludeIncompletesFlag,
utils.StartKeyFlag, utils.StartKeyFlag,
utils.DumpLimitFlag, utils.DumpLimitFlag,
}, utils.DatabasePathFlags...), }, utils.DatabasePathFlags),
Category: "BLOCKCHAIN COMMANDS",
Description: ` Description: `
This command dumps out the state for a given block (or latest, if none provided). This command dumps out the state for a given block (or latest, if none provided).
`, `,
@ -173,10 +167,12 @@ This command dumps out the state for a given block (or latest, if none provided)
// initGenesis will initialise the given JSON format genesis file and writes it as // initGenesis will initialise the given JSON format genesis file and writes it as
// the zero'd block (i.e. genesis) or will fail hard if it can't succeed. // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
func initGenesis(ctx *cli.Context) error { func initGenesis(ctx *cli.Context) error {
// Make sure we have a valid genesis JSON if ctx.Args().Len() != 1 {
utils.Fatalf("need genesis.json file as the only argument")
}
genesisPath := ctx.Args().First() genesisPath := ctx.Args().First()
if len(genesisPath) == 0 { if len(genesisPath) == 0 {
utils.Fatalf("Must supply path to genesis JSON file") utils.Fatalf("invalid path to genesis file")
} }
file, err := os.Open(genesisPath) file, err := os.Open(genesisPath)
if err != nil { if err != nil {
@ -192,7 +188,7 @@ func initGenesis(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx) stack, _ := makeConfigNode(ctx)
defer stack.Close() defer stack.Close()
for _, name := range []string{"chaindata", "lightchaindata"} { for _, name := range []string{"chaindata", "lightchaindata"} {
chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.GlobalString(utils.AncientFlag.Name), "", false) chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false)
if err != nil { if err != nil {
utils.Fatalf("Failed to open database: %v", err) utils.Fatalf("Failed to open database: %v", err)
} }
@ -219,7 +215,7 @@ func dumpGenesis(ctx *cli.Context) error {
} }
func importChain(ctx *cli.Context) error { func importChain(ctx *cli.Context) error {
if len(ctx.Args()) < 1 { if ctx.Args().Len() < 1 {
utils.Fatalf("This command requires an argument.") utils.Fatalf("This command requires an argument.")
} }
// Start metrics export if enabled // Start metrics export if enabled
@ -253,13 +249,13 @@ func importChain(ctx *cli.Context) error {
var importErr error var importErr error
if len(ctx.Args()) == 1 { if ctx.Args().Len() == 1 {
if err := utils.ImportChain(chain, ctx.Args().First()); err != nil { if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
importErr = err importErr = err
log.Error("Import error", "err", err) log.Error("Import error", "err", err)
} }
} else { } else {
for _, arg := range ctx.Args() { for _, arg := range ctx.Args().Slice() {
if err := utils.ImportChain(chain, arg); err != nil { if err := utils.ImportChain(chain, arg); err != nil {
importErr = err importErr = err
log.Error("Import error", "file", arg, "err", err) log.Error("Import error", "file", arg, "err", err)
@ -281,7 +277,7 @@ func importChain(ctx *cli.Context) error {
fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000) fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000)
fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs)) fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs))
if ctx.GlobalBool(utils.NoCompactionFlag.Name) { if ctx.Bool(utils.NoCompactionFlag.Name) {
return nil return nil
} }
@ -298,7 +294,7 @@ func importChain(ctx *cli.Context) error {
} }
func exportChain(ctx *cli.Context) error { func exportChain(ctx *cli.Context) error {
if len(ctx.Args()) < 1 { if ctx.Args().Len() < 1 {
utils.Fatalf("This command requires an argument.") utils.Fatalf("This command requires an argument.")
} }
@ -310,7 +306,7 @@ func exportChain(ctx *cli.Context) error {
var err error var err error
fp := ctx.Args().First() fp := ctx.Args().First()
if len(ctx.Args()) < 3 { if ctx.Args().Len() < 3 {
err = utils.ExportChain(chain, fp) err = utils.ExportChain(chain, fp)
} else { } else {
// This can be improved to allow for numbers larger than 9223372036854775807 // This can be improved to allow for numbers larger than 9223372036854775807
@ -337,7 +333,7 @@ func exportChain(ctx *cli.Context) error {
// importPreimages imports preimage data from the specified file. // importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error { func importPreimages(ctx *cli.Context) error {
if len(ctx.Args()) < 1 { if ctx.Args().Len() < 1 {
utils.Fatalf("This command requires an argument.") utils.Fatalf("This command requires an argument.")
} }
@ -356,7 +352,7 @@ func importPreimages(ctx *cli.Context) error {
// exportPreimages dumps the preimage data to specified json file in streaming way. // exportPreimages dumps the preimage data to specified json file in streaming way.
func exportPreimages(ctx *cli.Context) error { func exportPreimages(ctx *cli.Context) error {
if len(ctx.Args()) < 1 { if ctx.Args().Len() < 1 {
utils.Fatalf("This command requires an argument.") utils.Fatalf("This command requires an argument.")
} }
stack, _ := makeConfigNode(ctx) stack, _ := makeConfigNode(ctx)
@ -25,7 +25,7 @@ import (
"reflect" "reflect"
"unicode" "unicode"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
"github.com/ethereum/go-ethereum/accounts/external" "github.com/ethereum/go-ethereum/accounts/external"
"github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/accounts/keystore"
@ -35,6 +35,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
@ -43,19 +44,19 @@ import (
) )
var ( var (
dumpConfigCommand = cli.Command{ dumpConfigCommand = &cli.Command{
Action: utils.MigrateFlags(dumpConfig), Action: dumpConfig,
Name: "dumpconfig", Name: "dumpconfig",
Usage: "Show configuration values", Usage: "Show configuration values",
ArgsUsage: "", ArgsUsage: "",
Flags: utils.GroupFlags(nodeFlags, rpcFlags), Flags: flags.Merge(nodeFlags, rpcFlags),
Category: "MISCELLANEOUS COMMANDS",
Description: `The dumpconfig command shows configuration values.`, Description: `The dumpconfig command shows configuration values.`,
} }
configFileFlag = cli.StringFlag{ configFileFlag = &cli.StringFlag{
Name: "config", Name: "config",
Usage: "TOML configuration file", Usage: "TOML configuration file",
Category: flags.EthCategory,
} }
) )
@ -127,7 +128,7 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
} }
// Load config file. // Load config file.
if file := ctx.GlobalString(configFileFlag.Name); file != "" { if file := ctx.String(configFileFlag.Name); file != "" {
if err := loadConfig(file, &cfg); err != nil { if err := loadConfig(file, &cfg); err != nil {
utils.Fatalf("%v", err) utils.Fatalf("%v", err)
} }
@ -145,8 +146,8 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
} }
utils.SetEthConfig(ctx, stack, &cfg.Eth) utils.SetEthConfig(ctx, stack, &cfg.Eth)
if ctx.GlobalIsSet(utils.EthStatsURLFlag.Name) { if ctx.IsSet(utils.EthStatsURLFlag.Name) {
cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name) cfg.Ethstats.URL = ctx.String(utils.EthStatsURLFlag.Name)
} }
applyMetricConfig(ctx, &cfg) applyMetricConfig(ctx, &cfg)
@ -156,15 +157,15 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
// makeFullNode loads geth configuration and creates the Ethereum backend. // makeFullNode loads geth configuration and creates the Ethereum backend.
func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
stack, cfg := makeConfigNode(ctx) stack, cfg := makeConfigNode(ctx)
if ctx.GlobalIsSet(utils.OverrideArrowGlacierFlag.Name) { if ctx.IsSet(utils.OverrideGrayGlacierFlag.Name) {
cfg.Eth.OverrideArrowGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideArrowGlacierFlag.Name)) cfg.Eth.OverrideGrayGlacier = new(big.Int).SetUint64(ctx.Uint64(utils.OverrideGrayGlacierFlag.Name))
} }
if ctx.GlobalIsSet(utils.OverrideTerminalTotalDifficulty.Name) { if ctx.IsSet(utils.OverrideTerminalTotalDifficulty.Name) {
cfg.Eth.OverrideTerminalTotalDifficulty = utils.GlobalBig(ctx, utils.OverrideTerminalTotalDifficulty.Name) cfg.Eth.OverrideTerminalTotalDifficulty = flags.GlobalBig(ctx, utils.OverrideTerminalTotalDifficulty.Name)
} }
backend, eth := utils.RegisterEthService(stack, &cfg.Eth) backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
// Warn users to migrate if they have a legacy freezer format. // Warn users to migrate if they have a legacy freezer format.
if eth != nil { if eth != nil && !ctx.IsSet(utils.IgnoreLegacyReceiptsFlag.Name) {
firstIdx := uint64(0) firstIdx := uint64(0)
// Hack to speed up check for mainnet because we know // Hack to speed up check for mainnet because we know
// the first non-empty block. // the first non-empty block.
@ -176,12 +177,13 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
if err != nil { if err != nil {
log.Error("Failed to check db for legacy receipts", "err", err) log.Error("Failed to check db for legacy receipts", "err", err)
} else if isLegacy { } else if isLegacy {
log.Warn("Database has receipts with a legacy format. Please run `geth db freezer-migrate`.") stack.Close()
utils.Fatalf("Database has receipts with a legacy format. Please run `geth db freezer-migrate`.")
} }
} }
// Configure GraphQL if requested // Configure GraphQL if requested
if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) { if ctx.IsSet(utils.GraphQLEnabledFlag.Name) {
utils.RegisterGraphQLService(stack, backend, cfg.Node) utils.RegisterGraphQLService(stack, backend, cfg.Node)
} }
// Add the Ethereum Stats daemon if requested. // Add the Ethereum Stats daemon if requested.
@ -221,47 +223,47 @@ func dumpConfig(ctx *cli.Context) error {
} }
func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) { func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) {
if ctx.GlobalIsSet(utils.MetricsEnabledFlag.Name) { if ctx.IsSet(utils.MetricsEnabledFlag.Name) {
cfg.Metrics.Enabled = ctx.GlobalBool(utils.MetricsEnabledFlag.Name) cfg.Metrics.Enabled = ctx.Bool(utils.MetricsEnabledFlag.Name)
} }
if ctx.GlobalIsSet(utils.MetricsEnabledExpensiveFlag.Name) { if ctx.IsSet(utils.MetricsEnabledExpensiveFlag.Name) {
cfg.Metrics.EnabledExpensive = ctx.GlobalBool(utils.MetricsEnabledExpensiveFlag.Name) cfg.Metrics.EnabledExpensive = ctx.Bool(utils.MetricsEnabledExpensiveFlag.Name)
} }
if ctx.GlobalIsSet(utils.MetricsHTTPFlag.Name) { if ctx.IsSet(utils.MetricsHTTPFlag.Name) {
cfg.Metrics.HTTP = ctx.GlobalString(utils.MetricsHTTPFlag.Name) cfg.Metrics.HTTP = ctx.String(utils.MetricsHTTPFlag.Name)
} }
if ctx.GlobalIsSet(utils.MetricsPortFlag.Name) { if ctx.IsSet(utils.MetricsPortFlag.Name) {
cfg.Metrics.Port = ctx.GlobalInt(utils.MetricsPortFlag.Name) cfg.Metrics.Port = ctx.Int(utils.MetricsPortFlag.Name)
} }
if ctx.GlobalIsSet(utils.MetricsEnableInfluxDBFlag.Name) { if ctx.IsSet(utils.MetricsEnableInfluxDBFlag.Name) {
cfg.Metrics.EnableInfluxDB = ctx.GlobalBool(utils.MetricsEnableInfluxDBFlag.Name) cfg.Metrics.EnableInfluxDB = ctx.Bool(utils.MetricsEnableInfluxDBFlag.Name)
} }
if ctx.GlobalIsSet(utils.MetricsInfluxDBEndpointFlag.Name) { if ctx.IsSet(utils.MetricsInfluxDBEndpointFlag.Name) {
cfg.Metrics.InfluxDBEndpoint = ctx.GlobalString(utils.MetricsInfluxDBEndpointFlag.Name) cfg.Metrics.InfluxDBEndpoint = ctx.String(utils.MetricsInfluxDBEndpointFlag.Name)
} }
if ctx.GlobalIsSet(utils.MetricsInfluxDBDatabaseFlag.Name) { if ctx.IsSet(utils.MetricsInfluxDBDatabaseFlag.Name) {
cfg.Metrics.InfluxDBDatabase = ctx.GlobalString(utils.MetricsInfluxDBDatabaseFlag.Name) cfg.Metrics.InfluxDBDatabase = ctx.String(utils.MetricsInfluxDBDatabaseFlag.Name)
} }
if ctx.GlobalIsSet(utils.MetricsInfluxDBUsernameFlag.Name) { if ctx.IsSet(utils.MetricsInfluxDBUsernameFlag.Name) {
cfg.Metrics.InfluxDBUsername = ctx.GlobalString(utils.MetricsInfluxDBUsernameFlag.Name) cfg.Metrics.InfluxDBUsername = ctx.String(utils.MetricsInfluxDBUsernameFlag.Name)
} }
if ctx.GlobalIsSet(utils.MetricsInfluxDBPasswordFlag.Name) { if ctx.IsSet(utils.MetricsInfluxDBPasswordFlag.Name) {
cfg.Metrics.InfluxDBPassword = ctx.GlobalString(utils.MetricsInfluxDBPasswordFlag.Name) cfg.Metrics.InfluxDBPassword = ctx.String(utils.MetricsInfluxDBPasswordFlag.Name)
} }
if ctx.GlobalIsSet(utils.MetricsInfluxDBTagsFlag.Name) { if ctx.IsSet(utils.MetricsInfluxDBTagsFlag.Name) {
cfg.Metrics.InfluxDBTags = ctx.GlobalString(utils.MetricsInfluxDBTagsFlag.Name) cfg.Metrics.InfluxDBTags = ctx.String(utils.MetricsInfluxDBTagsFlag.Name)
} }
if ctx.GlobalIsSet(utils.MetricsEnableInfluxDBV2Flag.Name) { if ctx.IsSet(utils.MetricsEnableInfluxDBV2Flag.Name) {
cfg.Metrics.EnableInfluxDBV2 = ctx.GlobalBool(utils.MetricsEnableInfluxDBV2Flag.Name) cfg.Metrics.EnableInfluxDBV2 = ctx.Bool(utils.MetricsEnableInfluxDBV2Flag.Name)
} }
if ctx.GlobalIsSet(utils.MetricsInfluxDBTokenFlag.Name) { if ctx.IsSet(utils.MetricsInfluxDBTokenFlag.Name) {
cfg.Metrics.InfluxDBToken = ctx.GlobalString(utils.MetricsInfluxDBTokenFlag.Name) cfg.Metrics.InfluxDBToken = ctx.String(utils.MetricsInfluxDBTokenFlag.Name)
} }
if ctx.GlobalIsSet(utils.MetricsInfluxDBBucketFlag.Name) { if ctx.IsSet(utils.MetricsInfluxDBBucketFlag.Name) {
cfg.Metrics.InfluxDBBucket = ctx.GlobalString(utils.MetricsInfluxDBBucketFlag.Name) cfg.Metrics.InfluxDBBucket = ctx.String(utils.MetricsInfluxDBBucketFlag.Name)
} }
if ctx.GlobalIsSet(utils.MetricsInfluxDBOrganizationFlag.Name) { if ctx.IsSet(utils.MetricsInfluxDBOrganizationFlag.Name) {
cfg.Metrics.InfluxDBOrganization = ctx.GlobalString(utils.MetricsInfluxDBOrganizationFlag.Name) cfg.Metrics.InfluxDBOrganization = ctx.String(utils.MetricsInfluxDBOrganizationFlag.Name)
} }
} }
@ -18,39 +18,36 @@ package main
import ( import (
"fmt" "fmt"
"path/filepath"
"strings" "strings"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/console"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
consoleFlags = []cli.Flag{utils.JSpathFlag, utils.ExecFlag, utils.PreloadJSFlag} consoleFlags = []cli.Flag{utils.JSpathFlag, utils.ExecFlag, utils.PreloadJSFlag}
consoleCommand = cli.Command{ consoleCommand = &cli.Command{
Action: utils.MigrateFlags(localConsole), Action: localConsole,
Name: "console", Name: "console",
Usage: "Start an interactive JavaScript environment", Usage: "Start an interactive JavaScript environment",
Flags: utils.GroupFlags(nodeFlags, rpcFlags, consoleFlags), Flags: flags.Merge(nodeFlags, rpcFlags, consoleFlags),
Category: "CONSOLE COMMANDS",
Description: ` Description: `
The Geth console is an interactive shell for the JavaScript runtime environment The Geth console is an interactive shell for the JavaScript runtime environment
which exposes a node admin interface as well as the Ðapp JavaScript API. which exposes a node admin interface as well as the Ðapp JavaScript API.
See https://geth.ethereum.org/docs/interface/javascript-console.`, See https://geth.ethereum.org/docs/interface/javascript-console.`,
} }
attachCommand = cli.Command{ attachCommand = &cli.Command{
Action: utils.MigrateFlags(remoteConsole), Action: remoteConsole,
Name: "attach", Name: "attach",
Usage: "Start an interactive JavaScript environment (connect to node)", Usage: "Start an interactive JavaScript environment (connect to node)",
ArgsUsage: "[endpoint]", ArgsUsage: "[endpoint]",
Flags: utils.GroupFlags([]cli.Flag{utils.DataDirFlag}, consoleFlags), Flags: flags.Merge([]cli.Flag{utils.DataDirFlag}, consoleFlags),
Category: "CONSOLE COMMANDS",
Description: ` Description: `
The Geth console is an interactive shell for the JavaScript runtime environment The Geth console is an interactive shell for the JavaScript runtime environment
which exposes a node admin interface as well as the Ðapp JavaScript API. which exposes a node admin interface as well as the Ðapp JavaScript API.
@ -58,13 +55,12 @@ See https://geth.ethereum.org/docs/interface/javascript-console.
This command allows opening a console on a running geth node.`, This command allows opening a console on a running geth node.`,
} }
javascriptCommand = cli.Command{ javascriptCommand = &cli.Command{
Action: utils.MigrateFlags(ephemeralConsole), Action: ephemeralConsole,
Name: "js", Name: "js",
Usage: "Execute the specified JavaScript files", Usage: "(DEPRECATED) Execute the specified JavaScript files",
ArgsUsage: "<jsfile> [jsfile...]", ArgsUsage: "<jsfile> [jsfile...]",
Flags: utils.GroupFlags(nodeFlags, consoleFlags), Flags: flags.Merge(nodeFlags, consoleFlags),
Category: "CONSOLE COMMANDS",
Description: ` Description: `
The JavaScript VM exposes a node admin interface as well as the Ðapp The JavaScript VM exposes a node admin interface as well as the Ðapp
JavaScript API. See https://geth.ethereum.org/docs/interface/javascript-console`, JavaScript API. See https://geth.ethereum.org/docs/interface/javascript-console`,
@ -87,7 +83,7 @@ func localConsole(ctx *cli.Context) error {
} }
config := console.Config{ config := console.Config{
DataDir: utils.MakeDataDir(ctx), DataDir: utils.MakeDataDir(ctx),
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name), DocRoot: ctx.String(utils.JSpathFlag.Name),
Client: client, Client: client,
Preload: utils.MakeConsolePreloads(ctx), Preload: utils.MakeConsolePreloads(ctx),
} }
@ -98,7 +94,7 @@ func localConsole(ctx *cli.Context) error {
defer console.Stop(false) defer console.Stop(false)
// If only a short execution was requested, evaluate and return. // If only a short execution was requested, evaluate and return.
if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" { if script := ctx.String(utils.ExecFlag.Name); script != "" {
console.Evaluate(script) console.Evaluate(script)
return nil return nil
} }
@ -119,33 +115,15 @@ func localConsole(ctx *cli.Context) error {
// remoteConsole will connect to a remote geth instance, attaching a JavaScript // remoteConsole will connect to a remote geth instance, attaching a JavaScript
// console to it. // console to it.
func remoteConsole(ctx *cli.Context) error { func remoteConsole(ctx *cli.Context) error {
if ctx.Args().Len() > 1 {
utils.Fatalf("invalid command-line: too many arguments")
}
endpoint := ctx.Args().First() endpoint := ctx.Args().First()
if endpoint == "" { if endpoint == "" {
path := node.DefaultDataDir() cfg := defaultNodeConfig()
if ctx.GlobalIsSet(utils.DataDirFlag.Name) { utils.SetDataDir(ctx, &cfg)
path = ctx.GlobalString(utils.DataDirFlag.Name) endpoint = cfg.IPCEndpoint()
}
if path != "" {
if ctx.GlobalBool(utils.RopstenFlag.Name) {
// Maintain compatibility with older Geth configurations storing the
// Ropsten database in `testnet` instead of `ropsten`.
legacyPath := filepath.Join(path, "testnet")
if common.FileExist(legacyPath) {
path = legacyPath
} else {
path = filepath.Join(path, "ropsten")
}
} else if ctx.GlobalBool(utils.RinkebyFlag.Name) {
path = filepath.Join(path, "rinkeby")
} else if ctx.GlobalBool(utils.GoerliFlag.Name) {
path = filepath.Join(path, "goerli")
} else if ctx.GlobalBool(utils.SepoliaFlag.Name) {
path = filepath.Join(path, "sepolia")
} else if ctx.GlobalBool(utils.KilnFlag.Name) {
path = filepath.Join(path, "kiln")
}
}
endpoint = fmt.Sprintf("%s/geth.ipc", path)
} }
client, err := dialRPC(endpoint) client, err := dialRPC(endpoint)
if err != nil { if err != nil {
@ -153,7 +131,7 @@ func remoteConsole(ctx *cli.Context) error {
} }
config := console.Config{ config := console.Config{
DataDir: utils.MakeDataDir(ctx), DataDir: utils.MakeDataDir(ctx),
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name), DocRoot: ctx.String(utils.JSpathFlag.Name),
Client: client, Client: client,
Preload: utils.MakeConsolePreloads(ctx), Preload: utils.MakeConsolePreloads(ctx),
} }
@ -163,7 +141,7 @@ func remoteConsole(ctx *cli.Context) error {
} }
defer console.Stop(false) defer console.Stop(false)
if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" { if script := ctx.String(utils.ExecFlag.Name); script != "" {
console.Evaluate(script) console.Evaluate(script)
return nil return nil
} }
@ -174,6 +152,19 @@ func remoteConsole(ctx *cli.Context) error {
return nil return nil
} }
// ephemeralConsole starts a new geth node, attaches an ephemeral JavaScript
// console to it, executes each of the files specified as arguments and tears
// everything down.
func ephemeralConsole(ctx *cli.Context) error {
var b strings.Builder
for _, file := range ctx.Args().Slice() {
b.Write([]byte(fmt.Sprintf("loadScript('%s');", file)))
}
utils.Fatalf(`The "js" command is deprecated. Please use the following instead:
geth --exec "%s" console`, b.String())
return nil
}
// dialRPC returns a RPC client which connects to the given endpoint. // dialRPC returns a RPC client which connects to the given endpoint.
// The check for empty endpoint implements the defaulting logic // The check for empty endpoint implements the defaulting logic
// for "geth attach" with no argument. // for "geth attach" with no argument.
@ -187,48 +178,3 @@ func dialRPC(endpoint string) (*rpc.Client, error) {
} }
return rpc.Dial(endpoint) return rpc.Dial(endpoint)
} }
// ephemeralConsole starts a new geth node, attaches an ephemeral JavaScript
// console to it, executes each of the files specified as arguments and tears
// everything down.
func ephemeralConsole(ctx *cli.Context) error {
// Create and start the node based on the CLI flags
stack, backend := makeFullNode(ctx)
startNode(ctx, stack, backend, false)
defer stack.Close()
// Attach to the newly started node and start the JavaScript console
client, err := stack.Attach()
if err != nil {
return fmt.Errorf("Failed to attach to the inproc geth: %v", err)
}
config := console.Config{
DataDir: utils.MakeDataDir(ctx),
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
Client: client,
Preload: utils.MakeConsolePreloads(ctx),
}
console, err := console.New(config)
if err != nil {
return fmt.Errorf("Failed to start the JavaScript console: %v", err)
}
defer console.Stop(false)
// Interrupt the JS interpreter when node is stopped.
go func() {
stack.Wait()
console.Stop(false)
}()
// Evaluate each of the specified JavaScript files.
for _, file := range ctx.Args() {
if err = console.Execute(file); err != nil {
return fmt.Errorf("Failed to execute %s: %v", file, err)
}
}
// The main script is now done, but keep running timers/callbacks.
console.Stop(true)
return nil
}
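Worth noting above: the js command no longer starts a node at all; it is reduced to a stub that tells the user to run the scripts through geth --exec "<script>" console. The stub assembles that string with b.Write([]byte(fmt.Sprintf(...))); a slightly more idiomatic way to produce the same output, sketched here with illustrative file names, is fmt.Fprintf on the strings.Builder:

package main

import (
	"fmt"
	"strings"
)

func main() {
	files := []string{"setup.js", "run.js"} // illustrative script names

	// Build the loadScript('...') chain that the deprecation message
	// suggests passing to: geth --exec "<script>" console
	var b strings.Builder
	for _, file := range files {
		fmt.Fprintf(&b, "loadScript('%s');", file)
	}
	fmt.Printf("geth --exec %q console\n", b.String())
}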

View File

@ -41,7 +41,7 @@ func runMinimalGeth(t *testing.T, args ...string) *testgeth {
// --ropsten to make the 'writing genesis to disk' faster (no accounts) // --ropsten to make the 'writing genesis to disk' faster (no accounts)
// --networkid=1337 to avoid cache bump // --networkid=1337 to avoid cache bump
// --syncmode=full to avoid allocating fast sync bloom // --syncmode=full to avoid allocating fast sync bloom
allArgs := []string{"--ropsten", "--networkid", "1337", "--syncmode=full", "--port", "0", allArgs := []string{"--ropsten", "--networkid", "1337", "--authrpc.port", "0", "--syncmode=full", "--port", "0",
"--nat", "none", "--nodiscover", "--maxpeers", "0", "--cache", "64", "--nat", "none", "--nodiscover", "--maxpeers", "0", "--cache", "64",
"--datadir.minfreedisk", "0"} "--datadir.minfreedisk", "0"}
return runGeth(t, append(allArgs, args...)...) return runGeth(t, append(allArgs, args...)...)

View File

@ -37,29 +37,28 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/olekukonko/tablewriter" "github.com/olekukonko/tablewriter"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
removedbCommand = cli.Command{ removedbCommand = &cli.Command{
Action: utils.MigrateFlags(removeDB), Action: removeDB,
Name: "removedb", Name: "removedb",
Usage: "Remove blockchain and state databases", Usage: "Remove blockchain and state databases",
ArgsUsage: "", ArgsUsage: "",
Flags: utils.DatabasePathFlags, Flags: utils.DatabasePathFlags,
Category: "DATABASE COMMANDS",
Description: ` Description: `
Remove blockchain and state databases`, Remove blockchain and state databases`,
} }
dbCommand = cli.Command{ dbCommand = &cli.Command{
Name: "db", Name: "db",
Usage: "Low level database operations", Usage: "Low level database operations",
ArgsUsage: "", ArgsUsage: "",
Category: "DATABASE COMMANDS", Subcommands: []*cli.Command{
Subcommands: []cli.Command{
dbInspectCmd, dbInspectCmd,
dbStatCmd, dbStatCmd,
dbCompactCmd, dbCompactCmd,
@ -75,39 +74,39 @@ Remove blockchain and state databases`,
dbCheckStateContentCmd, dbCheckStateContentCmd,
}, },
} }
dbInspectCmd = cli.Command{ dbInspectCmd = &cli.Command{
Action: utils.MigrateFlags(inspect), Action: inspect,
Name: "inspect", Name: "inspect",
ArgsUsage: "<prefix> <start>", ArgsUsage: "<prefix> <start>",
Flags: utils.GroupFlags([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag, utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags), }, utils.NetworkFlags, utils.DatabasePathFlags),
Usage: "Inspect the storage size for each type of data in the database", Usage: "Inspect the storage size for each type of data in the database",
Description: `This command iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`, Description: `This command iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
} }
dbCheckStateContentCmd = cli.Command{ dbCheckStateContentCmd = &cli.Command{
Action: utils.MigrateFlags(checkStateContent), Action: checkStateContent,
Name: "check-state-content", Name: "check-state-content",
ArgsUsage: "<start (optional)>", ArgsUsage: "<start (optional)>",
Flags: utils.GroupFlags(utils.NetworkFlags, utils.DatabasePathFlags), Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
Usage: "Verify that state data is cryptographically correct", Usage: "Verify that state data is cryptographically correct",
Description: `This command iterates the entire database for 32-byte keys, looking for rlp-encoded trie nodes. Description: `This command iterates the entire database for 32-byte keys, looking for rlp-encoded trie nodes.
For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates
data corruption.`, data corruption.`,
} }
dbStatCmd = cli.Command{ dbStatCmd = &cli.Command{
Action: utils.MigrateFlags(dbStats), Action: dbStats,
Name: "stats", Name: "stats",
Usage: "Print leveldb statistics", Usage: "Print leveldb statistics",
Flags: utils.GroupFlags([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag, utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags), }, utils.NetworkFlags, utils.DatabasePathFlags),
} }
dbCompactCmd = cli.Command{ dbCompactCmd = &cli.Command{
Action: utils.MigrateFlags(dbCompact), Action: dbCompact,
Name: "compact", Name: "compact",
Usage: "Compact leveldb database. WARNING: May take a very long time", Usage: "Compact leveldb database. WARNING: May take a very long time",
Flags: utils.GroupFlags([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag, utils.SyncModeFlag,
utils.CacheFlag, utils.CacheFlag,
utils.CacheDatabaseFlag, utils.CacheDatabaseFlag,
@ -116,93 +115,93 @@ a data corruption.`,
WARNING: This operation may take a very long time to finish, and may cause database WARNING: This operation may take a very long time to finish, and may cause database
corruption if it is aborted during execution!`, corruption if it is aborted during execution!`,
} }
dbGetCmd = cli.Command{ dbGetCmd = &cli.Command{
Action: utils.MigrateFlags(dbGet), Action: dbGet,
Name: "get", Name: "get",
Usage: "Show the value of a database key", Usage: "Show the value of a database key",
ArgsUsage: "<hex-encoded key>", ArgsUsage: "<hex-encoded key>",
Flags: utils.GroupFlags([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag, utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags), }, utils.NetworkFlags, utils.DatabasePathFlags),
Description: "This command looks up the specified database key from the database.", Description: "This command looks up the specified database key from the database.",
} }
dbDeleteCmd = cli.Command{ dbDeleteCmd = &cli.Command{
Action: utils.MigrateFlags(dbDelete), Action: dbDelete,
Name: "delete", Name: "delete",
Usage: "Delete a database key (WARNING: may corrupt your database)", Usage: "Delete a database key (WARNING: may corrupt your database)",
ArgsUsage: "<hex-encoded key>", ArgsUsage: "<hex-encoded key>",
Flags: utils.GroupFlags([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag, utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags), }, utils.NetworkFlags, utils.DatabasePathFlags),
Description: `This command deletes the specified database key from the database. Description: `This command deletes the specified database key from the database.
WARNING: This is a low-level operation which may cause database corruption!`, WARNING: This is a low-level operation which may cause database corruption!`,
} }
dbPutCmd = cli.Command{ dbPutCmd = &cli.Command{
Action: utils.MigrateFlags(dbPut), Action: dbPut,
Name: "put", Name: "put",
Usage: "Set the value of a database key (WARNING: may corrupt your database)", Usage: "Set the value of a database key (WARNING: may corrupt your database)",
ArgsUsage: "<hex-encoded key> <hex-encoded value>", ArgsUsage: "<hex-encoded key> <hex-encoded value>",
Flags: utils.GroupFlags([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag, utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags), }, utils.NetworkFlags, utils.DatabasePathFlags),
Description: `This command sets a given database key to the given value. Description: `This command sets a given database key to the given value.
WARNING: This is a low-level operation which may cause database corruption!`, WARNING: This is a low-level operation which may cause database corruption!`,
} }
dbGetSlotsCmd = cli.Command{ dbGetSlotsCmd = &cli.Command{
Action: utils.MigrateFlags(dbDumpTrie), Action: dbDumpTrie,
Name: "dumptrie", Name: "dumptrie",
Usage: "Show the storage key/values of a given storage trie", Usage: "Show the storage key/values of a given storage trie",
ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>", ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
Flags: utils.GroupFlags([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag, utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags), }, utils.NetworkFlags, utils.DatabasePathFlags),
Description: "This command looks up the specified database key from the database.", Description: "This command looks up the specified database key from the database.",
} }
dbDumpFreezerIndex = cli.Command{ dbDumpFreezerIndex = &cli.Command{
Action: utils.MigrateFlags(freezerInspect), Action: freezerInspect,
Name: "freezer-index", Name: "freezer-index",
Usage: "Dump out the index of a given freezer type", Usage: "Dump out the index of a given freezer type",
ArgsUsage: "<type> <start (int)> <end (int)>", ArgsUsage: "<type> <start (int)> <end (int)>",
Flags: utils.GroupFlags([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag, utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags), }, utils.NetworkFlags, utils.DatabasePathFlags),
Description: "This command displays information about the freezer index.", Description: "This command displays information about the freezer index.",
} }
dbImportCmd = cli.Command{ dbImportCmd = &cli.Command{
Action: utils.MigrateFlags(importLDBdata), Action: importLDBdata,
Name: "import", Name: "import",
Usage: "Imports leveldb-data from an exported RLP dump.", Usage: "Imports leveldb-data from an exported RLP dump.",
ArgsUsage: "<dumpfile> <start (optional)", ArgsUsage: "<dumpfile> <start (optional)",
Flags: utils.GroupFlags([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag, utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags), }, utils.NetworkFlags, utils.DatabasePathFlags),
Description: "The import command imports the specific chain data from an RLP encoded stream.", Description: "The import command imports the specific chain data from an RLP encoded stream.",
} }
dbExportCmd = cli.Command{ dbExportCmd = &cli.Command{
Action: utils.MigrateFlags(exportChaindata), Action: exportChaindata,
Name: "export", Name: "export",
Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.", Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
ArgsUsage: "<type> <dumpfile>", ArgsUsage: "<type> <dumpfile>",
Flags: utils.GroupFlags([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag, utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags), }, utils.NetworkFlags, utils.DatabasePathFlags),
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.", Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
} }
dbMetadataCmd = cli.Command{ dbMetadataCmd = &cli.Command{
Action: utils.MigrateFlags(showMetaData), Action: showMetaData,
Name: "metadata", Name: "metadata",
Usage: "Shows metadata about the chain status.", Usage: "Shows metadata about the chain status.",
Flags: utils.GroupFlags([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag, utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags), }, utils.NetworkFlags, utils.DatabasePathFlags),
Description: "Shows metadata about the chain status.", Description: "Shows metadata about the chain status.",
} }
dbMigrateFreezerCmd = cli.Command{ dbMigrateFreezerCmd = &cli.Command{
Action: utils.MigrateFlags(freezerMigrate), Action: freezerMigrate,
Name: "freezer-migrate", Name: "freezer-migrate",
Usage: "Migrate legacy parts of the freezer. (WARNING: may take a long time)", Usage: "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
ArgsUsage: "", ArgsUsage: "",
Flags: utils.GroupFlags([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag, utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags), }, utils.NetworkFlags, utils.DatabasePathFlags),
Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those. Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
@ -307,7 +306,7 @@ func checkStateContent(ctx *cli.Context) error {
start []byte start []byte
) )
if ctx.NArg() > 1 { if ctx.NArg() > 1 {
return fmt.Errorf("Max 1 argument: %v", ctx.Command.ArgsUsage) return fmt.Errorf("max 1 argument: %v", ctx.Command.ArgsUsage)
} }
if ctx.NArg() > 0 { if ctx.NArg() > 0 {
if d, err := hexutil.Decode(ctx.Args().First()); err != nil { if d, err := hexutil.Decode(ctx.Args().First()); err != nil {
@ -332,16 +331,16 @@ func checkStateContent(ctx *cli.Context) error {
) )
for it.Next() { for it.Next() {
count++ count++
v := it.Value()
k := it.Key() k := it.Key()
v := it.Value()
hasher.Reset() hasher.Reset()
hasher.Write(v) hasher.Write(v)
hasher.Read(got) hasher.Read(got)
if !bytes.Equal(k, got) { if !bytes.Equal(k, got) {
errs++ errs++
fmt.Printf("Error at 0x%x\n", k) fmt.Printf("Error at %#x\n", k)
fmt.Printf(" Hash: 0x%x\n", got) fmt.Printf(" Hash: %#x\n", got)
fmt.Printf(" Data: 0x%x\n", v) fmt.Printf(" Data: %#x\n", v)
} }
if time.Since(lastLog) > 8*time.Second { if time.Since(lastLog) > 8*time.Second {
log.Info("Iterating the database", "at", fmt.Sprintf("%#x", k), "elapsed", common.PrettyDuration(time.Since(startTime))) log.Info("Iterating the database", "at", fmt.Sprintf("%#x", k), "elapsed", common.PrettyDuration(time.Since(startTime)))
@ -418,7 +417,7 @@ func dbGet(ctx *cli.Context) error {
data, err := db.Get(key) data, err := db.Get(key)
if err != nil { if err != nil {
log.Info("Get operation failed", "key", fmt.Sprintf("0x%#x", key), "error", err) log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
return err return err
} }
fmt.Printf("key %#x: %#x\n", key, data) fmt.Printf("key %#x: %#x\n", key, data)
@ -446,7 +445,7 @@ func dbDelete(ctx *cli.Context) error {
fmt.Printf("Previous value: %#x\n", data) fmt.Printf("Previous value: %#x\n", data)
} }
if err = db.Delete(key); err != nil { if err = db.Delete(key); err != nil {
log.Info("Delete operation returned an error", "key", fmt.Sprintf("0x%#x", key), "error", err) log.Info("Delete operation returned an error", "key", fmt.Sprintf("%#x", key), "error", err)
return err return err
} }
return nil return nil
@ -519,7 +518,7 @@ func dbDumpTrie(ctx *cli.Context) error {
return err return err
} }
} }
theTrie, err := trie.New(stRoot, trie.NewDatabase(db)) theTrie, err := trie.New(common.Hash{}, stRoot, trie.NewDatabase(db))
if err != nil { if err != nil {
return err return err
} }
@ -718,7 +717,7 @@ func showMetaData(ctx *cli.Context) error {
if val == nil { if val == nil {
return "<nil>" return "<nil>"
} }
return fmt.Sprintf("%d (0x%x)", *val, *val) return fmt.Sprintf("%d (%#x)", *val, *val)
} }
data := [][]string{ data := [][]string{
{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))}, {"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
@ -728,7 +727,7 @@ func showMetaData(ctx *cli.Context) error {
if b := rawdb.ReadHeadBlock(db); b != nil { if b := rawdb.ReadHeadBlock(db); b != nil {
data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())}) data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())}) data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())}) data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (%#x)", b.Number(), b.Number())})
} }
if b := rawdb.ReadSkeletonSyncStatus(db); b != nil { if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
data = append(data, []string{"SkeletonSyncStatus", string(b)}) data = append(data, []string{"SkeletonSyncStatus", string(b)})
@ -736,7 +735,7 @@ func showMetaData(ctx *cli.Context) error {
if h := rawdb.ReadHeadHeader(db); h != nil { if h := rawdb.ReadHeadHeader(db); h != nil {
data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())}) data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)}) data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)}) data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (%#x)", h.Number, h.Number)})
} }
data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)}, data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))}, {"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},

View File

@ -83,7 +83,7 @@ func TestCustomGenesis(t *testing.T) {
// Query the custom genesis block // Query the custom genesis block
geth := runGeth(t, "--networkid", "1337", "--syncmode=full", "--cache", "16", geth := runGeth(t, "--networkid", "1337", "--syncmode=full", "--cache", "16",
"--datadir", datadir, "--maxpeers", "0", "--port", "0", "--datadir", datadir, "--maxpeers", "0", "--port", "0", "--authrpc.port", "0",
"--nodiscover", "--nat", "none", "--ipcdisable", "--nodiscover", "--nat", "none", "--ipcdisable",
"--exec", tt.query, "console") "--exec", tt.query, "console")
geth.ExpectRegexp(tt.result) geth.ExpectRegexp(tt.result)
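The tests above now also pass --authrpc.port 0, presumably so that concurrently running test nodes do not collide on the default authenticated engine-API port. Port 0 simply asks the operating system for any free port; a tiny sketch of that convention in plain Go:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Listening on port 0 lets the kernel pick an unused port, which is
	// the behaviour the test harness relies on with "--port 0" and
	// "--authrpc.port 0".
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer l.Close()
	fmt.Println("picked port", l.Addr().(*net.TCPAddr).Port)
}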

View File

@ -81,41 +81,6 @@ func (g *gethrpc) getNodeInfo() *p2p.NodeInfo {
return g.nodeInfo return g.nodeInfo
} }
func (g *gethrpc) waitSynced() {
// Check if it's synced now
var result interface{}
g.callRPC(&result, "eth_syncing")
syncing, ok := result.(bool)
if ok && !syncing {
g.geth.Logf("%v already synced", g.name)
return
}
// Actually wait, subscribe to the event
ch := make(chan interface{})
sub, err := g.rpc.Subscribe(context.Background(), "eth", ch, "syncing")
if err != nil {
g.geth.Fatalf("%v syncing: %v", g.name, err)
}
defer sub.Unsubscribe()
timeout := time.After(4 * time.Second)
select {
case ev := <-ch:
g.geth.Log("'syncing' event", ev)
syncing, ok := ev.(bool)
if ok && !syncing {
break
}
g.geth.Log("Other 'syncing' event", ev)
case err := <-sub.Err():
g.geth.Fatalf("%v notification: %v", g.name, err)
break
case <-timeout:
g.geth.Fatalf("%v timeout syncing", g.name)
break
}
}
// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into // ipcEndpoint resolves an IPC endpoint based on a configured value, taking into
// account the set data folders as well as the designated platform we're currently // account the set data folders as well as the designated platform we're currently
// running on. // running on.
@ -146,7 +111,7 @@ var nextIPC = uint32(0)
func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc { func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc {
ipcName := fmt.Sprintf("geth-%d.ipc", atomic.AddUint32(&nextIPC, 1)) ipcName := fmt.Sprintf("geth-%d.ipc", atomic.AddUint32(&nextIPC, 1))
args = append([]string{"--networkid=42", "--port=0", "--ipcpath", ipcName}, args...) args = append([]string{"--networkid=42", "--port=0", "--authrpc.port", "0", "--ipcpath", ipcName}, args...)
t.Logf("Starting %v with rpc: %v", name, args) t.Logf("Starting %v with rpc: %v", name, args)
g := &gethrpc{ g := &gethrpc{
@ -179,7 +144,7 @@ func initGeth(t *testing.T) string {
func startLightServer(t *testing.T) *gethrpc { func startLightServer(t *testing.T) *gethrpc {
datadir := initGeth(t) datadir := initGeth(t)
t.Logf("Importing keys to geth") t.Logf("Importing keys to geth")
runGeth(t, "--datadir", datadir, "--password", "./testdata/password.txt", "account", "import", "./testdata/key.prv", "--lightkdf").WaitExit() runGeth(t, "account", "import", "--datadir", datadir, "--password", "./testdata/password.txt", "--lightkdf", "./testdata/key.prv").WaitExit()
account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105" account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105"
server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1", "--verbosity=4") server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1", "--verbosity=4")
return server return server
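The waitSynced helper removed above subscribed to the eth "syncing" event and blocked until the node reported it was done. For readers who still need that behaviour in their own tests, a minimal polling sketch against the standard eth_syncing RPC (the endpoint path is illustrative) could look like this:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/rpc"
)

// waitSynced polls eth_syncing until the node reports false (not
// syncing) or the timeout elapses.
func waitSynced(endpoint string, timeout time.Duration) error {
	client, err := rpc.Dial(endpoint)
	if err != nil {
		return err
	}
	defer client.Close()

	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		var result interface{}
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		err := client.CallContext(ctx, &result, "eth_syncing")
		cancel()
		if err != nil {
			return err
		}
		if syncing, ok := result.(bool); ok && !syncing {
			return nil
		}
		time.Sleep(250 * time.Millisecond)
	}
	return fmt.Errorf("timed out waiting for %s to finish syncing", endpoint)
}

func main() {
	// Illustrative IPC endpoint; a real test would use the node's own path.
	if err := waitSynced("geth.ipc", 4*time.Second); err != nil {
		fmt.Println(err)
	}
}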

View File

@ -47,7 +47,7 @@ import (
_ "github.com/ethereum/go-ethereum/eth/tracers/js" _ "github.com/ethereum/go-ethereum/eth/tracers/js"
_ "github.com/ethereum/go-ethereum/eth/tracers/native" _ "github.com/ethereum/go-ethereum/eth/tracers/native"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
const ( const (
@ -61,7 +61,7 @@ var (
// The app that holds all commands and flags. // The app that holds all commands and flags.
app = flags.NewApp(gitCommit, gitDate, "the go-ethereum command line interface") app = flags.NewApp(gitCommit, gitDate, "the go-ethereum command line interface")
// flags that configure the node // flags that configure the node
nodeFlags = utils.GroupFlags([]cli.Flag{ nodeFlags = flags.Merge([]cli.Flag{
utils.IdentityFlag, utils.IdentityFlag,
utils.UnlockedAccountFlag, utils.UnlockedAccountFlag,
utils.PasswordFileFlag, utils.PasswordFileFlag,
@ -72,7 +72,7 @@ var (
utils.NoUSBFlag, utils.NoUSBFlag,
utils.USBFlag, utils.USBFlag,
utils.SmartCardDaemonPathFlag, utils.SmartCardDaemonPathFlag,
utils.OverrideArrowGlacierFlag, utils.OverrideGrayGlacierFlag,
utils.OverrideTerminalTotalDifficulty, utils.OverrideTerminalTotalDifficulty,
utils.EthashCacheDirFlag, utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag, utils.EthashCachesInMemoryFlag,
@ -122,6 +122,7 @@ var (
utils.CachePreimagesFlag, utils.CachePreimagesFlag,
utils.FDLimitFlag, utils.FDLimitFlag,
utils.ListenPortFlag, utils.ListenPortFlag,
utils.DiscoveryPortFlag,
utils.MaxPeersFlag, utils.MaxPeersFlag,
utils.MaxPendingPeersFlag, utils.MaxPendingPeersFlag,
utils.MiningEnabledFlag, utils.MiningEnabledFlag,
@ -154,6 +155,7 @@ var (
utils.GpoMaxGasPriceFlag, utils.GpoMaxGasPriceFlag,
utils.GpoIgnoreGasPriceFlag, utils.GpoIgnoreGasPriceFlag,
utils.MinerNotifyFullFlag, utils.MinerNotifyFullFlag,
utils.IgnoreLegacyReceiptsFlag,
configFileFlag, configFileFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags) }, utils.NetworkFlags, utils.DatabasePathFlags)
@ -210,7 +212,7 @@ func init() {
app.Action = geth app.Action = geth
app.HideVersion = true // we have a command to print the version app.HideVersion = true // we have a command to print the version
app.Copyright = "Copyright 2013-2022 The go-ethereum Authors" app.Copyright = "Copyright 2013-2022 The go-ethereum Authors"
app.Commands = []cli.Command{ app.Commands = []*cli.Command{
// See chaincmd.go: // See chaincmd.go:
initCommand, initCommand,
importCommand, importCommand,
@ -244,13 +246,16 @@ func init() {
} }
sort.Sort(cli.CommandsByName(app.Commands)) sort.Sort(cli.CommandsByName(app.Commands))
app.Flags = utils.GroupFlags(nodeFlags, app.Flags = flags.Merge(
nodeFlags,
rpcFlags, rpcFlags,
consoleFlags, consoleFlags,
debug.Flags, debug.Flags,
metricsFlags) metricsFlags,
)
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
flags.MigrateGlobalFlags(ctx)
return debug.Setup(ctx) return debug.Setup(ctx)
} }
app.After = func(ctx *cli.Context) error { app.After = func(ctx *cli.Context) error {
@ -272,22 +277,22 @@ func main() {
func prepare(ctx *cli.Context) { func prepare(ctx *cli.Context) {
// If we're running a known preset, log it for convenience. // If we're running a known preset, log it for convenience.
switch { switch {
case ctx.GlobalIsSet(utils.RopstenFlag.Name): case ctx.IsSet(utils.RopstenFlag.Name):
log.Info("Starting Geth on Ropsten testnet...") log.Info("Starting Geth on Ropsten testnet...")
case ctx.GlobalIsSet(utils.RinkebyFlag.Name): case ctx.IsSet(utils.RinkebyFlag.Name):
log.Info("Starting Geth on Rinkeby testnet...") log.Info("Starting Geth on Rinkeby testnet...")
case ctx.GlobalIsSet(utils.GoerliFlag.Name): case ctx.IsSet(utils.GoerliFlag.Name):
log.Info("Starting Geth on Görli testnet...") log.Info("Starting Geth on Görli testnet...")
case ctx.GlobalIsSet(utils.SepoliaFlag.Name): case ctx.IsSet(utils.SepoliaFlag.Name):
log.Info("Starting Geth on Sepolia testnet...") log.Info("Starting Geth on Sepolia testnet...")
case ctx.GlobalIsSet(utils.KilnFlag.Name): case ctx.IsSet(utils.KilnFlag.Name):
log.Info("Starting Geth on Kiln testnet...") log.Info("Starting Geth on Kiln testnet...")
case ctx.GlobalIsSet(utils.DeveloperFlag.Name): case ctx.IsSet(utils.DeveloperFlag.Name):
log.Info("Starting Geth in ephemeral dev mode...") log.Info("Starting Geth in ephemeral dev mode...")
log.Warn(`You are running Geth in --dev mode. Please note the following: log.Warn(`You are running Geth in --dev mode. Please note the following:
@ -305,27 +310,27 @@ func prepare(ctx *cli.Context) {
to 0, and discovery is disabled. to 0, and discovery is disabled.
`) `)
case !ctx.GlobalIsSet(utils.NetworkIdFlag.Name): case !ctx.IsSet(utils.NetworkIdFlag.Name):
log.Info("Starting Geth on Ethereum mainnet...") log.Info("Starting Geth on Ethereum mainnet...")
} }
// If we're a full node on mainnet without --cache specified, bump default cache allowance // If we're a full node on mainnet without --cache specified, bump default cache allowance
if ctx.GlobalString(utils.SyncModeFlag.Name) != "light" && !ctx.GlobalIsSet(utils.CacheFlag.Name) && !ctx.GlobalIsSet(utils.NetworkIdFlag.Name) { if ctx.String(utils.SyncModeFlag.Name) != "light" && !ctx.IsSet(utils.CacheFlag.Name) && !ctx.IsSet(utils.NetworkIdFlag.Name) {
// Make sure we're not on any supported preconfigured testnet either // Make sure we're not on any supported preconfigured testnet either
if !ctx.GlobalIsSet(utils.RopstenFlag.Name) && if !ctx.IsSet(utils.RopstenFlag.Name) &&
!ctx.GlobalIsSet(utils.SepoliaFlag.Name) && !ctx.IsSet(utils.SepoliaFlag.Name) &&
!ctx.GlobalIsSet(utils.RinkebyFlag.Name) && !ctx.IsSet(utils.RinkebyFlag.Name) &&
!ctx.GlobalIsSet(utils.GoerliFlag.Name) && !ctx.IsSet(utils.GoerliFlag.Name) &&
!ctx.GlobalIsSet(utils.KilnFlag.Name) && !ctx.IsSet(utils.KilnFlag.Name) &&
!ctx.GlobalIsSet(utils.DeveloperFlag.Name) { !ctx.IsSet(utils.DeveloperFlag.Name) {
// Nope, we're really on mainnet. Bump that cache up! // Nope, we're really on mainnet. Bump that cache up!
log.Info("Bumping default cache on mainnet", "provided", ctx.GlobalInt(utils.CacheFlag.Name), "updated", 4096) log.Info("Bumping default cache on mainnet", "provided", ctx.Int(utils.CacheFlag.Name), "updated", 4096)
ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(4096)) ctx.Set(utils.CacheFlag.Name, strconv.Itoa(4096))
} }
} }
// If we're running a light client on any network, drop the cache to some meaningfully low amount // If we're running a light client on any network, drop the cache to some meaningfully low amount
if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" && !ctx.GlobalIsSet(utils.CacheFlag.Name) { if ctx.String(utils.SyncModeFlag.Name) == "light" && !ctx.IsSet(utils.CacheFlag.Name) {
log.Info("Dropping default light client cache", "provided", ctx.GlobalInt(utils.CacheFlag.Name), "updated", 128) log.Info("Dropping default light client cache", "provided", ctx.Int(utils.CacheFlag.Name), "updated", 128)
ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(128)) ctx.Set(utils.CacheFlag.Name, strconv.Itoa(128))
} }
// Start metrics export if enabled // Start metrics export if enabled
@ -340,12 +345,12 @@ func prepare(ctx *cli.Context) {
// blocking mode, waiting for it to be shut down. // blocking mode, waiting for it to be shut down.
func geth(ctx *cli.Context) error { func geth(ctx *cli.Context) error {
//begin PluGeth code injection //begin PluGeth code injection
if err := plugins.Initialize(path.Join(ctx.GlobalString(utils.DataDirFlag.Name), "plugins"), ctx); err != nil { if err := plugins.Initialize(path.Join(ctx.String(utils.DataDirFlag.Name), "plugins"), ctx); err != nil {
return err return err
} }
prepare(ctx) prepare(ctx)
if !plugins.ParseFlags(ctx.Args()) { if !plugins.ParseFlags(ctx.Args().Slice()) {
if args := ctx.Args(); len(args) > 0 { if args := ctx.Args().Slice(); len(args) > 0 {
return fmt.Errorf("invalid command: %q", args[0]) return fmt.Errorf("invalid command: %q", args[0])
} }
} }
@ -422,7 +427,7 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isCon
// Spawn a standalone goroutine for status synchronization monitoring, // Spawn a standalone goroutine for status synchronization monitoring,
// close the node when synchronization is complete if user required. // close the node when synchronization is complete if user required.
if ctx.GlobalBool(utils.ExitWhenSyncedFlag.Name) { if ctx.Bool(utils.ExitWhenSyncedFlag.Name) {
go func() { go func() {
sub := stack.EventMux().Subscribe(downloader.DoneEvent{}) sub := stack.EventMux().Subscribe(downloader.DoneEvent{})
defer sub.Unsubscribe() defer sub.Unsubscribe()
@ -445,9 +450,9 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isCon
} }
// Start auxiliary services if enabled // Start auxiliary services if enabled
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) { if ctx.Bool(utils.MiningEnabledFlag.Name) || ctx.Bool(utils.DeveloperFlag.Name) {
// Mining only makes sense if a full Ethereum node is running // Mining only makes sense if a full Ethereum node is running
if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" { if ctx.String(utils.SyncModeFlag.Name) == "light" {
utils.Fatalf("Light clients do not support mining") utils.Fatalf("Light clients do not support mining")
} }
ethBackend, ok := backend.(*eth.EthAPIBackend) ethBackend, ok := backend.(*eth.EthAPIBackend)
@ -455,10 +460,10 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isCon
utils.Fatalf("Ethereum service not running") utils.Fatalf("Ethereum service not running")
} }
// Set the gas price to the limits from the CLI and start mining // Set the gas price to the limits from the CLI and start mining
gasprice := utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name) gasprice := flags.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
ethBackend.TxPool().SetGasPrice(gasprice) ethBackend.TxPool().SetGasPrice(gasprice)
// start mining // start mining
threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name) threads := ctx.Int(utils.MinerThreadsFlag.Name)
if err := ethBackend.StartMining(threads); err != nil { if err := ethBackend.StartMining(threads); err != nil {
utils.Fatalf("Failed to start mining: %v", err) utils.Fatalf("Failed to start mining: %v", err)
} }
@ -468,7 +473,7 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isCon
// unlockAccounts unlocks any account specifically requested. // unlockAccounts unlocks any account specifically requested.
func unlockAccounts(ctx *cli.Context, stack *node.Node) { func unlockAccounts(ctx *cli.Context, stack *node.Node) {
var unlocks []string var unlocks []string
inputs := strings.Split(ctx.GlobalString(utils.UnlockedAccountFlag.Name), ",") inputs := strings.Split(ctx.String(utils.UnlockedAccountFlag.Name), ",")
for _, input := range inputs { for _, input := range inputs {
if trimmed := strings.TrimSpace(input); trimmed != "" { if trimmed := strings.TrimSpace(input); trimmed != "" {
unlocks = append(unlocks, trimmed) unlocks = append(unlocks, trimmed)
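prepare() above keeps the old behaviour of bumping the default --cache to 4096 MB for mainnet full nodes, but now goes through the scoped ctx.IsSet / ctx.Set / ctx.Int calls. A minimal sketch of the same "adjust a default only if the user did not set it" pattern in urfave/cli v2, with an illustrative flag and values:

package main

import (
	"fmt"
	"log"
	"os"
	"strconv"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.IntFlag{Name: "cache", Value: 1024}, // illustrative default
		},
		Action: func(ctx *cli.Context) error {
			// Only raise the default when --cache was not given explicitly,
			// mirroring the mainnet cache bump in prepare().
			if !ctx.IsSet("cache") {
				if err := ctx.Set("cache", strconv.Itoa(4096)); err != nil {
					return err
				}
			}
			fmt.Println("cache (MB):", ctx.Int("cache"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}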

View File

@ -26,28 +26,27 @@ import (
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var ( var (
VersionCheckUrlFlag = cli.StringFlag{ VersionCheckUrlFlag = &cli.StringFlag{
Name: "check.url", Name: "check.url",
Usage: "URL to use when checking vulnerabilities", Usage: "URL to use when checking vulnerabilities",
Value: "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json", Value: "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json",
} }
VersionCheckVersionFlag = cli.StringFlag{ VersionCheckVersionFlag = &cli.StringFlag{
Name: "check.version", Name: "check.version",
Usage: "Version to check", Usage: "Version to check",
Value: fmt.Sprintf("Geth/v%v/%v-%v/%v", Value: fmt.Sprintf("Geth/v%v/%v-%v/%v",
params.VersionWithCommit(gitCommit, gitDate), params.VersionWithCommit(gitCommit, gitDate),
runtime.GOOS, runtime.GOARCH, runtime.Version()), runtime.GOOS, runtime.GOARCH, runtime.Version()),
} }
makecacheCommand = cli.Command{ makecacheCommand = &cli.Command{
Action: utils.MigrateFlags(makecache), Action: makecache,
Name: "makecache", Name: "makecache",
Usage: "Generate ethash verification cache (for testing)", Usage: "Generate ethash verification cache (for testing)",
ArgsUsage: "<blockNum> <outputDir>", ArgsUsage: "<blockNum> <outputDir>",
Category: "MISCELLANEOUS COMMANDS",
Description: ` Description: `
The makecache command generates an ethash cache in <outputDir>. The makecache command generates an ethash cache in <outputDir>.
@ -55,12 +54,11 @@ This command exists to support the system testing project.
Regular users do not need to execute it. Regular users do not need to execute it.
`, `,
} }
makedagCommand = cli.Command{ makedagCommand = &cli.Command{
Action: utils.MigrateFlags(makedag), Action: makedag,
Name: "makedag", Name: "makedag",
Usage: "Generate ethash mining DAG (for testing)", Usage: "Generate ethash mining DAG (for testing)",
ArgsUsage: "<blockNum> <outputDir>", ArgsUsage: "<blockNum> <outputDir>",
Category: "MISCELLANEOUS COMMANDS",
Description: ` Description: `
The makedag command generates an ethash DAG in <outputDir>. The makedag command generates an ethash DAG in <outputDir>.
@ -68,43 +66,40 @@ This command exists to support the system testing project.
Regular users do not need to execute it. Regular users do not need to execute it.
`, `,
} }
versionCommand = cli.Command{ versionCommand = &cli.Command{
Action: utils.MigrateFlags(version), Action: version,
Name: "version", Name: "version",
Usage: "Print version numbers", Usage: "Print version numbers",
ArgsUsage: " ", ArgsUsage: " ",
Category: "MISCELLANEOUS COMMANDS",
Description: ` Description: `
The output of this command is supposed to be machine-readable. The output of this command is supposed to be machine-readable.
`, `,
} }
versionCheckCommand = cli.Command{ versionCheckCommand = &cli.Command{
Action: utils.MigrateFlags(versionCheck), Action: versionCheck,
Flags: []cli.Flag{ Flags: []cli.Flag{
VersionCheckUrlFlag, VersionCheckUrlFlag,
VersionCheckVersionFlag, VersionCheckVersionFlag,
}, },
Name: "version-check", Name: "version-check",
Usage: "Checks (online) whether the current version suffers from any known security vulnerabilities", Usage: "Checks (online) for known Geth security vulnerabilities",
ArgsUsage: "<versionstring (optional)>", ArgsUsage: "<versionstring (optional)>",
Category: "MISCELLANEOUS COMMANDS",
Description: ` Description: `
The version-check command fetches vulnerability-information from https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json, The version-check command fetches vulnerability-information from https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json,
and displays information about any security vulnerabilities that affect the currently executing version. and displays information about any security vulnerabilities that affect the currently executing version.
`, `,
} }
licenseCommand = cli.Command{ licenseCommand = &cli.Command{
Action: utils.MigrateFlags(license), Action: license,
Name: "license", Name: "license",
Usage: "Display license information", Usage: "Display license information",
ArgsUsage: " ", ArgsUsage: " ",
Category: "MISCELLANEOUS COMMANDS",
} }
) )
// makecache generates an ethash verification cache into the provided folder. // makecache generates an ethash verification cache into the provided folder.
func makecache(ctx *cli.Context) error { func makecache(ctx *cli.Context) error {
args := ctx.Args() args := ctx.Args().Slice()
if len(args) != 2 { if len(args) != 2 {
utils.Fatalf(`Usage: geth makecache <block number> <outputdir>`) utils.Fatalf(`Usage: geth makecache <block number> <outputdir>`)
} }
@ -119,7 +114,7 @@ func makecache(ctx *cli.Context) error {
// makedag generates an ethash mining DAG into the provided folder. // makedag generates an ethash mining DAG into the provided folder.
func makedag(ctx *cli.Context) error { func makedag(ctx *cli.Context) error {
args := ctx.Args() args := ctx.Args().Slice()
if len(args) != 2 { if len(args) != 2 {
utils.Fatalf(`Usage: geth makedag <block number> <outputdir>`) utils.Fatalf(`Usage: geth makedag <block number> <outputdir>`)
} }

View File

@ -31,10 +31,11 @@ import (
"github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
cli "gopkg.in/urfave/cli.v1" cli "github.com/urfave/cli/v2"
) )
var ( var (
@ -46,19 +47,17 @@ var (
) )
var ( var (
snapshotCommand = cli.Command{ snapshotCommand = &cli.Command{
Name: "snapshot", Name: "snapshot",
Usage: "A set of commands based on the snapshot", Usage: "A set of commands based on the snapshot",
Category: "MISCELLANEOUS COMMANDS",
Description: "", Description: "",
Subcommands: []cli.Command{ Subcommands: []*cli.Command{
{ {
Name: "prune-state", Name: "prune-state",
Usage: "Prune stale ethereum state data based on the snapshot", Usage: "Prune stale ethereum state data based on the snapshot",
ArgsUsage: "<root>", ArgsUsage: "<root>",
Action: utils.MigrateFlags(pruneState), Action: pruneState,
Category: "MISCELLANEOUS COMMANDS", Flags: flags.Merge([]cli.Flag{
Flags: utils.GroupFlags([]cli.Flag{
utils.CacheTrieJournalFlag, utils.CacheTrieJournalFlag,
utils.BloomFilterSizeFlag, utils.BloomFilterSizeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags), }, utils.NetworkFlags, utils.DatabasePathFlags),
@ -81,9 +80,8 @@ the trie clean cache with default directory will be deleted.
Name: "verify-state", Name: "verify-state",
Usage: "Recalculate state hash based on the snapshot for verification", Usage: "Recalculate state hash based on the snapshot for verification",
ArgsUsage: "<root>", ArgsUsage: "<root>",
Action: utils.MigrateFlags(verifyState), Action: verifyState,
Category: "MISCELLANEOUS COMMANDS", Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
Flags: utils.GroupFlags(utils.NetworkFlags, utils.DatabasePathFlags),
Description: ` Description: `
geth snapshot verify-state <state-root> geth snapshot verify-state <state-root>
will traverse the whole accounts and storages set based on the specified will traverse the whole accounts and storages set based on the specified
@ -95,21 +93,30 @@ In other words, this command does the snapshot to trie conversion.
Name: "check-dangling-storage", Name: "check-dangling-storage",
Usage: "Check that there is no 'dangling' snap storage", Usage: "Check that there is no 'dangling' snap storage",
ArgsUsage: "<root>", ArgsUsage: "<root>",
Action: utils.MigrateFlags(checkDanglingStorage), Action: checkDanglingStorage,
Category: "MISCELLANEOUS COMMANDS", Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
Flags: utils.GroupFlags(utils.NetworkFlags, utils.DatabasePathFlags),
Description: ` Description: `
geth snapshot check-dangling-storage <state-root> traverses the snap storage geth snapshot check-dangling-storage <state-root> traverses the snap storage
data, and verifies that all snapshot storage data has a corresponding account. data, and verifies that all snapshot storage data has a corresponding account.
`,
},
{
Name: "inspect-account",
Usage: "Check all snapshot layers for the a specific account",
ArgsUsage: "<address | hash>",
Action: checkAccount,
Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
Description: `
geth snapshot inspect-account <address | hash> checks all snapshot layers and prints out
information about the specified address.
`, `,
}, },
{ {
Name: "traverse-state", Name: "traverse-state",
Usage: "Traverse the state with given root hash for verification", Usage: "Traverse the state with given root hash and perform quick verification",
ArgsUsage: "<root>", ArgsUsage: "<root>",
Action: utils.MigrateFlags(traverseState), Action: traverseState,
Category: "MISCELLANEOUS COMMANDS", Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
Flags: utils.GroupFlags(utils.NetworkFlags, utils.DatabasePathFlags),
Description: ` Description: `
geth snapshot traverse-state <state-root> geth snapshot traverse-state <state-root>
will traverse the whole state from the given state root and will abort if any will traverse the whole state from the given state root and will abort if any
@ -121,11 +128,10 @@ It's also usable without snapshot enabled.
}, },
{ {
Name: "traverse-rawstate", Name: "traverse-rawstate",
Usage: "Traverse the state with given root hash for verification", Usage: "Traverse the state with given root hash and perform detailed verification",
ArgsUsage: "<root>", ArgsUsage: "<root>",
Action: utils.MigrateFlags(traverseRawState), Action: traverseRawState,
Category: "MISCELLANEOUS COMMANDS", Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
Flags: utils.GroupFlags(utils.NetworkFlags, utils.DatabasePathFlags),
Description: ` Description: `
geth snapshot traverse-rawstate <state-root> geth snapshot traverse-rawstate <state-root>
will traverse the whole state from the given root and will abort if any referenced will traverse the whole state from the given root and will abort if any referenced
@ -140,9 +146,8 @@ It's also usable without snapshot enabled.
Name: "dump", Name: "dump",
Usage: "Dump a specific block from storage (same as 'geth dump' but using snapshots)", Usage: "Dump a specific block from storage (same as 'geth dump' but using snapshots)",
ArgsUsage: "[? <blockHash> | <blockNum>]", ArgsUsage: "[? <blockHash> | <blockNum>]",
Action: utils.MigrateFlags(dumpState), Action: dumpState,
Category: "MISCELLANEOUS COMMANDS", Flags: flags.Merge([]cli.Flag{
Flags: utils.GroupFlags([]cli.Flag{
utils.ExcludeCodeFlag, utils.ExcludeCodeFlag,
utils.ExcludeStorageFlag, utils.ExcludeStorageFlag,
utils.StartKeyFlag, utils.StartKeyFlag,
@ -165,7 +170,7 @@ func pruneState(ctx *cli.Context) error {
defer stack.Close() defer stack.Close()
chaindb := utils.MakeChainDatabase(ctx, stack, false) chaindb := utils.MakeChainDatabase(ctx, stack, false)
pruner, err := pruner.NewPruner(chaindb, stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.GlobalUint64(utils.BloomFilterSizeFlag.Name)) pruner, err := pruner.NewPruner(chaindb, stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.Uint64(utils.BloomFilterSizeFlag.Name))
if err != nil { if err != nil {
log.Error("Failed to open snapshot tree", "err", err) log.Error("Failed to open snapshot tree", "err", err)
return err return err
@ -176,7 +181,7 @@ func pruneState(ctx *cli.Context) error {
} }
var targetRoot common.Hash var targetRoot common.Hash
if ctx.NArg() == 1 { if ctx.NArg() == 1 {
targetRoot, err = parseRoot(ctx.Args()[0]) targetRoot, err = parseRoot(ctx.Args().First())
if err != nil { if err != nil {
log.Error("Failed to resolve state root", "err", err) log.Error("Failed to resolve state root", "err", err)
return err return err
@ -210,7 +215,7 @@ func verifyState(ctx *cli.Context) error {
} }
var root = headBlock.Root() var root = headBlock.Root()
if ctx.NArg() == 1 { if ctx.NArg() == 1 {
root, err = parseRoot(ctx.Args()[0]) root, err = parseRoot(ctx.Args().First())
if err != nil { if err != nil {
log.Error("Failed to resolve state root", "err", err) log.Error("Failed to resolve state root", "err", err)
return err return err
@ -255,7 +260,7 @@ func traverseState(ctx *cli.Context) error {
err error err error
) )
if ctx.NArg() == 1 { if ctx.NArg() == 1 {
root, err = parseRoot(ctx.Args()[0]) root, err = parseRoot(ctx.Args().First())
if err != nil { if err != nil {
log.Error("Failed to resolve state root", "err", err) log.Error("Failed to resolve state root", "err", err)
return err return err
@ -266,7 +271,7 @@ func traverseState(ctx *cli.Context) error {
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64()) log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
} }
triedb := trie.NewDatabase(chaindb) triedb := trie.NewDatabase(chaindb)
t, err := trie.NewSecure(root, triedb) t, err := trie.NewSecure(common.Hash{}, root, triedb)
if err != nil { if err != nil {
log.Error("Failed to open trie", "root", root, "err", err) log.Error("Failed to open trie", "root", root, "err", err)
return err return err
@ -287,7 +292,7 @@ func traverseState(ctx *cli.Context) error {
return err return err
} }
if acc.Root != emptyRoot { if acc.Root != emptyRoot {
storageTrie, err := trie.NewSecure(acc.Root, triedb) storageTrie, err := trie.NewSecure(common.BytesToHash(accIter.Key), acc.Root, triedb)
if err != nil { if err != nil {
log.Error("Failed to open storage trie", "root", acc.Root, "err", err) log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
return err return err
@ -344,7 +349,7 @@ func traverseRawState(ctx *cli.Context) error {
err error err error
) )
if ctx.NArg() == 1 { if ctx.NArg() == 1 {
root, err = parseRoot(ctx.Args()[0]) root, err = parseRoot(ctx.Args().First())
if err != nil { if err != nil {
log.Error("Failed to resolve state root", "err", err) log.Error("Failed to resolve state root", "err", err)
return err return err
@ -355,7 +360,7 @@ func traverseRawState(ctx *cli.Context) error {
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64()) log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
} }
triedb := trie.NewDatabase(chaindb) triedb := trie.NewDatabase(chaindb)
t, err := trie.NewSecure(root, triedb) t, err := trie.NewSecure(common.Hash{}, root, triedb)
if err != nil { if err != nil {
log.Error("Failed to open trie", "root", root, "err", err) log.Error("Failed to open trie", "root", root, "err", err)
return err return err
@ -367,6 +372,8 @@ func traverseRawState(ctx *cli.Context) error {
codes int codes int
lastReport time.Time lastReport time.Time
start = time.Now() start = time.Now()
hasher = crypto.NewKeccakState()
got = make([]byte, 32)
) )
accIter := t.NodeIterator(nil) accIter := t.NodeIterator(nil)
for accIter.Next(true) { for accIter.Next(true) {
@ -376,10 +383,18 @@ func traverseRawState(ctx *cli.Context) error {
// Only check presence for non-empty hash nodes (embedded nodes don't // Only check presence for non-empty hash nodes (embedded nodes don't
// have their own hash). // have their own hash).
if node != (common.Hash{}) { if node != (common.Hash{}) {
if !rawdb.HasTrieNode(chaindb, node) { blob := rawdb.ReadTrieNode(chaindb, node)
if len(blob) == 0 {
log.Error("Missing trie node(account)", "hash", node) log.Error("Missing trie node(account)", "hash", node)
return errors.New("missing account") return errors.New("missing account")
} }
hasher.Reset()
hasher.Write(blob)
hasher.Read(got)
if !bytes.Equal(got, node.Bytes()) {
log.Error("Invalid trie node(account)", "hash", node.Hex(), "value", blob)
return errors.New("invalid account node")
}
} }
// If it's a leaf node, yes we are touching an account, // If it's a leaf node, yes we are touching an account,
// dig into the storage trie further. // dig into the storage trie further.
@ -391,7 +406,7 @@ func traverseRawState(ctx *cli.Context) error {
return errors.New("invalid account") return errors.New("invalid account")
} }
if acc.Root != emptyRoot { if acc.Root != emptyRoot {
storageTrie, err := trie.NewSecure(acc.Root, triedb) storageTrie, err := trie.NewSecure(common.BytesToHash(accIter.LeafKey()), acc.Root, triedb)
if err != nil { if err != nil {
log.Error("Failed to open storage trie", "root", acc.Root, "err", err) log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
return errors.New("missing storage trie") return errors.New("missing storage trie")
@ -404,10 +419,18 @@ func traverseRawState(ctx *cli.Context) error {
// Only check presence for non-empty hash nodes (embedded nodes don't // Only check presence for non-empty hash nodes (embedded nodes don't
// have their own hash). // have their own hash).
if node != (common.Hash{}) { if node != (common.Hash{}) {
if !rawdb.HasTrieNode(chaindb, node) { blob := rawdb.ReadTrieNode(chaindb, node)
if len(blob) == 0 {
log.Error("Missing trie node(storage)", "hash", node) log.Error("Missing trie node(storage)", "hash", node)
return errors.New("missing storage") return errors.New("missing storage")
} }
hasher.Reset()
hasher.Write(blob)
hasher.Read(got)
if !bytes.Equal(got, node.Bytes()) {
log.Error("Invalid trie node(storage)", "hash", node.Hex(), "value", blob)
return errors.New("invalid storage node")
}
} }
// Bump the counter if it's leaf node. // Bump the counter if it's leaf node.
if storageIter.Leaf() { if storageIter.Leaf() {
@ -517,3 +540,35 @@ func dumpState(ctx *cli.Context) error {
"elapsed", common.PrettyDuration(time.Since(start))) "elapsed", common.PrettyDuration(time.Since(start)))
return nil return nil
} }
// checkAccount iterates the snap data layers, and looks up the given account
// across all layers.
func checkAccount(ctx *cli.Context) error {
if ctx.NArg() != 1 {
return errors.New("need <address|hash> arg")
}
var (
hash common.Hash
addr common.Address
)
switch arg := ctx.Args().First(); len(arg) {
case 40, 42:
addr = common.HexToAddress(arg)
hash = crypto.Keccak256Hash(addr.Bytes())
case 64, 66:
hash = common.HexToHash(arg)
default:
return errors.New("malformed address or hash")
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()
start := time.Now()
log.Info("Checking difflayer journal", "address", addr, "hash", hash)
if err := snapshot.CheckJournalAccount(chaindb, hash); err != nil {
return err
}
log.Info("Checked the snapshot journalled storage", "time", common.PrettyDuration(time.Since(start)))
return nil
}
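The traverseRawState hunks above do more than check that a trie node exists: rawdb.HasTrieNode is replaced by rawdb.ReadTrieNode, and the returned blob is keccak-hashed and compared against the hash it is keyed by, so corrupted database entries are caught as well as missing ones. The trie.NewSecure calls also gain an owner hash as their first argument (common.Hash{} for the account trie, the hashed account key for storage tries). Below is a minimal standalone sketch of the verification step, reusing go-ethereum's crypto package; verifyNodeBlob is a hypothetical helper name, not part of this diff.

package main

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// verifyNodeBlob re-hashes a raw trie node blob and checks that it matches
// the hash it was stored under, mirroring the checks added above.
func verifyNodeBlob(node common.Hash, blob []byte) error {
	if len(blob) == 0 {
		return errors.New("missing trie node")
	}
	hasher := crypto.NewKeccakState() // keccak256 sponge, reusable via Reset
	got := make([]byte, 32)
	hasher.Write(blob)
	hasher.Read(got) // KeccakState exposes the digest through Read
	if !bytes.Equal(got, node.Bytes()) {
		return fmt.Errorf("invalid trie node %x: content does not hash to key", node)
	}
	return nil
}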

View File

@ -1,300 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// Contains the geth command usage template and generator.
package main
import (
"io"
"sort"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/internal/flags"
"gopkg.in/urfave/cli.v1"
)
// AppHelpFlagGroups is the application flags, grouped by functionality.
var AppHelpFlagGroups = []flags.FlagGroup{
{
Name: "ETHEREUM",
Flags: utils.GroupFlags([]cli.Flag{
configFileFlag,
utils.MinFreeDiskSpaceFlag,
utils.KeyStoreDirFlag,
utils.USBFlag,
utils.SmartCardDaemonPathFlag,
utils.NetworkIdFlag,
utils.SyncModeFlag,
utils.ExitWhenSyncedFlag,
utils.GCModeFlag,
utils.TxLookupLimitFlag,
utils.EthStatsURLFlag,
utils.IdentityFlag,
utils.LightKDFFlag,
utils.EthRequiredBlocksFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags),
},
{
Name: "LIGHT CLIENT",
Flags: []cli.Flag{
utils.LightServeFlag,
utils.LightIngressFlag,
utils.LightEgressFlag,
utils.LightMaxPeersFlag,
utils.UltraLightServersFlag,
utils.UltraLightFractionFlag,
utils.UltraLightOnlyAnnounceFlag,
utils.LightNoPruneFlag,
utils.LightNoSyncServeFlag,
},
},
{
Name: "DEVELOPER CHAIN",
Flags: []cli.Flag{
utils.DeveloperFlag,
utils.DeveloperPeriodFlag,
utils.DeveloperGasLimitFlag,
},
},
{
Name: "ETHASH",
Flags: []cli.Flag{
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,
utils.EthashCachesLockMmapFlag,
utils.EthashDatasetDirFlag,
utils.EthashDatasetsInMemoryFlag,
utils.EthashDatasetsOnDiskFlag,
utils.EthashDatasetsLockMmapFlag,
},
},
{
Name: "TRANSACTION POOL",
Flags: []cli.Flag{
utils.TxPoolLocalsFlag,
utils.TxPoolNoLocalsFlag,
utils.TxPoolJournalFlag,
utils.TxPoolRejournalFlag,
utils.TxPoolPriceLimitFlag,
utils.TxPoolPriceBumpFlag,
utils.TxPoolAccountSlotsFlag,
utils.TxPoolGlobalSlotsFlag,
utils.TxPoolAccountQueueFlag,
utils.TxPoolGlobalQueueFlag,
utils.TxPoolLifetimeFlag,
},
},
{
Name: "PERFORMANCE TUNING",
Flags: []cli.Flag{
utils.CacheFlag,
utils.CacheDatabaseFlag,
utils.CacheTrieFlag,
utils.CacheTrieJournalFlag,
utils.CacheTrieRejournalFlag,
utils.CacheGCFlag,
utils.CacheSnapshotFlag,
utils.CacheNoPrefetchFlag,
utils.CachePreimagesFlag,
utils.FDLimitFlag,
},
},
{
Name: "ACCOUNT",
Flags: []cli.Flag{
utils.UnlockedAccountFlag,
utils.PasswordFileFlag,
utils.ExternalSignerFlag,
utils.InsecureUnlockAllowedFlag,
},
},
{
Name: "API AND CONSOLE",
Flags: []cli.Flag{
utils.IPCDisabledFlag,
utils.IPCPathFlag,
utils.HTTPEnabledFlag,
utils.HTTPListenAddrFlag,
utils.HTTPPortFlag,
utils.HTTPApiFlag,
utils.HTTPPathPrefixFlag,
utils.HTTPCORSDomainFlag,
utils.HTTPVirtualHostsFlag,
utils.WSEnabledFlag,
utils.WSListenAddrFlag,
utils.WSPortFlag,
utils.WSApiFlag,
utils.WSPathPrefixFlag,
utils.WSAllowedOriginsFlag,
utils.JWTSecretFlag,
utils.AuthListenFlag,
utils.AuthPortFlag,
utils.AuthVirtualHostsFlag,
utils.GraphQLEnabledFlag,
utils.GraphQLCORSDomainFlag,
utils.GraphQLVirtualHostsFlag,
utils.RPCGlobalGasCapFlag,
utils.RPCGlobalEVMTimeoutFlag,
utils.RPCGlobalTxFeeCapFlag,
utils.AllowUnprotectedTxs,
utils.JSpathFlag,
utils.ExecFlag,
utils.PreloadJSFlag,
},
},
{
Name: "NETWORKING",
Flags: []cli.Flag{
utils.BootnodesFlag,
utils.DNSDiscoveryFlag,
utils.ListenPortFlag,
utils.MaxPeersFlag,
utils.MaxPendingPeersFlag,
utils.NATFlag,
utils.NoDiscoverFlag,
utils.DiscoveryV5Flag,
utils.NetrestrictFlag,
utils.NodeKeyFileFlag,
utils.NodeKeyHexFlag,
},
},
{
Name: "MINER",
Flags: []cli.Flag{
utils.MiningEnabledFlag,
utils.MinerThreadsFlag,
utils.MinerNotifyFlag,
utils.MinerNotifyFullFlag,
utils.MinerGasPriceFlag,
utils.MinerGasLimitFlag,
utils.MinerEtherbaseFlag,
utils.MinerExtraDataFlag,
utils.MinerRecommitIntervalFlag,
utils.MinerNoVerifyFlag,
},
},
{
Name: "GAS PRICE ORACLE",
Flags: []cli.Flag{
utils.GpoBlocksFlag,
utils.GpoPercentileFlag,
utils.GpoMaxGasPriceFlag,
utils.GpoIgnoreGasPriceFlag,
},
},
{
Name: "VIRTUAL MACHINE",
Flags: []cli.Flag{
utils.VMEnableDebugFlag,
},
},
{
Name: "LOGGING AND DEBUGGING",
Flags: append([]cli.Flag{
utils.FakePoWFlag,
utils.NoCompactionFlag,
}, debug.Flags...),
},
{
Name: "METRICS AND STATS",
Flags: metricsFlags,
},
{
Name: "ALIASED (deprecated)",
Flags: []cli.Flag{
utils.NoUSBFlag,
utils.LegacyWhitelistFlag,
},
},
{
Name: "MISC",
Flags: []cli.Flag{
utils.SnapshotFlag,
utils.BloomFilterSizeFlag,
cli.HelpFlag,
},
},
}
func init() {
// Override the default app help template
cli.AppHelpTemplate = flags.AppHelpTemplate
// Override the default app help printer, but only for the global app help
originalHelpPrinter := cli.HelpPrinter
cli.HelpPrinter = func(w io.Writer, tmpl string, data interface{}) {
if tmpl == flags.AppHelpTemplate {
// Iterate over all the flags and add any uncategorized ones
categorized := make(map[string]struct{})
for _, group := range AppHelpFlagGroups {
for _, flag := range group.Flags {
categorized[flag.String()] = struct{}{}
}
}
deprecated := make(map[string]struct{})
for _, flag := range utils.DeprecatedFlags {
deprecated[flag.String()] = struct{}{}
}
// Only add uncategorized flags if they are not deprecated
var uncategorized []cli.Flag
for _, flag := range data.(*cli.App).Flags {
if _, ok := categorized[flag.String()]; !ok {
if _, ok := deprecated[flag.String()]; !ok {
uncategorized = append(uncategorized, flag)
}
}
}
if len(uncategorized) > 0 {
// Append all uncategorized options to the misc group
miscs := len(AppHelpFlagGroups[len(AppHelpFlagGroups)-1].Flags)
AppHelpFlagGroups[len(AppHelpFlagGroups)-1].Flags = append(AppHelpFlagGroups[len(AppHelpFlagGroups)-1].Flags, uncategorized...)
// Make sure they are removed afterwards
defer func() {
AppHelpFlagGroups[len(AppHelpFlagGroups)-1].Flags = AppHelpFlagGroups[len(AppHelpFlagGroups)-1].Flags[:miscs]
}()
}
// Render out custom usage screen
originalHelpPrinter(w, tmpl, flags.HelpData{App: data, FlagGroups: AppHelpFlagGroups})
} else if tmpl == flags.CommandHelpTemplate {
// Iterate over all command specific flags and categorize them
categorized := make(map[string][]cli.Flag)
for _, flag := range data.(cli.Command).Flags {
if _, ok := categorized[flag.String()]; !ok {
categorized[flags.FlagCategory(flag, AppHelpFlagGroups)] = append(categorized[flags.FlagCategory(flag, AppHelpFlagGroups)], flag)
}
}
// sort to get a stable ordering
sorted := make([]flags.FlagGroup, 0, len(categorized))
for cat, flgs := range categorized {
sorted = append(sorted, flags.FlagGroup{Name: cat, Flags: flgs})
}
sort.Sort(flags.ByCategory(sorted))
// add sorted array to data and render with default printer
originalHelpPrinter(w, tmpl, map[string]interface{}{
"cmd": data,
"categorizedFlags": sorted,
})
} else {
originalHelpPrinter(w, tmpl, data)
}
}
}

View File

@ -28,7 +28,7 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/jedisct1/go-minisign" "github.com/jedisct1/go-minisign"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var gethPubKeys []string = []string{ var gethPubKeys []string = []string{

View File

@ -118,7 +118,6 @@ func TestMatching(t *testing.T) {
version, vuln.Introduced, vuln.Fixed, vuln.Name, vulnIntro, current, vulnFixed) version, vuln.Introduced, vuln.Fixed, vuln.Name, vulnIntro, current, vulnFixed)
} }
} }
} }
} }
for major := 1; major < 2; major++ { for major := 1; major < 2; major++ {

View File

@ -46,71 +46,77 @@ import (
"text/tabwriter" "text/tabwriter"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations" "github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters" "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
var client *simulations.Client var client *simulations.Client
var ( var (
// global command flags // global command flags
apiFlag = cli.StringFlag{ apiFlag = &cli.StringFlag{
Name: "api", Name: "api",
Value: "http://localhost:8888", Value: "http://localhost:8888",
Usage: "simulation API URL", Usage: "simulation API URL",
EnvVar: "P2PSIM_API_URL", EnvVars: []string{"P2PSIM_API_URL"},
} }
// events subcommand flags // events subcommand flags
currentFlag = cli.BoolFlag{ currentFlag = &cli.BoolFlag{
Name: "current", Name: "current",
Usage: "get existing nodes and conns first", Usage: "get existing nodes and conns first",
} }
filterFlag = cli.StringFlag{ filterFlag = &cli.StringFlag{
Name: "filter", Name: "filter",
Value: "", Value: "",
Usage: "message filter", Usage: "message filter",
} }
// node create subcommand flags // node create subcommand flags
nameFlag = cli.StringFlag{ nameFlag = &cli.StringFlag{
Name: "name", Name: "name",
Value: "", Value: "",
Usage: "node name", Usage: "node name",
} }
servicesFlag = cli.StringFlag{ servicesFlag = &cli.StringFlag{
Name: "services", Name: "services",
Value: "", Value: "",
Usage: "node services (comma separated)", Usage: "node services (comma separated)",
} }
keyFlag = cli.StringFlag{ keyFlag = &cli.StringFlag{
Name: "key", Name: "key",
Value: "", Value: "",
Usage: "node private key (hex encoded)", Usage: "node private key (hex encoded)",
} }
// node rpc subcommand flags // node rpc subcommand flags
subscribeFlag = cli.BoolFlag{ subscribeFlag = &cli.BoolFlag{
Name: "subscribe", Name: "subscribe",
Usage: "method is a subscription", Usage: "method is a subscription",
} }
) )
var (
// Git information set by linker when building with ci.go.
gitCommit string
gitDate string
)
func main() { func main() {
app := cli.NewApp() app := flags.NewApp(gitCommit, gitDate, "devp2p simulation command-line client")
app.Usage = "devp2p simulation command-line client"
app.Flags = []cli.Flag{ app.Flags = []cli.Flag{
apiFlag, apiFlag,
} }
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
client = simulations.NewClient(ctx.GlobalString(apiFlag.Name)) client = simulations.NewClient(ctx.String(apiFlag.Name))
return nil return nil
} }
app.Commands = []cli.Command{ app.Commands = []*cli.Command{
{ {
Name: "show", Name: "show",
Usage: "show network information", Usage: "show network information",
@ -139,7 +145,7 @@ func main() {
Name: "node", Name: "node",
Usage: "manage simulation nodes", Usage: "manage simulation nodes",
Action: listNodes, Action: listNodes,
Subcommands: []cli.Command{ Subcommands: []*cli.Command{
{ {
Name: "list", Name: "list",
Usage: "list nodes", Usage: "list nodes",
@ -204,7 +210,7 @@ func main() {
} }
func showNetwork(ctx *cli.Context) error { func showNetwork(ctx *cli.Context) error {
if len(ctx.Args()) != 0 { if ctx.NArg() != 0 {
return cli.ShowCommandHelp(ctx, ctx.Command.Name) return cli.ShowCommandHelp(ctx, ctx.Command.Name)
} }
network, err := client.GetNetwork() network, err := client.GetNetwork()
@ -219,7 +225,7 @@ func showNetwork(ctx *cli.Context) error {
} }
func streamNetwork(ctx *cli.Context) error { func streamNetwork(ctx *cli.Context) error {
if len(ctx.Args()) != 0 { if ctx.NArg() != 0 {
return cli.ShowCommandHelp(ctx, ctx.Command.Name) return cli.ShowCommandHelp(ctx, ctx.Command.Name)
} }
events := make(chan *simulations.Event) events := make(chan *simulations.Event)
@ -245,7 +251,7 @@ func streamNetwork(ctx *cli.Context) error {
} }
func createSnapshot(ctx *cli.Context) error { func createSnapshot(ctx *cli.Context) error {
if len(ctx.Args()) != 0 { if ctx.NArg() != 0 {
return cli.ShowCommandHelp(ctx, ctx.Command.Name) return cli.ShowCommandHelp(ctx, ctx.Command.Name)
} }
snap, err := client.CreateSnapshot() snap, err := client.CreateSnapshot()
@ -256,7 +262,7 @@ func createSnapshot(ctx *cli.Context) error {
} }
func loadSnapshot(ctx *cli.Context) error { func loadSnapshot(ctx *cli.Context) error {
if len(ctx.Args()) != 0 { if ctx.NArg() != 0 {
return cli.ShowCommandHelp(ctx, ctx.Command.Name) return cli.ShowCommandHelp(ctx, ctx.Command.Name)
} }
snap := &simulations.Snapshot{} snap := &simulations.Snapshot{}
@ -267,7 +273,7 @@ func loadSnapshot(ctx *cli.Context) error {
} }
func listNodes(ctx *cli.Context) error { func listNodes(ctx *cli.Context) error {
if len(ctx.Args()) != 0 { if ctx.NArg() != 0 {
return cli.ShowCommandHelp(ctx, ctx.Command.Name) return cli.ShowCommandHelp(ctx, ctx.Command.Name)
} }
nodes, err := client.GetNodes() nodes, err := client.GetNodes()
@ -292,7 +298,7 @@ func protocolList(node *p2p.NodeInfo) []string {
} }
func createNode(ctx *cli.Context) error { func createNode(ctx *cli.Context) error {
if len(ctx.Args()) != 0 { if ctx.NArg() != 0 {
return cli.ShowCommandHelp(ctx, ctx.Command.Name) return cli.ShowCommandHelp(ctx, ctx.Command.Name)
} }
config := adapters.RandomNodeConfig() config := adapters.RandomNodeConfig()
@ -317,11 +323,10 @@ func createNode(ctx *cli.Context) error {
} }
func showNode(ctx *cli.Context) error { func showNode(ctx *cli.Context) error {
args := ctx.Args() if ctx.NArg() != 1 {
if len(args) != 1 {
return cli.ShowCommandHelp(ctx, ctx.Command.Name) return cli.ShowCommandHelp(ctx, ctx.Command.Name)
} }
nodeName := args[0] nodeName := ctx.Args().First()
node, err := client.GetNode(nodeName) node, err := client.GetNode(nodeName)
if err != nil { if err != nil {
return err return err
@ -342,11 +347,10 @@ func showNode(ctx *cli.Context) error {
} }
func startNode(ctx *cli.Context) error { func startNode(ctx *cli.Context) error {
args := ctx.Args() if ctx.NArg() != 1 {
if len(args) != 1 {
return cli.ShowCommandHelp(ctx, ctx.Command.Name) return cli.ShowCommandHelp(ctx, ctx.Command.Name)
} }
nodeName := args[0] nodeName := ctx.Args().First()
if err := client.StartNode(nodeName); err != nil { if err := client.StartNode(nodeName); err != nil {
return err return err
} }
@ -355,11 +359,10 @@ func startNode(ctx *cli.Context) error {
} }
func stopNode(ctx *cli.Context) error { func stopNode(ctx *cli.Context) error {
args := ctx.Args() if ctx.NArg() != 1 {
if len(args) != 1 {
return cli.ShowCommandHelp(ctx, ctx.Command.Name) return cli.ShowCommandHelp(ctx, ctx.Command.Name)
} }
nodeName := args[0] nodeName := ctx.Args().First()
if err := client.StopNode(nodeName); err != nil { if err := client.StopNode(nodeName); err != nil {
return err return err
} }
@ -368,12 +371,12 @@ func stopNode(ctx *cli.Context) error {
} }
func connectNode(ctx *cli.Context) error { func connectNode(ctx *cli.Context) error {
args := ctx.Args() if ctx.NArg() != 2 {
if len(args) != 2 {
return cli.ShowCommandHelp(ctx, ctx.Command.Name) return cli.ShowCommandHelp(ctx, ctx.Command.Name)
} }
nodeName := args[0] args := ctx.Args()
peerName := args[1] nodeName := args.Get(0)
peerName := args.Get(1)
if err := client.ConnectNode(nodeName, peerName); err != nil { if err := client.ConnectNode(nodeName, peerName); err != nil {
return err return err
} }
@ -383,11 +386,11 @@ func connectNode(ctx *cli.Context) error {
func disconnectNode(ctx *cli.Context) error { func disconnectNode(ctx *cli.Context) error {
args := ctx.Args() args := ctx.Args()
if len(args) != 2 { if args.Len() != 2 {
return cli.ShowCommandHelp(ctx, ctx.Command.Name) return cli.ShowCommandHelp(ctx, ctx.Command.Name)
} }
nodeName := args[0] nodeName := args.Get(0)
peerName := args[1] peerName := args.Get(1)
if err := client.DisconnectNode(nodeName, peerName); err != nil { if err := client.DisconnectNode(nodeName, peerName); err != nil {
return err return err
} }
@ -397,21 +400,21 @@ func disconnectNode(ctx *cli.Context) error {
func rpcNode(ctx *cli.Context) error { func rpcNode(ctx *cli.Context) error {
args := ctx.Args() args := ctx.Args()
if len(args) < 2 { if args.Len() < 2 {
return cli.ShowCommandHelp(ctx, ctx.Command.Name) return cli.ShowCommandHelp(ctx, ctx.Command.Name)
} }
nodeName := args[0] nodeName := args.Get(0)
method := args[1] method := args.Get(1)
rpcClient, err := client.RPCClient(context.Background(), nodeName) rpcClient, err := client.RPCClient(context.Background(), nodeName)
if err != nil { if err != nil {
return err return err
} }
if ctx.Bool(subscribeFlag.Name) { if ctx.Bool(subscribeFlag.Name) {
return rpcSubscribe(rpcClient, ctx.App.Writer, method, args[3:]...) return rpcSubscribe(rpcClient, ctx.App.Writer, method, args.Slice()[3:]...)
} }
var result interface{} var result interface{}
params := make([]interface{}, len(args[3:])) params := make([]interface{}, len(args.Slice()[3:]))
for i, v := range args[3:] { for i, v := range args.Slice()[3:] {
params[i] = v params[i] = v
} }
if err := rpcClient.Call(&result, method, params...); err != nil { if err := rpcClient.Call(&result, method, params...); err != nil {
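The file above is part of the repository-wide switch from gopkg.in/urfave/cli.v1 to github.com/urfave/cli/v2: flags are now declared as pointers, EnvVar becomes the EnvVars slice, commands are registered as []*cli.Command, and positional arguments are read through ctx.NArg() and ctx.Args().First()/Get() instead of indexing ctx.Args(). A small self-contained sketch of those v2 idioms follows; the "greet" app, its "hello" command and the GREET_NAME variable are made-up illustrations, not geth code.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

// v2 flags are pointers; the old EnvVar string is now an EnvVars slice.
var nameFlag = &cli.StringFlag{
	Name:    "name",
	Value:   "world",
	Usage:   "who to greet",
	EnvVars: []string{"GREET_NAME"},
}

func main() {
	app := &cli.App{
		Name:  "greet",
		Flags: []cli.Flag{nameFlag},
		// v1's []cli.Command became []*cli.Command.
		Commands: []*cli.Command{{
			Name:  "hello",
			Usage: "print a greeting",
			Action: func(ctx *cli.Context) error {
				// len(ctx.Args()) no longer compiles in v2; use NArg().
				if ctx.NArg() > 1 {
					return cli.ShowCommandHelp(ctx, ctx.Command.Name)
				}
				target := ctx.String(nameFlag.Name)
				if ctx.NArg() == 1 {
					target = ctx.Args().First() // positional args via Args().First()/Get()
				}
				fmt.Println("hello,", target)
				return nil
			},
		}},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}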

View File

@ -1,626 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"errors"
"math"
"math/big"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
math2 "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
)
// alethGenesisSpec represents the genesis specification format used by the
// C++ Ethereum implementation.
type alethGenesisSpec struct {
SealEngine string `json:"sealEngine"`
Params struct {
AccountStartNonce math2.HexOrDecimal64 `json:"accountStartNonce"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
HomesteadForkBlock *hexutil.Big `json:"homesteadForkBlock,omitempty"`
DaoHardforkBlock math2.HexOrDecimal64 `json:"daoHardforkBlock"`
EIP150ForkBlock *hexutil.Big `json:"EIP150ForkBlock,omitempty"`
EIP158ForkBlock *hexutil.Big `json:"EIP158ForkBlock,omitempty"`
ByzantiumForkBlock *hexutil.Big `json:"byzantiumForkBlock,omitempty"`
ConstantinopleForkBlock *hexutil.Big `json:"constantinopleForkBlock,omitempty"`
ConstantinopleFixForkBlock *hexutil.Big `json:"constantinopleFixForkBlock,omitempty"`
IstanbulForkBlock *hexutil.Big `json:"istanbulForkBlock,omitempty"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
TieBreakingGas bool `json:"tieBreakingGas"`
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"`
DurationLimit *math2.HexOrDecimal256 `json:"durationLimit"`
BlockReward *hexutil.Big `json:"blockReward"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
AllowFutureBlocks bool `json:"allowFutureBlocks"`
} `json:"params"`
Genesis struct {
Nonce types.BlockNonce `json:"nonce"`
Difficulty *hexutil.Big `json:"difficulty"`
MixHash common.Hash `json:"mixHash"`
Author common.Address `json:"author"`
Timestamp hexutil.Uint64 `json:"timestamp"`
ParentHash common.Hash `json:"parentHash"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit hexutil.Uint64 `json:"gasLimit"`
} `json:"genesis"`
Accounts map[common.UnprefixedAddress]*alethGenesisSpecAccount `json:"accounts"`
}
// alethGenesisSpecAccount is the prefunded genesis account and/or precompiled
// contract definition.
type alethGenesisSpecAccount struct {
Balance *math2.HexOrDecimal256 `json:"balance,omitempty"`
Nonce uint64 `json:"nonce,omitempty"`
Precompiled *alethGenesisSpecBuiltin `json:"precompiled,omitempty"`
}
// alethGenesisSpecBuiltin is the precompiled contract definition.
type alethGenesisSpecBuiltin struct {
Name string `json:"name,omitempty"`
StartingBlock *hexutil.Big `json:"startingBlock,omitempty"`
Linear *alethGenesisSpecLinearPricing `json:"linear,omitempty"`
}
type alethGenesisSpecLinearPricing struct {
Base uint64 `json:"base"`
Word uint64 `json:"word"`
}
// newAlethGenesisSpec converts a go-ethereum genesis block into an Aleth-specific
// chain specification format.
func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSpec, error) {
// Only ethash is currently supported between go-ethereum and aleth
if genesis.Config.Ethash == nil {
return nil, errors.New("unsupported consensus engine")
}
// Reconstruct the chain spec in Aleth format
spec := &alethGenesisSpec{
SealEngine: "Ethash",
}
// Some defaults
spec.Params.AccountStartNonce = 0
spec.Params.TieBreakingGas = false
spec.Params.AllowFutureBlocks = false
// Dao hardfork block is a special one. The fork block is listed as 0 in the
// config but aleth will sync with ETC clients up until the actual dao hard
// fork block.
spec.Params.DaoHardforkBlock = 0
if num := genesis.Config.HomesteadBlock; num != nil {
spec.Params.HomesteadForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.EIP150Block; num != nil {
spec.Params.EIP150ForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.EIP158Block; num != nil {
spec.Params.EIP158ForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.ByzantiumBlock; num != nil {
spec.Params.ByzantiumForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.ConstantinopleBlock; num != nil {
spec.Params.ConstantinopleForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.PetersburgBlock; num != nil {
spec.Params.ConstantinopleFixForkBlock = (*hexutil.Big)(num)
}
if num := genesis.Config.IstanbulBlock; num != nil {
spec.Params.IstanbulForkBlock = (*hexutil.Big)(num)
}
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxInt64)
spec.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
spec.Params.DifficultyBoundDivisor = (*math2.HexOrDecimal256)(params.DifficultyBoundDivisor)
spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
spec.Params.DurationLimit = (*math2.HexOrDecimal256)(params.DurationLimit)
spec.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
spec.Genesis.Nonce = types.EncodeNonce(genesis.Nonce)
spec.Genesis.MixHash = genesis.Mixhash
spec.Genesis.Difficulty = (*hexutil.Big)(genesis.Difficulty)
spec.Genesis.Author = genesis.Coinbase
spec.Genesis.Timestamp = (hexutil.Uint64)(genesis.Timestamp)
spec.Genesis.ParentHash = genesis.ParentHash
spec.Genesis.ExtraData = genesis.ExtraData
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
for address, account := range genesis.Alloc {
spec.setAccount(address, account)
}
spec.setPrecompile(1, &alethGenesisSpecBuiltin{Name: "ecrecover",
Linear: &alethGenesisSpecLinearPricing{Base: 3000}})
spec.setPrecompile(2, &alethGenesisSpecBuiltin{Name: "sha256",
Linear: &alethGenesisSpecLinearPricing{Base: 60, Word: 12}})
spec.setPrecompile(3, &alethGenesisSpecBuiltin{Name: "ripemd160",
Linear: &alethGenesisSpecLinearPricing{Base: 600, Word: 120}})
spec.setPrecompile(4, &alethGenesisSpecBuiltin{Name: "identity",
Linear: &alethGenesisSpecLinearPricing{Base: 15, Word: 3}})
if genesis.Config.ByzantiumBlock != nil {
spec.setPrecompile(5, &alethGenesisSpecBuiltin{Name: "modexp",
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock)})
spec.setPrecompile(6, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_add",
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Linear: &alethGenesisSpecLinearPricing{Base: 500}})
spec.setPrecompile(7, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_mul",
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Linear: &alethGenesisSpecLinearPricing{Base: 40000}})
spec.setPrecompile(8, &alethGenesisSpecBuiltin{Name: "alt_bn128_pairing_product",
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock)})
}
if genesis.Config.IstanbulBlock != nil {
if genesis.Config.ByzantiumBlock == nil {
return nil, errors.New("invalid genesis, istanbul fork is enabled while byzantium is not")
}
spec.setPrecompile(6, &alethGenesisSpecBuiltin{
Name: "alt_bn128_G1_add",
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
}) // Aleth hardcoded the gas policy
spec.setPrecompile(7, &alethGenesisSpecBuiltin{
Name: "alt_bn128_G1_mul",
StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
}) // Aleth hardcoded the gas policy
spec.setPrecompile(9, &alethGenesisSpecBuiltin{
Name: "blake2_compression",
StartingBlock: (*hexutil.Big)(genesis.Config.IstanbulBlock),
})
}
return spec, nil
}
func (spec *alethGenesisSpec) setPrecompile(address byte, data *alethGenesisSpecBuiltin) {
if spec.Accounts == nil {
spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
}
addr := common.UnprefixedAddress(common.BytesToAddress([]byte{address}))
if _, exist := spec.Accounts[addr]; !exist {
spec.Accounts[addr] = &alethGenesisSpecAccount{}
}
spec.Accounts[addr].Precompiled = data
}
func (spec *alethGenesisSpec) setAccount(address common.Address, account core.GenesisAccount) {
if spec.Accounts == nil {
spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
}
a, exist := spec.Accounts[common.UnprefixedAddress(address)]
if !exist {
a = &alethGenesisSpecAccount{}
spec.Accounts[common.UnprefixedAddress(address)] = a
}
a.Balance = (*math2.HexOrDecimal256)(account.Balance)
a.Nonce = account.Nonce
}
// parityChainSpec is the chain specification format used by Parity.
type parityChainSpec struct {
Name string `json:"name"`
Datadir string `json:"dataDir"`
Engine struct {
Ethash struct {
Params struct {
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
DurationLimit *hexutil.Big `json:"durationLimit"`
BlockReward map[string]string `json:"blockReward"`
DifficultyBombDelays map[string]string `json:"difficultyBombDelays"`
HomesteadTransition hexutil.Uint64 `json:"homesteadTransition"`
EIP100bTransition hexutil.Uint64 `json:"eip100bTransition"`
} `json:"params"`
} `json:"Ethash"`
} `json:"engine"`
Params struct {
AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"`
MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"`
EIP98Transition hexutil.Uint64 `json:"eip98Transition"`
EIP150Transition hexutil.Uint64 `json:"eip150Transition"`
EIP160Transition hexutil.Uint64 `json:"eip160Transition"`
EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"`
EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"`
EIP155Transition hexutil.Uint64 `json:"eip155Transition"`
EIP140Transition hexutil.Uint64 `json:"eip140Transition"`
EIP211Transition hexutil.Uint64 `json:"eip211Transition"`
EIP214Transition hexutil.Uint64 `json:"eip214Transition"`
EIP658Transition hexutil.Uint64 `json:"eip658Transition"`
EIP145Transition hexutil.Uint64 `json:"eip145Transition"`
EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"`
EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"`
EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"`
EIP1283DisableTransition hexutil.Uint64 `json:"eip1283DisableTransition"`
EIP1283ReenableTransition hexutil.Uint64 `json:"eip1283ReenableTransition"`
EIP1344Transition hexutil.Uint64 `json:"eip1344Transition"`
EIP1884Transition hexutil.Uint64 `json:"eip1884Transition"`
EIP2028Transition hexutil.Uint64 `json:"eip2028Transition"`
} `json:"params"`
Genesis struct {
Seal struct {
Ethereum struct {
Nonce types.BlockNonce `json:"nonce"`
MixHash hexutil.Bytes `json:"mixHash"`
} `json:"ethereum"`
} `json:"seal"`
Difficulty *hexutil.Big `json:"difficulty"`
Author common.Address `json:"author"`
Timestamp hexutil.Uint64 `json:"timestamp"`
ParentHash common.Hash `json:"parentHash"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit hexutil.Uint64 `json:"gasLimit"`
} `json:"genesis"`
Nodes []string `json:"nodes"`
Accounts map[common.UnprefixedAddress]*parityChainSpecAccount `json:"accounts"`
}
// parityChainSpecAccount is the prefunded genesis account and/or precompiled
// contract definition.
type parityChainSpecAccount struct {
Balance math2.HexOrDecimal256 `json:"balance"`
Nonce math2.HexOrDecimal64 `json:"nonce,omitempty"`
Builtin *parityChainSpecBuiltin `json:"builtin,omitempty"`
}
// parityChainSpecBuiltin is the precompiled contract definition.
type parityChainSpecBuiltin struct {
Name string `json:"name"` // Each builtin should have its own name
Pricing interface{} `json:"pricing"` // Each builtin should have its own price strategy
ActivateAt *hexutil.Big `json:"activate_at,omitempty"` // ActivateAt can't be omitted if empty, default means no fork
}
// parityChainSpecPricing represents the different pricing models that builtin
// contracts might advertise using.
type parityChainSpecPricing struct {
Linear *parityChainSpecLinearPricing `json:"linear,omitempty"`
ModExp *parityChainSpecModExpPricing `json:"modexp,omitempty"`
// Before https://github.com/paritytech/parity-ethereum/pull/11039,
// Parity used this format to configure the bn pairing price policy.
AltBnPairing *parityChainSepcAltBnPairingPricing `json:"alt_bn128_pairing,omitempty"`
// Blake2F is the price per round of Blake2 compression
Blake2F *parityChainSpecBlakePricing `json:"blake2_f,omitempty"`
}
type parityChainSpecLinearPricing struct {
Base uint64 `json:"base"`
Word uint64 `json:"word"`
}
type parityChainSpecModExpPricing struct {
Divisor uint64 `json:"divisor"`
}
// parityChainSpecAltBnConstOperationPricing defines the price
// policy for bn const operations (used after Istanbul).
type parityChainSpecAltBnConstOperationPricing struct {
Price uint64 `json:"price"`
}
// parityChainSepcAltBnPairingPricing defines the price policy
// for bn pairing.
type parityChainSepcAltBnPairingPricing struct {
Base uint64 `json:"base"`
Pair uint64 `json:"pair"`
}
// parityChainSpecBlakePricing defines the price policy for blake2 f
// compression.
type parityChainSpecBlakePricing struct {
GasPerRound uint64 `json:"gas_per_round"`
}
type parityChainSpecAlternativePrice struct {
AltBnConstOperationPrice *parityChainSpecAltBnConstOperationPricing `json:"alt_bn128_const_operations,omitempty"`
AltBnPairingPrice *parityChainSepcAltBnPairingPricing `json:"alt_bn128_pairing,omitempty"`
}
// parityChainSpecVersionedPricing represents a single version price policy.
type parityChainSpecVersionedPricing struct {
Price *parityChainSpecAlternativePrice `json:"price,omitempty"`
Info string `json:"info,omitempty"`
}
// newParityChainSpec converts a go-ethereum genesis block into a Parity-specific
// chain specification format.
func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []string) (*parityChainSpec, error) {
// Only ethash is currently supported between go-ethereum and Parity
if genesis.Config.Ethash == nil {
return nil, errors.New("unsupported consensus engine")
}
// Reconstruct the chain spec in Parity's format
spec := &parityChainSpec{
Name: network,
Nodes: bootnodes,
Datadir: strings.ToLower(network),
}
spec.Engine.Ethash.Params.BlockReward = make(map[string]string)
spec.Engine.Ethash.Params.DifficultyBombDelays = make(map[string]string)
// Frontier
spec.Engine.Ethash.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
spec.Engine.Ethash.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
spec.Engine.Ethash.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
spec.Engine.Ethash.Params.BlockReward["0x0"] = hexutil.EncodeBig(ethash.FrontierBlockReward)
// Homestead
spec.Engine.Ethash.Params.HomesteadTransition = hexutil.Uint64(genesis.Config.HomesteadBlock.Uint64())
// Tangerine Whistle : 150
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-608.md
spec.Params.EIP150Transition = hexutil.Uint64(genesis.Config.EIP150Block.Uint64())
// Spurious Dragon: 155, 160, 161, 170
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-607.md
spec.Params.EIP155Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
spec.Params.EIP160Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
spec.Params.EIP161abcTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
spec.Params.EIP161dTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
// Byzantium
if num := genesis.Config.ByzantiumBlock; num != nil {
spec.setByzantium(num)
}
// Constantinople
if num := genesis.Config.ConstantinopleBlock; num != nil {
spec.setConstantinople(num)
}
// ConstantinopleFix (remove eip-1283)
if num := genesis.Config.PetersburgBlock; num != nil {
spec.setConstantinopleFix(num)
}
// Istanbul
if num := genesis.Config.IstanbulBlock; num != nil {
spec.setIstanbul(num)
}
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.MaxCodeSize = params.MaxCodeSize
// geth has it set from zero
spec.Params.MaxCodeSizeTransition = 0
// Disable this one
spec.Params.EIP98Transition = math.MaxInt64
spec.Genesis.Seal.Ethereum.Nonce = types.EncodeNonce(genesis.Nonce)
spec.Genesis.Seal.Ethereum.MixHash = genesis.Mixhash[:]
spec.Genesis.Difficulty = (*hexutil.Big)(genesis.Difficulty)
spec.Genesis.Author = genesis.Coinbase
spec.Genesis.Timestamp = (hexutil.Uint64)(genesis.Timestamp)
spec.Genesis.ParentHash = genesis.ParentHash
spec.Genesis.ExtraData = genesis.ExtraData
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
for address, account := range genesis.Alloc {
bal := math2.HexOrDecimal256(*account.Balance)
spec.Accounts[common.UnprefixedAddress(address)] = &parityChainSpecAccount{
Balance: bal,
Nonce: math2.HexOrDecimal64(account.Nonce),
}
}
spec.setPrecompile(1, &parityChainSpecBuiltin{Name: "ecrecover",
Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}}})
spec.setPrecompile(2, &parityChainSpecBuiltin{
Name: "sha256", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 60, Word: 12}},
})
spec.setPrecompile(3, &parityChainSpecBuiltin{
Name: "ripemd160", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 600, Word: 120}},
})
spec.setPrecompile(4, &parityChainSpecBuiltin{
Name: "identity", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 15, Word: 3}},
})
if genesis.Config.ByzantiumBlock != nil {
spec.setPrecompile(5, &parityChainSpecBuiltin{
Name: "modexp",
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Pricing: &parityChainSpecPricing{
ModExp: &parityChainSpecModExpPricing{Divisor: 20},
},
})
spec.setPrecompile(6, &parityChainSpecBuiltin{
Name: "alt_bn128_add",
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Pricing: &parityChainSpecPricing{
Linear: &parityChainSpecLinearPricing{Base: 500, Word: 0},
},
})
spec.setPrecompile(7, &parityChainSpecBuiltin{
Name: "alt_bn128_mul",
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Pricing: &parityChainSpecPricing{
Linear: &parityChainSpecLinearPricing{Base: 40000, Word: 0},
},
})
spec.setPrecompile(8, &parityChainSpecBuiltin{
Name: "alt_bn128_pairing",
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Pricing: &parityChainSpecPricing{
AltBnPairing: &parityChainSepcAltBnPairingPricing{Base: 100000, Pair: 80000},
},
})
}
if genesis.Config.IstanbulBlock != nil {
if genesis.Config.ByzantiumBlock == nil {
return nil, errors.New("invalid genesis, istanbul fork is enabled while byzantium is not")
}
spec.setPrecompile(6, &parityChainSpecBuiltin{
Name: "alt_bn128_add",
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Pricing: map[*hexutil.Big]*parityChainSpecVersionedPricing{
(*hexutil.Big)(big.NewInt(0)): {
Price: &parityChainSpecAlternativePrice{
AltBnConstOperationPrice: &parityChainSpecAltBnConstOperationPricing{Price: 500},
},
},
(*hexutil.Big)(genesis.Config.IstanbulBlock): {
Price: &parityChainSpecAlternativePrice{
AltBnConstOperationPrice: &parityChainSpecAltBnConstOperationPricing{Price: 150},
},
},
},
})
spec.setPrecompile(7, &parityChainSpecBuiltin{
Name: "alt_bn128_mul",
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Pricing: map[*hexutil.Big]*parityChainSpecVersionedPricing{
(*hexutil.Big)(big.NewInt(0)): {
Price: &parityChainSpecAlternativePrice{
AltBnConstOperationPrice: &parityChainSpecAltBnConstOperationPricing{Price: 40000},
},
},
(*hexutil.Big)(genesis.Config.IstanbulBlock): {
Price: &parityChainSpecAlternativePrice{
AltBnConstOperationPrice: &parityChainSpecAltBnConstOperationPricing{Price: 6000},
},
},
},
})
spec.setPrecompile(8, &parityChainSpecBuiltin{
Name: "alt_bn128_pairing",
ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
Pricing: map[*hexutil.Big]*parityChainSpecVersionedPricing{
(*hexutil.Big)(big.NewInt(0)): {
Price: &parityChainSpecAlternativePrice{
AltBnPairingPrice: &parityChainSepcAltBnPairingPricing{Base: 100000, Pair: 80000},
},
},
(*hexutil.Big)(genesis.Config.IstanbulBlock): {
Price: &parityChainSpecAlternativePrice{
AltBnPairingPrice: &parityChainSepcAltBnPairingPricing{Base: 45000, Pair: 34000},
},
},
},
})
spec.setPrecompile(9, &parityChainSpecBuiltin{
Name: "blake2_f",
ActivateAt: (*hexutil.Big)(genesis.Config.IstanbulBlock),
Pricing: &parityChainSpecPricing{
Blake2F: &parityChainSpecBlakePricing{GasPerRound: 1},
},
})
}
return spec, nil
}
func (spec *parityChainSpec) setPrecompile(address byte, data *parityChainSpecBuiltin) {
if spec.Accounts == nil {
spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
}
a := common.UnprefixedAddress(common.BytesToAddress([]byte{address}))
if _, exist := spec.Accounts[a]; !exist {
spec.Accounts[a] = &parityChainSpecAccount{}
}
spec.Accounts[a].Builtin = data
}
func (spec *parityChainSpec) setByzantium(num *big.Int) {
spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ByzantiumBlockReward)
spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(3000000)
n := hexutil.Uint64(num.Uint64())
spec.Engine.Ethash.Params.EIP100bTransition = n
spec.Params.EIP140Transition = n
spec.Params.EIP211Transition = n
spec.Params.EIP214Transition = n
spec.Params.EIP658Transition = n
}
func (spec *parityChainSpec) setConstantinople(num *big.Int) {
spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ConstantinopleBlockReward)
spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(2000000)
n := hexutil.Uint64(num.Uint64())
spec.Params.EIP145Transition = n
spec.Params.EIP1014Transition = n
spec.Params.EIP1052Transition = n
spec.Params.EIP1283Transition = n
}
func (spec *parityChainSpec) setConstantinopleFix(num *big.Int) {
spec.Params.EIP1283DisableTransition = hexutil.Uint64(num.Uint64())
}
func (spec *parityChainSpec) setIstanbul(num *big.Int) {
spec.Params.EIP1344Transition = hexutil.Uint64(num.Uint64())
spec.Params.EIP1884Transition = hexutil.Uint64(num.Uint64())
spec.Params.EIP2028Transition = hexutil.Uint64(num.Uint64())
spec.Params.EIP1283ReenableTransition = hexutil.Uint64(num.Uint64())
}
// pyEthereumGenesisSpec represents the genesis specification format used by the
// Python Ethereum implementation.
type pyEthereumGenesisSpec struct {
Nonce types.BlockNonce `json:"nonce"`
Timestamp hexutil.Uint64 `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extraData"`
GasLimit hexutil.Uint64 `json:"gasLimit"`
Difficulty *hexutil.Big `json:"difficulty"`
Mixhash common.Hash `json:"mixhash"`
Coinbase common.Address `json:"coinbase"`
Alloc core.GenesisAlloc `json:"alloc"`
ParentHash common.Hash `json:"parentHash"`
}
// newPyEthereumGenesisSpec converts a go-ethereum genesis block into a pyethereum-specific
// chain specification format.
func newPyEthereumGenesisSpec(network string, genesis *core.Genesis) (*pyEthereumGenesisSpec, error) {
// Only ethash is currently supported between go-ethereum and pyethereum
if genesis.Config.Ethash == nil {
return nil, errors.New("unsupported consensus engine")
}
spec := &pyEthereumGenesisSpec{
Nonce: types.EncodeNonce(genesis.Nonce),
Timestamp: (hexutil.Uint64)(genesis.Timestamp),
ExtraData: genesis.ExtraData,
GasLimit: (hexutil.Uint64)(genesis.GasLimit),
Difficulty: (*hexutil.Big)(genesis.Difficulty),
Mixhash: genesis.Mixhash,
Coinbase: genesis.Coinbase,
Alloc: genesis.Alloc,
ParentHash: genesis.ParentHash,
}
return spec, nil
}

View File

@ -1,95 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"encoding/json"
"os"
"reflect"
"strings"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/core"
)
// Tests the go-ethereum to Aleth chainspec conversion for the Stureby testnet.
func TestAlethSturebyConverter(t *testing.T) {
blob, err := os.ReadFile("testdata/stureby_geth.json")
if err != nil {
t.Fatalf("could not read file: %v", err)
}
var genesis core.Genesis
if err := json.Unmarshal(blob, &genesis); err != nil {
t.Fatalf("failed parsing genesis: %v", err)
}
spec, err := newAlethGenesisSpec("stureby", &genesis)
if err != nil {
t.Fatalf("failed creating chainspec: %v", err)
}
expBlob, err := os.ReadFile("testdata/stureby_aleth.json")
if err != nil {
t.Fatalf("could not read file: %v", err)
}
expspec := &alethGenesisSpec{}
if err := json.Unmarshal(expBlob, expspec); err != nil {
t.Fatalf("failed parsing genesis: %v", err)
}
if !reflect.DeepEqual(expspec, spec) {
t.Errorf("chainspec mismatch")
c := spew.ConfigState{
DisablePointerAddresses: true,
SortKeys: true,
}
exp := strings.Split(c.Sdump(expspec), "\n")
got := strings.Split(c.Sdump(spec), "\n")
for i := 0; i < len(exp) && i < len(got); i++ {
if exp[i] != got[i] {
t.Logf("got: %v\nexp: %v\n", exp[i], got[i])
}
}
}
}
// Tests the go-ethereum to Parity chainspec conversion for the Stureby testnet.
func TestParitySturebyConverter(t *testing.T) {
blob, err := os.ReadFile("testdata/stureby_geth.json")
if err != nil {
t.Fatalf("could not read file: %v", err)
}
var genesis core.Genesis
if err := json.Unmarshal(blob, &genesis); err != nil {
t.Fatalf("failed parsing genesis: %v", err)
}
spec, err := newParityChainSpec("stureby", &genesis, []string{})
if err != nil {
t.Fatalf("failed creating chainspec: %v", err)
}
enc, err := json.MarshalIndent(spec, "", " ")
if err != nil {
t.Fatalf("failed encoding chainspec: %v", err)
}
expBlob, err := os.ReadFile("testdata/stureby_parity.json")
if err != nil {
t.Fatalf("could not read file: %v", err)
}
if !bytes.Equal(expBlob, enc) {
t.Fatalf("chainspec mismatch")
}
}

View File

@ -18,7 +18,6 @@ package main
import ( import (
"bytes" "bytes"
"encoding/json"
"fmt" "fmt"
"html/template" "html/template"
"math/rand" "math/rand"
@ -582,36 +581,6 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
// Marshal the genesis spec files for go-ethereum and all the other clients // Marshal the genesis spec files for go-ethereum and all the other clients
genesis, _ := conf.Genesis.MarshalJSON() genesis, _ := conf.Genesis.MarshalJSON()
files[filepath.Join(workdir, network+".json")] = genesis files[filepath.Join(workdir, network+".json")] = genesis
if conf.Genesis.Config.Ethash != nil {
cppSpec, err := newAlethGenesisSpec(network, conf.Genesis)
if err != nil {
return nil, err
}
cppSpecJSON, _ := json.Marshal(cppSpec)
files[filepath.Join(workdir, network+"-cpp.json")] = cppSpecJSON
harmonySpecJSON, _ := conf.Genesis.MarshalJSON()
files[filepath.Join(workdir, network+"-harmony.json")] = harmonySpecJSON
paritySpec, err := newParityChainSpec(network, conf.Genesis, conf.bootnodes)
if err != nil {
return nil, err
}
paritySpecJSON, _ := json.Marshal(paritySpec)
files[filepath.Join(workdir, network+"-parity.json")] = paritySpecJSON
pyethSpec, err := newPyEthereumGenesisSpec(network, conf.Genesis)
if err != nil {
return nil, err
}
pyethSpecJSON, _ := json.Marshal(pyethSpec)
files[filepath.Join(workdir, network+"-python.json")] = pyethSpecJSON
} else {
for _, client := range []string{"cpp", "harmony", "parity", "python"} {
files[filepath.Join(workdir, network+"-"+client+".json")] = []byte{}
}
}
files[filepath.Join(workdir, "puppeth.png")] = dashboardMascot files[filepath.Join(workdir, "puppeth.png")] = dashboardMascot
// Upload the deployment files to the remote server (and clean up afterwards) // Upload the deployment files to the remote server (and clean up afterwards)

View File

@ -24,7 +24,7 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli/v2"
) )
// main is just a boring entry point to set up the CLI app. // main is just a boring entry point to set up the CLI app.
@ -33,11 +33,11 @@ func main() {
app.Name = "puppeth" app.Name = "puppeth"
app.Usage = "assemble and maintain private Ethereum networks" app.Usage = "assemble and maintain private Ethereum networks"
app.Flags = []cli.Flag{ app.Flags = []cli.Flag{
cli.StringFlag{ &cli.StringFlag{
Name: "network", Name: "network",
Usage: "name of the network to administer (no spaces or hyphens, please)", Usage: "name of the network to administer (no spaces or hyphens, please)",
}, },
cli.IntFlag{ &cli.IntFlag{
Name: "loglevel", Name: "loglevel",
Value: 3, Value: 3,
Usage: "log level to emit to the screen", Usage: "log level to emit to the screen",

View File

@ -30,7 +30,7 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent" "golang.org/x/crypto/ssh/agent"
"golang.org/x/crypto/ssh/terminal" "golang.org/x/term"
) )
// sshClient is a small wrapper around Go's SSH client with a few utility methods // sshClient is a small wrapper around Go's SSH client with a few utility methods
@ -101,7 +101,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
key, err := ssh.ParsePrivateKey(buf) key, err := ssh.ParsePrivateKey(buf)
if err != nil { if err != nil {
fmt.Printf("What's the decryption password for %s? (won't be echoed)\n>", path) fmt.Printf("What's the decryption password for %s? (won't be echoed)\n>", path)
blob, err := terminal.ReadPassword(int(os.Stdin.Fd())) blob, err := term.ReadPassword(int(os.Stdin.Fd()))
fmt.Println() fmt.Println()
if err != nil { if err != nil {
log.Warn("Couldn't read password", "err", err) log.Warn("Couldn't read password", "err", err)
@ -118,7 +118,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
} }
auths = append(auths, ssh.PasswordCallback(func() (string, error) { auths = append(auths, ssh.PasswordCallback(func() (string, error) {
fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", username, server) fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", username, server)
blob, err := terminal.ReadPassword(int(os.Stdin.Fd())) blob, err := term.ReadPassword(int(os.Stdin.Fd()))
fmt.Println() fmt.Println()
return string(blob), err return string(blob), err

View File

@ -34,7 +34,7 @@ import (
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/peterh/liner" "github.com/peterh/liner"
"golang.org/x/crypto/ssh/terminal" "golang.org/x/term"
) )
// config contains all the configurations needed by puppeth that should be saved // config contains all the configurations needed by puppeth that should be saved
@ -228,7 +228,7 @@ func (w *wizard) readDefaultFloat(def float64) float64 {
// line and returns it. The input will not be echoed. // line and returns it. The input will not be echoed.
func (w *wizard) readPassword() string { func (w *wizard) readPassword() string {
fmt.Printf("> ") fmt.Printf("> ")
text, err := terminal.ReadPassword(int(os.Stdin.Fd())) text, err := term.ReadPassword(int(os.Stdin.Fd()))
if err != nil { if err != nil {
log.Crit("Failed to read password", "err", err) log.Crit("Failed to read password", "err", err)
} }
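The two puppeth hunks above (ssh.go and wizard.go) replace the deprecated golang.org/x/crypto/ssh/terminal package with golang.org/x/term; the call shape is identical. A minimal sketch of prompting for a password without echoing it, using the new package:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/term"
)

func main() {
	fmt.Print("password (won't be echoed): ")
	secret, err := term.ReadPassword(int(os.Stdin.Fd())) // same signature as the old terminal.ReadPassword
	fmt.Println()
	if err != nil {
		log.Fatalf("failed to read password: %v", err)
	}
	fmt.Printf("read %d bytes\n", len(secret))
}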

View File

@ -250,8 +250,8 @@ func (w *wizard) manageGenesis() {
case "2": case "2":
// Save whatever genesis configuration we currently have // Save whatever genesis configuration we currently have
fmt.Println() fmt.Println()
fmt.Printf("Which folder to save the genesis specs into? (default = current)\n") fmt.Printf("Which folder to save the genesis spec into? (default = current)\n")
fmt.Printf(" Will create %s.json, %s-aleth.json, %s-harmony.json, %s-parity.json\n", w.network, w.network, w.network, w.network) fmt.Printf(" Will create %s.json\n", w.network)
folder := w.readDefaultString(".") folder := w.readDefaultString(".")
if err := os.MkdirAll(folder, 0755); err != nil { if err := os.MkdirAll(folder, 0755); err != nil {
@ -268,21 +268,6 @@ func (w *wizard) manageGenesis() {
} }
log.Info("Saved native genesis chain spec", "path", gethJson) log.Info("Saved native genesis chain spec", "path", gethJson)
// Export the genesis spec used by Aleth (formerly C++ Ethereum)
if spec, err := newAlethGenesisSpec(w.network, w.conf.Genesis); err != nil {
log.Error("Failed to create Aleth chain spec", "err", err)
} else {
saveGenesis(folder, w.network, "aleth", spec)
}
// Export the genesis spec used by Parity
if spec, err := newParityChainSpec(w.network, w.conf.Genesis, []string{}); err != nil {
log.Error("Failed to create Parity chain spec", "err", err)
} else {
saveGenesis(folder, w.network, "parity", spec)
}
// Export the genesis spec used by Harmony (formerly EthereumJ)
saveGenesis(folder, w.network, "harmony", w.conf.Genesis)
case "3": case "3":
// Make sure we don't have any services running // Make sure we don't have any services running
if len(w.conf.servers()) > 0 { if len(w.conf.servers()) > 0 {
@ -298,15 +283,3 @@ func (w *wizard) manageGenesis() {
return return
} }
} }
// saveGenesis JSON encodes an arbitrary genesis spec into a pre-defined file.
func saveGenesis(folder, network, client string, spec interface{}) {
path := filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client))
out, _ := json.MarshalIndent(spec, "", " ")
if err := os.WriteFile(path, out, 0644); err != nil {
log.Error("Failed to save genesis file", "client", client, "err", err)
return
}
log.Info("Saved genesis chain spec", "client", client, "path", path)
}
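With the Aleth, Parity and Harmony exporters gone, manageGenesis only writes the native go-ethereum spec (%s.json). A rough sketch of what the remaining export amounts to; saveNativeGenesis is a hypothetical helper for illustration, not code from this diff.

package main

import (
	"encoding/json"
	"os"
	"path/filepath"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/log"
)

// saveNativeGenesis JSON-encodes a genesis spec and writes it to <folder>/<network>.json.
func saveNativeGenesis(folder, network string, genesis *core.Genesis) error {
	out, err := json.MarshalIndent(genesis, "", "  ")
	if err != nil {
		return err
	}
	path := filepath.Join(folder, network+".json")
	if err := os.WriteFile(path, out, 0644); err != nil {
		return err
	}
	log.Info("Saved native genesis chain spec", "path", path)
	return nil
}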

Some files were not shown because too many files have changed in this diff