Merge pull request #7 from openrelayxyz/updates/1.10.10-support

Updates/1.10.10 support
philip-morlier 2021-10-18 14:54:22 -07:00 committed by GitHub
commit 11a11e38ac
417 changed files with 20077 additions and 4613 deletions

.gitmodules
View File

@@ -1,3 +1,4 @@
[submodule "tests"]
path = tests/testdata
url = https://github.com/ethereum/tests
shallow = true

View File

@@ -1,7 +1,7 @@
# This file configures github.com/golangci/golangci-lint.

run:
timeout: 3m → timeout: 5m
tests: true
# default is true. Enables skipping of directories:
#   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$

View File

@@ -5,7 +5,7 @@ jobs:
allow_failures:
- stage: build
os: osx
go: 1.15.x → go: 1.17.x
env:
- azure-osx
- azure-ios
@@ -16,7 +16,7 @@ jobs:
- stage: lint
os: linux
dist: bionic
go: 1.16.x → go: 1.17.x
env:
- lint
git:
@@ -31,7 +31,7 @@ jobs:
os: linux
arch: amd64
dist: bionic
go: 1.16.x → go: 1.17.x
env:
- docker
services:
@@ -41,14 +41,14 @@ jobs:
before_install:
- export DOCKER_CLI_EXPERIMENTAL=enabled
script:
- go run build/ci.go docker -image -manifest amd64,arm64 -upload karalabe/geth-docker-test → - go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go

- stage: build
if: type = push
os: linux
arch: arm64
dist: bionic
go: 1.16.x → go: 1.17.x
env:
- docker
services:
@@ -58,14 +58,14 @@ jobs:
before_install:
- export DOCKER_CLI_EXPERIMENTAL=enabled
script:
- go run build/ci.go docker -image -manifest amd64,arm64 -upload karalabe/geth-docker-test → - go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go

# This builder does the Ubuntu PPA upload
- stage: build
if: type = push
os: linux
dist: bionic
go: 1.16.x → go: 1.17.x
env:
- ubuntu-ppa
- GO111MODULE=on
@@ -90,7 +90,7 @@ jobs:
os: linux
dist: bionic
sudo: required
go: 1.16.x → go: 1.17.x
env:
- azure-linux
- GO111MODULE=on
@@ -127,7 +127,7 @@ jobs:
dist: bionic
services:
- docker
go: 1.16.x → go: 1.17.x
env:
- azure-linux-mips
- GO111MODULE=on
@@ -192,7 +192,7 @@ jobs:
- stage: build
if: type = push
os: osx
go: 1.16.x → go: 1.17.x
env:
- azure-osx
- azure-ios
@@ -224,7 +224,7 @@ jobs:
os: linux
arch: amd64
dist: bionic
go: 1.16.x → go: 1.17.x
env:
- GO111MODULE=on
script:
@@ -235,7 +235,7 @@ jobs:
os: linux
arch: arm64
dist: bionic
go: 1.16.x → go: 1.17.x
env:
- GO111MODULE=on
script:
@@ -244,7 +244,7 @@ jobs:
- stage: build
os: linux
dist: bionic
go: 1.15.x → go: 1.16.x
env:
- GO111MODULE=on
script:
@@ -255,7 +255,7 @@ jobs:
if: type = cron
os: linux
dist: bionic
go: 1.16.x → go: 1.17.x
env:
- azure-purge
- GO111MODULE=on
@@ -263,3 +263,15 @@ jobs:
submodules: false # avoid cloning ethereum/tests
script:
- go run build/ci.go purge -store gethstore/builds -days 14
# This builder executes race tests
- stage: build
if: type = cron
os: linux
dist: bionic
go: 1.17.x
env:
- GO111MODULE=on
script:
- go run build/ci.go test -race -coverage $TEST_PACKAGES

View File

@@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM=""

# Build Geth in a stock Go builder container
FROM golang:1.16-alpine as builder → FROM golang:1.17-alpine as builder

RUN apk add --no-cache gcc musl-dev linux-headers git

View File

@@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM=""

# Build Geth in a stock Go builder container
FROM golang:1.16-alpine as builder → FROM golang:1.17-alpine as builder

RUN apk add --no-cache gcc musl-dev linux-headers git

README.md
View File

@@ -1,385 +1,363 @@
## Go Ethereum

Official Golang implementation of the Ethereum protocol.

[![API Reference](
https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
)](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc)
[![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
[![Travis](https://travis-ci.com/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.com/ethereum/go-ethereum)
[![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv)

Automated builds are available for stable releases and the unstable master branch. Binary
archives are published at https://geth.ethereum.org/downloads/.

## Building the source

For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/install-and-build/installing-geth).

Building `geth` requires both a Go (version 1.14 or later) and a C compiler. You can install
them using your favourite package manager. Once the dependencies are installed, run

# PluGeth

PluGeth is a fork of the [Go Ethereum Client](https://github.com/ethereum/go-ethereum)
(Geth) that implements a plugin architecture, allowing developers to extend
Geth's capabilities in a number of different ways using plugins, rather than
having to create additional, new forks of Geth.

## WARNING: UNSTABLE API

Right now PluGeth is in early development. We are still settling on some of the
plugin APIs, and are not yet making official releases. From an operational
perspective, PluGeth should be as stable as upstream Geth less whatever
instability is added by plugins you might run. But if you plan to run PluGeth
today, be aware that future updates will likely break your plugins.

## System Requirements

System requirements will vary depending on which network you are connecting to.
On the Ethereum mainnet, you should have at least 8 GB RAM, 2 CPUs, and 350 GB
of SSD disks.

PluGeth relies on Golang's Plugin implementation, which is only supported on
Linux, FreeBSD, and macOS. Windows support is unlikely to be added in the
foreseeable future.
## Design Goals
The upstream Geth client exists primarily to serve as a client for the Ethereum
mainnet, though it also supports a number of popular testnets. Supporting the
Ethereum mainnet is a big enough challenge in its own right that the Geth team
generally avoids changes to support other networks, or to provide features only
a small handful of users would be interested in.
The result is that many projects have forked Geth. Some implement their own
consensus protocols or alter the behavior of the EVM to support other networks.
Others are designed to extract information from the Ethereum mainnet in ways
the standard Geth client does not support.
Creating numerous different forks to fill a variety of different needs comes
with a number of drawbacks. Forks tend to drift apart from each other. Many
networks that forked from Geth long ago have stopped merging updates from Geth;
this makes some sense, given that those networks have moved in different
directions than Geth and merging upstream changes while properly maintaining
consensus rules of an existing network could prove quite challenging. But not
merging changes from upstream can mean that security updates are easily missed,
especially when the upstream team [obscures security updates as optimizations](https://blog.openrelay.xyz/vulnerability-lifecycle-framework-geth/)
as a matter of process.
PluGeth aims to provide a single Geth fork that developers can choose to extend
rather than forking the Geth project. Out of the box, PluGeth behaves exactly
like upstream Geth, but by installing plugins written in Golang, developers can
extend its functionality in a wide variety of ways.
## Anatomy of a Plugin
Plugins for Plugeth use Golang's [Native Plugin System](https://golang.org/pkg/plugin/).
Plugin modules must export variables using specific names and types. These will
be processed by the plugin loader, and invoked at certain points during Geth's
operations.
### API
#### Flags
* **Name**: Flags
* **Type**: [flag.FlagSet](https://golang.org/pkg/flag/#FlagSet)
* **Behavior**: This FlagSet will be parsed and your plugin will be able to
access the resulting flags. Note that if any flags are provided, certain
checks are disabled within Geth to avoid failing due to unexpected flags.
#### Subcommands
* **Name**: Subcommands
* **Type**: map[string]func(ctx [*cli.Context](https://pkg.go.dev/github.com/urfave/cli#Context), args []string) error
* **Behavior**: If Geth is invoked with `geth YOUR_COMMAND`, the plugin loader
will look for `YOUR_COMMAND` within this map, and invoke the corresponding
function. This can be useful for certain behaviors like manipulating Geth's
database without having to build a separate binary.
#### Initialize
* **Name**: Initialize
* **Type**: func(*cli.Context, *PluginLoader)
* **Behavior**: Called as soon as the plugin is loaded, with the cli context
and a reference to the plugin loader. This is your plugin's opportunity to
initialize required variables as needed. Note that using the context object
you can check arguments, and optionally can manipulate arguments if needed
for your plugin.
#### InitializeNode
* **Name**: InitializeNode
* **Type**: func(*node.Node, interfaces.Backend)
* **Behavior**: This is called as soon as the Geth node is initialized. The
`*node.Node` object represents the running node with p2p and RPC capabilities,
while the Backend gives you access to a wide array of data you may need to
access.
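As a concrete illustration, a plugin is just a Go `main` package that exports these names and is built with `go build -buildmode=plugin`. The sketch below is hedged: the `plugins` and `plugins/interfaces` import paths and the vendored `cli` package version are assumptions, and the flag and greeting names are purely illustrative.

```go
package main

import (
    "flag"

    cli "gopkg.in/urfave/cli.v1" // assumed to match the cli package Geth vendors

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/plugins"
    "github.com/ethereum/go-ethereum/plugins/interfaces" // assumed path for the Backend interface
)

var (
    // Flags is parsed by the plugin loader; values become available once Geth starts.
    Flags    = *flag.NewFlagSet("myplugin", flag.ContinueOnError)
    greeting *string
)

func init() {
    greeting = Flags.String("myplugin.greeting", "hello", "greeting the plugin logs at startup")
}

// Initialize runs as soon as the plugin is loaded.
func Initialize(ctx *cli.Context, loader *plugins.PluginLoader) {
    log.Info("myplugin loaded")
}

// InitializeNode runs once the Geth node itself is up.
func InitializeNode(stack *node.Node, backend interfaces.Backend) {
    log.Info("myplugin sees a running node", "greeting", *greeting)
}
```

Building this with `-buildmode=plugin` produces the shared object that the plugin loader picks up.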
#### Tracers
* **Name**: Tracer
* **Type**: map[string]TracerResult
* **Behavior**: When calling debug.traceX functions (such as debug_traceCall
and debug_traceTransaction) the tracer can be specified as a key to this map
and the tracer used will be the TracerResult specified here. TracerResult
objects must match the interface:
```
// CaptureStart is called at the start of each transaction
CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {}
// CaptureState is called for each opcode
CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {}
// CaptureFault is called when an error occurs in the EVM
CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {}
// CaptureEnd is called at the end of each transaction
CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {}
// GetResult should return a JSON serializable result object to respond to the trace call
GetResult() (interface{}, error) {}
```

```shell
make geth
```

or, to build the full suite of utilities:

* **Caution**: Modifying the values passed into tracer functions can alter
  the results of the EVM execution in unpredictable ways. Additionally, some
  objects may be reused across calls, so data you wish to capture should be
  copied rather than retained by reference.
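For orientation, here is a minimal tracer satisfying the method set listed above. Only the signatures come from this README; the struct name, the counting behaviour and the result shape are illustrative. Exporting a value like this under a key in the `Tracer` map described above makes it selectable by name from `debug_traceCall`/`debug_traceTransaction`.

```go
package main

import (
    "math/big"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/vm"
)

// CountingTracer is an illustrative TracerResult: it simply counts executed opcodes.
type CountingTracer struct {
    opcount uint64
}

func (t *CountingTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
}

func (t *CountingTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
    t.opcount++
}

func (t *CountingTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
}

func (t *CountingTracer) CaptureEnd(output []byte, gasUsed uint64, d time.Duration, err error) {}

// GetResult returns a JSON-serializable object used as the debug_trace* response.
func (t *CountingTracer) GetResult() (interface{}, error) {
    return map[string]uint64{"opcodes": t.opcount}, nil
}
```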
#### LiveTracer
* **Name**: LiveTracer
* **Type**: vm.Tracer
* **Behavior**: This tracer is used for tracing transactions as they are
processed within blocks. Note that if a block does not validate, some
transactions may be processed that don't end up in blocks, so be sure to
check transactions against finalized blocks.
The interface for a vm.Tracer is similar to a TracerResult (above), but does
not require a `GetResult()` function.
#### GetAPIs
* **Name**: GetAPIs
* **Type**: func(*node.Node, interfaces.Backend) []rpc.API
* **Behavior**: This allows you to register new RPC methods to run within Geth.
* **Example**:
The GetAPIs function itself will generally be fairly brief, and will look
something like this:
```
func GetAPIs(stack *node.Node, backend plugins.Backend) []rpc.API {
  return []rpc.API{
    {
      Namespace: "mynamespace",
      Version:   "1.0",
      Service:   &MyService{backend},
      Public:    true,
    },
  }
}
```

```shell
make all
```

## Executables

The go-ethereum project comes with several wrappers/executables found in the `cmd`
directory.

| Command | Description |
| :-----------: | ----------- |
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options. |
| `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. |
| `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://eth.wiki/en/fundamentals/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |
## Running `geth`
Going through all the possible command line flags is out of scope here (please consult our
[CLI Wiki page](https://geth.ethereum.org/docs/interface/command-line-options)),
but we've enumerated a few common parameter combos to get you up to speed quickly
on how you can run your own `geth` instance.
### Full node on the main Ethereum network
By far the most common scenario is people wanting to simply interact with the Ethereum
network: create accounts; transfer funds; deploy and interact with contracts. For this
particular use-case the user doesn't care about years-old historical data, so we can
fast-sync quickly to the current state of the network. To do so:
```shell
$ geth console
```
This command will:
* Start `geth` in snap sync mode (default, can be changed with the `--syncmode` flag),
causing it to download more data in exchange for avoiding processing the entire history
of the Ethereum network, which is very CPU intensive.
* Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console),
(via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://web3js.readthedocs.io/)
(note: the `web3` version bundled within `geth` is very old, and not up to date with official docs),
as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server).
This tool is optional and if you leave it out you can always attach to an already running
`geth` instance with `geth attach`.
### A Full node on the Görli test network
Transitioning towards developers, if you'd like to play around with creating Ethereum
contracts, you almost certainly would like to do that without any real money involved until
you get the hang of the entire system. In other words, instead of attaching to the main
network, you want to join the **test** network with your node, which is fully equivalent to
the main network, but with play-Ether only.
```shell
$ geth --goerli console
```
The `console` subcommand has the exact same meaning as above and they are equally
useful on the testnet too. Please, see above for their explanations if you've skipped here.
Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit:
* Instead of connecting the main Ethereum network, the client will connect to the Görli
test network, which uses different P2P bootnodes, different network IDs and genesis
states.
* Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth`
will nest itself one level deeper into a `goerli` subfolder (`~/.ethereum/goerli` on
Linux). Note, on OSX and Linux this also means that attaching to a running testnet node
requires the use of a custom endpoint since `geth attach` will try to attach to a
production node endpoint by default, e.g.,
`geth attach <datadir>/goerli/geth.ipc`. Windows users are not affected by
this.
*Note: Although there are some internal protective measures to prevent transactions from
crossing over between the main network and test network, you should make sure to always
use separate accounts for play-money and real-money. Unless you manually move
accounts, `geth` will by default correctly separate the two networks and will not make any
accounts available between them.*
### Full node on the Rinkeby test network
Go Ethereum also supports connecting to the older proof-of-authority based test network
called [*Rinkeby*](https://www.rinkeby.io) which is operated by members of the community.
```shell
$ geth --rinkeby console
```
### Full node on the Ropsten test network
In addition to Görli and Rinkeby, Geth also supports the ancient Ropsten testnet. The
Ropsten test network is based on the Ethash proof-of-work consensus algorithm. As such,
it has certain extra overhead and is more susceptible to reorganization attacks due to the
network's low difficulty/security.
```shell
$ geth --ropsten console
```
*Note: Older Geth configurations store the Ropsten database in the `testnet` subdirectory.*
### Configuration
As an alternative to passing the numerous flags to the `geth` binary, you can also pass a
configuration file via:
```shell
$ geth --config /path/to/your_config.toml
```
To get an idea how the file should look like you can use the `dumpconfig` subcommand to
export your existing configuration:
```shell
$ geth --your-favourite-flags dumpconfig
```
*Note: This works only with `geth` v1.6.0 and above.*
#### Docker quick start
One of the quickest ways to get Ethereum up and running on your machine is by using
Docker:
```shell
docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \
-p 8545:8545 -p 30303:30303 \
ethereum/client-go
```
This will start `geth` in fast-sync mode with a DB memory allowance of 1GB just as the
above command does. It will also create a persistent volume in your home directory for
saving your blockchain as well as map the default ports. There is also an `alpine` tag
available for a slim version of the image.
Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers
and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not
accessible from the outside.
### Programmatically interfacing `geth` nodes
As a developer, sooner rather than later you'll want to start interacting with `geth` and the
Ethereum network via your own programs and not manually through the console. To aid
this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://eth.wiki/json-rpc/API)
and [`geth` specific APIs](https://geth.ethereum.org/docs/rpc/server)).
These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based
platforms, and named pipes on Windows).
The IPC interface is enabled by default and exposes all the APIs supported by `geth`,
whereas the HTTP and WS interfaces need to manually be enabled and only expose a
subset of APIs due to security reasons. These can be turned on/off and configured as
you'd expect.
HTTP based JSON-RPC API options:
* `--http` Enable the HTTP-RPC server
* `--http.addr` HTTP-RPC server listening interface (default: `localhost`)
* `--http.port` HTTP-RPC server listening port (default: `8545`)
* `--http.api` API's offered over the HTTP-RPC interface (default: `eth,net,web3`)
* `--http.corsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced)
* `--ws` Enable the WS-RPC server
* `--ws.addr` WS-RPC server listening interface (default: `localhost`)
* `--ws.port` WS-RPC server listening port (default: `8546`)
* `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`)
* `--ws.origins` Origins from which to accept websockets requests
* `--ipcdisable` Disable the IPC-RPC server
* `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,shh,txpool,web3`)
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
You'll need to use your own programming environments' capabilities (libraries, tools, etc) to
connect via HTTP, WS or IPC to a `geth` node configured with the above flags and you'll
need to speak [JSON-RPC](https://www.jsonrpc.org/specification) on all transports. You
can reuse the same connection for multiple requests!
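From Go, the stock `ethclient` package speaks this JSON-RPC for you over any of those transports. A minimal sketch, assuming a locally running node started with `--http`:

```go
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/ethereum/go-ethereum/ethclient"
)

func main() {
    // Connect over HTTP; an IPC path or a ws:// URL works the same way.
    client, err := ethclient.Dial("http://localhost:8545")
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    // Fetch the latest header (nil means "latest block").
    header, err := client.HeaderByNumber(context.Background(), nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("latest block:", header.Number)
}
```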
**Note: Please understand the security implications of opening up an HTTP/WS based
transport before doing so! Hackers on the internet are actively trying to subvert
Ethereum nodes with exposed APIs! Further, all browser tabs can access locally
running web servers, so malicious web pages could try to subvert locally available
APIs!**
### Operating a private network
Maintaining your own private network is more involved as a lot of configurations taken for
granted in the official networks need to be manually set up.
#### Defining the private genesis state
First, you'll need to create the genesis state of your networks, which all nodes need to be
aware of and agree upon. This consists of a small JSON file (e.g. call it `genesis.json`):
```json
{
"config": {
"chainId": <arbitrary positive integer>,
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"berlinBlock": 0,
"londonBlock": 0
},
"alloc": {},
"coinbase": "0x0000000000000000000000000000000000000000",
"difficulty": "0x20000",
"extraData": "",
"gasLimit": "0x2fefd8",
"nonce": "0x0000000000000042",
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"timestamp": "0x00"
}
```

The above fields should be fine for most purposes, although we'd recommend changing
the `nonce` to some random value so you prevent unknown remote nodes from being able
to connect to you. If you'd like to pre-fund some accounts for easier testing, create
the accounts and populate the `alloc` field with their addresses.

```json
"alloc": {
  "0x0000000000000000000000000000000000000001": {
    "balance": "111111111"
  },
  "0x0000000000000000000000000000000000000002": {
    "balance": "222222222"
  }
}
```

The bulk of the implementation will be in the `MyService` struct. MyService
should be a struct with public functions. These functions can have two
different types of signatures:

* RPC Calls: For straight RPC calls, a function should have a `context.Context`
  object as the first argument, followed by an arbitrary number of JSON
  marshallable arguments, and return either a single JSON marshallable object, or a
  JSON marshallable object and an error. The RPC framework will take care of
  decoding inputs to this function and encoding outputs, and if the error is
  non-nil it will serve an error response.
* Subscriptions: For subscriptions (supported on IPC and websockets), a
  function should have a `context.Context` object as the first argument
  followed by an arbitrary number of JSON marshallable arguments, and should
  return an `*rpc.Subscription` object. The subscription object can be created
  with `rpcSub := notifier.CreateSubscription()`, and JSON marshallable data
  can be sent to the subscriber with `notifier.Notify(rpcSub.ID, b)` (see the
  sketch below).

A very simple MyService might look like:

```
type MyService struct{}

func (h *MyService) HelloWorld(ctx context.Context) string {
  return "Hello World"
}
```

And the client could then access this with an RPC call to `mynamespace_helloWorld`.
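The subscription variant mentioned above follows the same pattern. A hedged sketch (method name, interval and payload are illustrative); it would be registered through `GetAPIs` exactly like `MyService`:

```go
package main

import (
    "context"
    "time"

    "github.com/ethereum/go-ethereum/rpc"
)

type MyService struct{}

// Ticks streams an incrementing counter to the subscriber once a second
// until the client unsubscribes or the connection drops.
func (s *MyService) Ticks(ctx context.Context) (*rpc.Subscription, error) {
    notifier, supported := rpc.NotifierFromContext(ctx)
    if !supported {
        return nil, rpc.ErrNotificationsUnsupported
    }
    rpcSub := notifier.CreateSubscription()
    go func() {
        ticker := time.NewTicker(time.Second)
        defer ticker.Stop()
        for i := 0; ; i++ {
            select {
            case <-ticker.C:
                notifier.Notify(rpcSub.ID, i) // any JSON-marshallable value works here
            case <-rpcSub.Err(): // client unsubscribed or connection closed
                return
            }
        }
    }()
    return rpcSub, nil
}
```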
#### PreProcessBlock
* **Name**: PreProcessBlock
* **Type**: func(*types.Block)
* **Behavior**: Invoked before the transactions of a block are processed.
#### PreProcessTransaction
* **Name**: PreProcessTransaction
* **Type**: func(*types.Transaction, *types.Block, int)
* **Behavior**: Invoked before each individual transaction of a block is
processed.
#### BlockProcessingError
* **Name**: BlockProcessingError
* **Type**: func(*types.Transaction, *types.Block, error)
* **Behavior**: Invoked if an error occurs while processing a transaction. This
only applies to errors that would invalidate the block were this transaction
included, not errors such as reverts or opcode errors.
#### PostProcessTransaction
* **Name**: PostProcessTransaction
* **Type**: func(*types.Transaction, *types.Block, int, *types.Receipt)
* **Behavior**: Invoked after each individual transaction of a block is processed.
#### PostProcessBlock
* **Name**: PostProcessBlock
* **Type**: func(*types.Block)
* **Behavior**: Invoked after all transactions of a block are processed. Note
that this does not mean that the block can be considered canonical - it may
end up being uncled or side-chained. You should rely on `NewHead` to
determine which blocks are canonical.
#### NewHead
* **Name**: NewHead
* **Type**: func(*types.Block, common.Hash, []*types.Log)
* **Behavior**: Invoked when a new block becomes the canonical latest block.
Note that if several blocks are processed in a group (such as during a reorg)
this may not be called for each block. You should track the prior latest head
if you need to process intermediate blocks.
#### NewSideBlock
* **Name**: NewSideBlock
* **Type**: func(*types.Block, common.Hash, []*types.Log)
* **Behavior**: Invoked when a block is side-chained. Blocks passed to this
method are non-canonical blocks.
#### Reorg
* **Name**: Reorg
* **Type**: func(common *types.Block, oldChain, newChain types.Blocks)
* **Behavior**: Invoked when a chain reorg occurs (at least one block is
removed and one block is added). `oldChain` is a list of removed blocks,
`newChain` is a list of newly added blocks, and `common` is the latest block
that is an ancestor to both oldChain and newChain.
#### StateUpdate
* **Name**: StateUpdate
* **Type**: func(root common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte)
* **Behavior**: Invoked for each new block, StateUpdate provides the changes to
the blockchain state. `root` corresponds to the state root of the new block.
`parentRoot` corresponds to the state root of the parent block. `destructs`
serves as a set of accounts that self-destructed in this block. `accounts`
maps the hash of each account address to the SlimRLP encoding of the account
data. `storage` maps the hash of each account to a map of that account's
stored data.
* **Warning**: StateUpdate is only called if Geth is running with
`--snapshot=true`. This is the default behavior for Geth, but if you are
explicitly running with `--snapshot=false` this function will not be invoked.
#### AppendAncient
* **Name**: AppendAncient
* **Type**: func(number uint64, hash, header, body, receipts, td []byte)
* **Behavior**: Invoked when the freezer moves a block from LevelDB to the
ancients database. `number` is the number of the block. `hash` is the 32 byte
hash of the block as a raw `[]byte`. `header`, `body`, and `receipts` are the
RLP encoded versions of their respective block elements. `td` is the byte
encoded total difficulty of the block.
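Each of these hooks is simply an exported, package-level function whose signature matches the type listed above. As an illustration, a plugin that only watches canonical head changes could export nothing more than the following; the log message is illustrative, while the signature is the `NewHead` type given above:

```go
package main

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/log"
)

// NewHead is looked up by the plugin loader and invoked whenever a block
// becomes the canonical latest block.
func NewHead(block *types.Block, hash common.Hash, logs []*types.Log) {
    log.Info("new canonical head", "number", block.NumberU64(), "hash", hash, "logs", len(logs))
}
```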
## Extending The Plugin API
When extending the plugin API, a primary concern is leaving a minimal footprint
in the core Geth codebase to avoid future merge conflicts. To achieve this,
when we want to add a hook within some existing Geth code, we create a
`plugin_hooks.go` in the same package. For example, in the core/rawdb package
we have:
```
// This file is part of the package we are adding hooks to
package rawdb
// Import whatever is necessary
import (
"github.com/ethereum/go-ethereum/plugins"
"github.com/ethereum/go-ethereum/log"
)
// PluginAppendAncient is the public plugin hook function, available for testing
func PluginAppendAncient(pl *plugins.PluginLoader, number uint64, hash, header, body, receipts, td []byte) {
fnList := pl.Lookup("AppendAncient", func(item interface{}) bool {
_, ok := item.(func(number uint64, hash, header, body, receipts, td []byte))
return ok
})
for _, fni := range fnList {
if fn, ok := fni.(func(number uint64, hash, header, body, receipts, td []byte)); ok {
fn(number, hash, header, body, receipts, td)
}
}
}
// pluginAppendAncient is the private plugin hook function
func pluginAppendAncient(number uint64, hash, header, body, receipts, td []byte) {
if plugins.DefaultPluginLoader == nil {
log.Warn("Attempting AppendAncient, but default PluginLoader has not been initialized")
return
}
PluginAppendAncient(plugins.DefaultPluginLoader, number, hash, header, body, receipts, td)
}
```

### The Public Plugin Hook Function

The public plugin hook function should follow the naming convention
`Plugin$HookName`. The first argument should be a *plugins.PluginLoader,
followed by any arguments required by the functions to be provided by any
plugins implementing this hook.

The plugin hook function should use `PluginLoader.Lookup("$HookName", func(item interface{}) bool`
to get a list of the plugin-provided functions to be invoked. The provided
function should verify that the provided function implements the expected
interface. After the first time a given hook is looked up through the plugin
loader, the PluginLoader will cache references to those hooks.

Given the function list provided by the plugin loader, the public plugin hook
function should iterate over the list, cast the elements to the appropriate
type, and call the function with the provided arguments.

Unless there is a clear justification to the contrary, the function should be
called in the current goroutine. Plugins may choose to spawn off a separate
goroutine as appropriate, but for the sake of thread safety we should generally
not assume that plugins will be implemented in a threadsafe manner. If a plugin
degrades the performance of Geth significantly, that will generally be obvious,
and plugin authors can take appropriate measures to improve performance. If a
plugin introduces thread safety issues, those can go unnoticed during testing.

### The Private Plugin Hook Function

The private plugin hook function should bear the same name as the public plugin
hook function, but with a lower case first letter. The signature should match
the public plugin hook function, except that the first argument referencing the
PluginLoader should be removed. It should invoke the public plugin hook
function on `plugins.DefaultPluginLoader`. It should always verify that the
DefaultPluginLoader is non-nil, and log a warning and return if the
DefaultPluginLoader has not been initialized.

### In-Line Invocation

Within the Geth codebase, the private plugin hook function should be invoked
with the appropriate arguments in a single line, to minimize unexpected
conflicts when merging the upstream Geth codebase into PluGeth.

### Contact Us

While we can imagine lots of ways plugins might like to extract or change
information in Geth, we're trying not to go too crazy with the plugin API based
purely on hypotheticals. The Plugin API in its current form reflects the needs
of projects currently building on PluGeth, and we're happy to extend it for
people who are building something. If you're trying to do something that isn't
supported by the current plugin system, we're happy to help. Reach out to us on
[Discord](https://discord.gg/Epf7b7Gr) and we'll help you figure out how to
make it work.

# Licensing Considerations

The Geth codebase is licensed under the LGPL. By linking with Geth, you have an
obligation to enable anyone you provide your plugin binaries to run against
their own modified versions of Geth. Because of how Golang plugins work,
running against updated versions of Geth may require recompiling the plugin.
If you plan to license your plugin under the LGPL or a more permissive license,
you should be able to meet these requirements. If you plan to use your plugin
privately without distributing it, you should be fine. If you plan to release
your plugin without making the source available, you may find yourself in
violation of Geth's license unless you can provide a way to relink it against
more recent versions of Geth.

# Existing Plugins

We currently provide the following plugins:

* [BlockUpdates](./plugins/packages/blockupdates/main.go): A good reference
  plugin, which leverages several hooks to provide a new BlockUpdates hook,
  which plugins can use to get more cohesive updates about new blocks than can
  easily be achieved with the standard PluGeth hooks.

With the genesis state defined in the above JSON file, you'll need to initialize **every**
`geth` node with it prior to starting it up to ensure all blockchain parameters are correctly
set:

```shell
$ geth init path/to/genesis.json
```

#### Creating the rendezvous point

With all nodes that you want to run initialized to the desired genesis state, you'll need to
start a bootstrap node that others can use to find each other in your network and/or over
the internet. The clean way is to configure and run a dedicated bootnode:

```shell
$ bootnode --genkey=boot.key
$ bootnode --nodekey=boot.key
```

With the bootnode online, it will display an [`enode` URL](https://eth.wiki/en/fundamentals/enode-url-format)
that other nodes can use to connect to it and exchange peer information. Make sure to
replace the displayed IP address information (most probably `[::]`) with your externally
accessible IP to get the actual `enode` URL.

*Note: You could also use a full-fledged `geth` node as a bootnode, but it's the less
recommended way.*

#### Starting up your member nodes

With the bootnode operational and externally reachable (you can try
`telnet <ip> <port>` to ensure it's indeed reachable), start every subsequent `geth`
node pointed to the bootnode for peer discovery via the `--bootnodes` flag. It will
probably also be desirable to keep the data directory of your private network separated, so
do also specify a custom `--datadir` flag.

```shell
$ geth --datadir=path/to/custom/data/folder --bootnodes=<bootnode-enode-url-from-above>
```

*Note: Since your network will be completely cut off from the main and test networks, you'll
also need to configure a miner to process transactions and create new blocks for you.*

#### Running a private miner

Mining on the public Ethereum network is a complex task as it's only feasible using GPUs,
requiring an OpenCL or CUDA enabled `ethminer` instance. For information on such a
setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/)
and the [ethminer](https://github.com/ethereum-mining/ethminer) repository.

In a private network setting, however, a single CPU miner instance is more than enough for
practical purposes as it can produce a stable stream of blocks at the correct intervals
without needing heavy resources (consider running on a single thread, no need for multiple
ones either). To start a `geth` instance for mining, run it with all your usual flags, extended
by:

```shell
$ geth <usual-flags> --mine --miner.threads=1 --miner.etherbase=0x0000000000000000000000000000000000000000
```

Which will start mining blocks and transactions on a single CPU thread, crediting all
proceedings to the account specified by `--miner.etherbase`. You can further tune the mining
by changing the default gas limit blocks converge to (`--miner.targetgaslimit`) and the price
transactions are accepted at (`--miner.gasprice`).
## Contribution

Thank you for considering to help out with the source code! We welcome contributions
from anyone on the internet, and are grateful for even the smallest of fixes!

If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request
for the maintainers to review and merge into the main code base. If you wish to submit
more complex changes though, please check up with the core devs first on [our Discord Server](https://discord.gg/invite/nthXNEv)
to ensure those changes are in line with the general philosophy of the project and/or get
some early feedback which can make both your efforts much lighter as well as our review
and merge procedures quick and simple.

Please make sure your contributions adhere to our coding guidelines:

* Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting)
  guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
* Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary)
  guidelines.
* Pull requests need to be based on and opened against the `master` branch.
* Commit messages should be prefixed with the package(s) they modify.
  * E.g. "eth, rpc: make trace configs optional"

Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/devguide)
for more details on configuring your environment, managing project dependencies, and
testing procedures.

## License

The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the
[GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html),
also included in our repository in the `COPYING.LESSER` file.

The go-ethereum binaries (i.e. all code inside of the `cmd` directory) are licensed under the
[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also
included in our repository in the `COPYING` file.

View File

@@ -12,6 +12,8 @@ Audit reports are published in the `docs` folder: https://github.com/ethereum/go
| ------- | ------- | ----------- |
| `geth` | 20170425 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2017-04-25_Geth-audit_Truesec.pdf) |
| `clef` | 20180914 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2018-09-14_Clef-audit_NCC.pdf) |
| `Discv5` | 20191015 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2019-10-15_Discv5_audit_LeastAuthority.pdf) |
| `Discv5` | 20200124 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2020-01-24_DiscV5_audit_Cure53.pdf) |

## Reporting a Vulnerability

View File

@@ -34,6 +34,7 @@ type ABI struct {
Constructor Method
Methods map[string]Method
Events map[string]Event
Errors map[string]Error

// Additional "special" functions introduced in solidity v0.6.0.
// It's separated from the original default fallback. Each contract
@@ -157,12 +158,13 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
}
abi.Methods = make(map[string]Method)
abi.Events = make(map[string]Event)
abi.Errors = make(map[string]Error)
for _, field := range fields {
switch field.Type {
case "constructor":
abi.Constructor = NewMethod("", "", Constructor, field.StateMutability, field.Constant, field.Payable, field.Inputs, nil)
case "function":
name := abi.overloadedMethodName(field.Name) → name := overloadedName(field.Name, func(s string) bool { _, ok := abi.Methods[s]; return ok })
abi.Methods[name] = NewMethod(name, field.Name, Function, field.StateMutability, field.Constant, field.Payable, field.Inputs, field.Outputs)
case "fallback":
// New introduced function type in v0.6.0, check more detail
@@ -182,8 +184,10 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
}
abi.Receive = NewMethod("", "", Receive, field.StateMutability, field.Constant, field.Payable, nil, nil)
case "event":
name := abi.overloadedEventName(field.Name) → name := overloadedName(field.Name, func(s string) bool { _, ok := abi.Events[s]; return ok })
abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs)
case "error":
abi.Errors[field.Name] = NewError(field.Name, field.Inputs)
default:
return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name)
}
@@ -191,36 +195,6 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
return nil
}
// overloadedMethodName returns the next available name for a given function.
// Needed since solidity allows for function overload.
//
// e.g. if the abi contains Methods send, send1
// overloadedMethodName would return send2 for input send.
func (abi *ABI) overloadedMethodName(rawName string) string {
name := rawName
_, ok := abi.Methods[name]
for idx := 0; ok; idx++ {
name = fmt.Sprintf("%s%d", rawName, idx)
_, ok = abi.Methods[name]
}
return name
}
// overloadedEventName returns the next available name for a given event.
// Needed since solidity allows for event overload.
//
// e.g. if the abi contains events received, received1
// overloadedEventName would return received2 for input received.
func (abi *ABI) overloadedEventName(rawName string) string {
name := rawName
_, ok := abi.Events[name]
for idx := 0; ok; idx++ {
name = fmt.Sprintf("%s%d", rawName, idx)
_, ok = abi.Events[name]
}
return name
}
// MethodById looks up a method by the 4-byte id,
// returns nil if none found.
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
@@ -277,3 +251,20 @@ func UnpackRevert(data []byte) (string, error) {
}
return unpacked[0].(string), nil
}
// overloadedName returns the next available name for a given thing.
// Needed since solidity allows for overloading.
//
// e.g. if the abi contains Methods send, send1
// overloadedName would return send2 for input send.
//
// overloadedName works for methods, events and errors.
func overloadedName(rawName string, isAvail func(string) bool) string {
name := rawName
ok := isAvail(name)
for idx := 0; ok; idx++ {
name = fmt.Sprintf("%s%d", rawName, idx)
ok = isAvail(name)
}
return name
}
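The effect of the consolidated `overloadedName` helper is visible through the public `Methods`, `Events` and `Errors` maps. A small, illustrative sketch (the ABI JSON is made up):

```go
package main

import (
    "fmt"
    "strings"

    "github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
    // Two Solidity-style overloads of `send`.
    const def = `[
      {"type":"function","name":"send","inputs":[{"name":"a","type":"uint256"}]},
      {"type":"function","name":"send","inputs":[{"name":"a","type":"uint256"},{"name":"b","type":"uint256"}]}
    ]`
    parsed, err := abi.JSON(strings.NewReader(def))
    if err != nil {
        panic(err)
    }
    fmt.Println(parsed.Methods["send"].Sig)  // send(uint256)
    fmt.Println(parsed.Methods["send0"].Sig) // send(uint256,uint256) - the overload was renamed to "send0"
}
```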

View File

@@ -295,6 +295,20 @@ func TestOverloadedMethodSignature(t *testing.T) {
check("bar0", "bar(uint256,uint256)", false)
}
func TestCustomErrors(t *testing.T) {
json := `[{ "inputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ],"name": "MyError", "type": "error"} ]`
abi, err := JSON(strings.NewReader(json))
if err != nil {
t.Fatal(err)
}
check := func(name string, expect string) {
if abi.Errors[name].Sig != expect {
t.Fatalf("The signature of overloaded method mismatch, want %s, have %s", expect, abi.Methods[name].Sig)
}
}
check("MyError", "MyError(uint256)")
}
func TestMultiPack(t *testing.T) {
abi, err := JSON(strings.NewReader(jsondata))
if err != nil {

View File

@@ -137,7 +137,7 @@ func (arguments Arguments) copyAtomic(v interface{}, marshalledValues interface{
dst := reflect.ValueOf(v).Elem()
src := reflect.ValueOf(marshalledValues)

if dst.Kind() == reflect.Struct && src.Kind() != reflect.Struct { → if dst.Kind() == reflect.Struct {
return set(dst.Field(0), src)
}
return set(dst, src)

View File

@@ -17,6 +17,7 @@
package bind

import (
"context"
"crypto/ecdsa"
"errors"
"io"
@@ -74,6 +75,7 @@ func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account
}
return tx.WithSignature(signer, signature)
},
Context: context.Background(),
}, nil
}
@@ -97,6 +99,7 @@ func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts {
}
return tx.WithSignature(signer, signature)
},
Context: context.Background(),
}
}
@@ -133,6 +136,7 @@ func NewKeyStoreTransactorWithChainID(keystore *keystore.KeyStore, account accou
}
return tx.WithSignature(signer, signature)
},
Context: context.Background(),
}, nil
}
@@ -156,6 +160,7 @@ func NewKeyedTransactorWithChainID(key *ecdsa.PrivateKey, chainID *big.Int) (*Tr
}
return tx.WithSignature(signer, signature)
},
Context: context.Background(),
}, nil
}
@@ -170,5 +175,6 @@ func NewClefTransactor(clef *external.ExternalSigner, account accounts.Account)
}
return clef.SignTx(account, transaction, nil) // Clef enforces its own chain id
},
Context: context.Background(),
}
}
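Callers construct transactors the same way as before; the change above just means `TransactOpts.Context` now starts out as `context.Background()` instead of nil. A hedged sketch (chain ID and timeout are illustrative):

```go
package main

import (
    "context"
    "log"
    "math/big"
    "time"

    "github.com/ethereum/go-ethereum/accounts/abi/bind"
    "github.com/ethereum/go-ethereum/crypto"
)

func main() {
    key, err := crypto.GenerateKey()
    if err != nil {
        log.Fatal(err)
    }
    chainID := big.NewInt(1337) // private-network chain id, illustrative
    opts, err := bind.NewKeyedTransactorWithChainID(key, chainID)
    if err != nil {
        log.Fatal(err)
    }
    // opts.Context now defaults to context.Background(); override it for timeouts.
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()
    opts.Context = ctx
    log.Println("transacting as", opts.From) // pass opts to a generated binding's transact methods
}
```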

View File

@@ -488,8 +488,19 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
} else {
hi = b.pendingBlock.GasLimit()
}
// Normalize the max fee per gas the call is willing to spend.
var feeCap *big.Int
if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) {
return 0, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
} else if call.GasPrice != nil {
feeCap = call.GasPrice
} else if call.GasFeeCap != nil {
feeCap = call.GasFeeCap
} else {
feeCap = common.Big0
}
// Recap the highest gas allowance with account's balance.
if call.GasPrice != nil && call.GasPrice.BitLen() != 0 { → if feeCap.BitLen() != 0 {
balance := b.pendingState.GetBalance(call.From) // from can't be nil
available := new(big.Int).Set(balance)
if call.Value != nil {
@@ -498,14 +509,14 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
}
available.Sub(available, call.Value)
}
allowance := new(big.Int).Div(available, call.GasPrice) → allowance := new(big.Int).Div(available, feeCap)
if allowance.IsUint64() && hi > allowance.Uint64() {
transfer := call.Value
if transfer == nil {
transfer = new(big.Int)
}
log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance,
"sent", transfer, "gasprice", call.GasPrice, "fundable", allowance) → "sent", transfer, "feecap", feeCap, "fundable", allowance)
hi = allowance.Uint64()
}
}
@@ -784,7 +795,7 @@ type callMsg struct {
func (m callMsg) From() common.Address { return m.CallMsg.From }
func (m callMsg) Nonce() uint64 { return 0 }
func (m callMsg) CheckNonce() bool { return false } → func (m callMsg) IsFake() bool { return true }
func (m callMsg) To() *common.Address { return m.CallMsg.To }
func (m callMsg) GasPrice() *big.Int { return m.CallMsg.GasPrice }
func (m callMsg) GasFeeCap() *big.Int { return m.CallMsg.GasFeeCap }
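With the fee-cap normalization above, EIP-1559 style calls are now capped by the sender balance just like legacy gas prices. An illustrative sketch against the simulated backend (funding, gas limit and fee values are made up):

```go
package main

import (
    "context"
    "fmt"
    "log"
    "math/big"

    "github.com/ethereum/go-ethereum"
    "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/crypto"
)

func main() {
    key, _ := crypto.GenerateKey()
    addr := crypto.PubkeyToAddress(key.PublicKey)

    // Fund the account with 10 ETH in the genesis of the simulated chain.
    sim := backends.NewSimulatedBackend(core.GenesisAlloc{
        addr: {Balance: new(big.Int).Mul(big.NewInt(10), big.NewInt(1e18))},
    }, 10_000_000)
    defer sim.Close()

    gas, err := sim.EstimateGas(context.Background(), ethereum.CallMsg{
        From:      addr,
        To:        &addr,
        GasFeeCap: big.NewInt(1e10), // 10 gwei cap; the estimate is recapped against the balance using this
        GasTipCap: big.NewInt(1),
        Value:     big.NewInt(1),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("estimated gas:", gas) // a plain value transfer estimates to 21000
}
```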

View File

@@ -580,6 +580,26 @@ func TestEstimateGasWithPrice(t *testing.T) {
Value: big.NewInt(100000000000),
Data: nil,
}, 21000, errors.New("gas required exceeds allowance (10999)")}, // 10999=(2.2ether-1000wei)/(2e14)
{"EstimateEIP1559WithHighFees", ethereum.CallMsg{
From: addr,
To: &addr,
Gas: 0,
GasFeeCap: big.NewInt(1e14), // maxgascost = 2.1ether
GasTipCap: big.NewInt(1),
Value: big.NewInt(1e17), // the remaining balance for fee is 2.1ether
Data: nil,
}, params.TxGas, nil},
{"EstimateEIP1559WithSuperHighFees", ethereum.CallMsg{
From: addr,
To: &addr,
Gas: 0,
GasFeeCap: big.NewInt(1e14), // maxgascost = 2.1ether
GasTipCap: big.NewInt(1),
Value: big.NewInt(1e17 + 1), // the remaining balance for fee is 2.1ether
Data: nil,
}, params.TxGas, errors.New("gas required exceeds allowance (20999)")}, // 20999=(2.2ether-0.1ether-1wei)/(1e14)
}
for i, c := range cases {
got, err := sim.EstimateGas(context.Background(), c.message)
@@ -592,6 +612,9 @@ func TestEstimateGasWithPrice(t *testing.T) {
}
continue
}
if c.expectError == nil && err != nil {
t.Fatalf("test %d: didn't expect error, got %v", i, err)
}
if got != c.expect {
t.Fatalf("test %d: gas estimation mismatch, want %d, got %d", i, c.expect, got)
}

View File

@@ -21,6 +21,8 @@ import (
"errors"
"fmt"
"math/big"
"strings"
"sync"

"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
@@ -76,6 +78,29 @@ type WatchOpts struct {
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}
// MetaData collects all metadata for a bound contract.
type MetaData struct {
mu sync.Mutex
Sigs map[string]string
Bin string
ABI string
ab *abi.ABI
}
func (m *MetaData) GetAbi() (*abi.ABI, error) {
m.mu.Lock()
defer m.mu.Unlock()
if m.ab != nil {
return m.ab, nil
}
if parsed, err := abi.JSON(strings.NewReader(m.ABI)); err != nil {
return nil, err
} else {
m.ab = &parsed
}
return m.ab, nil
}
// BoundContract is the base wrapper object that reflects a contract on the
// Ethereum network. It contains a collection of methods that are used by the
// higher level contract bindings to operate.
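Generated bindings share one `MetaData` value per contract and parse the ABI lazily through `GetAbi`. A hedged sketch with a hand-written `MetaData` (real code would use the value emitted by `abigen`):

```go
package main

import (
    "fmt"
    "log"

    "github.com/ethereum/go-ethereum/accounts/abi/bind"
)

func main() {
    md := &bind.MetaData{
        ABI: `[{"type":"function","name":"greet","inputs":[],"outputs":[{"name":"","type":"string"}]}]`,
    }
    parsed, err := md.GetAbi() // parsed once, cached for subsequent callers
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(parsed.Methods["greet"].Sig) // greet()
}
```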
@ -206,108 +231,158 @@ func (c *BoundContract) Transfer(opts *TransactOpts) (*types.Transaction, error)
return c.transact(opts, &c.address, nil)
}

func (c *BoundContract) createDynamicTx(opts *TransactOpts, contract *common.Address, input []byte, head *types.Header) (*types.Transaction, error) {
// Normalize value
value := opts.Value
if value == nil {
value = new(big.Int)
}
// Estimate TipCap
gasTipCap := opts.GasTipCap
if gasTipCap == nil {
tip, err := c.transactor.SuggestGasTipCap(ensureContext(opts.Context))
if err != nil {
return nil, err
}
gasTipCap = tip
}
// Estimate FeeCap
gasFeeCap := opts.GasFeeCap
if gasFeeCap == nil {
gasFeeCap = new(big.Int).Add(
gasTipCap,
new(big.Int).Mul(head.BaseFee, big.NewInt(2)),
)
}
if gasFeeCap.Cmp(gasTipCap) < 0 {
return nil, fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", gasFeeCap, gasTipCap)
}
// Estimate GasLimit
gasLimit := opts.GasLimit
if opts.GasLimit == 0 {
var err error
gasLimit, err = c.estimateGasLimit(opts, contract, input, nil, gasTipCap, gasFeeCap, value)
if err != nil {
return nil, err
}
}
// create the transaction
nonce, err := c.getNonce(opts)
if err != nil {
return nil, err
}
baseTx := &types.DynamicFeeTx{
To: contract,
Nonce: nonce,
GasFeeCap: gasFeeCap,
GasTipCap: gasTipCap,
Gas: gasLimit,
Value: value,
Data: input,
}
return types.NewTx(baseTx), nil
}

func (c *BoundContract) createLegacyTx(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) {
if opts.GasFeeCap != nil || opts.GasTipCap != nil {
return nil, errors.New("maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet")
}
// Normalize value
value := opts.Value
if value == nil {
value = new(big.Int)
}
// Estimate GasPrice
gasPrice := opts.GasPrice
if gasPrice == nil {
price, err := c.transactor.SuggestGasPrice(ensureContext(opts.Context))
if err != nil {
return nil, err
}
gasPrice = price
}
// Estimate GasLimit
gasLimit := opts.GasLimit
if opts.GasLimit == 0 {
var err error
gasLimit, err = c.estimateGasLimit(opts, contract, input, gasPrice, nil, nil, value)
if err != nil {
return nil, err
}
}
// create the transaction
nonce, err := c.getNonce(opts)
if err != nil {
return nil, err
}
baseTx := &types.LegacyTx{
To: contract,
Nonce: nonce,
GasPrice: gasPrice,
Gas: gasLimit,
Value: value,
Data: input,
}
return types.NewTx(baseTx), nil
}

func (c *BoundContract) estimateGasLimit(opts *TransactOpts, contract *common.Address, input []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int) (uint64, error) {
if contract != nil {
// Gas estimation cannot succeed without code for method invocations.
if code, err := c.transactor.PendingCodeAt(ensureContext(opts.Context), c.address); err != nil {
return 0, err
} else if len(code) == 0 {
return 0, ErrNoCode
}
}
msg := ethereum.CallMsg{
From: opts.From,
To: contract,
GasPrice: gasPrice,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
Value: value,
Data: input,
}
return c.transactor.EstimateGas(ensureContext(opts.Context), msg)
}

func (c *BoundContract) getNonce(opts *TransactOpts) (uint64, error) {
if opts.Nonce == nil {
return c.transactor.PendingNonceAt(ensureContext(opts.Context), opts.From)
} else {
return opts.Nonce.Uint64(), nil
}
}
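A small worked example of the default fee-cap rule in createDynamicTx above (a sketch, using the same numbers as TestTransactGasFee further below): with a suggested tip of 5 wei and a pending base fee of 100 wei, the derived fee cap is tip + 2*baseFee.

// sketch only: mirrors createDynamicTx's default fee-cap derivation
gasTipCap := big.NewInt(5)  // suggested priority fee
baseFee := big.NewInt(100)  // head.BaseFee
gasFeeCap := new(big.Int).Add(
gasTipCap,
new(big.Int).Mul(baseFee, big.NewInt(2)),
) // 5 + 2*100 = 205
_ = gasFeeCap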
// transact executes an actual transaction invocation, first deriving any missing
// authorization fields, and then scheduling the transaction for execution.
func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) {
if opts.GasPrice != nil && (opts.GasFeeCap != nil || opts.GasTipCap != nil) {
return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
}
// Create the transaction
var (
rawTx *types.Transaction
err error
)
if opts.GasPrice != nil {
rawTx, err = c.createLegacyTx(opts, contract, input)
} else {
// Only query for basefee if gasPrice not specified
if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); errHead != nil {
return nil, errHead
} else if head.BaseFee != nil {
rawTx, err = c.createDynamicTx(opts, contract, input, head)
} else {
// Chain is not London ready -> use legacy transaction
rawTx, err = c.createLegacyTx(opts, contract, input)
}
}
if err != nil {
return nil, err
}
// Sign the transaction and schedule it for execution
if opts.Signer == nil {
return nil, errors.New("no signer to authorize the transaction with")
}
@ -406,6 +481,9 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter
// UnpackLog unpacks a retrieved log into the provided output structure.
func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error {
if log.Topics[0] != c.abi.Events[event].ID {
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.abi.UnpackIntoInterface(out, event, log.Data); err != nil {
return err
@ -422,6 +500,9 @@ func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log)
// UnpackLogIntoMap unpacks a retrieved log into the provided map.
func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event string, log types.Log) error {
if log.Topics[0] != c.abi.Events[event].ID {
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.abi.UnpackIntoMap(out, event, log.Data); err != nil {
return err

View File

@ -31,8 +31,49 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/assert"
) )
func mockSign(addr common.Address, tx *types.Transaction) (*types.Transaction, error) { return tx, nil }
type mockTransactor struct {
baseFee *big.Int
gasTipCap *big.Int
gasPrice *big.Int
suggestGasTipCapCalled bool
suggestGasPriceCalled bool
}
func (mt *mockTransactor) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
return &types.Header{BaseFee: mt.baseFee}, nil
}
func (mt *mockTransactor) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) {
return []byte{1}, nil
}
func (mt *mockTransactor) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) {
return 0, nil
}
func (mt *mockTransactor) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
mt.suggestGasPriceCalled = true
return mt.gasPrice, nil
}
func (mt *mockTransactor) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
mt.suggestGasTipCapCalled = true
return mt.gasTipCap, nil
}
func (mt *mockTransactor) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) {
return 0, nil
}
func (mt *mockTransactor) SendTransaction(ctx context.Context, tx *types.Transaction) error {
return nil
}
type mockCaller struct {
codeAtBlockNumber *big.Int
callContractBlockNumber *big.Int
@ -110,7 +151,7 @@ const hexData = "0x000000000000000000000000376c47978271565f56deb45495afa69e59c16
func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) {
hash := crypto.Keccak256Hash([]byte("testName"))
topics := []common.Hash{
crypto.Keccak256Hash([]byte("received(string,address,uint256,bytes)")),
hash,
}
mockLog := newMockLog(topics, common.HexToHash("0x0"))
@ -135,7 +176,7 @@ func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
}
hash := crypto.Keccak256Hash(sliceBytes)
topics := []common.Hash{
crypto.Keccak256Hash([]byte("received(string[],address,uint256,bytes)")),
hash,
}
mockLog := newMockLog(topics, common.HexToHash("0x0"))
@ -160,7 +201,7 @@ func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) {
}
hash := crypto.Keccak256Hash(arrBytes)
topics := []common.Hash{
crypto.Keccak256Hash([]byte("received(address[2],address,uint256,bytes)")),
hash,
}
mockLog := newMockLog(topics, common.HexToHash("0x0"))
@ -187,7 +228,7 @@ func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) {
var functionTy [24]byte
copy(functionTy[:], functionTyBytes[0:24])
topics := []common.Hash{
crypto.Keccak256Hash([]byte("received(function,address,uint256,bytes)")),
common.BytesToHash(functionTyBytes),
}
mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"))
@ -208,7 +249,7 @@ func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) {
bytes := []byte{1, 2, 3, 4, 5}
hash := crypto.Keccak256Hash(bytes)
topics := []common.Hash{
crypto.Keccak256Hash([]byte("received(bytes,address,uint256,bytes)")),
hash,
}
mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"))
@ -226,6 +267,51 @@ func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) {
unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
}
func TestTransactGasFee(t *testing.T) {
assert := assert.New(t)
// GasTipCap and GasFeeCap
// When opts.GasTipCap and opts.GasFeeCap are nil
mt := &mockTransactor{baseFee: big.NewInt(100), gasTipCap: big.NewInt(5)}
bc := bind.NewBoundContract(common.Address{}, abi.ABI{}, nil, mt, nil)
opts := &bind.TransactOpts{Signer: mockSign}
tx, err := bc.Transact(opts, "")
assert.Nil(err)
assert.Equal(big.NewInt(5), tx.GasTipCap())
assert.Equal(big.NewInt(205), tx.GasFeeCap())
assert.Nil(opts.GasTipCap)
assert.Nil(opts.GasFeeCap)
assert.True(mt.suggestGasTipCapCalled)
// Second call to Transact should use latest suggested GasTipCap
mt.gasTipCap = big.NewInt(6)
mt.suggestGasTipCapCalled = false
tx, err = bc.Transact(opts, "")
assert.Nil(err)
assert.Equal(big.NewInt(6), tx.GasTipCap())
assert.Equal(big.NewInt(206), tx.GasFeeCap())
assert.True(mt.suggestGasTipCapCalled)
// GasPrice
// When opts.GasPrice is nil
mt = &mockTransactor{gasPrice: big.NewInt(5)}
bc = bind.NewBoundContract(common.Address{}, abi.ABI{}, nil, mt, nil)
opts = &bind.TransactOpts{Signer: mockSign}
tx, err = bc.Transact(opts, "")
assert.Nil(err)
assert.Equal(big.NewInt(5), tx.GasPrice())
assert.Nil(opts.GasPrice)
assert.True(mt.suggestGasPriceCalled)
// Second call to Transact should use latest suggested GasPrice
mt.gasPrice = big.NewInt(6)
mt.suggestGasPriceCalled = false
tx, err = bc.Transact(opts, "")
assert.Nil(err)
assert.Equal(big.NewInt(6), tx.GasPrice())
assert.True(mt.suggestGasPriceCalled)
}
func unpackAndCheck(t *testing.T, bc *bind.BoundContract, expected map[string]interface{}, mockLog types.Log) {
received := make(map[string]interface{})
if err := bc.UnpackLogIntoMap(received, "received", mockLog); err != nil {

View File

@ -1785,6 +1785,132 @@ var bindTests = []struct {
nil,
nil,
},
// Test resolving single struct argument
{
`NewSingleStructArgument`,
`
pragma solidity ^0.8.0;
contract NewSingleStructArgument {
struct MyStruct{
uint256 a;
uint256 b;
}
event StructEvent(MyStruct s);
function TestEvent() public {
emit StructEvent(MyStruct({a: 1, b: 2}));
}
}
`,
[]string{"608060405234801561001057600080fd5b50610113806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806324ec1d3f14602d575b600080fd5b60336035565b005b7fb4b2ff75e30cb4317eaae16dd8a187dd89978df17565104caa6c2797caae27d460405180604001604052806001815260200160028152506040516078919060ba565b60405180910390a1565b6040820160008201516096600085018260ad565b50602082015160a7602085018260ad565b50505050565b60b48160d3565b82525050565b600060408201905060cd60008301846082565b92915050565b600081905091905056fea26469706673582212208823628796125bf9941ce4eda18da1be3cf2931b231708ab848e1bd7151c0c9a64736f6c63430008070033"},
[]string{`[{"anonymous":false,"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"}],"indexed":false,"internalType":"struct Test.MyStruct","name":"s","type":"tuple"}],"name":"StructEvent","type":"event"},{"inputs":[],"name":"TestEvent","outputs":[],"stateMutability":"nonpayable","type":"function"}]`},
`
"math/big"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
`,
`
var (
key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
)
defer sim.Close()
_, _, d, err := DeployNewSingleStructArgument(user, sim)
if err != nil {
t.Fatalf("Failed to deploy contract %v", err)
}
sim.Commit()
_, err = d.TestEvent(user)
if err != nil {
t.Fatalf("Failed to call contract %v", err)
}
sim.Commit()
it, err := d.FilterStructEvent(nil)
if err != nil {
t.Fatalf("Failed to filter contract event %v", err)
}
var count int
for it.Next() {
if it.Event.S.A.Cmp(big.NewInt(1)) != 0 {
t.Fatal("Unexpected contract event")
}
if it.Event.S.B.Cmp(big.NewInt(2)) != 0 {
t.Fatal("Unexpected contract event")
}
count += 1
}
if count != 1 {
t.Fatal("Unexpected contract event number")
}
`,
nil,
nil,
nil,
nil,
},
// Test errors introduced in v0.8.4
{
`NewErrors`,
`
pragma solidity >0.8.4;
contract NewErrors {
error MyError(uint256);
error MyError1(uint256);
error MyError2(uint256, uint256);
error MyError3(uint256 a, uint256 b, uint256 c);
function Error() public pure {
revert MyError3(1,2,3);
}
}
`,
[]string{"0x6080604052348015600f57600080fd5b5060998061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063726c638214602d575b600080fd5b60336035565b005b60405163024876cd60e61b815260016004820152600260248201526003604482015260640160405180910390fdfea264697066735822122093f786a1bc60216540cd999fbb4a6109e0fef20abcff6e9107fb2817ca968f3c64736f6c63430008070033"},
[]string{`[{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError1","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError2","type":"error"},{"inputs":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"},{"internalType":"uint256","name":"c","type":"uint256"}],"name":"MyError3","type":"error"},{"inputs":[],"name":"Error","outputs":[],"stateMutability":"pure","type":"function"}]`},
`
"math/big"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
`,
`
var (
key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
)
defer sim.Close()
_, tx, contract, err := DeployNewErrors(user, sim)
if err != nil {
t.Fatal(err)
}
sim.Commit()
_, err = bind.WaitDeployed(nil, sim, tx)
if err != nil {
t.Error(err)
}
if err := contract.Error(new(bind.CallOpts)); err == nil {
t.Fatalf("expected contract to throw error")
}
// TODO (MariusVanDerWijden): unpack the error using abigen
// once that is implemented
`,
nil,
nil,
nil,
nil,
},
}

// Tests that packages generated by the binder can be successfully compiled and

View File

@ -90,6 +90,7 @@ package {{.Package}}
import (
"math/big"
"strings"
"errors"

ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
@ -101,6 +102,7 @@ import (
// Reference imports to suppress errors if they are not otherwise used.
var (
_ = errors.New
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
@ -120,32 +122,48 @@ var (
{{end}}

{{range $contract := .Contracts}}
// {{.Type}}MetaData contains all meta data concerning the {{.Type}} contract.
var {{.Type}}MetaData = &bind.MetaData{
ABI: "{{.InputABI}}",
{{if $contract.FuncSigs -}}
Sigs: map[string]string{
{{range $strsig, $binsig := .FuncSigs}}"{{$binsig}}": "{{$strsig}}",
{{end}}
},
{{end -}}
{{if .InputBin -}}
Bin: "0x{{.InputBin}}",
{{end}}
}

// {{.Type}}ABI is the input ABI used to generate the binding from.
// Deprecated: Use {{.Type}}MetaData.ABI instead.
var {{.Type}}ABI = {{.Type}}MetaData.ABI

{{if $contract.FuncSigs}}
// Deprecated: Use {{.Type}}MetaData.Sigs instead.
// {{.Type}}FuncSigs maps the 4-byte function signature to its string representation.
var {{.Type}}FuncSigs = {{.Type}}MetaData.Sigs
{{end}}

{{if .InputBin}}
// {{.Type}}Bin is the compiled bytecode used for deploying new contracts.
// Deprecated: Use {{.Type}}MetaData.Bin instead.
var {{.Type}}Bin = {{.Type}}MetaData.Bin

// Deploy{{.Type}} deploys a new Ethereum contract, binding an instance of {{.Type}} to it.
func Deploy{{.Type}}(auth *bind.TransactOpts, backend bind.ContractBackend {{range .Constructor.Inputs}}, {{.Name}} {{bindtype .Type $structs}}{{end}}) (common.Address, *types.Transaction, *{{.Type}}, error) {
parsed, err := {{.Type}}MetaData.GetAbi()
if err != nil {
return common.Address{}, nil, nil, err
}
if parsed == nil {
return common.Address{}, nil, nil, errors.New("GetABI returned nil")
}
{{range $pattern, $name := .Libraries}}
{{decapitalise $name}}Addr, _, _, _ := Deploy{{capitalise $name}}(auth, backend)
{{$contract.Type}}Bin = strings.Replace({{$contract.Type}}Bin, "__${{$pattern}}$__", {{decapitalise $name}}Addr.String()[2:], -1)
{{end}}
address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex({{.Type}}Bin), backend {{range .Constructor.Inputs}}, {{.Name}}{{end}})
if err != nil {
return common.Address{}, nil, nil, err
}

View File

@ -1,4 +1,4 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@ -17,66 +17,75 @@
package abi

import (
"bytes"
"errors"
"fmt"
"strings"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)

type Error struct {
Name string
Inputs Arguments
str string
// Sig contains the string signature according to the ABI spec.
// e.g. event foo(uint32 a, int b) = "foo(uint32,int256)"
// Please note that "int" is substitute for its canonical representation "int256"
Sig string
// ID returns the canonical representation of the event's signature used by the
// abi definition to identify event names and types.
ID common.Hash
}

func NewError(name string, inputs Arguments) Error {
// sanitize inputs to remove inputs without names
// and precompute string and sig representation.
names := make([]string, len(inputs))
types := make([]string, len(inputs))
for i, input := range inputs {
if input.Name == "" {
inputs[i] = Argument{
Name: fmt.Sprintf("arg%d", i),
Indexed: input.Indexed,
Type: input.Type,
}
} else {
inputs[i] = input
}
// string representation
names[i] = fmt.Sprintf("%v %v", input.Type, inputs[i].Name)
if input.Indexed {
names[i] = fmt.Sprintf("%v indexed %v", input.Type, inputs[i].Name)
}
// sig representation
types[i] = input.Type.String()
}

str := fmt.Sprintf("error %v(%v)", name, strings.Join(names, ", "))
sig := fmt.Sprintf("%v(%v)", name, strings.Join(types, ","))
id := common.BytesToHash(crypto.Keccak256([]byte(sig)))

return Error{
Name: name,
Inputs: inputs,
str: str,
Sig: sig,
ID: id,
}
}

func (e *Error) String() string {
return e.str
}

func (e *Error) Unpack(data []byte) (interface{}, error) {
if len(data) < 4 {
return "", errors.New("invalid data for unpacking")
}
if !bytes.Equal(data[:4], e.ID[:4]) {
return "", errors.New("invalid data for unpacking")
}
return e.Inputs.Unpack(data[4:])
}
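A hedged usage sketch (not part of this diff) of the new Error type: building an Error by hand and unpacking matching revert data. abi.NewType and Arguments are assumed to behave as elsewhere in this package; revertData stands in for the raw return data of a reverted call.

// sketch only
uint256T, _ := abi.NewType("uint256", "", nil)
myErr := abi.NewError("MyError3", abi.Arguments{
{Name: "a", Type: uint256T},
{Name: "b", Type: uint256T},
{Name: "c", Type: uint256T},
})
// myErr.ID[:4] is the selector; Unpack rejects data that does not start with it.
if vals, err := myErr.Unpack(revertData); err == nil {
fmt.Println(myErr.String(), vals)
}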

View File

@ -0,0 +1,82 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"errors"
"fmt"
"reflect"
)
var (
errBadBool = errors.New("abi: improperly encoded boolean value")
)
// formatSliceString formats the reflection kind with the given slice size
// and returns a formatted string representation.
func formatSliceString(kind reflect.Kind, sliceSize int) string {
if sliceSize == -1 {
return fmt.Sprintf("[]%v", kind)
}
return fmt.Sprintf("[%d]%v", sliceSize, kind)
}
// sliceTypeCheck checks that the given slice can be assigned to the reflection
// type in t.
func sliceTypeCheck(t Type, val reflect.Value) error {
if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
return typeErr(formatSliceString(t.GetType().Kind(), t.Size), val.Type())
}
if t.T == ArrayTy && val.Len() != t.Size {
return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len()))
}
if t.Elem.T == SliceTy || t.Elem.T == ArrayTy {
if val.Len() > 0 {
return sliceTypeCheck(*t.Elem, val.Index(0))
}
}
if val.Type().Elem().Kind() != t.Elem.GetType().Kind() {
return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), val.Type())
}
return nil
}
// typeCheck checks that the given reflection value can be assigned to the reflection
// type in t.
func typeCheck(t Type, value reflect.Value) error {
if t.T == SliceTy || t.T == ArrayTy {
return sliceTypeCheck(t, value)
}
// Check base type validity. Element types will be checked later on.
if t.GetType().Kind() != value.Kind() {
return typeErr(t.GetType().Kind(), value.Kind())
} else if t.T == FixedBytesTy && t.Size != value.Len() {
return typeErr(t.GetType(), value.Type())
} else {
return nil
}
}
// typeErr returns a formatted type casting error.
func typeErr(expected, got interface{}) error {
return fmt.Errorf("abi: cannot use %v as type %v as argument", got, expected)
}

View File

@ -123,15 +123,8 @@ func set(dst, src reflect.Value) error {
func setSlice(dst, src reflect.Value) error {
slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len())
for i := 0; i < src.Len(); i++ {
if err := set(slice.Index(i), src.Index(i)); err != nil {
return err
}
}
if dst.CanSet() {

View File

@ -762,20 +762,24 @@ func TestUnpackTuple(t *testing.T) {
buff.Write(common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) // ret[b] = -1

// If the result is single tuple, use struct as return value container directly.
type v struct {
A *big.Int
B *big.Int
}
type r struct {
Result v
}
var ret0 = new(r)
err = abi.UnpackIntoInterface(ret0, "tuple", buff.Bytes())

if err != nil {
t.Error(err)
} else {
if ret0.Result.A.Cmp(big.NewInt(1)) != 0 {
t.Errorf("unexpected value unpacked: want %x, got %x", 1, ret0.Result.A)
}
if ret0.Result.B.Cmp(big.NewInt(-1)) != 0 {
t.Errorf("unexpected value unpacked: want %x, got %x", -1, ret0.Result.B)
}
}

View File

@ -29,7 +29,7 @@ import (
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/signer/core" "github.com/ethereum/go-ethereum/signer/core/apitypes"
) )
type ExternalBackend struct { type ExternalBackend struct {
@ -196,6 +196,10 @@ type signTransactionResult struct {
Tx *types.Transaction `json:"tx"`
}
// SignTx sends the transaction to the external signer.
// If chainID is nil, or tx.ChainID is zero, the chain ID will be assigned
// by the external signer. For non-legacy transactions, the chain ID of the
// transaction overrides the chainID parameter.
func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
data := hexutil.Bytes(tx.Data())
var to *common.MixedcaseAddress
@ -203,7 +207,7 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
t := common.NewMixedcaseAddress(*tx.To())
to = &t
}
args := &apitypes.SendTxArgs{
Data: &data,
Nonce: hexutil.Uint64(tx.Nonce()),
Value: hexutil.Big(*tx.Value()),
@ -211,21 +215,24 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
To: to,
From: common.NewMixedcaseAddress(account.Address),
}
switch tx.Type() {
case types.LegacyTxType, types.AccessListTxType:
args.GasPrice = (*hexutil.Big)(tx.GasPrice())
case types.DynamicFeeTxType:
args.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap())
args.MaxPriorityFeePerGas = (*hexutil.Big)(tx.GasTipCap())
default:
return nil, fmt.Errorf("unsupported tx type %d", tx.Type())
}
// We should request the default chain id that we're operating with
// (the chain we're executing on)
if chainID != nil && chainID.Sign() != 0 {
args.ChainID = (*hexutil.Big)(chainID)
}
if tx.Type() != types.LegacyTxType {
// However, if the user asked for a particular chain id, then we should
// use that instead.
if tx.ChainId().Sign() != 0 {
args.ChainID = (*hexutil.Big)(tx.ChainId())
}
accessList := tx.AccessList()

View File

@ -96,7 +96,7 @@ func TestWatchNoDir(t *testing.T) {
// Create ks but not the directory that it watches.
rand.Seed(time.Now().UnixNano())
dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watchnodir-test-%d-%d", os.Getpid(), rand.Int()))
ks := NewKeyStore(dir, LightScryptN, LightScryptP)

list := ks.Accounts()
@ -322,7 +322,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
// Create a temporary keystore to test with
rand.Seed(time.Now().UnixNano())
dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int()))
ks := NewKeyStore(dir, LightScryptN, LightScryptP)

list := ks.Accounts()

View File

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build (darwin && !ios && cgo) || freebsd || (linux && !arm64) || netbsd || solaris
// +build darwin,!ios,cgo freebsd linux,!arm64 netbsd solaris

package keystore

View File

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build (darwin && !cgo) || ios || (linux && arm64) || windows || (!darwin && !freebsd && !linux && !netbsd && !solaris)
// +build darwin,!cgo ios linux,arm64 windows !darwin,!freebsd,!linux,!netbsd,!solaris

// This is the fallback implementation of directory watching.

View File

@ -25,6 +25,10 @@ import (
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
) )
// managerSubBufferSize determines how many incoming wallet events
// the manager will buffer in its channel.
const managerSubBufferSize = 50
// Config contains the settings of the global account manager.
//
// TODO(rjl493456442, karalabe, holiman): Get rid of this when account management
@ -33,18 +37,27 @@ type Config struct {
InsecureUnlockAllowed bool // Whether account unlocking in insecure environment is allowed
}
// newBackendEvent lets the manager know it should
// track the given backend for wallet updates.
type newBackendEvent struct {
backend Backend
processed chan struct{} // Informs event emitter that backend has been integrated
}
// Manager is an overarching account manager that can communicate with various
// backends for signing transactions.
type Manager struct {
config *Config // Global account manager configurations
backends map[reflect.Type][]Backend // Index of backends currently registered
updaters []event.Subscription // Wallet update subscriptions for all backends
updates chan WalletEvent // Subscription sink for backend wallet changes
newBackends chan newBackendEvent // Incoming backends to be tracked by the manager
wallets []Wallet // Cache of all wallets from all registered backends

feed event.Feed // Wallet feed notifying of arrivals/departures

quit chan chan error
term chan struct{} // Channel is closed upon termination of the update loop
lock sync.RWMutex
}
@ -57,7 +70,7 @@ func NewManager(config *Config, backends ...Backend) *Manager {
wallets = merge(wallets, backend.Wallets()...)
}
// Subscribe to wallet notifications from all backends
updates := make(chan WalletEvent, managerSubBufferSize)

subs := make([]event.Subscription, len(backends))
for i, backend := range backends {
@ -65,12 +78,14 @@ func NewManager(config *Config, backends ...Backend) *Manager {
}
// Assemble the account manager and return
am := &Manager{
config: config,
backends: make(map[reflect.Type][]Backend),
updaters: subs,
updates: updates,
newBackends: make(chan newBackendEvent),
wallets: wallets,
quit: make(chan chan error),
term: make(chan struct{}),
}
for _, backend := range backends {
kind := reflect.TypeOf(backend)
@ -93,6 +108,14 @@ func (am *Manager) Config() *Config {
return am.config
}
// AddBackend starts the tracking of an additional backend for wallet updates.
// cmd/geth assumes once this func returns the backends have been already integrated.
func (am *Manager) AddBackend(backend Backend) {
done := make(chan struct{})
am.newBackends <- newBackendEvent{backend, done}
<-done
}
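A hedged sketch (not from this diff) of how a caller might use AddBackend to register an extra backend after the manager is already running; the keystore path and scrypt parameters are illustrative only.

// sketch only
am := accounts.NewManager(&accounts.Config{InsecureUnlockAllowed: false})
ks := keystore.NewKeyStore("/tmp/keys", keystore.StandardScryptN, keystore.StandardScryptP)
am.AddBackend(ks) // returns once the update loop has integrated the backend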
// update is the wallet event loop listening for notifications from the backends
// and updating the cache of wallets.
func (am *Manager) update() {
@ -122,10 +145,22 @@ func (am *Manager) update() {
// Notify any listeners of the event
am.feed.Send(event)
case event := <-am.newBackends:
am.lock.Lock()
// Update caches
backend := event.backend
am.wallets = merge(am.wallets, backend.Wallets()...)
am.updaters = append(am.updaters, backend.Subscribe(am.updates))
kind := reflect.TypeOf(backend)
am.backends[kind] = append(am.backends[kind], backend)
am.lock.Unlock()
close(event.processed)
case errc := <-am.quit:
// Manager terminating, return
errc <- nil
// Signals event emitters the loop is not receiving values
// to prevent them from getting stuck.
close(am.term)
return
}
}
@ -133,6 +168,9 @@ func (am *Manager) update() {
// Backends retrieves the backend(s) with the given type from the account manager.
func (am *Manager) Backends(kind reflect.Type) []Backend {
am.lock.RLock()
defer am.lock.RUnlock()
return am.backends[kind]
}

View File

@ -1,29 +1,57 @@
clone_depth: 5
version: "{branch}.{build}"

image:
  - Ubuntu
  - Visual Studio 2019

environment:
  matrix:
    - GETH_ARCH: amd64
      GETH_MINGW: 'C:\msys64\mingw64'
    - GETH_ARCH: 386
      GETH_MINGW: 'C:\msys64\mingw32'

install:
  - git submodule update --init --depth 1
  - go version

for:
  # Linux has its own script without -arch and -cc.
  # The linux builder also runs lint.
  - matrix:
      only:
        - image: Ubuntu
    build_script:
      - go run build/ci.go lint
      - go run build/ci.go install -dlgo
    test_script:
      - go run build/ci.go test -dlgo -coverage

  # linux/386 is disabled.
  - matrix:
      exclude:
        - image: Ubuntu
          GETH_ARCH: 386

  # Windows builds for amd64 + 386.
  - matrix:
      only:
        - image: Visual Studio 2019
    environment:
      # We use gcc from MSYS2 because it is the most recent compiler version available on
      # AppVeyor. Note: gcc.exe only works properly if the corresponding bin/ directory is
      # contained in PATH.
      GETH_CC: '%GETH_MINGW%\bin\gcc.exe'
      PATH: '%GETH_MINGW%\bin;C:\Program Files (x86)\NSIS\;%PATH%'
    build_script:
      - 'echo %GETH_ARCH%'
      - 'echo %GETH_CC%'
      - '%GETH_CC% --version'
      - go run build/ci.go install -dlgo -arch %GETH_ARCH% -cc %GETH_CC%
    after_build:
      # Upload builds. Note that ci.go makes this a no-op in PR builds.
      - go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
      - go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
    test_script:
      - go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage

View File

@ -1,33 +1,37 @@
# This file contains sha256 checksums of optional build dependencies.

2255eb3e4e824dd7d5fcdc2e7f84534371c186312e546fb1086a34c17752f431 go1.17.2.src.tar.gz
7914497a302a132a465d33f5ee044ce05568bacdb390ab805cb75a3435a23f94 go1.17.2.darwin-amd64.tar.gz
ce8771bd3edfb5b28104084b56bbb532eeb47fbb7769c3e664c6223712c30904 go1.17.2.darwin-arm64.tar.gz
8cea5b8d1f8e8cbb58069bfed58954c71c5b1aca2f3c857765dae83bf724d0d7 go1.17.2.freebsd-386.tar.gz
c96e57218fb03e74d683ad63b1684d44c89d5e5b994f36102b33dce21b58499a go1.17.2.freebsd-amd64.tar.gz
8617f2e40d51076983502894181ae639d1d8101bfbc4d7463a2b442f239f5596 go1.17.2.linux-386.tar.gz
f242a9db6a0ad1846de7b6d94d507915d14062660616a61ef7c808a76e4f1676 go1.17.2.linux-amd64.tar.gz
a5a43c9cdabdb9f371d56951b14290eba8ce2f9b0db48fb5fc657943984fd4fc go1.17.2.linux-arm64.tar.gz
04d16105008230a9763005be05606f7eb1c683a3dbf0fbfed4034b23889cb7f2 go1.17.2.linux-armv6l.tar.gz
12e2dc7e0ffeebe77083f267ef6705fec1621cdf2ed6489b3af04a13597ed68d go1.17.2.linux-ppc64le.tar.gz
c4b2349a8d11350ca038b8c57f3cc58dc0b31284bcbed4f7fca39aeed28b4a51 go1.17.2.linux-s390x.tar.gz
8a85257a351996fdf045fe95ed5fdd6917dd48636d562dd11dedf193005a53e0 go1.17.2.windows-386.zip
fa6da0b829a66f5fab7e4e312fd6aa1b2d8f045c7ecee83b3d00f6fe5306759a go1.17.2.windows-amd64.zip
00575c85dc7a129ba892685a456b27a3f3670f71c8bfde1c5ad151f771d55df7 go1.17.2.windows-arm64.zip

d4bd25b9814eeaa2134197dd2c7671bb791eae786d42010d9d788af20dee4bfa golangci-lint-1.42.0-darwin-amd64.tar.gz
e56859c04a2ad5390c6a497b1acb1cc9329ecb1010260c6faae9b5a4c35b35ea golangci-lint-1.42.0-darwin-arm64.tar.gz
14d912a3fa856830339472fc4dc341933adf15f37bdb7130bbbfcf960ecf4809 golangci-lint-1.42.0-freebsd-386.tar.gz
337257fccc9baeb5ee1cd7e70c153e9d9f59d3afde46d631659500048afbdf80 golangci-lint-1.42.0-freebsd-amd64.tar.gz
6debcc266b629359fdd8eef4f4abb05a621604079d27016265afb5b4593b0eff golangci-lint-1.42.0-freebsd-armv6.tar.gz
878f0e190169db2ce9dde8cefbd99adc4fe28b90b68686bbfcfcc2085e6d693e golangci-lint-1.42.0-freebsd-armv7.tar.gz
42c78e31faf62b225363eff1b1d2aa74f9dbcb75686c8914aa3e90d6af65cece golangci-lint-1.42.0-linux-386.tar.gz
6937f62f8e2329e94822dc11c10b871ace5557ae1fcc4ee2f9980cd6aecbc159 golangci-lint-1.42.0-linux-amd64.tar.gz
2cf8d23d96cd854a537b355dab2962b960b88a06b615232599f066afd233f246 golangci-lint-1.42.0-linux-arm64.tar.gz
08b003d1ed61367473886defc957af5301066e62338e5d96a319c34dadc4c1d1 golangci-lint-1.42.0-linux-armv6.tar.gz
c7c00ec4845e806a1f32685f5b150219e180bd6d6a9d584be8d27f0c41d7a1bf golangci-lint-1.42.0-linux-armv7.tar.gz
3650fcf29eb3d8ee326d77791a896b15259eb2d5bf77437dc72e7efe5af6bd40 golangci-lint-1.42.0-linux-mips64.tar.gz
f51ae003fdbca4fef78ba73e2eb736a939c8eaa178cd452234213b489da5a420 golangci-lint-1.42.0-linux-mips64le.tar.gz
1b0bb7b8b22cc4ea7da44fd5ad5faaf6111d0677e01cc6f961b62a96537de2c6 golangci-lint-1.42.0-linux-ppc64le.tar.gz
8cb56927eb75e572450efbe0ff0f9cf3f56dc9faa81d9e8d30d6559fc1d06e6d golangci-lint-1.42.0-linux-riscv64.tar.gz
5ac41cd31825a176b21505a371a7b307cd9cdf17df0f35bbb3bf1466f9356ccc golangci-lint-1.42.0-linux-s390x.tar.gz
e1cebd2af621ac4b64c20937df92c3819264f2174c92f51e196db1e64ae097e0 golangci-lint-1.42.0-windows-386.zip
7e70fcde8e87a17cae0455df07d257ebc86669f3968d568e12727fa24bbe9883 golangci-lint-1.42.0-windows-amd64.zip
59da7ce1bda432616bfc28ae663e52c3675adee8d9bf5959fafd657c159576ab golangci-lint-1.42.0-windows-armv6.zip
65f62dda937bfcede0326ac77abe947ce1548931e6e13298ca036cb31f224db5 golangci-lint-1.42.0-windows-armv7.zip

View File

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build none
// +build none

/*
@ -129,19 +130,13 @@ var (
// Distros for which packages are created.
// Note: vivid is unsupported because there is no golang-1.6 package for it.
// Note: the following Ubuntu releases have been officially deprecated on Launchpad:
//   wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy
debDistroGoBoots = map[string]string{
"trusty": "golang-1.11",
"xenial": "golang-go",
"bionic": "golang-go",
"focal": "golang-go",
"hirsute": "golang-go",
}
@ -153,7 +148,7 @@ var (
// This is the version of go that will be downloaded by
//
//     go run ci.go install -dlgo
dlgoVersion = "1.17.2"
)

var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@ -265,6 +260,11 @@ func buildFlags(env build.Environment) (flags []string) {
if runtime.GOOS == "darwin" { if runtime.GOOS == "darwin" {
ld = append(ld, "-s") ld = append(ld, "-s")
} }
// Enforce the stacksize to 8M, which is the case on most platforms apart from
// alpine Linux.
if runtime.GOOS == "linux" {
ld = append(ld, "-extldflags", "-Wl,-z,stack-size=0x800000")
}
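// For illustration (a sketch, not part of this diff), the flag added above ends up
// passed to the toolchain roughly as:
//   go build -ldflags "-extldflags -Wl,-z,stack-size=0x800000" ...
// i.e. the external linker reserves an 8 MiB (0x800000-byte) stack on linux.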
if len(ld) > 0 {
flags = append(flags, "-ldflags", strings.Join(ld, " "))
}
@ -282,6 +282,7 @@ func doTest(cmdline []string) {
cc = flag.String("cc", "", "Sets C compiler binary")
coverage = flag.Bool("coverage", false, "Whether to record code coverage")
verbose = flag.Bool("v", false, "Whether to log verbosely")
race = flag.Bool("race", false, "Execute the race detector")
)
flag.CommandLine.Parse(cmdline)
@ -302,6 +303,9 @@ func doTest(cmdline []string) {
if *verbose {
gotest.Args = append(gotest.Args, "-v")
}
if *race {
gotest.Args = append(gotest.Args, "-race")
}
packages := []string{"./..."}
if len(flag.CommandLine.Args()) > 0 {
@ -330,7 +334,7 @@ func doLint(cmdline []string) {
// downloadLinter downloads and unpacks golangci-lint.
func downloadLinter(cachedir string) string {
const version = "1.42.0"

csdb := build.MustLoadChecksums("build/checksums.txt")
base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)
@ -450,7 +454,7 @@ func maybeSkipArchive(env build.Environment) {
os.Exit(0)
}
if env.Branch != "master" && !strings.HasPrefix(env.Tag, "v1.") {
log.Printf("skipping archive creation because branch %q, tag %q is not on the inclusion list", env.Branch, env.Tag)
os.Exit(0)
}
}
@ -492,7 +496,7 @@ func doDocker(cmdline []string) {
case env.Branch == "master": case env.Branch == "master":
tags = []string{"latest"} tags = []string{"latest"}
case strings.HasPrefix(env.Tag, "v1."): case strings.HasPrefix(env.Tag, "v1."):
tags = []string{"stable", fmt.Sprintf("release-1.%d", params.VersionMinor), params.Version} tags = []string{"stable", fmt.Sprintf("release-1.%d", params.VersionMinor), "v" + params.Version}
} }
// If architecture specific image builds are requested, build and push them // If architecture specific image builds are requested, build and push them
if *image { if *image {

View File

@ -23,7 +23,7 @@ import (
"os" "os"
"strings" "strings"
"github.com/ethereum/go-ethereum/signer/core" "github.com/ethereum/go-ethereum/signer/core/apitypes"
"github.com/ethereum/go-ethereum/signer/fourbyte" "github.com/ethereum/go-ethereum/signer/fourbyte"
) )
@ -41,7 +41,7 @@ func parse(data []byte) {
if err != nil {
die(err)
}
messages := apitypes.ValidationMessages{}
db.ValidateCallData(nil, data, &messages)
for _, m := range messages.Messages {
fmt.Printf("%v: %v\n", m.Typ, m.Message)

View File

@ -50,10 +50,10 @@ import (
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/signer/core" "github.com/ethereum/go-ethereum/signer/core"
"github.com/ethereum/go-ethereum/signer/core/apitypes"
"github.com/ethereum/go-ethereum/signer/fourbyte" "github.com/ethereum/go-ethereum/signer/fourbyte"
"github.com/ethereum/go-ethereum/signer/rules" "github.com/ethereum/go-ethereum/signer/rules"
"github.com/ethereum/go-ethereum/signer/storage" "github.com/ethereum/go-ethereum/signer/storage"
"github.com/mattn/go-colorable" "github.com/mattn/go-colorable"
"github.com/mattn/go-isatty" "github.com/mattn/go-isatty"
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
@ -657,7 +657,7 @@ func signer(c *cli.Context) error {
cors := utils.SplitAndTrim(c.GlobalString(utils.HTTPCORSDomainFlag.Name)) cors := utils.SplitAndTrim(c.GlobalString(utils.HTTPCORSDomainFlag.Name))
srv := rpc.NewServer() srv := rpc.NewServer()
err := node.RegisterApisFromWhitelist(rpcAPI, []string{"account"}, srv, false) err := node.RegisterApis(rpcAPI, []string{"account"}, srv, false)
if err != nil { if err != nil {
utils.Fatalf("Could not register API: %w", err) utils.Fatalf("Could not register API: %w", err)
} }
@ -923,7 +923,7 @@ func testExternalUI(api *core.SignerAPI) {
time.Sleep(delay) time.Sleep(delay)
data := hexutil.Bytes([]byte{}) data := hexutil.Bytes([]byte{})
to := common.NewMixedcaseAddress(a) to := common.NewMixedcaseAddress(a)
tx := core.SendTxArgs{ tx := apitypes.SendTxArgs{
Data: &data, Data: &data,
Nonce: 0x1, Nonce: 0x1,
Value: hexutil.Big(*big.NewInt(6)), Value: hexutil.Big(*big.NewInt(6)),
@ -1055,11 +1055,11 @@ func GenDoc(ctx *cli.Context) {
data := hexutil.Bytes([]byte{0x01, 0x02, 0x03, 0x04}) data := hexutil.Bytes([]byte{0x01, 0x02, 0x03, 0x04})
add("SignTxRequest", desc, &core.SignTxRequest{ add("SignTxRequest", desc, &core.SignTxRequest{
Meta: meta, Meta: meta,
Callinfo: []core.ValidationInfo{ Callinfo: []apitypes.ValidationInfo{
{Typ: "Warning", Message: "Something looks odd, show this message as a warning"}, {Typ: "Warning", Message: "Something looks odd, show this message as a warning"},
{Typ: "Info", Message: "User should see this as well"}, {Typ: "Info", Message: "User should see this as well"},
}, },
Transaction: core.SendTxArgs{ Transaction: apitypes.SendTxArgs{
Data: &data, Data: &data,
Nonce: 0x1, Nonce: 0x1,
Value: hexutil.Big(*big.NewInt(6)), Value: hexutil.Big(*big.NewInt(6)),
@ -1075,7 +1075,7 @@ func GenDoc(ctx *cli.Context) {
add("SignTxResponse - approve", "Response to request to sign a transaction. This response needs to contain the `transaction`"+ add("SignTxResponse - approve", "Response to request to sign a transaction. This response needs to contain the `transaction`"+
", because the UI is free to make modifications to the transaction.", ", because the UI is free to make modifications to the transaction.",
&core.SignTxResponse{Approved: true, &core.SignTxResponse{Approved: true,
Transaction: core.SendTxArgs{ Transaction: apitypes.SendTxArgs{
Data: &data, Data: &data,
Nonce: 0x4, Nonce: 0x4,
Value: hexutil.Big(*big.NewInt(6)), Value: hexutil.Big(*big.NewInt(6)),

View File

@ -39,16 +39,6 @@ type Chain struct {
chainConfig *params.ChainConfig chainConfig *params.ChainConfig
} }
func (c *Chain) WriteTo(writer io.Writer) error {
for _, block := range c.blocks {
if err := rlp.Encode(writer, block); err != nil {
return err
}
}
return nil
}
// Len returns the length of the chain. // Len returns the length of the chain.
func (c *Chain) Len() int { func (c *Chain) Len() int {
return len(c.blocks) return len(c.blocks)

View File

@ -242,9 +242,17 @@ func (s *Suite) createSendAndRecvConns(isEth66 bool) (*Conn, *Conn, error) {
return sendConn, recvConn, nil return sendConn, recvConn, nil
} }
func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
if c.negotiatedProtoVersion == 66 {
_, msg := c.readAndServe66(chain, timeout)
return msg
}
return c.readAndServe65(chain, timeout)
}
// readAndServe serves GetBlockHeaders requests while waiting // readAndServe serves GetBlockHeaders requests while waiting
// on another message from the node. // on another message from the node.
func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message { func (c *Conn) readAndServe65(chain *Chain, timeout time.Duration) Message {
start := time.Now() start := time.Now()
for time.Since(start) < timeout { for time.Since(start) < timeout {
c.SetReadDeadline(time.Now().Add(5 * time.Second)) c.SetReadDeadline(time.Now().Add(5 * time.Second))
@ -279,8 +287,8 @@ func (c *Conn) readAndServe66(chain *Chain, timeout time.Duration) (uint64, Mess
switch msg := msg.(type) { switch msg := msg.(type) {
case *Ping: case *Ping:
c.Write(&Pong{}) c.Write(&Pong{})
case *GetBlockHeaders: case GetBlockHeaders:
headers, err := chain.GetHeaders(*msg) headers, err := chain.GetHeaders(msg)
if err != nil { if err != nil {
return 0, errorf("could not get headers for inbound header request: %v", err) return 0, errorf("could not get headers for inbound header request: %v", err)
} }

View File

@ -45,7 +45,7 @@ func TestEthSuite(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("could not create new test suite: %v", err) t.Fatalf("could not create new test suite: %v", err)
} }
for _, test := range suite.AllEthTests() { for _, test := range suite.Eth66Tests() {
t.Run(test.Name, func(t *testing.T) { t.Run(test.Name, func(t *testing.T) {
result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout) result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
if result[0].Failed { if result[0].Failed {

View File

@ -79,14 +79,44 @@ func BasicPing(t *utesting.T) {
To: te.remoteEndpoint(), To: te.remoteEndpoint(),
Expiration: futureExpiration(), Expiration: futureExpiration(),
}) })
if err := te.checkPingPong(pingHash); err != nil {
reply, _, _ := te.read(te.l1)
if err := te.checkPong(reply, pingHash); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
// checkPong verifies that reply is a valid PONG matching the given ping hash. // checkPingPong verifies that the remote side sends both a PONG with the
// correct hash, and a PING.
// The two packets do not have to be in any particular order.
func (te *testenv) checkPingPong(pingHash []byte) error {
var (
pings int
pongs int
)
for i := 0; i < 2; i++ {
reply, _, err := te.read(te.l1)
if err != nil {
return err
}
switch reply.Kind() {
case v4wire.PongPacket:
if err := te.checkPong(reply, pingHash); err != nil {
return err
}
pongs++
case v4wire.PingPacket:
pings++
default:
return fmt.Errorf("expected PING or PONG, got %v %v", reply.Name(), reply)
}
}
if pongs == 1 && pings == 1 {
return nil
}
return fmt.Errorf("expected 1 PING (got %d) and 1 PONG (got %d)", pings, pongs)
}
// checkPong verifies that reply is a valid PONG matching the given ping hash.
func (te *testenv) checkPong(reply v4wire.Packet, pingHash []byte) error { func (te *testenv) checkPong(reply v4wire.Packet, pingHash []byte) error {
if reply == nil { if reply == nil {
return fmt.Errorf("expected PONG reply, got nil") return fmt.Errorf("expected PONG reply, got nil")
@ -119,9 +149,7 @@ func PingWrongTo(t *utesting.T) {
To: wrongEndpoint, To: wrongEndpoint,
Expiration: futureExpiration(), Expiration: futureExpiration(),
}) })
if err := te.checkPingPong(pingHash); err != nil {
reply, _, _ := te.read(te.l1)
if err := te.checkPong(reply, pingHash); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -139,8 +167,7 @@ func PingWrongFrom(t *utesting.T) {
Expiration: futureExpiration(), Expiration: futureExpiration(),
}) })
reply, _, _ := te.read(te.l1) if err := te.checkPingPong(pingHash); err != nil {
if err := te.checkPong(reply, pingHash); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -161,8 +188,7 @@ func PingExtraData(t *utesting.T) {
JunkData2: []byte{9, 8, 7, 6, 5, 4, 3, 2, 1}, JunkData2: []byte{9, 8, 7, 6, 5, 4, 3, 2, 1},
}) })
reply, _, _ := te.read(te.l1) if err := te.checkPingPong(pingHash); err != nil {
if err := te.checkPong(reply, pingHash); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -183,8 +209,7 @@ func PingExtraDataWrongFrom(t *utesting.T) {
JunkData2: []byte{9, 8, 7, 6, 5, 4, 3, 2, 1}, JunkData2: []byte{9, 8, 7, 6, 5, 4, 3, 2, 1},
} }
pingHash := te.send(te.l1, &req) pingHash := te.send(te.l1, &req)
reply, _, _ := te.read(te.l1) if err := te.checkPingPong(pingHash); err != nil {
if err := te.checkPong(reply, pingHash); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -240,9 +265,9 @@ func BondThenPingWithWrongFrom(t *utesting.T) {
To: te.remoteEndpoint(), To: te.remoteEndpoint(),
Expiration: futureExpiration(), Expiration: futureExpiration(),
}) })
if reply, _, err := te.read(te.l1); err != nil {
reply, _, _ := te.read(te.l1) t.Fatal(err)
if err := te.checkPong(reply, pingHash); err != nil { } else if err := te.checkPong(reply, pingHash); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }

View File

@ -4,15 +4,15 @@ The `evm t8n` tool is a stateless state transition utility. It is a utility
which can which can
1. Take a prestate, including 1. Take a prestate, including
- Accounts, - Accounts,
- Block context information, - Block context information,
- Previous blockshashes (*optional) - Previous blockshashes (*optional)
2. Apply a set of transactions, 2. Apply a set of transactions,
3. Apply a mining-reward (*optional), 3. Apply a mining-reward (*optional),
4. And generate a post-state, including 4. And generate a post-state, including
- State root, transaction root, receipt root, - State root, transaction root, receipt root,
- Information about rejected transactions, - Information about rejected transactions,
- Optionally: a full or partial post-state dump - Optionally: a full or partial post-state dump
## Specification ## Specification
@ -37,6 +37,8 @@ Command line params that has to be supported are
--output.result result Determines where to put the result (stateroot, txroot etc) of the post-state. --output.result result Determines where to put the result (stateroot, txroot etc) of the post-state.
`stdout` - into the stdout output `stdout` - into the stdout output
`stderr` - into the stderr output `stderr` - into the stderr output
--output.body value If set, the RLP of the transactions (block body) will be written to this file.
--input.txs stdin stdin or file name of where to find the transactions to apply. If the file suffix is '.rlp', then the data is interpreted as an RLP list of signed transactions. The '.rlp' format is identical to the output.body format. (default: "txs.json")
--state.fork value Name of ruleset to use. --state.fork value Name of ruleset to use.
--state.chainid value ChainID to use (default: 1) --state.chainid value ChainID to use (default: 1)
--state.reward value Mining reward. Set to -1 to disable (default: 0) --state.reward value Mining reward. Set to -1 to disable (default: 0)
@ -110,7 +112,10 @@ Two resulting files:
} }
], ],
"rejected": [ "rejected": [
1 {
"index": 1,
"error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
}
] ]
} }
``` ```
@ -156,7 +161,10 @@ Output:
} }
], ],
"rejected": [ "rejected": [
1 {
"index": 1,
"error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
}
] ]
} }
} }
@ -168,9 +176,9 @@ Mining rewards and ommer rewards might need to be added. This is how those are a
- `block_reward` is the block mining reward for the miner (`0xaa`), of a block at height `N`. - `block_reward` is the block mining reward for the miner (`0xaa`), of a block at height `N`.
- For each ommer (mined by `0xbb`), with blocknumber `N-delta` - For each ommer (mined by `0xbb`), with blocknumber `N-delta`
- (where `delta` is the difference between the current block and the ommer) - (where `delta` is the difference between the current block and the ommer)
- The account `0xbb` (ommer miner) is awarded `(8-delta)/ 8 * block_reward` - The account `0xbb` (ommer miner) is awarded `(8-delta)/ 8 * block_reward`
- The account `0xaa` (block miner) is awarded `block_reward / 32` - The account `0xaa` (block miner) is awarded `block_reward / 32`
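
To make the arithmetic concrete, here is a small self-contained Go sketch of the reward rules listed above (an illustration only, not the tool's implementation; it assumes a single ommer at `delta = 1` and the `0x80` reward used in the example below):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	blockReward := big.NewInt(0x80) // the reward used in the example below
	delta := int64(1)               // ommer mined one block behind the current one

	// Ommer miner (0xbb): (8-delta)/8 * block_reward
	ommerReward := new(big.Int).Mul(big.NewInt(8-delta), blockReward)
	ommerReward.Div(ommerReward, big.NewInt(8))

	// Block miner (0xaa): block_reward plus block_reward/32 per included ommer
	minerReward := new(big.Int).Add(blockReward, new(big.Int).Div(blockReward, big.NewInt(32)))

	fmt.Println("ommer miner (0xbb):", ommerReward) // 112 (0x70)
	fmt.Println("block miner (0xaa):", minerReward) // 132 (0x84)
}
```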
To make `state_t8n` apply these, the following inputs are required: To make `state_t8n` apply these, the following inputs are required:
@ -200,7 +208,7 @@ Example:
] ]
} }
``` ```
When applying this, using a reward of `0x08` When applying this, using a reward of `0x80`
Output: Output:
```json ```json
{ {
@ -220,7 +228,7 @@ Output:
### Future EIPS ### Future EIPS
It is also possible to experiment with future eips that are not yet defined in a hard fork. It is also possible to experiment with future eips that are not yet defined in a hard fork.
Example, putting EIP-1344 into Frontier: Example, putting EIP-1344 into Frontier:
``` ```
./evm t8n --state.fork=Frontier+1344 --input.pre=./testdata/1/pre.json --input.txs=./testdata/1/txs.json --input.env=/testdata/1/env.json ./evm t8n --state.fork=Frontier+1344 --input.pre=./testdata/1/pre.json --input.txs=./testdata/1/txs.json --input.env=/testdata/1/env.json
``` ```
@ -229,41 +237,102 @@ Example, putting EIP-1344 into Frontier:
The `BLOCKHASH` opcode requires blockhashes to be provided by the caller, inside the `env`. The `BLOCKHASH` opcode requires blockhashes to be provided by the caller, inside the `env`.
If a required blockhash is not provided, the exit code should be `4`: If a required blockhash is not provided, the exit code should be `4`:
Example where blockhashes are provided: Example where blockhashes are provided:
``` ```
./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace ./evm --verbosity=1 t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace
INFO [07-27|11:53:40.960] Trie dumping started root=b7341d..857ea1
INFO [07-27|11:53:40.960] Trie dumping complete accounts=3 elapsed="103.298µs"
INFO [07-27|11:53:40.960] Wrote file file=alloc.json
INFO [07-27|11:53:40.960] Wrote file file=result.json
``` ```
``` ```
cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2 cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2
``` ```
``` ```
{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"PUSH1","error":""} {"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnData":"0x","depth":1,"refund":0,"opName":"PUSH1","error":""}
{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"BLOCKHASH","error":""} {"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnData":"0x","depth":1,"refund":0,"opName":"BLOCKHASH","error":""}
{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"STOP","error":""} {"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnData":"0x","depth":1,"refund":0,"opName":"STOP","error":""}
{"output":"","gasUsed":"0x17","time":142709} {"output":"","gasUsed":"0x17","time":156276}
``` ```
In this example, the caller has not provided the required blockhash: In this example, the caller has not provided the required blockhash:
``` ```
./evm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace ./evm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace
```
```
ERROR(4): getHash(3) invoked, blockhash for that block not provided ERROR(4): getHash(3) invoked, blockhash for that block not provided
``` ```
Error code: 4 Error code: 4
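
As an illustration of this behaviour (not the tool's actual internals), a minimal Go sketch of such a lookup could look like the following; the map contents are an assumption based on the trace above:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// getHash returns a blockhash from the caller-supplied env; a missing entry is
// a hard error, which t8n surfaces as exit code 4.
func getHash(provided map[uint64]common.Hash, num uint64) (common.Hash, error) {
	if h, ok := provided[num]; ok {
		return h, nil
	}
	return common.Hash{}, fmt.Errorf("getHash(%d) invoked, blockhash for that block not provided", num)
}

func main() {
	provided := map[uint64]common.Hash{
		1: common.HexToHash("0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"),
	}
	if _, err := getHash(provided, 3); err != nil {
		fmt.Println("ERROR(4):", err)
	}
}
```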
### Chaining ### Chaining
Another thing that can be done, is to chain invocations: Another thing that can be done, is to chain invocations:
``` ```
./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout | ./evm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json ./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout | ./evm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json
INFO [01-21|22:41:22.963] rejected tx index=1 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" INFO [07-27|11:53:41.049] rejected tx index=1 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
INFO [01-21|22:41:22.966] rejected tx index=0 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" INFO [07-27|11:53:41.050] Trie dumping started root=84208a..ae4e13
INFO [01-21|22:41:22.967] rejected tx index=1 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" INFO [07-27|11:53:41.050] Trie dumping complete accounts=3 elapsed="59.412µs"
INFO [07-27|11:53:41.050] Wrote file file=result.json
INFO [07-27|11:53:41.051] rejected tx index=0 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
INFO [07-27|11:53:41.051] rejected tx index=1 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
INFO [07-27|11:53:41.052] Trie dumping started root=84208a..ae4e13
INFO [07-27|11:53:41.052] Trie dumping complete accounts=3 elapsed="45.734µs"
INFO [07-27|11:53:41.052] Wrote file file=alloc.json
INFO [07-27|11:53:41.052] Wrote file file=result.json
``` ```
What happened here, is that we first applied two identical transactions, so the second one was rejected. What happened here, is that we first applied two identical transactions, so the second one was rejected.
Then, taking the poststate alloc as the input for the next state, we tried again to include Then, taking the poststate alloc as the input for the next state, we tried again to include
the same two transactions: this time, both failed due to too low nonce. the same two transactions: this time, both failed due to too low nonce.
In order to meaningfully chain invocations, one would need to provide meaningful new `env`, otherwise the In order to meaningfully chain invocations, one would need to provide meaningful new `env`, otherwise the
actual blocknumber (exposed to the EVM) would not increase. actual blocknumber (exposed to the EVM) would not increase.
### Transactions in RLP form
It is possible to provide already-signed transactions as input, using an `input.txs` value which ends with the `rlp` suffix.
The input format for RLP-form transactions is _identical_ to the _output_ format for block bodies. Therefore, it's fully possible
to use the evm to go from `json` input to `rlp` input.
The following command takes the **json** transactions in `./testdata/13/txs.json` and signs them. After execution, they are output to `signed_txs.rlp`:
```
./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./testdata/13/txs.json --input.env=./testdata/13/env.json --output.result=alloc_jsontx.json --output.body=signed_txs.rlp
INFO [07-27|11:53:41.124] Trie dumping started root=e4b924..6aef61
INFO [07-27|11:53:41.124] Trie dumping complete accounts=3 elapsed="94.284µs"
INFO [07-27|11:53:41.125] Wrote file file=alloc.json
INFO [07-27|11:53:41.125] Wrote file file=alloc_jsontx.json
INFO [07-27|11:53:41.125] Wrote file file=signed_txs.rlp
```
The `output.body` is the rlp-list of transactions, encoded in hex and placed in a string following `json` encoding rules:
```
cat signed_txs.rlp
"0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9"
```
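
For completeness, here is a small Go sketch of how that file can be consumed programmatically, mirroring what t8n does for `.rlp` inputs (a sketch, assuming the file layout shown above):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// The file is a hex string wrapped in JSON quotes.
	raw, err := os.ReadFile("signed_txs.rlp")
	if err != nil {
		panic(err)
	}
	var hexBody string
	if err := json.Unmarshal(raw, &hexBody); err != nil {
		panic(err)
	}
	// Decode the RLP list back into transactions.
	var txs types.Transactions
	if err := rlp.DecodeBytes(common.FromHex(hexBody), &txs); err != nil {
		panic(err)
	}
	for _, tx := range txs {
		fmt.Printf("nonce %d, hash %v\n", tx.Nonce(), tx.Hash())
	}
}
```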
We can use `rlpdump` to check what the contents are:
```
rlpdump -hex $(cat signed_txs.rlp | jq -r .)
[
02f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904,
02f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9,
]
```
Now we can use those (or any other already-signed transactions) as input, like so:
```
./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./signed_txs.rlp --input.env=./testdata/13/env.json --output.result=alloc_rlptx.json
INFO [07-27|11:53:41.253] Trie dumping started root=e4b924..6aef61
INFO [07-27|11:53:41.253] Trie dumping complete accounts=3 elapsed="128.445µs"
INFO [07-27|11:53:41.253] Wrote file file=alloc.json
INFO [07-27|11:53:41.255] Wrote file file=alloc_rlptx.json
```
You might have noticed that the results from these two invocations were stored in two separate files, so we can now check that they match.
```
cat alloc_jsontx.json | jq .stateRoot && cat alloc_rlptx.json | jq .stateRoot
"0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61"
"0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61"
```
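
The same check can be expressed as a short Go program, assuming both result files are in the current directory:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// stateRoot reads a t8n result file and returns its stateRoot field.
func stateRoot(path string) string {
	raw, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	var res struct {
		StateRoot string `json:"stateRoot"`
	}
	if err := json.Unmarshal(raw, &res); err != nil {
		panic(err)
	}
	return res.StateRoot
}

func main() {
	a, b := stateRoot("alloc_jsontx.json"), stateRoot("alloc_rlptx.json")
	fmt.Println(a)
	fmt.Println(b)
	fmt.Println("match:", a == b)
}
```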

View File

@ -46,7 +46,7 @@ func disasmCmd(ctx *cli.Context) error {
case ctx.GlobalIsSet(InputFlag.Name): case ctx.GlobalIsSet(InputFlag.Name):
in = ctx.GlobalString(InputFlag.Name) in = ctx.GlobalString(InputFlag.Name)
default: default:
return errors.New("Missing filename or --input value") return errors.New("missing filename or --input value")
} }
code := strings.TrimSpace(in) code := strings.TrimSpace(in)

View File

@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
@ -46,13 +47,14 @@ type Prestate struct {
// ExecutionResult contains the execution status after running a state test, any // ExecutionResult contains the execution status after running a state test, any
// error that might have occurred and a dump of the final state if requested. // error that might have occurred and a dump of the final state if requested.
type ExecutionResult struct { type ExecutionResult struct {
StateRoot common.Hash `json:"stateRoot"` StateRoot common.Hash `json:"stateRoot"`
TxRoot common.Hash `json:"txRoot"` TxRoot common.Hash `json:"txRoot"`
ReceiptRoot common.Hash `json:"receiptRoot"` ReceiptRoot common.Hash `json:"receiptRoot"`
LogsHash common.Hash `json:"logsHash"` LogsHash common.Hash `json:"logsHash"`
Bloom types.Bloom `json:"logsBloom" gencodec:"required"` Bloom types.Bloom `json:"logsBloom" gencodec:"required"`
Receipts types.Receipts `json:"receipts"` Receipts types.Receipts `json:"receipts"`
Rejected []*rejectedTx `json:"rejected,omitempty"` Rejected []*rejectedTx `json:"rejected,omitempty"`
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
} }
type ommer struct { type ommer struct {
@ -62,23 +64,28 @@ type ommer struct {
//go:generate gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go //go:generate gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go
type stEnv struct { type stEnv struct {
Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
Difficulty *big.Int `json:"currentDifficulty" gencodec:"required"` Difficulty *big.Int `json:"currentDifficulty"`
GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` ParentDifficulty *big.Int `json:"parentDifficulty"`
Number uint64 `json:"currentNumber" gencodec:"required"` GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` Number uint64 `json:"currentNumber" gencodec:"required"`
BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` Timestamp uint64 `json:"currentTimestamp" gencodec:"required"`
Ommers []ommer `json:"ommers,omitempty"` ParentTimestamp uint64 `json:"parentTimestamp,omitempty"`
BaseFee *big.Int `json:"currentBaseFee,omitempty"` BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
Ommers []ommer `json:"ommers,omitempty"`
BaseFee *big.Int `json:"currentBaseFee,omitempty"`
ParentUncleHash common.Hash `json:"parentUncleHash"`
} }
type stEnvMarshaling struct { type stEnvMarshaling struct {
Coinbase common.UnprefixedAddress Coinbase common.UnprefixedAddress
Difficulty *math.HexOrDecimal256 Difficulty *math.HexOrDecimal256
GasLimit math.HexOrDecimal64 ParentDifficulty *math.HexOrDecimal256
Number math.HexOrDecimal64 GasLimit math.HexOrDecimal64
Timestamp math.HexOrDecimal64 Number math.HexOrDecimal64
BaseFee *math.HexOrDecimal256 Timestamp math.HexOrDecimal64
ParentTimestamp math.HexOrDecimal64
BaseFee *math.HexOrDecimal256
} }
type rejectedTx struct { type rejectedTx struct {
@ -247,6 +254,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
LogsHash: rlpHash(statedb.Logs()), LogsHash: rlpHash(statedb.Logs()),
Receipts: receipts, Receipts: receipts,
Rejected: rejectedTxs, Rejected: rejectedTxs,
Difficulty: (*math.HexOrDecimal256)(vmContext.Difficulty),
} }
return statedb, execRs, nil return statedb, execRs, nil
} }
@ -274,3 +282,23 @@ func rlpHash(x interface{}) (h common.Hash) {
hw.Sum(h[:0]) hw.Sum(h[:0])
return h return h
} }
// calcDifficulty is based on ethash.CalcDifficulty. This method is used in case
// the caller does not provide an explicit difficulty, but instead provides only
// parent timestamp + difficulty.
// Note: this method only works for ethash engine.
func calcDifficulty(config *params.ChainConfig, number, currentTime, parentTime uint64,
parentDifficulty *big.Int, parentUncleHash common.Hash) *big.Int {
uncleHash := parentUncleHash
if uncleHash == (common.Hash{}) {
uncleHash = types.EmptyUncleHash
}
parent := &types.Header{
ParentHash: common.Hash{},
UncleHash: uncleHash,
Difficulty: parentDifficulty,
Number: new(big.Int).SetUint64(number - 1),
Time: parentTime,
}
return ethash.CalcDifficulty(config, currentTime, parent)
}
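
For illustration, the same fallback can be exercised directly against `ethash.CalcDifficulty` in a standalone program; the parent values below are made up:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Parent header reconstructed from parentDifficulty/parentTimestamp,
	// as the calcDifficulty helper above does when currentDifficulty is omitted.
	parent := &types.Header{
		UncleHash:  types.EmptyUncleHash,
		Difficulty: big.NewInt(0x20000),
		Number:     big.NewInt(4),
		Time:       1000,
	}
	d := ethash.CalcDifficulty(params.MainnetChainConfig, 1015, parent)
	fmt.Println("currentDifficulty:", d)
}
```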

View File

@ -30,7 +30,7 @@ var (
Name: "trace", Name: "trace",
Usage: "Output full trace logs to files <txhash>.jsonl", Usage: "Output full trace logs to files <txhash>.jsonl",
} }
TraceDisableMemoryFlag = cli.BoolFlag{ TraceDisableMemoryFlag = cli.BoolTFlag{
Name: "trace.nomemory", Name: "trace.nomemory",
Usage: "Disable full memory dump in traces", Usage: "Disable full memory dump in traces",
} }
@ -38,7 +38,7 @@ var (
Name: "trace.nostack", Name: "trace.nostack",
Usage: "Disable stack output in traces", Usage: "Disable stack output in traces",
} }
TraceDisableReturnDataFlag = cli.BoolFlag{ TraceDisableReturnDataFlag = cli.BoolTFlag{
Name: "trace.noreturndata", Name: "trace.noreturndata",
Usage: "Disable return data output in traces", Usage: "Disable return data output in traces",
} }
@ -79,8 +79,10 @@ var (
Value: "env.json", Value: "env.json",
} }
InputTxsFlag = cli.StringFlag{ InputTxsFlag = cli.StringFlag{
Name: "input.txs", Name: "input.txs",
Usage: "`stdin` or file name of where to find the transactions to apply.", Usage: "`stdin` or file name of where to find the transactions to apply. " +
"If the file prefix is '.rlp', then the data is interpreted as an RLP list of signed transactions." +
"The '.rlp' format is identical to the output.body format.",
Value: "txs.json", Value: "txs.json",
} }
RewardFlag = cli.Int64Flag{ RewardFlag = cli.Int64Flag{

View File

@ -16,38 +16,47 @@ var _ = (*stEnvMarshaling)(nil)
// MarshalJSON marshals as JSON. // MarshalJSON marshals as JSON.
func (s stEnv) MarshalJSON() ([]byte, error) { func (s stEnv) MarshalJSON() ([]byte, error) {
type stEnv struct { type stEnv struct {
Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
Ommers []ommer `json:"ommers,omitempty"` ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
Ommers []ommer `json:"ommers,omitempty"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
ParentUncleHash common.Hash `json:"parentUncleHash"`
} }
var enc stEnv var enc stEnv
enc.Coinbase = common.UnprefixedAddress(s.Coinbase) enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty) enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty)
enc.GasLimit = math.HexOrDecimal64(s.GasLimit) enc.GasLimit = math.HexOrDecimal64(s.GasLimit)
enc.Number = math.HexOrDecimal64(s.Number) enc.Number = math.HexOrDecimal64(s.Number)
enc.Timestamp = math.HexOrDecimal64(s.Timestamp) enc.Timestamp = math.HexOrDecimal64(s.Timestamp)
enc.ParentTimestamp = math.HexOrDecimal64(s.ParentTimestamp)
enc.BlockHashes = s.BlockHashes enc.BlockHashes = s.BlockHashes
enc.Ommers = s.Ommers enc.Ommers = s.Ommers
enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee) enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee)
enc.ParentUncleHash = s.ParentUncleHash
return json.Marshal(&enc) return json.Marshal(&enc)
} }
// UnmarshalJSON unmarshals from JSON. // UnmarshalJSON unmarshals from JSON.
func (s *stEnv) UnmarshalJSON(input []byte) error { func (s *stEnv) UnmarshalJSON(input []byte) error {
type stEnv struct { type stEnv struct {
Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
Ommers []ommer `json:"ommers,omitempty"` ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
Ommers []ommer `json:"ommers,omitempty"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
ParentUncleHash *common.Hash `json:"parentUncleHash"`
} }
var dec stEnv var dec stEnv
if err := json.Unmarshal(input, &dec); err != nil { if err := json.Unmarshal(input, &dec); err != nil {
@ -57,10 +66,12 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'currentCoinbase' for stEnv") return errors.New("missing required field 'currentCoinbase' for stEnv")
} }
s.Coinbase = common.Address(*dec.Coinbase) s.Coinbase = common.Address(*dec.Coinbase)
if dec.Difficulty == nil { if dec.Difficulty != nil {
return errors.New("missing required field 'currentDifficulty' for stEnv") s.Difficulty = (*big.Int)(dec.Difficulty)
}
if dec.ParentDifficulty != nil {
s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty)
} }
s.Difficulty = (*big.Int)(dec.Difficulty)
if dec.GasLimit == nil { if dec.GasLimit == nil {
return errors.New("missing required field 'currentGasLimit' for stEnv") return errors.New("missing required field 'currentGasLimit' for stEnv")
} }
@ -73,6 +84,9 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'currentTimestamp' for stEnv") return errors.New("missing required field 'currentTimestamp' for stEnv")
} }
s.Timestamp = uint64(*dec.Timestamp) s.Timestamp = uint64(*dec.Timestamp)
if dec.ParentTimestamp != nil {
s.ParentTimestamp = uint64(*dec.ParentTimestamp)
}
if dec.BlockHashes != nil { if dec.BlockHashes != nil {
s.BlockHashes = dec.BlockHashes s.BlockHashes = dec.BlockHashes
} }
@ -82,5 +96,8 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
if dec.BaseFee != nil { if dec.BaseFee != nil {
s.BaseFee = (*big.Int)(dec.BaseFee) s.BaseFee = (*big.Int)(dec.BaseFee)
} }
if dec.ParentUncleHash != nil {
s.ParentUncleHash = *dec.ParentUncleHash
}
return nil return nil
} }

View File

@ -0,0 +1,147 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package t8ntool
import (
"encoding/json"
"errors"
"fmt"
"math/big"
"os"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/tests"
"gopkg.in/urfave/cli.v1"
)
type result struct {
Error error
Address common.Address
Hash common.Hash
}
// MarshalJSON marshals as JSON with a hash.
func (r *result) MarshalJSON() ([]byte, error) {
type xx struct {
Error string `json:"error,omitempty"`
Address *common.Address `json:"address,omitempty"`
Hash *common.Hash `json:"hash,omitempty"`
}
var out xx
if r.Error != nil {
out.Error = r.Error.Error()
}
if r.Address != (common.Address{}) {
out.Address = &r.Address
}
if r.Hash != (common.Hash{}) {
out.Hash = &r.Hash
}
return json.Marshal(out)
}
func Transaction(ctx *cli.Context) error {
// Configure the go-ethereum logger
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
var (
err error
)
// We need to load the transactions. They may come either from stdin or from a file.
// Check if anything needs to be read from stdin
var (
txStr = ctx.String(InputTxsFlag.Name)
inputData = &input{}
chainConfig *params.ChainConfig
)
// Construct the chainconfig
if cConf, _, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil {
return NewError(ErrorVMConfig, fmt.Errorf("failed constructing chain configuration: %v", err))
} else {
chainConfig = cConf
}
// Set the chain id
chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name))
var body hexutil.Bytes
if txStr == stdinSelector {
decoder := json.NewDecoder(os.Stdin)
if err := decoder.Decode(inputData); err != nil {
return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err))
}
// Decode the body of already signed transactions
body = common.FromHex(inputData.TxRlp)
} else {
// Read input from file
inFile, err := os.Open(txStr)
if err != nil {
return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err))
}
defer inFile.Close()
decoder := json.NewDecoder(inFile)
if strings.HasSuffix(txStr, ".rlp") {
if err := decoder.Decode(&body); err != nil {
return err
}
} else {
return NewError(ErrorIO, errors.New("only rlp supported"))
}
}
signer := types.MakeSigner(chainConfig, new(big.Int))
// We now have the transactions in 'body', which is supposed to be an
// rlp list of transactions
it, err := rlp.NewListIterator([]byte(body))
if err != nil {
return err
}
var results []result
for it.Next() {
var tx types.Transaction
err := rlp.DecodeBytes(it.Value(), &tx)
if err != nil {
results = append(results, result{Error: err})
continue
}
r := result{Hash: tx.Hash()}
if sender, err := types.Sender(signer, &tx); err != nil {
r.Error = err
results = append(results, r)
continue
} else {
r.Address = sender
}
if gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil,
chainConfig.IsHomestead(new(big.Int)), chainConfig.IsIstanbul(new(big.Int))); err != nil {
r.Error = err
} else if tx.Gas() < gas {
r.Error = fmt.Errorf("%w: have %d, want %d", core.ErrIntrinsicGas, tx.Gas(), gas)
}
results = append(results, r)
}
out, err := json.MarshalIndent(results, "", " ")
fmt.Println(string(out))
return err
}

View File

@ -25,6 +25,7 @@ import (
"math/big" "math/big"
"os" "os"
"path" "path"
"strings"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
@ -64,17 +65,23 @@ func (n *NumberedError) Error() string {
return fmt.Sprintf("ERROR(%d): %v", n.errorCode, n.err.Error()) return fmt.Sprintf("ERROR(%d): %v", n.errorCode, n.err.Error())
} }
func (n *NumberedError) Code() int { func (n *NumberedError) ExitCode() int {
return n.errorCode return n.errorCode
} }
// compile-time conformance test
var (
_ cli.ExitCoder = (*NumberedError)(nil)
)
type input struct { type input struct {
Alloc core.GenesisAlloc `json:"alloc,omitempty"` Alloc core.GenesisAlloc `json:"alloc,omitempty"`
Env *stEnv `json:"env,omitempty"` Env *stEnv `json:"env,omitempty"`
Txs []*txWithKey `json:"txs,omitempty"` Txs []*txWithKey `json:"txs,omitempty"`
TxRlp string `json:"txsRlp,omitempty"`
} }
func Main(ctx *cli.Context) error { func Transition(ctx *cli.Context) error {
// Configure the go-ethereum logger // Configure the go-ethereum logger
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
@ -100,10 +107,10 @@ func Main(ctx *cli.Context) error {
if ctx.Bool(TraceFlag.Name) { if ctx.Bool(TraceFlag.Name) {
// Configure the EVM logger // Configure the EVM logger
logConfig := &vm.LogConfig{ logConfig := &vm.LogConfig{
DisableStack: ctx.Bool(TraceDisableStackFlag.Name), DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
DisableMemory: ctx.Bool(TraceDisableMemoryFlag.Name), EnableMemory: !ctx.Bool(TraceDisableMemoryFlag.Name),
DisableReturnData: ctx.Bool(TraceDisableReturnDataFlag.Name), EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name),
Debug: true, Debug: true,
} }
var prevFile *os.File var prevFile *os.File
// This one closes the last file // This one closes the last file
@ -199,11 +206,44 @@ func Main(ctx *cli.Context) error {
} }
defer inFile.Close() defer inFile.Close()
decoder := json.NewDecoder(inFile) decoder := json.NewDecoder(inFile)
if err := decoder.Decode(&txsWithKeys); err != nil { if strings.HasSuffix(txStr, ".rlp") {
return NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err)) var body hexutil.Bytes
if err := decoder.Decode(&body); err != nil {
return err
}
var txs types.Transactions
if err := rlp.DecodeBytes(body, &txs); err != nil {
return err
}
for _, tx := range txs {
txsWithKeys = append(txsWithKeys, &txWithKey{
key: nil,
tx: tx,
})
}
} else {
if err := decoder.Decode(&txsWithKeys); err != nil {
return NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err))
}
} }
} else { } else {
txsWithKeys = inputData.Txs if len(inputData.TxRlp) > 0 {
// Decode the body of already signed transactions
body := common.FromHex(inputData.TxRlp)
var txs types.Transactions
if err := rlp.DecodeBytes(body, &txs); err != nil {
return err
}
for _, tx := range txs {
txsWithKeys = append(txsWithKeys, &txWithKey{
key: nil,
tx: tx,
})
}
} else {
// JSON encoded transactions
txsWithKeys = inputData.Txs
}
} }
// We may have to sign the transactions. // We may have to sign the transactions.
signer := types.MakeSigner(chainConfig, big.NewInt(int64(prestate.Env.Number))) signer := types.MakeSigner(chainConfig, big.NewInt(int64(prestate.Env.Number)))
@ -217,6 +257,20 @@ func Main(ctx *cli.Context) error {
return NewError(ErrorVMConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section")) return NewError(ErrorVMConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
} }
} }
if env := prestate.Env; env.Difficulty == nil {
// If difficulty was not provided by caller, we need to calculate it.
switch {
case env.ParentDifficulty == nil:
return NewError(ErrorVMConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty"))
case env.Number == 0:
return NewError(ErrorVMConfig, errors.New("currentDifficulty needs to be provided for block number 0"))
case env.Timestamp <= env.ParentTimestamp:
return NewError(ErrorVMConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)",
env.Timestamp, env.ParentTimestamp))
}
prestate.Env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp,
env.ParentTimestamp, env.ParentDifficulty, env.ParentUncleHash)
}
// Run the test and aggregate the result // Run the test and aggregate the result
s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer) s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer)
if err != nil { if err != nil {
@ -360,18 +414,20 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a
return err return err
} }
if len(stdOutObject) > 0 { if len(stdOutObject) > 0 {
b, err := json.MarshalIndent(stdOutObject, "", " ") b, err := json.MarshalIndent(stdOutObject, "", " ")
if err != nil { if err != nil {
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
} }
os.Stdout.Write(b) os.Stdout.Write(b)
os.Stdout.WriteString("\n")
} }
if len(stdErrObject) > 0 { if len(stdErrObject) > 0 {
b, err := json.MarshalIndent(stdErrObject, "", " ") b, err := json.MarshalIndent(stdErrObject, "", " ")
if err != nil { if err != nil {
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
} }
os.Stderr.Write(b) os.Stderr.Write(b)
os.Stderr.WriteString("\n")
} }
return nil return nil
} }

View File

@ -113,7 +113,7 @@ var (
Name: "receiver", Name: "receiver",
Usage: "The transaction receiver (execution context)", Usage: "The transaction receiver (execution context)",
} }
DisableMemoryFlag = cli.BoolFlag{ DisableMemoryFlag = cli.BoolTFlag{
Name: "nomemory", Name: "nomemory",
Usage: "disable memory output", Usage: "disable memory output",
} }
@ -125,9 +125,9 @@ var (
Name: "nostorage", Name: "nostorage",
Usage: "disable storage output", Usage: "disable storage output",
} }
DisableReturnDataFlag = cli.BoolFlag{ DisableReturnDataFlag = cli.BoolTFlag{
Name: "noreturndata", Name: "noreturndata",
Usage: "disable return data output", Usage: "enable return data output",
} }
) )
@ -135,7 +135,7 @@ var stateTransitionCommand = cli.Command{
Name: "transition", Name: "transition",
Aliases: []string{"t8n"}, Aliases: []string{"t8n"},
Usage: "executes a full state transition", Usage: "executes a full state transition",
Action: t8ntool.Main, Action: t8ntool.Transition,
Flags: []cli.Flag{ Flags: []cli.Flag{
t8ntool.TraceFlag, t8ntool.TraceFlag,
t8ntool.TraceDisableMemoryFlag, t8ntool.TraceDisableMemoryFlag,
@ -154,6 +154,18 @@ var stateTransitionCommand = cli.Command{
t8ntool.VerbosityFlag, t8ntool.VerbosityFlag,
}, },
} }
var transactionCommand = cli.Command{
Name: "transaction",
Aliases: []string{"t9n"},
Usage: "performs transaction validation",
Action: t8ntool.Transaction,
Flags: []cli.Flag{
t8ntool.InputTxsFlag,
t8ntool.ChainIDFlag,
t8ntool.ForknameFlag,
t8ntool.VerbosityFlag,
},
}
func init() { func init() {
app.Flags = []cli.Flag{ app.Flags = []cli.Flag{
@ -187,6 +199,7 @@ func init() {
runCommand, runCommand,
stateTestCommand, stateTestCommand,
stateTransitionCommand, stateTransitionCommand,
transactionCommand,
} }
cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate
} }
@ -195,7 +208,7 @@ func main() {
if err := app.Run(os.Args); err != nil { if err := app.Run(os.Args); err != nil {
code := 1 code := 1
if ec, ok := err.(*t8ntool.NumberedError); ok { if ec, ok := err.(*t8ntool.NumberedError); ok {
code = ec.Code() code = ec.ExitCode()
} }
fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr, err)
os.Exit(code) os.Exit(code)

View File

@ -108,11 +108,11 @@ func runCmd(ctx *cli.Context) error {
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name))) glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
log.Root().SetHandler(glogger) log.Root().SetHandler(glogger)
logconfig := &vm.LogConfig{ logconfig := &vm.LogConfig{
DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name), EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name),
DisableStack: ctx.GlobalBool(DisableStackFlag.Name), DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name), DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name),
DisableReturnData: ctx.GlobalBool(DisableReturnDataFlag.Name), EnableReturnData: !ctx.GlobalBool(DisableReturnDataFlag.Name),
Debug: ctx.GlobalBool(DebugFlag.Name), Debug: ctx.GlobalBool(DebugFlag.Name),
} }
var ( var (

View File

@ -59,10 +59,10 @@ func stateTestCmd(ctx *cli.Context) error {
// Configure the EVM logger // Configure the EVM logger
config := &vm.LogConfig{ config := &vm.LogConfig{
DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name), EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name),
DisableStack: ctx.GlobalBool(DisableStackFlag.Name), DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name), DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name),
DisableReturnData: ctx.GlobalBool(DisableReturnDataFlag.Name), EnableReturnData: !ctx.GlobalBool(DisableReturnDataFlag.Name),
} }
var ( var (
tracer vm.Tracer tracer vm.Tracer

300
cmd/evm/t8n_test.go Normal file
View File

@ -0,0 +1,300 @@
package main
import (
"encoding/json"
"fmt"
"os"
"reflect"
"strings"
"testing"
"github.com/docker/docker/pkg/reexec"
"github.com/ethereum/go-ethereum/internal/cmdtest"
)
func TestMain(m *testing.M) {
// Run the app if we've been exec'd as "evm-test" via tt.Run below.
reexec.Register("evm-test", func() {
if err := app.Run(os.Args); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
os.Exit(0)
})
// check if we have been reexec'd
if reexec.Init() {
return
}
os.Exit(m.Run())
}
type testT8n struct {
*cmdtest.TestCmd
}
type t8nInput struct {
inAlloc string
inTxs string
inEnv string
stFork string
stReward string
}
func (args *t8nInput) get(base string) []string {
var out []string
if opt := args.inAlloc; opt != "" {
out = append(out, "--input.alloc")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
if opt := args.inTxs; opt != "" {
out = append(out, "--input.txs")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
if opt := args.inEnv; opt != "" {
out = append(out, "--input.env")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
if opt := args.stFork; opt != "" {
out = append(out, "--state.fork", opt)
}
if opt := args.stReward; opt != "" {
out = append(out, "--state.reward", opt)
}
return out
}
type t8nOutput struct {
alloc bool
result bool
body bool
}
func (args *t8nOutput) get() (out []string) {
if args.body {
out = append(out, "--output.body", "stdout")
} else {
out = append(out, "--output.body", "") // empty means ignore
}
if args.result {
out = append(out, "--output.result", "stdout")
} else {
out = append(out, "--output.result", "")
}
if args.alloc {
out = append(out, "--output.alloc", "stdout")
} else {
out = append(out, "--output.alloc", "")
}
return out
}
func TestT8n(t *testing.T) {
tt := new(testT8n)
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
for i, tc := range []struct {
base string
input t8nInput
output t8nOutput
expExitCode int
expOut string
}{
{ // Test exit (3) on bad config
base: "./testdata/1",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Frontier+1346", "",
},
output: t8nOutput{alloc: true, result: true},
expExitCode: 3,
},
{
base: "./testdata/1",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Byzantium", "",
},
output: t8nOutput{alloc: true, result: true},
expOut: "exp.json",
},
{ // blockhash test
base: "./testdata/3",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Berlin", "",
},
output: t8nOutput{alloc: true, result: true},
expOut: "exp.json",
},
{ // missing blockhash test
base: "./testdata/4",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Berlin", "",
},
output: t8nOutput{alloc: true, result: true},
expExitCode: 4,
},
{ // Ommer test
base: "./testdata/5",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Byzantium", "0x80",
},
output: t8nOutput{alloc: true, result: true},
expOut: "exp.json",
},
{ // Sign json transactions
base: "./testdata/13",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "London", "",
},
output: t8nOutput{body: true},
expOut: "exp.json",
},
{ // Already signed transactions
base: "./testdata/13",
input: t8nInput{
"alloc.json", "signed_txs.rlp", "env.json", "London", "",
},
output: t8nOutput{result: true},
expOut: "exp2.json",
},
{ // Difficulty calculation - no uncles
base: "./testdata/14",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "London", "",
},
output: t8nOutput{result: true},
expOut: "exp.json",
},
{ // Difficulty calculation - with uncles
base: "./testdata/14",
input: t8nInput{
"alloc.json", "txs.json", "env.uncles.json", "London", "",
},
output: t8nOutput{result: true},
expOut: "exp2.json",
},
} {
args := []string{"t8n"}
args = append(args, tc.output.get()...)
args = append(args, tc.input.get(tc.base)...)
tt.Run("evm-test", args...)
tt.Logf("args: %v\n", strings.Join(args, " "))
// Compare the expected output, if provided
if tc.expOut != "" {
want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut))
if err != nil {
t.Fatalf("test %d: could not read expected output: %v", i, err)
}
have := tt.Output()
ok, err := cmpJson(have, want)
switch {
case err != nil:
t.Fatalf("test %d, json parsing failed: %v", i, err)
case !ok:
t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
}
}
tt.WaitExit()
if have, want := tt.ExitStatus(), tc.expExitCode; have != want {
t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want)
}
}
}
type t9nInput struct {
inTxs string
stFork string
}
func (args *t9nInput) get(base string) []string {
var out []string
if opt := args.inTxs; opt != "" {
out = append(out, "--input.txs")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
if opt := args.stFork; opt != "" {
out = append(out, "--state.fork", opt)
}
return out
}
func TestT9n(t *testing.T) {
tt := new(testT8n)
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
for i, tc := range []struct {
base string
input t9nInput
expExitCode int
expOut string
}{
{ // London txs on homestead
base: "./testdata/15",
input: t9nInput{
inTxs: "signed_txs.rlp",
stFork: "Homestead",
},
expOut: "exp.json",
},
{ // London txs on London
base: "./testdata/15",
input: t9nInput{
inTxs: "signed_txs.rlp",
stFork: "London",
},
expOut: "exp2.json",
},
{ // An RLP list (a blockheader really)
base: "./testdata/15",
input: t9nInput{
inTxs: "blockheader.rlp",
stFork: "London",
},
expOut: "exp3.json",
},
{ // Transactions with too low gas
base: "./testdata/16",
input: t9nInput{
inTxs: "signed_txs.rlp",
stFork: "London",
},
expOut: "exp.json",
},
} {
args := []string{"t9n"}
args = append(args, tc.input.get(tc.base)...)
tt.Run("evm-test", args...)
tt.Logf("args:\n go run . %v\n", strings.Join(args, " "))
// Compare the expected output, if provided
if tc.expOut != "" {
want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut))
if err != nil {
t.Fatalf("test %d: could not read expected output: %v", i, err)
}
have := tt.Output()
ok, err := cmpJson(have, want)
switch {
case err != nil:
t.Logf(string(have))
t.Fatalf("test %d, json parsing failed: %v", i, err)
case !ok:
t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
}
}
tt.WaitExit()
if have, want := tt.ExitStatus(), tc.expExitCode; have != want {
t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want)
}
}
}
// cmpJson compares the JSON in two byte slices.
func cmpJson(a, b []byte) (bool, error) {
var j, j2 interface{}
if err := json.Unmarshal(a, &j); err != nil {
return false, err
}
if err := json.Unmarshal(b, &j2); err != nil {
return false, err
}
return reflect.DeepEqual(j2, j), nil
}

43
cmd/evm/testdata/1/exp.json vendored Normal file
View File

@ -0,0 +1,43 @@
{
"alloc": {
"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": {
"balance": "0xfeed1a9d",
"nonce": "0x1"
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x5ffd4878be161d74",
"nonce": "0xac"
},
"0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0xa410"
}
},
"result": {
"stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13",
"txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d",
"receiptRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [
{
"root": "0x",
"status": "0x1",
"cumulativeGasUsed": "0x5208",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null,
"transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673",
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x5208",
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"transactionIndex": "0x0"
}
],
"rejected": [
{
"index": 1,
"error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
}
],
"currentDifficulty": "0x20000"
}
}

11
cmd/evm/testdata/12/alloc.json vendored Normal file
View File

@ -0,0 +1,11 @@
{
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
"balance" : "84000000",
"code" : "0x",
"nonce" : "0x00",
"storage" : {
"0x00" : "0x00"
}
}
}

10
cmd/evm/testdata/12/env.json vendored Normal file
View File

@ -0,0 +1,10 @@
{
"currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"currentDifficulty" : "0x020000",
"currentNumber" : "0x01",
"currentTimestamp" : "0x03e8",
"previousHash" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2",
"currentGasLimit" : "0x0f4240",
"currentBaseFee" : "0x20"
}

40
cmd/evm/testdata/12/readme.md vendored Normal file
View File

@ -0,0 +1,40 @@
## Test 1559 balance + gasCap
This test reproduces an EIP-1559 consensus issue which occurred on Ropsten, where
`geth` did not properly account for the value transfer when checking the sender balance against `max_fee_per_gas * gas_limit`.
Before the issue was fixed, this invocation allowed the transaction to pass into a block:
```
dir=./testdata/12 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout
```
With the fix applied, the result is:
```
dir=./testdata/12 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout
INFO [07-21|19:03:50.276] rejected tx index=0 hash=ccc996..d83435 from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="insufficient funds for gas * price + value: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032"
INFO [07-21|19:03:50.276] Trie dumping started root=e05f81..6597a5
INFO [07-21|19:03:50.276] Trie dumping complete accounts=1 elapsed="39.549µs"
{
"alloc": {
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x501bd00"
}
},
"result": {
"stateRoot": "0xe05f81f8244a76503ceec6f88abfcd03047a612a1001217f37d30984536597a5",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"rejected": [
{
"index": 0,
"error": "insufficient funds for gas * price + value: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032"
}
]
}
}
```
The transaction is rejected.
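For reference, the numbers in the error message can be reproduced by hand. The sketch below (plain Go, using the values from this folder's `alloc.json` and `txs.json`) illustrates the prepayment rule the fix enforces; it is only an illustration, not the actual geth code:
```
package main

import "fmt"

func main() {
	gasLimit := uint64(0x5208)    // 21000, from txs.json
	maxFeePerGas := uint64(0xfa0) // 4000, from txs.json
	value := uint64(0x20)         // 32, from txs.json
	balance := uint64(84000000)   // sender balance, from alloc.json

	// EIP-1559 prepayment check: the sender must cover gas_limit * max_fee_per_gas
	// plus the transferred value up front.
	required := gasLimit*maxFeePerGas + value
	fmt.Printf("have %d want %d rejected=%v\n", balance, required, balance < required)
	// prints: have 84000000 want 84000032 rejected=true
}
```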

20
cmd/evm/testdata/12/txs.json vendored Normal file
View File

@ -0,0 +1,20 @@
[
{
"input" : "0x",
"gas" : "0x5208",
"nonce" : "0x0",
"to" : "0x1111111111111111111111111111111111111111",
"value" : "0x20",
"v" : "0x0",
"r" : "0x0",
"s" : "0x0",
"secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
"chainId" : "0x1",
"type" : "0x2",
"maxFeePerGas" : "0xfa0",
"maxPriorityFeePerGas" : "0x20",
"accessList" : [
]
}
]

23
cmd/evm/testdata/13/alloc.json vendored Normal file
View File

@ -0,0 +1,23 @@
{
"0x1111111111111111111111111111111111111111" : {
"balance" : "0x010000000000",
"code" : "0xfe",
"nonce" : "0x01",
"storage" : {
}
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
"balance" : "0x010000000000",
"code" : "0x",
"nonce" : "0x01",
"storage" : {
}
},
"0xd02d72e067e77158444ef2020ff2d325f929b363" : {
"balance" : "0x01000000000000",
"code" : "0x",
"nonce" : "0x01",
"storage" : {
}
}
}

12
cmd/evm/testdata/13/env.json vendored Normal file
View File

@ -0,0 +1,12 @@
{
"currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"currentDifficulty" : "0x020000",
"currentNumber" : "0x01",
"currentTimestamp" : "0x079e",
"previousHash" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f",
"currentGasLimit" : "0x40000000",
"currentBaseFee" : "0x036b",
"blockHashes" : {
"0" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f"
}
}

3
cmd/evm/testdata/13/exp.json vendored Normal file
View File

@ -0,0 +1,3 @@
{
"body": "0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9"
}

38
cmd/evm/testdata/13/exp2.json vendored Normal file
View File

@ -0,0 +1,38 @@
{
"result": {
"stateRoot": "0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61",
"txRoot": "0x013509c8563d41c0ae4bf38f2d6d19fc6512a1d0d6be045079c8c9f68bf45f9d",
"receiptRoot": "0xa532a08aa9f62431d6fe5d924951b8efb86ed3c54d06fee77788c3767dd13420",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [
{
"type": "0x2",
"root": "0x",
"status": "0x0",
"cumulativeGasUsed": "0x84d0",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null,
"transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x84d0",
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"transactionIndex": "0x0"
},
{
"type": "0x2",
"root": "0x",
"status": "0x0",
"cumulativeGasUsed": "0x109a0",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null,
"transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x84d0",
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"transactionIndex": "0x1"
}
],
"currentDifficulty": "0x20000"
}
}

4
cmd/evm/testdata/13/readme.md vendored Normal file
View File

@ -0,0 +1,4 @@
## Input transactions in RLP form
This testdata folder is used to exemplify how transaction input can be provided in RLP form.
Please see the README in the `evm` folder for how this is done.
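As a rough illustration of what the `rlp` input contains, the JSON-quoted hex string in `signed_txs.rlp` can be decoded back into transactions with go-ethereum's `hexutil`, `rlp` and `types` packages. This is only a sketch of the idea, not part of the test harness:
```
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	raw, err := os.ReadFile("signed_txs.rlp")
	if err != nil {
		panic(err)
	}
	var body hexutil.Bytes
	if err := json.Unmarshal(raw, &body); err != nil { // the file content is a JSON string
		panic(err)
	}
	var txs types.Transactions
	if err := rlp.DecodeBytes(body, &txs); err != nil { // RLP list of (typed) transactions
		panic(err)
	}
	for _, tx := range txs {
		fmt.Println(tx.Hash().Hex())
	}
}
```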

1
cmd/evm/testdata/13/signed_txs.rlp vendored Normal file
View File

@ -0,0 +1 @@
"0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9"

34
cmd/evm/testdata/13/txs.json vendored Normal file
View File

@ -0,0 +1,34 @@
[
{
"input" : "0x",
"gas" : "0x84d0",
"nonce" : "0x1",
"to" : "0x1111111111111111111111111111111111111111",
"value" : "0x0",
"v" : "0x0",
"r" : "0x0",
"s" : "0x0",
"secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298",
"chainId" : "0x1",
"type" : "0x2",
"maxFeePerGas" : "0xfa0",
"maxPriorityFeePerGas" : "0x0",
"accessList" : []
},
{
"input" : "0x",
"gas" : "0x84d0",
"nonce" : "0x2",
"to" : "0x1111111111111111111111111111111111111111",
"value" : "0x0",
"v" : "0x0",
"r" : "0x0",
"s" : "0x0",
"secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298",
"chainId" : "0x1",
"type" : "0x2",
"maxFeePerGas" : "0xfa0",
"maxPriorityFeePerGas" : "0x0",
"accessList" : []
}
]

12
cmd/evm/testdata/14/alloc.json vendored Normal file
View File

@ -0,0 +1,12 @@
{
"a94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x5ffd4878be161d74",
"code": "0x",
"nonce": "0xac",
"storage": {}
},
"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{
"balance": "0xfeedbead",
"nonce" : "0x00"
}
}

9
cmd/evm/testdata/14/env.json vendored Normal file
View File

@ -0,0 +1,9 @@
{
"currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"currentGasLimit": "0x750a163df65e8a",
"currentBaseFee": "0x500",
"currentNumber": "12800000",
"currentTimestamp": "100015",
"parentTimestamp" : "99999",
"parentDifficulty" : "0x2000000000000"
}

10
cmd/evm/testdata/14/env.uncles.json vendored Normal file
View File

@ -0,0 +1,10 @@
{
"currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"currentGasLimit": "0x750a163df65e8a",
"currentBaseFee": "0x500",
"currentNumber": "12800000",
"currentTimestamp": "100035",
"parentTimestamp" : "99999",
"parentDifficulty" : "0x2000000000000",
"parentUncleHash" : "0x000000000000000000000000000000000000000000000000000000000000beef"
}

11
cmd/evm/testdata/14/exp.json vendored Normal file
View File

@ -0,0 +1,11 @@
{
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"currentDifficulty": "0x2000020000000",
"receipts": []
}
}

11
cmd/evm/testdata/14/exp2.json vendored Normal file
View File

@ -0,0 +1,11 @@
{
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"currentDifficulty": "0x1ff8020000000"
}
}

41
cmd/evm/testdata/14/readme.md vendored Normal file
View File

@ -0,0 +1,41 @@
## Difficulty calculation
This test shows how `evm t8n` can be used to calculate the (ethash) difficulty, if none is provided by the caller.
Calculating it (with an empty set of txs) using `London` rules (and no provided unclehash for the parent block):
```
[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.json --output.result=stdout --state.fork=London
INFO [08-30|20:43:09.352] Trie dumping started root=6f0588..7f4bdc
INFO [08-30|20:43:09.352] Trie dumping complete accounts=2 elapsed="82.533µs"
INFO [08-30|20:43:09.352] Wrote file file=alloc.json
{
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"currentDifficulty": "0x2000020000000"
}
}
```
Same thing, but this time providing a non-empty (and non-`emptyKeccak`) unclehash, which leads to a slightly different result:
```
[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.uncles.json --output.result=stdout --state.fork=London
INFO [08-30|20:44:33.102] Trie dumping started root=6f0588..7f4bdc
INFO [08-30|20:44:33.102] Trie dumping complete accounts=2 elapsed="72.91µs"
INFO [08-30|20:44:33.102] Wrote file file=alloc.json
{
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"currentDifficulty": "0x1ff8020000000"
}
}
```
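For reference, both `currentDifficulty` values can be reproduced by hand from the `env` files: the parent difficulty is adjusted by `parentDifficulty/2048` times an EIP-100 factor derived from the timestamp delta and the uncle flag, and the London-delayed difficulty bomb adds `2^29` at this block height. The sketch below is a simplified illustration of that arithmetic, not geth's consensus implementation:
```
package main

import "fmt"

// calc is a simplified form of the ethash difficulty rules, enough for these two cases.
func calc(parentDiff, parentTime, currentTime, number int64, parentHasUncles bool) int64 {
	x := int64(1) // EIP-100: 2 if the parent has uncles, otherwise 1
	if parentHasUncles {
		x = 2
	}
	adj := x - (currentTime-parentTime)/9
	if adj < -99 {
		adj = -99
	}
	diff := parentDiff + parentDiff/2048*adj
	// Difficulty bomb, delayed by 9700000 blocks as of London.
	if period := (number-9700000)/100000 - 2; period >= 0 {
		diff += 1 << uint(period)
	}
	return diff
}

func main() {
	parent := int64(0x2000000000000)
	fmt.Printf("%#x\n", calc(parent, 99999, 100015, 12800000, false)) // 0x2000020000000
	fmt.Printf("%#x\n", calc(parent, 99999, 100035, 12800000, true))  // 0x1ff8020000000
}
```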

1
cmd/evm/testdata/14/txs.json vendored Normal file
View File

@ -0,0 +1 @@
[]

1
cmd/evm/testdata/15/blockheader.rlp vendored Normal file
View File

@ -0,0 +1 @@
"0xf901f0a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007b0101020383010203a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"

10
cmd/evm/testdata/15/exp.json vendored Normal file
View File

@ -0,0 +1,10 @@
[
{
"error": "transaction type not supported",
"hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476"
},
{
"error": "transaction type not supported",
"hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a"
}
]

10
cmd/evm/testdata/15/exp2.json vendored Normal file
View File

@ -0,0 +1,10 @@
[
{
"address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
"hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476"
},
{
"address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
"hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a"
}
]

47
cmd/evm/testdata/15/exp3.json vendored Normal file
View File

@ -0,0 +1,47 @@
[
{
"error": "transaction type not supported"
},
{
"error": "transaction type not supported"
},
{
"error": "transaction type not supported"
},
{
"error": "transaction type not supported"
},
{
"error": "transaction type not supported"
},
{
"error": "transaction type not supported"
},
{
"error": "transaction type not supported"
},
{
"error": "rlp: expected List"
},
{
"error": "rlp: expected List"
},
{
"error": "rlp: expected List"
},
{
"error": "rlp: expected List"
},
{
"error": "rlp: expected List"
},
{
"error": "rlp: expected input list for types.AccessListTx"
},
{
"error": "transaction type not supported"
},
{
"error": "transaction type not supported"
}
]

1
cmd/evm/testdata/15/signed_txs.rlp vendored Normal file
View File

@ -0,0 +1 @@
"0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9"

View File

@ -0,0 +1,4 @@
{
"txsRlp" : "0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9"
}

11
cmd/evm/testdata/16/exp.json vendored Normal file
View File

@ -0,0 +1,11 @@
[
{
"address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"hash": "0x7cc3d1a8540a44736750f03bb4d85c0113be4b3472a71bf82241a3b261b479e6"
},
{
"error": "intrinsic gas too low: have 82, want 21000",
"address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"hash": "0x3b2d2609e4361562edb9169314f4c05afc6dbf5d706bf9dda5abe242ab76a22b"
}
]

1
cmd/evm/testdata/16/signed_txs.rlp vendored Normal file
View File

@ -0,0 +1 @@
"0xf8cab86401f8610180018252089411111111111111111111111111111111111111112080c001a0937f65ef1deece46c473b99962678fb7c38425cf303d1e8fa9717eb4b9d012b5a01940c5a5647c4940217ffde1051a5fd92ec8551e275c1787f81f50a2ad84de43b86201f85f018001529411111111111111111111111111111111111111112080c001a0241c3aec732205542a87fef8c76346741e85480bce5a42d05a9a73dac892f84ca04f52e2dfce57f3a02ed10e085e1a154edf38a726da34127c85fc53b4921759c8"

34
cmd/evm/testdata/16/unsigned_txs.json vendored Normal file
View File

@ -0,0 +1,34 @@
[
{
"input" : "0x",
"gas" : "0x5208",
"nonce" : "0x0",
"to" : "0x1111111111111111111111111111111111111111",
"value" : "0x20",
"v" : "0x0",
"r" : "0x0",
"s" : "0x0",
"secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
"chainId" : "0x1",
"type" : "0x1",
"gasPrice": "0x1",
"accessList" : [
]
},
{
"input" : "0x",
"gas" : "0x52",
"nonce" : "0x0",
"to" : "0x1111111111111111111111111111111111111111",
"value" : "0x20",
"v" : "0x0",
"r" : "0x0",
"s" : "0x0",
"secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
"chainId" : "0x1",
"type" : "0x1",
"gasPrice": "0x1",
"accessList" : [
]
}
]

37
cmd/evm/testdata/3/exp.json vendored Normal file
View File

@ -0,0 +1,37 @@
{
"alloc": {
"0x095e7baea6a6c7c4c2dfeb977efac326af552d87": {
"code": "0x600140",
"balance": "0xde0b6b3a76586a0"
},
"0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": {
"balance": "0x521f"
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0xde0b6b3a7622741",
"nonce": "0x1"
}
},
"result": {
"stateRoot": "0xb7341da3f9f762a6884eaa186c32942734c146b609efee11c4b0214c44857ea1",
"txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b",
"receiptRoot": "0xd0d26df80374a327c025d405ebadc752b1bbd089d864801ae78ab704bcad8086",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [
{
"root": "0x",
"status": "0x1",
"cumulativeGasUsed": "0x521f",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null,
"transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81",
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x521f",
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"transactionIndex": "0x0"
}
],
"currentDifficulty": "0x20000"
}
}

22
cmd/evm/testdata/5/exp.json vendored Normal file
View File

@ -0,0 +1,22 @@
{
"alloc": {
"0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": {
"balance": "0x88"
},
"0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": {
"balance": "0x70"
},
"0xcccccccccccccccccccccccccccccccccccccccc": {
"balance": "0x60"
}
},
"result": {
"stateRoot": "0xa7312add33811645c6aa65d928a1a4f49d65d448801912c069a0aa8fe9c1f393",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"currentDifficulty": "0x20000"
}
}

View File

@ -11,6 +11,8 @@ function showjson(){
function demo(){ function demo(){
echo "$ticks" echo "$ticks"
echo "$1" echo "$1"
$1
echo ""
echo "$ticks" echo "$ticks"
echo "" echo ""
} }
@ -152,9 +154,7 @@ echo ""
echo "The \`BLOCKHASH\` opcode requires blockhashes to be provided by the caller, inside the \`env\`." echo "The \`BLOCKHASH\` opcode requires blockhashes to be provided by the caller, inside the \`env\`."
echo "If a required blockhash is not provided, the exit code should be \`4\`:" echo "If a required blockhash is not provided, the exit code should be \`4\`:"
echo "Example where blockhashes are provided: " echo "Example where blockhashes are provided: "
cmd="./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace" demo "./evm --verbosity=1 t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace"
tick && echo $cmd && tick
$cmd 2>&1 >/dev/null
cmd="cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2" cmd="cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2"
tick && echo $cmd && tick tick && echo $cmd && tick
echo "$ticks" echo "$ticks"
@ -164,13 +164,11 @@ echo ""
echo "In this example, the caller has not provided the required blockhash:" echo "In this example, the caller has not provided the required blockhash:"
cmd="./evm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace" cmd="./evm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace"
tick && echo $cmd && tick tick && echo $cmd && $cmd
tick
$cmd
errc=$? errc=$?
tick tick
echo "Error code: $errc" echo "Error code: $errc"
echo ""
echo "### Chaining" echo "### Chaining"
echo "" echo ""
@ -189,3 +187,28 @@ echo ""
echo "In order to meaningfully chain invocations, one would need to provide meaningful new \`env\`, otherwise the" echo "In order to meaningfully chain invocations, one would need to provide meaningful new \`env\`, otherwise the"
echo "actual blocknumber (exposed to the EVM) would not increase." echo "actual blocknumber (exposed to the EVM) would not increase."
echo "" echo ""
echo "### Transactions in RLP form"
echo ""
echo "It is possible to provide already-signed transactions as input to, using an \`input.txs\` which ends with the \`rlp\` suffix."
echo "The input format for RLP-form transactions is _identical_ to the _output_ format for block bodies. Therefore, it's fully possible"
echo "to use the evm to go from \`json\` input to \`rlp\` input."
echo ""
echo "The following command takes **json** the transactions in \`./testdata/13/txs.json\` and signs them. After execution, they are output to \`signed_txs.rlp\`.:"
demo "./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./testdata/13/txs.json --input.env=./testdata/13/env.json --output.result=alloc_jsontx.json --output.body=signed_txs.rlp"
echo "The \`output.body\` is the rlp-list of transactions, encoded in hex and placed in a string a'la \`json\` encoding rules:"
demo "cat signed_txs.rlp"
echo "We can use \`rlpdump\` to check what the contents are: "
echo "$ticks"
echo "rlpdump -hex \$(cat signed_txs.rlp | jq -r )"
rlpdump -hex $(cat signed_txs.rlp | jq -r )
echo "$ticks"
echo "Now, we can now use those (or any other already signed transactions), as input, like so: "
demo "./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./signed_txs.rlp --input.env=./testdata/13/env.json --output.result=alloc_rlptx.json"
echo "You might have noticed that the results from these two invocations were stored in two separate files. "
echo "And we can now finally check that they match."
echo "$ticks"
echo "cat alloc_jsontx.json | jq .stateRoot && cat alloc_rlptx.json | jq .stateRoot"
cat alloc_jsontx.json | jq .stateRoot && cat alloc_rlptx.json | jq .stateRoot
echo "$ticks"

View File

@ -741,7 +741,7 @@ func authTwitter(url string, tokenV1, tokenV2 string) (string, string, string, c
return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund") return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund")
} }
var avatar string var avatar string
if parts = regexp.MustCompile("src=\"([^\"]+twimg.com/profile_images[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 { if parts = regexp.MustCompile(`src="([^"]+twimg\.com/profile_images[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 {
avatar = parts[1] avatar = parts[1]
} }
return username + "@twitter", username, avatar, address, nil return username + "@twitter", username, avatar, address, nil
@ -867,7 +867,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
return "", "", common.Address{}, errors.New("No Ethereum address found to fund") return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
} }
var avatar string var avatar string
if parts = regexp.MustCompile("src=\"([^\"]+fbcdn.net[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 { if parts = regexp.MustCompile(`src="([^"]+fbcdn\.net[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 {
avatar = parts[1] avatar = parts[1]
} }
return username + "@facebook", avatar, address, nil return username + "@facebook", avatar, address, nil

View File

@ -268,11 +268,16 @@ func accountCreate(ctx *cli.Context) error {
} }
} }
utils.SetNodeConfig(ctx, &cfg.Node) utils.SetNodeConfig(ctx, &cfg.Node)
scryptN, scryptP, keydir, err := cfg.Node.AccountConfig() keydir, err := cfg.Node.KeyDirConfig()
if err != nil { if err != nil {
utils.Fatalf("Failed to read configuration: %v", err) utils.Fatalf("Failed to read configuration: %v", err)
} }
scryptN := keystore.StandardScryptN
scryptP := keystore.StandardScryptP
if cfg.Node.UseLightweightKDF {
scryptN = keystore.LightScryptN
scryptP = keystore.LightScryptP
}
password := utils.GetPassPhraseWithList("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx)) password := utils.GetPassPhraseWithList("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))

View File

@ -68,7 +68,6 @@ It expects the genesis file as argument.`,
utils.RopstenFlag, utils.RopstenFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
utils.CalaverasFlag,
}, },
Category: "BLOCKCHAIN COMMANDS", Category: "BLOCKCHAIN COMMANDS",
Description: ` Description: `
@ -92,11 +91,15 @@ The dumpgenesis command dumps the genesis block configuration in JSON format to
utils.MetricsHTTPFlag, utils.MetricsHTTPFlag,
utils.MetricsPortFlag, utils.MetricsPortFlag,
utils.MetricsEnableInfluxDBFlag, utils.MetricsEnableInfluxDBFlag,
utils.MetricsEnableInfluxDBV2Flag,
utils.MetricsInfluxDBEndpointFlag, utils.MetricsInfluxDBEndpointFlag,
utils.MetricsInfluxDBDatabaseFlag, utils.MetricsInfluxDBDatabaseFlag,
utils.MetricsInfluxDBUsernameFlag, utils.MetricsInfluxDBUsernameFlag,
utils.MetricsInfluxDBPasswordFlag, utils.MetricsInfluxDBPasswordFlag,
utils.MetricsInfluxDBTagsFlag, utils.MetricsInfluxDBTagsFlag,
utils.MetricsInfluxDBTokenFlag,
utils.MetricsInfluxDBBucketFlag,
utils.MetricsInfluxDBOrganizationFlag,
utils.TxLookupLimitFlag, utils.TxLookupLimitFlag,
}, },
Category: "BLOCKCHAIN COMMANDS", Category: "BLOCKCHAIN COMMANDS",

View File

@ -27,6 +27,10 @@ import (
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
"github.com/ethereum/go-ethereum/accounts/external"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/accounts/scwallet"
"github.com/ethereum/go-ethereum/accounts/usbwallet"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
@ -135,6 +139,11 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
if err != nil { if err != nil {
utils.Fatalf("Failed to create the protocol stack: %v", err) utils.Fatalf("Failed to create the protocol stack: %v", err)
} }
// Node doesn't by default populate account manager backends
if err := setAccountManagerBackends(stack); err != nil {
utils.Fatalf("Failed to set account manager backends: %v", err)
}
utils.SetEthConfig(ctx, stack, &cfg.Eth) utils.SetEthConfig(ctx, stack, &cfg.Eth)
if ctx.GlobalIsSet(utils.EthStatsURLFlag.Name) { if ctx.GlobalIsSet(utils.EthStatsURLFlag.Name) {
cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name) cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name)
@ -233,6 +242,18 @@ func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) {
if ctx.GlobalIsSet(utils.MetricsInfluxDBTagsFlag.Name) { if ctx.GlobalIsSet(utils.MetricsInfluxDBTagsFlag.Name) {
cfg.Metrics.InfluxDBTags = ctx.GlobalString(utils.MetricsInfluxDBTagsFlag.Name) cfg.Metrics.InfluxDBTags = ctx.GlobalString(utils.MetricsInfluxDBTagsFlag.Name)
} }
if ctx.GlobalIsSet(utils.MetricsEnableInfluxDBV2Flag.Name) {
cfg.Metrics.EnableInfluxDBV2 = ctx.GlobalBool(utils.MetricsEnableInfluxDBV2Flag.Name)
}
if ctx.GlobalIsSet(utils.MetricsInfluxDBTokenFlag.Name) {
cfg.Metrics.InfluxDBToken = ctx.GlobalString(utils.MetricsInfluxDBTokenFlag.Name)
}
if ctx.GlobalIsSet(utils.MetricsInfluxDBBucketFlag.Name) {
cfg.Metrics.InfluxDBBucket = ctx.GlobalString(utils.MetricsInfluxDBBucketFlag.Name)
}
if ctx.GlobalIsSet(utils.MetricsInfluxDBOrganizationFlag.Name) {
cfg.Metrics.InfluxDBOrganization = ctx.GlobalString(utils.MetricsInfluxDBOrganizationFlag.Name)
}
} }
func deprecated(field string) bool { func deprecated(field string) bool {
@ -245,3 +266,62 @@ func deprecated(field string) bool {
return false return false
} }
} }
func setAccountManagerBackends(stack *node.Node) error {
conf := stack.Config()
am := stack.AccountManager()
keydir := stack.KeyStoreDir()
scryptN := keystore.StandardScryptN
scryptP := keystore.StandardScryptP
if conf.UseLightweightKDF {
scryptN = keystore.LightScryptN
scryptP = keystore.LightScryptP
}
// Assemble the supported backends
if len(conf.ExternalSigner) > 0 {
log.Info("Using external signer", "url", conf.ExternalSigner)
if extapi, err := external.NewExternalBackend(conf.ExternalSigner); err == nil {
am.AddBackend(extapi)
return nil
} else {
return fmt.Errorf("error connecting to external signer: %v", err)
}
}
// For now, we're using EITHER external signer OR local signers.
// If/when we implement some form of lockfile for USB and keystore wallets,
// we can have both, but it's very confusing for the user to see the same
	// accounts both externally and locally, plus it is very racy.
am.AddBackend(keystore.NewKeyStore(keydir, scryptN, scryptP))
if conf.USB {
// Start a USB hub for Ledger hardware wallets
if ledgerhub, err := usbwallet.NewLedgerHub(); err != nil {
log.Warn(fmt.Sprintf("Failed to start Ledger hub, disabling: %v", err))
} else {
am.AddBackend(ledgerhub)
}
// Start a USB hub for Trezor hardware wallets (HID version)
if trezorhub, err := usbwallet.NewTrezorHubWithHID(); err != nil {
log.Warn(fmt.Sprintf("Failed to start HID Trezor hub, disabling: %v", err))
} else {
am.AddBackend(trezorhub)
}
// Start a USB hub for Trezor hardware wallets (WebUSB version)
if trezorhub, err := usbwallet.NewTrezorHubWithWebUSB(); err != nil {
log.Warn(fmt.Sprintf("Failed to start WebUSB Trezor hub, disabling: %v", err))
} else {
am.AddBackend(trezorhub)
}
}
if len(conf.SmartCardDaemonPath) > 0 {
// Start a smart card hub
if schub, err := scwallet.NewHub(conf.SmartCardDaemonPath, scwallet.Scheme, keydir); err != nil {
log.Warn(fmt.Sprintf("Failed to start smart card hub, disabling: %v", err))
} else {
am.AddBackend(schub)
}
}
return nil
}

View File

@ -134,8 +134,6 @@ func remoteConsole(ctx *cli.Context) error {
path = filepath.Join(path, "rinkeby") path = filepath.Join(path, "rinkeby")
} else if ctx.GlobalBool(utils.GoerliFlag.Name) { } else if ctx.GlobalBool(utils.GoerliFlag.Name) {
path = filepath.Join(path, "goerli") path = filepath.Join(path, "goerli")
} else if ctx.GlobalBool(utils.CalaverasFlag.Name) {
path = filepath.Join(path, "calaveras")
} }
} }
endpoint = fmt.Sprintf("%s/geth.ipc", path) endpoint = fmt.Sprintf("%s/geth.ipc", path)

View File

@ -75,7 +75,7 @@ at block: 0 ({{niltime}})
datadir: {{.Datadir}} datadir: {{.Datadir}}
modules: {{apis}} modules: {{apis}}
To exit, press ctrl-d To exit, press ctrl-d or type exit
> {{.InputLine "exit"}} > {{.InputLine "exit"}}
`) `)
geth.ExpectExit() geth.ExpectExit()
@ -149,7 +149,7 @@ at block: 0 ({{niltime}}){{if ipc}}
datadir: {{datadir}}{{end}} datadir: {{datadir}}{{end}}
modules: {{apis}} modules: {{apis}}
To exit, press ctrl-d To exit, press ctrl-d or type exit
> {{.InputLine "exit" }} > {{.InputLine "exit" }}
`) `)
attach.ExpectExit() attach.ExpectExit()

View File

@ -75,7 +75,6 @@ Remove blockchain and state databases`,
utils.RopstenFlag, utils.RopstenFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
utils.CalaverasFlag,
}, },
Usage: "Inspect the storage size for each type of data in the database", Usage: "Inspect the storage size for each type of data in the database",
Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`, Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
@ -91,7 +90,6 @@ Remove blockchain and state databases`,
utils.RopstenFlag, utils.RopstenFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
utils.CalaverasFlag,
}, },
} }
dbCompactCmd = cli.Command{ dbCompactCmd = cli.Command{
@ -105,7 +103,6 @@ Remove blockchain and state databases`,
utils.RopstenFlag, utils.RopstenFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
utils.CalaverasFlag,
utils.CacheFlag, utils.CacheFlag,
utils.CacheDatabaseFlag, utils.CacheDatabaseFlag,
}, },
@ -125,7 +122,6 @@ corruption if it is aborted during execution'!`,
utils.RopstenFlag, utils.RopstenFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
utils.CalaverasFlag,
}, },
Description: "This command looks up the specified database key from the database.", Description: "This command looks up the specified database key from the database.",
} }
@ -141,7 +137,6 @@ corruption if it is aborted during execution'!`,
utils.RopstenFlag, utils.RopstenFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
utils.CalaverasFlag,
}, },
Description: `This command deletes the specified database key from the database. Description: `This command deletes the specified database key from the database.
WARNING: This is a low-level operation which may cause database corruption!`, WARNING: This is a low-level operation which may cause database corruption!`,
@ -158,7 +153,6 @@ WARNING: This is a low-level operation which may cause database corruption!`,
utils.RopstenFlag, utils.RopstenFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
utils.CalaverasFlag,
}, },
Description: `This command sets a given database key to the given value. Description: `This command sets a given database key to the given value.
WARNING: This is a low-level operation which may cause database corruption!`, WARNING: This is a low-level operation which may cause database corruption!`,
@ -175,7 +169,6 @@ WARNING: This is a low-level operation which may cause database corruption!`,
utils.RopstenFlag, utils.RopstenFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
utils.CalaverasFlag,
}, },
Description: "This command looks up the specified database key from the database.", Description: "This command looks up the specified database key from the database.",
} }
@ -191,7 +184,6 @@ WARNING: This is a low-level operation which may cause database corruption!`,
utils.RopstenFlag, utils.RopstenFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
utils.CalaverasFlag,
}, },
Description: "This command displays information about the freezer index.", Description: "This command displays information about the freezer index.",
} }

View File

@ -121,13 +121,13 @@ var (
utils.MiningEnabledFlag, utils.MiningEnabledFlag,
utils.MinerThreadsFlag, utils.MinerThreadsFlag,
utils.MinerNotifyFlag, utils.MinerNotifyFlag,
utils.MinerGasTargetFlag, utils.LegacyMinerGasTargetFlag,
utils.MinerGasLimitFlag, utils.MinerGasLimitFlag,
utils.MinerGasPriceFlag, utils.MinerGasPriceFlag,
utils.MinerEtherbaseFlag, utils.MinerEtherbaseFlag,
utils.MinerExtraDataFlag, utils.MinerExtraDataFlag,
utils.MinerRecommitIntervalFlag, utils.MinerRecommitIntervalFlag,
utils.MinerNoVerfiyFlag, utils.MinerNoVerifyFlag,
utils.NATFlag, utils.NATFlag,
utils.NoDiscoverFlag, utils.NoDiscoverFlag,
utils.DiscoveryV5Flag, utils.DiscoveryV5Flag,
@ -141,7 +141,6 @@ var (
utils.RopstenFlag, utils.RopstenFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
utils.CalaverasFlag,
utils.VMEnableDebugFlag, utils.VMEnableDebugFlag,
utils.NetworkIdFlag, utils.NetworkIdFlag,
utils.EthStatsURLFlag, utils.EthStatsURLFlag,
@ -162,12 +161,6 @@ var (
utils.HTTPPortFlag, utils.HTTPPortFlag,
utils.HTTPCORSDomainFlag, utils.HTTPCORSDomainFlag,
utils.HTTPVirtualHostsFlag, utils.HTTPVirtualHostsFlag,
utils.LegacyRPCEnabledFlag,
utils.LegacyRPCListenAddrFlag,
utils.LegacyRPCPortFlag,
utils.LegacyRPCCORSDomainFlag,
utils.LegacyRPCVirtualHostsFlag,
utils.LegacyRPCApiFlag,
utils.GraphQLEnabledFlag, utils.GraphQLEnabledFlag,
utils.GraphQLCORSDomainFlag, utils.GraphQLCORSDomainFlag,
utils.GraphQLVirtualHostsFlag, utils.GraphQLVirtualHostsFlag,
@ -183,6 +176,7 @@ var (
utils.IPCPathFlag, utils.IPCPathFlag,
utils.InsecureUnlockAllowedFlag, utils.InsecureUnlockAllowedFlag,
utils.RPCGlobalGasCapFlag, utils.RPCGlobalGasCapFlag,
utils.RPCGlobalEVMTimeoutFlag,
utils.RPCGlobalTxFeeCapFlag, utils.RPCGlobalTxFeeCapFlag,
utils.AllowUnprotectedTxs, utils.AllowUnprotectedTxs,
} }
@ -198,6 +192,10 @@ var (
utils.MetricsInfluxDBUsernameFlag, utils.MetricsInfluxDBUsernameFlag,
utils.MetricsInfluxDBPasswordFlag, utils.MetricsInfluxDBPasswordFlag,
utils.MetricsInfluxDBTagsFlag, utils.MetricsInfluxDBTagsFlag,
utils.MetricsEnableInfluxDBV2Flag,
utils.MetricsInfluxDBTokenFlag,
utils.MetricsInfluxDBBucketFlag,
utils.MetricsInfluxDBOrganizationFlag,
} }
) )
@ -277,9 +275,6 @@ func prepare(ctx *cli.Context) {
case ctx.GlobalIsSet(utils.GoerliFlag.Name): case ctx.GlobalIsSet(utils.GoerliFlag.Name):
log.Info("Starting Geth on Görli testnet...") log.Info("Starting Geth on Görli testnet...")
case ctx.GlobalIsSet(utils.CalaverasFlag.Name):
log.Info("Starting Geth on Calaveras testnet...")
case ctx.GlobalIsSet(utils.DeveloperFlag.Name): case ctx.GlobalIsSet(utils.DeveloperFlag.Name):
log.Info("Starting Geth in ephemeral dev mode...") log.Info("Starting Geth in ephemeral dev mode...")
@ -422,7 +417,7 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) {
} }
ethBackend, ok := backend.(*eth.EthAPIBackend) ethBackend, ok := backend.(*eth.EthAPIBackend)
if !ok { if !ok {
utils.Fatalf("Ethereum service not running: %v", err) utils.Fatalf("Ethereum service not running")
} }
// Set the gas price to the limits from the CLI and start mining // Set the gas price to the limits from the CLI and start mining
gasprice := utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name) gasprice := utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)

View File

@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/pruner" "github.com/ethereum/go-ethereum/core/state/pruner"
"github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
@ -232,7 +233,7 @@ func verifyState(ctx *cli.Context) error {
} }
} }
if err := snaptree.Verify(root); err != nil { if err := snaptree.Verify(root); err != nil {
log.Error("Failed to verfiy state", "root", root, "err", err) log.Error("Failed to verify state", "root", root, "err", err)
return err return err
} }
log.Info("Verified the state", "root", root) log.Info("Verified the state", "root", root)
@ -287,7 +288,7 @@ func traverseState(ctx *cli.Context) error {
accIter := trie.NewIterator(t.NodeIterator(nil)) accIter := trie.NewIterator(t.NodeIterator(nil))
for accIter.Next() { for accIter.Next() {
accounts += 1 accounts += 1
var acc state.Account var acc types.StateAccount
if err := rlp.DecodeBytes(accIter.Value, &acc); err != nil { if err := rlp.DecodeBytes(accIter.Value, &acc); err != nil {
log.Error("Invalid account encountered during traversal", "err", err) log.Error("Invalid account encountered during traversal", "err", err)
return err return err
@ -393,7 +394,7 @@ func traverseRawState(ctx *cli.Context) error {
// dig into the storage trie further. // dig into the storage trie further.
if accIter.Leaf() { if accIter.Leaf() {
accounts += 1 accounts += 1
var acc state.Account var acc types.StateAccount
if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil { if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil {
log.Error("Invalid account encountered during traversal", "err", err) log.Error("Invalid account encountered during traversal", "err", err)
return errors.New("invalid account") return errors.New("invalid account")

View File

@ -52,13 +52,16 @@
"check": "Geth\\/v1\\.9\\.(7|8|9|10|11|12|13|14|15|16).*$" "check": "Geth\\/v1\\.9\\.(7|8|9|10|11|12|13|14|15|16).*$"
}, },
{ {
"name": "GethCrash", "name": "Geth DoS via MULMOD",
"uid": "GETH-2020-04", "uid": "GETH-2020-04",
"summary": "A denial-of-service issue can be used to crash Geth nodes during block processing", "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing",
"description": "Full details to be disclosed at a later date", "description": "Affected versions suffer from a vulnerability which can be exploited through the `MULMOD` operation, by specifying a modulo of `0`: `mulmod(a,b,0)`, causing a `panic` in the underlying library. \nThe crash was in the `uint256` library, where a buffer [underflowed](https://github.com/holiman/uint256/blob/4ce82e695c10ddad57215bdbeafb68b8c5df2c30/uint256.go#L442).\n\n\tif `d == 0`, `dLen` remains `0`\n\nand https://github.com/holiman/uint256/blob/4ce82e695c10ddad57215bdbeafb68b8c5df2c30/uint256.go#L451 will try to access index `[-1]`.\n\nThe `uint256` library was first merged in this [commit](https://github.com/ethereum/go-ethereum/commit/cf6674539c589f80031f3371a71c6a80addbe454), on 2020-06-08. \nExploiting this vulnerabilty would cause all vulnerable nodes to drop off the network. \n\nThe issue was brought to our attention through a [bug report](https://github.com/ethereum/go-ethereum/issues/21367), showing a `panic` occurring on sync from genesis on the Ropsten network.\n \nIt was estimated that the least obvious way to fix this would be to merge the fix into `uint256`, make a new release of that library and then update the geth-dependency.\n",
"links": [ "links": [
"https://blog.ethereum.org/2020/11/12/geth_security_release/", "https://blog.ethereum.org/2020/11/12/geth_security_release/",
"https://github.com/ethereum/go-ethereum/security/advisories/GHSA-jm5c-rv3w-w83m" "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-jm5c-rv3w-w83m",
"https://github.com/holiman/uint256/releases/tag/v1.1.1",
"https://github.com/holiman/uint256/pull/80",
"https://github.com/ethereum/go-ethereum/pull/21368"
], ],
"introduced": "v1.9.16", "introduced": "v1.9.16",
"fixed": "v1.9.18", "fixed": "v1.9.18",
@ -66,5 +69,51 @@
"severity": "Critical", "severity": "Critical",
"CVE": "CVE-2020-26242", "CVE": "CVE-2020-26242",
"check": "Geth\\/v1\\.9.(16|17).*$" "check": "Geth\\/v1\\.9.(16|17).*$"
},
{
"name": "LES Server DoS via GetProofsV2",
"uid": "GETH-2020-05",
"summary": "A DoS vulnerability can make a LES server crash.",
"description": "A DoS vulnerability can make a LES server crash via malicious GetProofsV2 request from a connected LES client.\n\nThe vulnerability was patched in #21896.\n\nThis vulnerability only concern users explicitly running geth as a light server",
"links": [
"https://github.com/ethereum/go-ethereum/security/advisories/GHSA-r33q-22hv-j29q",
"https://github.com/ethereum/go-ethereum/pull/21896"
],
"introduced": "v1.8.0",
"fixed": "v1.9.25",
"published": "2020-12-10",
"severity": "Medium",
"CVE": "CVE-2020-26264",
"check": "(Geth\\/v1\\.8\\.*)|(Geth\\/v1\\.9\\.\\d-.*)|(Geth\\/v1\\.9\\.1\\d-.*)|(Geth\\/v1\\.9\\.(20|21|22|23|24)-.*)$"
},
{
"name": "SELFDESTRUCT-recreate consensus flaw",
"uid": "GETH-2020-06",
"introduced": "v1.9.4",
"fixed": "v1.9.20",
"summary": "A consensus-vulnerability in Geth could cause a chain split, where vulnerable versions refuse to accept the canonical chain.",
"description": "A flaw was repoted at 2020-08-11 by John Youngseok Yang (Software Platform Lab), where a particular sequence of transactions could cause a consensus failure.\n\n- Tx 1:\n - `sender` invokes `caller`.\n - `caller` invokes `0xaa`. `0xaa` has 3 wei, does a self-destruct-to-self\n - `caller` does a `1 wei` -call to `0xaa`, who thereby has 1 wei (the code in `0xaa` still executed, since the tx is still ongoing, but doesn't redo the selfdestruct, it takes a different path if callvalue is non-zero)\n\n-Tx 2:\n - `sender` does a 5-wei call to 0xaa. No exec (since no code). \n\nIn geth, the result would be that `0xaa` had `6 wei`, whereas OE reported (correctly) `5` wei. Furthermore, in geth, if the second tx was not executed, the `0xaa` would be destructed, resulting in `0 wei`. Thus obviously wrong. \n\nIt was determined that the root cause was this [commit](https://github.com/ethereum/go-ethereum/commit/223b950944f494a5b4e0957fd9f92c48b09037ad) from [this PR](https://github.com/ethereum/go-ethereum/pull/19953). The semantics of `createObject` was subtly changd, into returning a non-nil object (with `deleted=true`) where it previously did not if the account had been destructed. This return value caused the new object to inherit the old `balance`.\n",
"links": [
"https://github.com/ethereum/go-ethereum/security/advisories/GHSA-xw37-57qp-9mm4"
],
"published": "2020-12-10",
"severity": "High",
"CVE": "CVE-2020-26265",
"check": "(Geth\\/v1\\.9\\.(4|5|6|7|8|9)-.*)|(Geth\\/v1\\.9\\.1\\d-.*)$"
},
{
"name": "Not ready for London upgrade",
"uid": "GETH-2021-01",
"summary": "The client is not ready for the 'London' technical upgrade, and will deviate from the canonical chain when the London upgrade occurs (at block '12965000' around August 4, 2021.",
"description": "At (or around) August 4, Ethereum will undergo a technical upgrade called 'London'. Clients not upgraded will fail to progress on the canonical chain.",
"links": [
"https://github.com/ethereum/eth1.0-specs/blob/master/network-upgrades/mainnet-upgrades/london.md",
"https://notes.ethereum.org/@timbeiko/ropsten-postmortem"
],
"introduced": "v1.10.1",
"fixed": "v1.10.6",
"published": "2020-12-10",
"severity": "High",
"check": "(Geth\\/v1\\.10\\.(1|2|3|4|5)-.*)$"
} }
] ]

View File

@ -44,7 +44,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.MainnetFlag, utils.MainnetFlag,
utils.GoerliFlag, utils.GoerliFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.CalaverasFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SyncModeFlag, utils.SyncModeFlag,
utils.ExitWhenSyncedFlag, utils.ExitWhenSyncedFlag,
@ -151,6 +150,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.GraphQLCORSDomainFlag, utils.GraphQLCORSDomainFlag,
utils.GraphQLVirtualHostsFlag, utils.GraphQLVirtualHostsFlag,
utils.RPCGlobalGasCapFlag, utils.RPCGlobalGasCapFlag,
utils.RPCGlobalEVMTimeoutFlag,
utils.RPCGlobalTxFeeCapFlag, utils.RPCGlobalTxFeeCapFlag,
utils.AllowUnprotectedTxs, utils.AllowUnprotectedTxs,
utils.JSpathFlag, utils.JSpathFlag,
@ -182,12 +182,11 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.MinerNotifyFlag, utils.MinerNotifyFlag,
utils.MinerNotifyFullFlag, utils.MinerNotifyFullFlag,
utils.MinerGasPriceFlag, utils.MinerGasPriceFlag,
utils.MinerGasTargetFlag,
utils.MinerGasLimitFlag, utils.MinerGasLimitFlag,
utils.MinerEtherbaseFlag, utils.MinerEtherbaseFlag,
utils.MinerExtraDataFlag, utils.MinerExtraDataFlag,
utils.MinerRecommitIntervalFlag, utils.MinerRecommitIntervalFlag,
utils.MinerNoVerfiyFlag, utils.MinerNoVerifyFlag,
}, },
}, },
{ {
@ -220,12 +219,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{
Name: "ALIASED (deprecated)", Name: "ALIASED (deprecated)",
Flags: []cli.Flag{ Flags: []cli.Flag{
utils.NoUSBFlag, utils.NoUSBFlag,
utils.LegacyRPCEnabledFlag,
utils.LegacyRPCListenAddrFlag,
utils.LegacyRPCPortFlag,
utils.LegacyRPCCORSDomainFlag,
utils.LegacyRPCVirtualHostsFlag,
utils.LegacyRPCApiFlag,
}, },
}, },
{ {

View File

@ -158,7 +158,7 @@ func checkEthstats(client *sshClient, network string) (*ethstatsInfos, error) {
if port != 80 && port != 443 { if port != 80 && port != 443 {
config += fmt.Sprintf(":%d", port) config += fmt.Sprintf(":%d", port)
} }
// Retrieve the IP blacklist // Retrieve the IP banned list
banned := strings.Split(infos.envvars["BANNED"], ",") banned := strings.Split(infos.envvars["BANNED"], ",")
// Run a sanity check to see if the port is reachable // Run a sanity check to see if the port is reachable

View File

@ -35,8 +35,8 @@ FROM puppeth/blockscout:latest
ADD genesis.json /genesis.json ADD genesis.json /genesis.json
RUN \ RUN \
echo 'geth --cache 512 init /genesis.json' > explorer.sh && \ echo 'geth --cache 512 init /genesis.json' > explorer.sh && \
echo $'geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,shh,debug" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" --exitwhensynced' >> explorer.sh && \ echo $'geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,debug,txpool" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" --exitwhensynced' >> explorer.sh && \
echo $'exec geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,shh,debug" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" &' >> explorer.sh && \ echo $'exec geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,debug,txpool" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" &' >> explorer.sh && \
echo '/usr/local/bin/docker-entrypoint.sh postgres &' >> explorer.sh && \ echo '/usr/local/bin/docker-entrypoint.sh postgres &' >> explorer.sh && \
echo 'sleep 5' >> explorer.sh && \ echo 'sleep 5' >> explorer.sh && \
echo 'mix do ecto.drop --force, ecto.create, ecto.migrate' >> explorer.sh && \ echo 'mix do ecto.drop --force, ecto.create, ecto.migrate' >> explorer.sh && \

View File

@ -63,20 +63,20 @@ func (w *wizard) deployEthstats() {
fmt.Printf("What should be the secret password for the API? (default = %s)\n", infos.secret) fmt.Printf("What should be the secret password for the API? (default = %s)\n", infos.secret)
infos.secret = w.readDefaultString(infos.secret) infos.secret = w.readDefaultString(infos.secret)
} }
// Gather any blacklists to ban from reporting // Gather any banned lists to ban from reporting
if existed { if existed {
fmt.Println() fmt.Println()
fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned) fmt.Printf("Keep existing IP %v in the banned list (y/n)? (default = yes)\n", infos.banned)
if !w.readDefaultYesNo(true) { if !w.readDefaultYesNo(true) {
// The user might want to clear the entire list, although generally probably not // The user might want to clear the entire list, although generally probably not
fmt.Println() fmt.Println()
fmt.Printf("Clear out blacklist and start over (y/n)? (default = no)\n") fmt.Printf("Clear out the banned list and start over (y/n)? (default = no)\n")
if w.readDefaultYesNo(false) { if w.readDefaultYesNo(false) {
infos.banned = nil infos.banned = nil
} }
// Offer the user to explicitly add/remove certain IP addresses // Offer the user to explicitly add/remove certain IP addresses
fmt.Println() fmt.Println()
fmt.Println("Which additional IP addresses should be blacklisted?") fmt.Println("Which additional IP addresses should be in the banned list?")
for { for {
if ip := w.readIPAddress(); ip != "" { if ip := w.readIPAddress(); ip != "" {
infos.banned = append(infos.banned, ip) infos.banned = append(infos.banned, ip)
@ -85,7 +85,7 @@ func (w *wizard) deployEthstats() {
break break
} }
fmt.Println() fmt.Println()
fmt.Println("Which IP addresses should not be blacklisted?") fmt.Println("Which IP addresses should not be in the banned list?")
for { for {
if ip := w.readIPAddress(); ip != "" { if ip := w.readIPAddress(); ip != "" {
for i, addr := range infos.banned { for i, addr := range infos.banned {

View File

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build !windows && !openbsd
// +build !windows,!openbsd // +build !windows,!openbsd
package utils package utils

View File

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build openbsd
// +build openbsd // +build openbsd
package utils package utils
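The two hunks above add the `//go:build` constraint syntax introduced with Go 1.17 while keeping the legacy `// +build` lines for older toolchains. A minimal sketch of how the two forms sit together in a constrained file (the file name is hypothetical, not part of this diff):

```go
// diskusage_example.go — hypothetical file for illustration only.
// gofmt under Go 1.17 writes the new //go:build line and keeps the
// old // +build line so pre-1.17 toolchains still honour the constraint.

//go:build !windows && !openbsd
// +build !windows,!openbsd

package utils
```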

View File

@ -125,10 +125,6 @@ var (
Name: "keystore", Name: "keystore",
Usage: "Directory for the keystore (default = inside the datadir)", Usage: "Directory for the keystore (default = inside the datadir)",
} }
NoUSBFlag = cli.BoolFlag{
Name: "nousb",
Usage: "Disables monitoring for and managing USB hardware wallets (deprecated)",
}
USBFlag = cli.BoolFlag{ USBFlag = cli.BoolFlag{
Name: "usb", Name: "usb",
Usage: "Enable monitoring and management of USB hardware wallets", Usage: "Enable monitoring and management of USB hardware wallets",
@ -151,10 +147,6 @@ var (
Name: "goerli", Name: "goerli",
Usage: "Görli network: pre-configured proof-of-authority test network", Usage: "Görli network: pre-configured proof-of-authority test network",
} }
CalaverasFlag = cli.BoolFlag{
Name: "calaveras",
Usage: "Calaveras network: pre-configured proof-of-authority shortlived test network.",
}
RinkebyFlag = cli.BoolFlag{ RinkebyFlag = cli.BoolFlag{
Name: "rinkeby", Name: "rinkeby",
Usage: "Rinkeby network: pre-configured proof-of-authority test network", Usage: "Rinkeby network: pre-configured proof-of-authority test network",
@ -444,11 +436,6 @@ var (
Name: "miner.notify.full", Name: "miner.notify.full",
Usage: "Notify with pending block headers instead of work packages", Usage: "Notify with pending block headers instead of work packages",
} }
MinerGasTargetFlag = cli.Uint64Flag{
Name: "miner.gastarget",
Usage: "Target gas floor for mined blocks",
Value: ethconfig.Defaults.Miner.GasFloor,
}
MinerGasLimitFlag = cli.Uint64Flag{ MinerGasLimitFlag = cli.Uint64Flag{
Name: "miner.gaslimit", Name: "miner.gaslimit",
Usage: "Target gas ceiling for mined blocks", Usage: "Target gas ceiling for mined blocks",
@ -473,7 +460,7 @@ var (
Usage: "Time interval to recreate the block being mined", Usage: "Time interval to recreate the block being mined",
Value: ethconfig.Defaults.Miner.Recommit, Value: ethconfig.Defaults.Miner.Recommit,
} }
MinerNoVerfiyFlag = cli.BoolFlag{ MinerNoVerifyFlag = cli.BoolFlag{
Name: "miner.noverify", Name: "miner.noverify",
Usage: "Disable remote sealing verification", Usage: "Disable remote sealing verification",
} }
@ -506,6 +493,11 @@ var (
Usage: "Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite)", Usage: "Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite)",
Value: ethconfig.Defaults.RPCGasCap, Value: ethconfig.Defaults.RPCGasCap,
} }
RPCGlobalEVMTimeoutFlag = cli.DurationFlag{
Name: "rpc.evmtimeout",
Usage: "Sets a timeout used for eth_call (0=infinite)",
Value: ethconfig.Defaults.RPCEVMTimeout,
}
RPCGlobalTxFeeCapFlag = cli.Float64Flag{ RPCGlobalTxFeeCapFlag = cli.Float64Flag{
Name: "rpc.txfeecap", Name: "rpc.txfeecap",
Usage: "Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)", Usage: "Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)",
@ -761,6 +753,29 @@ var (
Value: metrics.DefaultConfig.InfluxDBTags, Value: metrics.DefaultConfig.InfluxDBTags,
} }
MetricsEnableInfluxDBV2Flag = cli.BoolFlag{
Name: "metrics.influxdbv2",
Usage: "Enable metrics export/push to an external InfluxDB v2 database",
}
MetricsInfluxDBTokenFlag = cli.StringFlag{
Name: "metrics.influxdb.token",
Usage: "Token to authorize access to the database (v2 only)",
Value: metrics.DefaultConfig.InfluxDBToken,
}
MetricsInfluxDBBucketFlag = cli.StringFlag{
Name: "metrics.influxdb.bucket",
Usage: "InfluxDB bucket name to push reported metrics to (v2 only)",
Value: metrics.DefaultConfig.InfluxDBBucket,
}
MetricsInfluxDBOrganizationFlag = cli.StringFlag{
Name: "metrics.influxdb.organization",
Usage: "InfluxDB organization name (v2 only)",
Value: metrics.DefaultConfig.InfluxDBOrganization,
}
CatalystFlag = cli.BoolFlag{ CatalystFlag = cli.BoolFlag{
Name: "catalyst", Name: "catalyst",
Usage: "Catalyst mode (eth2 integration testing)", Usage: "Catalyst mode (eth2 integration testing)",
@ -783,9 +798,6 @@ func MakeDataDir(ctx *cli.Context) string {
if ctx.GlobalBool(GoerliFlag.Name) { if ctx.GlobalBool(GoerliFlag.Name) {
return filepath.Join(path, "goerli") return filepath.Join(path, "goerli")
} }
if ctx.GlobalBool(CalaverasFlag.Name) {
return filepath.Join(path, "calaveras")
}
return path return path
} }
Fatalf("Cannot determine default data directory, please set manually (--datadir)") Fatalf("Cannot determine default data directory, please set manually (--datadir)")
@ -838,8 +850,6 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
urls = params.RinkebyBootnodes urls = params.RinkebyBootnodes
case ctx.GlobalBool(GoerliFlag.Name): case ctx.GlobalBool(GoerliFlag.Name):
urls = params.GoerliBootnodes urls = params.GoerliBootnodes
case ctx.GlobalBool(CalaverasFlag.Name):
urls = params.CalaverasBootnodes
case cfg.BootstrapNodes != nil: case cfg.BootstrapNodes != nil:
return // already set, don't apply defaults. return // already set, don't apply defaults.
} }
@ -915,14 +925,6 @@ func SplitAndTrim(input string) (ret []string) {
// setHTTP creates the HTTP RPC listener interface string from the set // setHTTP creates the HTTP RPC listener interface string from the set
// command line flags, returning empty if the HTTP endpoint is disabled. // command line flags, returning empty if the HTTP endpoint is disabled.
func setHTTP(ctx *cli.Context, cfg *node.Config) { func setHTTP(ctx *cli.Context, cfg *node.Config) {
if ctx.GlobalBool(LegacyRPCEnabledFlag.Name) && cfg.HTTPHost == "" {
log.Warn("The flag --rpc is deprecated and will be removed June 2021, please use --http")
cfg.HTTPHost = "127.0.0.1"
if ctx.GlobalIsSet(LegacyRPCListenAddrFlag.Name) {
cfg.HTTPHost = ctx.GlobalString(LegacyRPCListenAddrFlag.Name)
log.Warn("The flag --rpcaddr is deprecated and will be removed June 2021, please use --http.addr")
}
}
if ctx.GlobalBool(HTTPEnabledFlag.Name) && cfg.HTTPHost == "" { if ctx.GlobalBool(HTTPEnabledFlag.Name) && cfg.HTTPHost == "" {
cfg.HTTPHost = "127.0.0.1" cfg.HTTPHost = "127.0.0.1"
if ctx.GlobalIsSet(HTTPListenAddrFlag.Name) { if ctx.GlobalIsSet(HTTPListenAddrFlag.Name) {
@ -930,34 +932,18 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
} }
} }
if ctx.GlobalIsSet(LegacyRPCPortFlag.Name) {
cfg.HTTPPort = ctx.GlobalInt(LegacyRPCPortFlag.Name)
log.Warn("The flag --rpcport is deprecated and will be removed June 2021, please use --http.port")
}
if ctx.GlobalIsSet(HTTPPortFlag.Name) { if ctx.GlobalIsSet(HTTPPortFlag.Name) {
cfg.HTTPPort = ctx.GlobalInt(HTTPPortFlag.Name) cfg.HTTPPort = ctx.GlobalInt(HTTPPortFlag.Name)
} }
if ctx.GlobalIsSet(LegacyRPCCORSDomainFlag.Name) {
cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(LegacyRPCCORSDomainFlag.Name))
log.Warn("The flag --rpccorsdomain is deprecated and will be removed June 2021, please use --http.corsdomain")
}
if ctx.GlobalIsSet(HTTPCORSDomainFlag.Name) { if ctx.GlobalIsSet(HTTPCORSDomainFlag.Name) {
cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(HTTPCORSDomainFlag.Name)) cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(HTTPCORSDomainFlag.Name))
} }
if ctx.GlobalIsSet(LegacyRPCApiFlag.Name) {
cfg.HTTPModules = SplitAndTrim(ctx.GlobalString(LegacyRPCApiFlag.Name))
log.Warn("The flag --rpcapi is deprecated and will be removed June 2021, please use --http.api")
}
if ctx.GlobalIsSet(HTTPApiFlag.Name) { if ctx.GlobalIsSet(HTTPApiFlag.Name) {
cfg.HTTPModules = SplitAndTrim(ctx.GlobalString(HTTPApiFlag.Name)) cfg.HTTPModules = SplitAndTrim(ctx.GlobalString(HTTPApiFlag.Name))
} }
if ctx.GlobalIsSet(LegacyRPCVirtualHostsFlag.Name) {
cfg.HTTPVirtualHosts = SplitAndTrim(ctx.GlobalString(LegacyRPCVirtualHostsFlag.Name))
log.Warn("The flag --rpcvhosts is deprecated and will be removed June 2021, please use --http.vhosts")
}
if ctx.GlobalIsSet(HTTPVirtualHostsFlag.Name) { if ctx.GlobalIsSet(HTTPVirtualHostsFlag.Name) {
cfg.HTTPVirtualHosts = SplitAndTrim(ctx.GlobalString(HTTPVirtualHostsFlag.Name)) cfg.HTTPVirtualHosts = SplitAndTrim(ctx.GlobalString(HTTPVirtualHostsFlag.Name))
} }
@ -1283,8 +1269,6 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) {
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby") cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
case ctx.GlobalBool(GoerliFlag.Name) && cfg.DataDir == node.DefaultDataDir(): case ctx.GlobalBool(GoerliFlag.Name) && cfg.DataDir == node.DefaultDataDir():
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli") cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli")
case ctx.GlobalBool(CalaverasFlag.Name) && cfg.DataDir == node.DefaultDataDir():
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "calaveras")
} }
} }
@ -1386,9 +1370,6 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) {
if ctx.GlobalIsSet(MinerExtraDataFlag.Name) { if ctx.GlobalIsSet(MinerExtraDataFlag.Name) {
cfg.ExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name)) cfg.ExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name))
} }
if ctx.GlobalIsSet(MinerGasTargetFlag.Name) {
cfg.GasFloor = ctx.GlobalUint64(MinerGasTargetFlag.Name)
}
if ctx.GlobalIsSet(MinerGasLimitFlag.Name) { if ctx.GlobalIsSet(MinerGasLimitFlag.Name) {
cfg.GasCeil = ctx.GlobalUint64(MinerGasLimitFlag.Name) cfg.GasCeil = ctx.GlobalUint64(MinerGasLimitFlag.Name)
} }
@ -1398,8 +1379,11 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) {
if ctx.GlobalIsSet(MinerRecommitIntervalFlag.Name) { if ctx.GlobalIsSet(MinerRecommitIntervalFlag.Name) {
cfg.Recommit = ctx.GlobalDuration(MinerRecommitIntervalFlag.Name) cfg.Recommit = ctx.GlobalDuration(MinerRecommitIntervalFlag.Name)
} }
if ctx.GlobalIsSet(MinerNoVerfiyFlag.Name) { if ctx.GlobalIsSet(MinerNoVerifyFlag.Name) {
cfg.Noverify = ctx.GlobalBool(MinerNoVerfiyFlag.Name) cfg.Noverify = ctx.GlobalBool(MinerNoVerifyFlag.Name)
}
if ctx.GlobalIsSet(LegacyMinerGasTargetFlag.Name) {
log.Warn("The generic --miner.gastarget flag is deprecated and will be removed in the future!")
} }
} }
@ -1470,7 +1454,7 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
// SetEthConfig applies eth-related command line flags to the config. // SetEthConfig applies eth-related command line flags to the config.
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// Avoid conflicting network flags // Avoid conflicting network flags
CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, CalaverasFlag) CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag)
CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light") CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
if ctx.GlobalString(GCModeFlag.Name) == "archive" && ctx.GlobalUint64(TxLookupLimitFlag.Name) != 0 { if ctx.GlobalString(GCModeFlag.Name) == "archive" && ctx.GlobalUint64(TxLookupLimitFlag.Name) != 0 {
@ -1584,6 +1568,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
} else { } else {
log.Info("Global gas cap disabled") log.Info("Global gas cap disabled")
} }
if ctx.GlobalIsSet(RPCGlobalEVMTimeoutFlag.Name) {
cfg.RPCEVMTimeout = ctx.GlobalDuration(RPCGlobalEVMTimeoutFlag.Name)
}
if ctx.GlobalIsSet(RPCGlobalTxFeeCapFlag.Name) { if ctx.GlobalIsSet(RPCGlobalTxFeeCapFlag.Name) {
cfg.RPCTxFeeCap = ctx.GlobalFloat64(RPCGlobalTxFeeCapFlag.Name) cfg.RPCTxFeeCap = ctx.GlobalFloat64(RPCGlobalTxFeeCapFlag.Name)
} }
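`--rpc.evmtimeout` caps how long a single `eth_call` may execute; the hunk above only copies the flag value into `cfg.RPCEVMTimeout`. A minimal, illustrative sketch of how such a duration is typically enforced with `context.WithTimeout` (stand-in function, not the actual geth EVM wiring):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// runCall stands in for an EVM execution: it "works" for the given
// duration unless the context is cancelled first.
func runCall(ctx context.Context, work time.Duration) error {
	select {
	case <-time.After(work):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	evmTimeout := 5 * time.Second // would come from cfg.RPCEVMTimeout

	ctx, cancel := context.WithTimeout(context.Background(), evmTimeout)
	defer cancel()

	if err := runCall(ctx, 10*time.Second); errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("execution aborted: call exceeded the configured EVM timeout")
	}
}
```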
@ -1623,11 +1610,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
} }
cfg.Genesis = core.DefaultGoerliGenesisBlock() cfg.Genesis = core.DefaultGoerliGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.GoerliGenesisHash) SetDNSDiscoveryDefaults(cfg, params.GoerliGenesisHash)
case ctx.GlobalBool(CalaverasFlag.Name):
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 123 // https://gist.github.com/holiman/c5697b041b3dc18c50a5cdd382cbdd16
}
cfg.Genesis = core.DefaultCalaverasGenesisBlock()
case ctx.GlobalBool(DeveloperFlag.Name): case ctx.GlobalBool(DeveloperFlag.Name):
if !ctx.GlobalIsSet(NetworkIdFlag.Name) { if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 1337 cfg.NetworkId = 1337
@ -1744,11 +1726,36 @@ func SetupMetrics(ctx *cli.Context) {
log.Info("Enabling metrics collection") log.Info("Enabling metrics collection")
var ( var (
enableExport = ctx.GlobalBool(MetricsEnableInfluxDBFlag.Name) enableExport = ctx.GlobalBool(MetricsEnableInfluxDBFlag.Name)
endpoint = ctx.GlobalString(MetricsInfluxDBEndpointFlag.Name) enableExportV2 = ctx.GlobalBool(MetricsEnableInfluxDBV2Flag.Name)
database = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name) )
username = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name)
password = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name) if enableExport || enableExportV2 {
CheckExclusive(ctx, MetricsEnableInfluxDBFlag, MetricsEnableInfluxDBV2Flag)
v1FlagIsSet := ctx.GlobalIsSet(MetricsInfluxDBUsernameFlag.Name) ||
ctx.GlobalIsSet(MetricsInfluxDBPasswordFlag.Name)
v2FlagIsSet := ctx.GlobalIsSet(MetricsInfluxDBTokenFlag.Name) ||
ctx.GlobalIsSet(MetricsInfluxDBOrganizationFlag.Name) ||
ctx.GlobalIsSet(MetricsInfluxDBBucketFlag.Name)
if enableExport && v2FlagIsSet {
Fatalf("Flags --influxdb.metrics.organization, --influxdb.metrics.token, --influxdb.metrics.bucket are only available for influxdb-v2")
} else if enableExportV2 && v1FlagIsSet {
Fatalf("Flags --influxdb.metrics.username, --influxdb.metrics.password are only available for influxdb-v1")
}
}
var (
endpoint = ctx.GlobalString(MetricsInfluxDBEndpointFlag.Name)
database = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name)
username = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name)
password = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name)
token = ctx.GlobalString(MetricsInfluxDBTokenFlag.Name)
bucket = ctx.GlobalString(MetricsInfluxDBBucketFlag.Name)
organization = ctx.GlobalString(MetricsInfluxDBOrganizationFlag.Name)
) )
if enableExport { if enableExport {
@ -1757,6 +1764,12 @@ func SetupMetrics(ctx *cli.Context) {
log.Info("Enabling metrics export to InfluxDB") log.Info("Enabling metrics export to InfluxDB")
go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "geth.", tagsMap) go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "geth.", tagsMap)
} else if enableExportV2 {
tagsMap := SplitTagsFlag(ctx.GlobalString(MetricsInfluxDBTagsFlag.Name))
log.Info("Enabling metrics export to InfluxDB (v2)")
go influxdb.InfluxDBV2WithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, token, bucket, organization, "geth.", tagsMap)
} }
if ctx.GlobalIsSet(MetricsHTTPFlag.Name) { if ctx.GlobalIsSet(MetricsHTTPFlag.Name) {
@ -1817,8 +1830,6 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
genesis = core.DefaultRinkebyGenesisBlock() genesis = core.DefaultRinkebyGenesisBlock()
case ctx.GlobalBool(GoerliFlag.Name): case ctx.GlobalBool(GoerliFlag.Name):
genesis = core.DefaultGoerliGenesisBlock() genesis = core.DefaultGoerliGenesisBlock()
case ctx.GlobalBool(CalaverasFlag.Name):
genesis = core.DefaultCalaverasGenesisBlock()
case ctx.GlobalBool(DeveloperFlag.Name): case ctx.GlobalBool(DeveloperFlag.Name):
Fatalf("Developer chains are ephemeral") Fatalf("Developer chains are ephemeral")
} }

View File

@ -18,9 +18,8 @@ package utils
import ( import (
"fmt" "fmt"
"strings"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/eth/ethconfig"
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
) )
@ -33,38 +32,22 @@ var ShowDeprecated = cli.Command{
Description: "Show flags that have been deprecated and will soon be removed", Description: "Show flags that have been deprecated and will soon be removed",
} }
var DeprecatedFlags = []cli.Flag{} var DeprecatedFlags = []cli.Flag{
LegacyMinerGasTargetFlag,
NoUSBFlag,
}
var ( var (
// (Deprecated May 2020, shown in aliased flags section) // (Deprecated May 2020, shown in aliased flags section)
LegacyRPCEnabledFlag = cli.BoolFlag{ NoUSBFlag = cli.BoolFlag{
Name: "rpc", Name: "nousb",
Usage: "Enable the HTTP-RPC server (deprecated and will be removed June 2021, use --http)", Usage: "Disables monitoring for and managing USB hardware wallets (deprecated)",
} }
LegacyRPCListenAddrFlag = cli.StringFlag{ // (Deprecated July 2021, shown in aliased flags section)
Name: "rpcaddr", LegacyMinerGasTargetFlag = cli.Uint64Flag{
Usage: "HTTP-RPC server listening interface (deprecated and will be removed June 2021, use --http.addr)", Name: "miner.gastarget",
Value: node.DefaultHTTPHost, Usage: "Target gas floor for mined blocks (deprecated)",
} Value: ethconfig.Defaults.Miner.GasFloor,
LegacyRPCPortFlag = cli.IntFlag{
Name: "rpcport",
Usage: "HTTP-RPC server listening port (deprecated and will be removed June 2021, use --http.port)",
Value: node.DefaultHTTPPort,
}
LegacyRPCCORSDomainFlag = cli.StringFlag{
Name: "rpccorsdomain",
Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced) (deprecated and will be removed June 2021, use --http.corsdomain)",
Value: "",
}
LegacyRPCVirtualHostsFlag = cli.StringFlag{
Name: "rpcvhosts",
Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (deprecated and will be removed June 2021, use --http.vhosts)",
Value: strings.Join(node.DefaultConfig.HTTPVirtualHosts, ","),
}
LegacyRPCApiFlag = cli.StringFlag{
Name: "rpcapi",
Usage: "API's offered over the HTTP-RPC interface (deprecated and will be removed June 2021, use --http.api)",
Value: "",
} }
) )
@ -74,7 +57,8 @@ func showDeprecated(*cli.Context) {
fmt.Println("The following flags are deprecated and will be removed in the future!") fmt.Println("The following flags are deprecated and will be removed in the future!")
fmt.Println("--------------------------------------------------------------------") fmt.Println("--------------------------------------------------------------------")
fmt.Println() fmt.Println()
// TODO remove when there are newly deprecated flags for _, flag := range DeprecatedFlags {
fmt.Println("no deprecated flags to show at this time") fmt.Println(flag.String())
}
fmt.Println() fmt.Println()
} }
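With `DeprecatedFlags` no longer empty, `showDeprecated` now walks the slice and prints each flag via the `cli.Flag` interface's `String()` method. A standalone sketch of the same pattern with urfave/cli v1, using toy copies of the two deprecated flags:

```go
package main

import (
	"fmt"

	"gopkg.in/urfave/cli.v1"
)

// Toy stand-ins for geth's deprecated flags.
var deprecatedFlags = []cli.Flag{
	cli.BoolFlag{
		Name:  "nousb",
		Usage: "Disables monitoring for and managing USB hardware wallets (deprecated)",
	},
	cli.Uint64Flag{
		Name:  "miner.gastarget",
		Usage: "Target gas floor for mined blocks (deprecated)",
	},
}

func main() {
	fmt.Println("The following flags are deprecated and will be removed in the future!")
	for _, flag := range deprecatedFlags {
		// cli.Flag's String() renders the "--name value  usage" help line.
		fmt.Println(flag.String())
	}
}
```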

View File

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build freebsd || dragonfly
// +build freebsd dragonfly // +build freebsd dragonfly
package fdlimit package fdlimit

View File

@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build linux || netbsd || openbsd || solaris
// +build linux netbsd openbsd solaris // +build linux netbsd openbsd solaris
package fdlimit package fdlimit

View File

@ -86,7 +86,7 @@ func (h Hash) String() string {
} }
// Format implements fmt.Formatter. // Format implements fmt.Formatter.
// Hash supports the %v, %s, %v, %x, %X and %d format verbs. // Hash supports the %v, %s, %q, %x, %X and %d format verbs.
func (h Hash) Format(s fmt.State, c rune) { func (h Hash) Format(s fmt.State, c rune) {
hexb := make([]byte, 2+len(h)*2) hexb := make([]byte, 2+len(h)*2)
copy(hexb, "0x") copy(hexb, "0x")
@ -270,7 +270,7 @@ func (a Address) hex() []byte {
} }
// Format implements fmt.Formatter. // Format implements fmt.Formatter.
// Address supports the %v, %s, %v, %x, %X and %d format verbs. // Address supports the %v, %s, %q, %x, %X and %d format verbs.
func (a Address) Format(s fmt.State, c rune) { func (a Address) Format(s fmt.State, c rune) {
switch c { switch c {
case 'v', 's': case 'v', 's':
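The comment fix above brings the documented verb list for `Hash` and `Address` in line with the `Format` implementation (`%q` instead of a duplicated `%v`). A quick illustration of the verbs on `common.Hash`; the comments describe the shape of the output rather than exact bytes:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	h := common.HexToHash("0xabcd")

	fmt.Printf("%v\n", h) // 0x-prefixed hex, same as %s
	fmt.Printf("%q\n", h) // the 0x-prefixed hex wrapped in double quotes
	fmt.Printf("%x\n", h) // lower-case hex without the 0x prefix
	fmt.Printf("%X\n", h) // upper-case hex without the 0x prefix
	fmt.Printf("%d\n", h) // the underlying bytes as decimal values
}
```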

View File

@ -363,7 +363,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainHeaderReader, header
} }
} }
// All basic checks passed, verify the seal and return // All basic checks passed, verify the seal and return
return c.verifySeal(chain, header, parents) return c.verifySeal(snap, header, parents)
} }
// snapshot retrieves the authorization snapshot at a given point in time. // snapshot retrieves the authorization snapshot at a given point in time.
@ -460,18 +460,12 @@ func (c *Clique) VerifyUncles(chain consensus.ChainReader, block *types.Block) e
// consensus protocol requirements. The method accepts an optional list of parent // consensus protocol requirements. The method accepts an optional list of parent
// headers that aren't yet part of the local blockchain to generate the snapshots // headers that aren't yet part of the local blockchain to generate the snapshots
// from. // from.
func (c *Clique) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error { func (c *Clique) verifySeal(snap *Snapshot, header *types.Header, parents []*types.Header) error {
// Verifying the genesis block is not supported // Verifying the genesis block is not supported
number := header.Number.Uint64() number := header.Number.Uint64()
if number == 0 { if number == 0 {
return errUnknownBlock return errUnknownBlock
} }
// Retrieve the snapshot needed to verify this header and cache it
snap, err := c.snapshot(chain, number-1, header.ParentHash, parents)
if err != nil {
return err
}
// Resolve the authorization key and check against signers // Resolve the authorization key and check against signers
signer, err := ecrecover(header, c.signatures) signer, err := ecrecover(header, c.signatures)
if err != nil { if err != nil {
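The clique refactor hoists the snapshot lookup out of `verifySeal`: `verifyCascadingFields` already holds the snapshot, so it is now passed in rather than re-derived from the chain inside the seal check. A toy sketch of the same pattern with stand-in types (not the real clique structures):

```go
package main

import "fmt"

// Snapshot is a stand-in for clique's authorization snapshot.
type Snapshot struct {
	Signers map[string]bool
}

// snapshotAt pretends to be the (comparatively expensive) chain lookup
// that the caller now performs exactly once.
func snapshotAt(number uint64) *Snapshot {
	return &Snapshot{Signers: map[string]bool{"0xsigner": true}}
}

// verifySeal no longer reaches back into the chain; it only checks the
// header's signer against the snapshot it was handed.
func verifySeal(snap *Snapshot, signer string) error {
	if !snap.Signers[signer] {
		return fmt.Errorf("unauthorized signer %s", signer)
	}
	return nil
}

func main() {
	snap := snapshotAt(41) // fetched once by the caller
	if err := verifySeal(snap, "0xsigner"); err != nil {
		fmt.Println("seal rejected:", err)
		return
	}
	fmt.Println("seal accepted")
}
```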

View File

@ -215,7 +215,7 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo
ancestors[parent] = ancestorHeader ancestors[parent] = ancestorHeader
// If the ancestor doesn't have any uncles, we don't have to iterate them // If the ancestor doesn't have any uncles, we don't have to iterate them
if ancestorHeader.UncleHash != types.EmptyUncleHash { if ancestorHeader.UncleHash != types.EmptyUncleHash {
// Need to add those uncles to the blacklist too // Need to add those uncles to the banned list too
ancestor := chain.GetBlock(parent, number) ancestor := chain.GetBlock(parent, number)
if ancestor == nil { if ancestor == nil {
break break
@ -330,8 +330,6 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uin
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int { func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
next := new(big.Int).Add(parent.Number, big1) next := new(big.Int).Add(parent.Number, big1)
switch { switch {
case config.IsCatalyst(next):
return big.NewInt(1)
case config.IsLondon(next): case config.IsLondon(next):
return calcDifficultyEip3554(time, parent) return calcDifficultyEip3554(time, parent)
case config.IsMuirGlacier(next): case config.IsMuirGlacier(next):
@ -639,10 +637,6 @@ var (
// reward. The total reward consists of the static block reward and rewards for // reward. The total reward consists of the static block reward and rewards for
// included uncles. The coinbase of each uncle block is also rewarded. // included uncles. The coinbase of each uncle block is also rewarded.
func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) { func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) {
// Skip block reward in catalyst mode
if config.IsCatalyst(header.Number) {
return
}
// Select the correct block reward based on chain progression // Select the correct block reward based on chain progression
blockReward := FrontierBlockReward blockReward := FrontierBlockReward
if config.IsByzantium(header.Number) { if config.IsByzantium(header.Number) {

Some files were not shown because too many files have changed in this diff.