Merge pull request #70 from openrelayxyz/merge/geth-v1.11.2

Merge/geth v1.11.2
This commit is contained in:
AusIV 2023-02-23 11:50:11 -06:00 committed by GitHub
commit 2c03e88687
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
741 changed files with 30417 additions and 19969 deletions

2
.github/CODEOWNERS vendored
View File

@ -5,7 +5,6 @@ accounts/usbwallet @karalabe
accounts/scwallet @gballet accounts/scwallet @gballet
accounts/abi @gballet @MariusVanDerWijden accounts/abi @gballet @MariusVanDerWijden
cmd/clef @holiman cmd/clef @holiman
cmd/puppeth @karalabe
consensus @karalabe consensus @karalabe
core/ @karalabe @holiman @rjl493456442 core/ @karalabe @holiman @rjl493456442
eth/ @karalabe @holiman @rjl493456442 eth/ @karalabe @holiman @rjl493456442
@ -14,7 +13,6 @@ eth/tracers/ @s1na
graphql/ @gballet @s1na graphql/ @gballet @s1na
les/ @zsfelfoldi @rjl493456442 les/ @zsfelfoldi @rjl493456442
light/ @zsfelfoldi @rjl493456442 light/ @zsfelfoldi @rjl493456442
mobile/ @karalabe @ligi
node/ @fjl node/ @fjl
p2p/ @fjl @zsfelfoldi p2p/ @fjl @zsfelfoldi
rpc/ @fjl @holiman rpc/ @fjl @holiman

View File

@ -12,7 +12,6 @@ run:
linters: linters:
disable-all: true disable-all: true
enable: enable:
- deadcode
- goconst - goconst
- goimports - goimports
- gosimple - gosimple
@ -20,14 +19,12 @@ linters:
- ineffassign - ineffassign
- misspell - misspell
- unconvert - unconvert
- varcheck
- typecheck - typecheck
- unused - unused
- staticcheck - staticcheck
- bidichk - bidichk
- durationcheck - durationcheck
- exportloopref - exportloopref
- gosec
- whitespace - whitespace
# - structcheck # lots of false positives # - structcheck # lots of false positives
@ -45,11 +42,6 @@ linters-settings:
goconst: goconst:
min-len: 3 # minimum length of string constant min-len: 3 # minimum length of string constant
min-occurrences: 6 # minimum number of occurrences min-occurrences: 6 # minimum number of occurrences
gosec:
excludes:
- G404 # Use of weak random number generator - lots of FP
- G107 # Potential http request -- those are intentional
- G306 # G306: Expect WriteFile permissions to be 0600 or less
issues: issues:
exclude-rules: exclude-rules:
@ -58,16 +50,15 @@ issues:
- deadcode - deadcode
- staticcheck - staticcheck
- path: internal/build/pgp.go - path: internal/build/pgp.go
text: 'SA1019: package golang.org/x/crypto/openpgp is deprecated' text: 'SA1019: "golang.org/x/crypto/openpgp" is deprecated: this package is unmaintained except for security fixes.'
- path: core/vm/contracts.go - path: core/vm/contracts.go
text: 'SA1019: package golang.org/x/crypto/ripemd160 is deprecated' text: 'SA1019: "golang.org/x/crypto/ripemd160" is deprecated: RIPEMD-160 is a legacy hash and should not be used for new applications.'
- path: accounts/usbwallet/trezor.go - path: accounts/usbwallet/trezor.go
text: 'SA1019: package github.com/golang/protobuf/proto is deprecated' text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
- path: accounts/usbwallet/trezor/ - path: accounts/usbwallet/trezor/
text: 'SA1019: package github.com/golang/protobuf/proto is deprecated' text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
exclude: exclude:
- 'SA1019: event.TypeMux is deprecated: use Feed' - 'SA1019: event.TypeMux is deprecated: use Feed'
- 'SA1019: strings.Title is deprecated' - 'SA1019: strings.Title is deprecated'
- 'SA1019: strings.Title has been deprecated since Go 1.18 and an alternative has been available since Go 1.0: The rule Title uses for word boundaries does not handle Unicode punctuation properly. Use golang.org/x/text/cases instead.' - 'SA1019: strings.Title has been deprecated since Go 1.18 and an alternative has been available since Go 1.0: The rule Title uses for word boundaries does not handle Unicode punctuation properly. Use golang.org/x/text/cases instead.'
- 'SA1029: should not use built-in type string as key for value' - 'SA1029: should not use built-in type string as key for value'
- 'G306: Expect WriteFile permissions to be 0600 or less'

View File

@ -8,15 +8,13 @@ jobs:
go: 1.17.x go: 1.17.x
env: env:
- azure-osx - azure-osx
- azure-ios
- cocoapods-ios
include: include:
# This builder only tests code linters on latest version of Go # This builder only tests code linters on latest version of Go
- stage: lint - stage: lint
os: linux os: linux
dist: bionic dist: bionic
go: 1.18.x go: 1.20.x
env: env:
- lint - lint
git: git:
@ -31,7 +29,7 @@ jobs:
os: linux os: linux
arch: amd64 arch: amd64
dist: bionic dist: bionic
go: 1.18.x go: 1.20.x
env: env:
- docker - docker
services: services:
@ -48,7 +46,7 @@ jobs:
os: linux os: linux
arch: arm64 arch: arm64
dist: bionic dist: bionic
go: 1.18.x go: 1.20.x
env: env:
- docker - docker
services: services:
@ -65,7 +63,7 @@ jobs:
if: type = push if: type = push
os: linux os: linux
dist: bionic dist: bionic
go: 1.18.x go: 1.20.x
env: env:
- ubuntu-ppa - ubuntu-ppa
- GO111MODULE=on - GO111MODULE=on
@ -90,7 +88,7 @@ jobs:
os: linux os: linux
dist: bionic dist: bionic
sudo: required sudo: required
go: 1.18.x go: 1.20.x
env: env:
- azure-linux - azure-linux
- GO111MODULE=on - GO111MODULE=on
@ -120,53 +118,13 @@ jobs:
- go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc - go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc
- go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
# This builder does the Android Maven and Azure uploads # This builder does the OSX Azure uploads
- stage: build
if: type = push
os: linux
dist: bionic
addons:
apt:
packages:
- openjdk-8-jdk
env:
- azure-android
- maven-android
- GO111MODULE=on
git:
submodules: false # avoid cloning ethereum/tests
before_install:
# Install Android and it's dependencies manually, Travis is stale
- export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
- curl https://dl.google.com/android/repository/commandlinetools-linux-6858069_latest.zip -o android.zip
- unzip -q android.zip -d $HOME/sdk && rm android.zip
- mv $HOME/sdk/cmdline-tools $HOME/sdk/latest && mkdir $HOME/sdk/cmdline-tools && mv $HOME/sdk/latest $HOME/sdk/cmdline-tools
- export PATH=$PATH:$HOME/sdk/cmdline-tools/latest/bin
- export ANDROID_HOME=$HOME/sdk
- yes | sdkmanager --licenses >/dev/null
- sdkmanager "platform-tools" "platforms;android-15" "platforms;android-19" "platforms;android-24" "ndk-bundle"
# Install Go to allow building with
- curl https://dl.google.com/go/go1.18.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
script:
# Build the Android archive and upload it to Maven Central and Azure
- mkdir -p $GOPATH/src/github.com/ethereum
- ln -s `pwd` $GOPATH/src/github.com/ethereum/go-ethereum
- go run build/ci.go aar -signer ANDROID_SIGNING_KEY -signify SIGNIFY_KEY -deploy https://oss.sonatype.org -upload gethstore/builds
# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
- stage: build - stage: build
if: type = push if: type = push
os: osx os: osx
go: 1.18.x go: 1.20.x
env: env:
- azure-osx - azure-osx
- azure-ios
- cocoapods-ios
- GO111MODULE=on - GO111MODULE=on
git: git:
submodules: false # avoid cloning ethereum/tests submodules: false # avoid cloning ethereum/tests
@ -174,27 +132,12 @@ jobs:
- go run build/ci.go install -dlgo - go run build/ci.go install -dlgo
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
# Build the iOS framework and upload it to CocoaPods and Azure
- gem uninstall cocoapods -a -x
- gem install cocoapods
- mv ~/.cocoapods/repos/master ~/.cocoapods/repos/master.bak
- sed -i '.bak' 's/repo.join/!repo.join/g' $(dirname `gem which cocoapods`)/cocoapods/sources_manager.rb
- if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then git clone --depth=1 https://github.com/CocoaPods/Specs.git ~/.cocoapods/repos/master && pod setup --verbose; fi
- xctool -version
- xcrun simctl list
# Workaround for https://github.com/golang/go/issues/23749
- export CGO_CFLAGS_ALLOW='-fmodules|-fblocks|-fobjc-arc'
- go run build/ci.go xcode -signer IOS_SIGNING_KEY -signify SIGNIFY_KEY -deploy trunk -upload gethstore/builds
# These builders run the tests # These builders run the tests
- stage: build - stage: build
os: linux os: linux
arch: amd64 arch: amd64
dist: bionic dist: bionic
go: 1.18.x go: 1.20.x
env: env:
- GO111MODULE=on - GO111MODULE=on
script: script:
@ -205,7 +148,7 @@ jobs:
os: linux os: linux
arch: arm64 arch: arm64
dist: bionic dist: bionic
go: 1.18.x go: 1.19.x
env: env:
- GO111MODULE=on - GO111MODULE=on
script: script:
@ -214,7 +157,7 @@ jobs:
- stage: build - stage: build
os: linux os: linux
dist: bionic dist: bionic
go: 1.17.x go: 1.19.x
env: env:
- GO111MODULE=on - GO111MODULE=on
script: script:
@ -225,7 +168,7 @@ jobs:
if: type = cron if: type = cron
os: linux os: linux
dist: bionic dist: bionic
go: 1.18.x go: 1.20.x
env: env:
- azure-purge - azure-purge
- GO111MODULE=on - GO111MODULE=on
@ -239,7 +182,7 @@ jobs:
if: type = cron if: type = cron
os: linux os: linux
dist: bionic dist: bionic
go: 1.18.x go: 1.20.x
env: env:
- GO111MODULE=on - GO111MODULE=on
script: script:

View File

@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM="" ARG BUILDNUM=""
# Build Geth in a stock Go builder container # Build Geth in a stock Go builder container
FROM golang:1.18-alpine as builder FROM golang:1.20-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git RUN apk add --no-cache gcc musl-dev linux-headers git

View File

@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM="" ARG BUILDNUM=""
# Build Geth in a stock Go builder container # Build Geth in a stock Go builder container
FROM golang:1.18-alpine as builder FROM golang:1.20-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git RUN apk add --no-cache gcc musl-dev linux-headers git

View File

@ -16,18 +16,6 @@ geth:
all: all:
$(GORUN) build/ci.go install $(GORUN) build/ci.go install
android:
$(GORUN) build/ci.go aar --local
@echo "Done building."
@echo "Import \"$(GOBIN)/geth.aar\" to use the library."
@echo "Import \"$(GOBIN)/geth-sources.jar\" to add javadocs"
@echo "For more info see https://stackoverflow.com/questions/20994336/android-studio-how-to-attach-javadoc"
ios:
$(GORUN) build/ci.go xcode --local
@echo "Done building."
@echo "Import \"$(GOBIN)/Geth.framework\" to use the library."
test: all test: all
$(GORUN) build/ci.go test $(GORUN) build/ci.go test

View File

@ -1,6 +1,6 @@
## Go Ethereum ## Go Ethereum
Official Golang implementation of the Ethereum protocol. Official Golang execution layer implementation of the Ethereum protocol.
[![API Reference]( [![API Reference](
https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
@ -14,9 +14,9 @@ archives are published at https://geth.ethereum.org/downloads/.
## Building the source ## Building the source
For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/install-and-build/installing-geth). For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/getting-started/installing-geth).
Building `geth` requires both a Go (version 1.16 or later) and a C compiler. You can install Building `geth` requires both a Go (version 1.18 or later) and a C compiler. You can install
them using your favourite package manager. Once the dependencies are installed, run them using your favourite package manager. Once the dependencies are installed, run
```shell ```shell
@ -34,16 +34,15 @@ make all
The go-ethereum project comes with several wrappers/executables found in the `cmd` The go-ethereum project comes with several wrappers/executables found in the `cmd`
directory. directory.
| Command | Description | | Command | Description |
| :-----------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | :--------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options. | | **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options. |
| `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. | | `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. |
| `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. | | `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. | | `abigen` | Source code generator to convert Ethereum contract definitions into easy-to-use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. | | `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). | | `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | | `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |
## Running `geth` ## Running `geth`
@ -65,14 +64,14 @@ Recommended:
* Fast CPU with 4+ cores * Fast CPU with 4+ cores
* 16GB+ RAM * 16GB+ RAM
* High Performance SSD with at least 1TB free space * High-performance SSD with at least 1TB of free space
* 25+ MBit/sec download Internet service * 25+ MBit/sec download Internet service
### Full node on the main Ethereum network ### Full node on the main Ethereum network
By far the most common scenario is people wanting to simply interact with the Ethereum By far the most common scenario is people wanting to simply interact with the Ethereum
network: create accounts; transfer funds; deploy and interact with contracts. For this network: create accounts; transfer funds; deploy and interact with contracts. For this
particular use-case the user doesn't care about years-old historical data, so we can particular use case, the user doesn't care about years-old historical data, so we can
sync quickly to the current state of the network. To do so: sync quickly to the current state of the network. To do so:
```shell ```shell
@ -83,11 +82,11 @@ This command will:
* Start `geth` in snap sync mode (default, can be changed with the `--syncmode` flag), * Start `geth` in snap sync mode (default, can be changed with the `--syncmode` flag),
causing it to download more data in exchange for avoiding processing the entire history causing it to download more data in exchange for avoiding processing the entire history
of the Ethereum network, which is very CPU intensive. of the Ethereum network, which is very CPU intensive.
* Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console), * Start the built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console),
(via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://github.com/ChainSafe/web3.js/blob/0.20.7/DOCUMENTATION.md) (via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://github.com/ChainSafe/web3.js/blob/0.20.7/DOCUMENTATION.md)
(note: the `web3` version bundled within `geth` is very old, and not up to date with official docs), (note: the `web3` version bundled within `geth` is very old, and not up to date with official docs),
as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server). as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server).
This tool is optional and if you leave it out you can always attach to an already running This tool is optional and if you leave it out you can always attach it to an already running
`geth` instance with `geth attach`. `geth` instance with `geth attach`.
### A Full node on the Görli test network ### A Full node on the Görli test network
@ -102,12 +101,12 @@ the main network, but with play-Ether only.
$ geth --goerli console $ geth --goerli console
``` ```
The `console` subcommand has the exact same meaning as above and they are equally The `console` subcommand has the same meaning as above and is equally
useful on the testnet too. Please, see above for their explanations if you've skipped here. useful on the testnet too.
Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit: Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit:
* Instead of connecting the main Ethereum network, the client will connect to the Görli * Instead of connecting to the main Ethereum network, the client will connect to the Görli
test network, which uses different P2P bootnodes, different network IDs and genesis test network, which uses different P2P bootnodes, different network IDs and genesis
states. states.
* Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth` * Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth`
@ -118,9 +117,9 @@ Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a
`geth attach <datadir>/goerli/geth.ipc`. Windows users are not affected by `geth attach <datadir>/goerli/geth.ipc`. Windows users are not affected by
this. this.
*Note: Although there are some internal protective measures to prevent transactions from *Note: Although some internal protective measures prevent transactions from
crossing over between the main network and test network, you should make sure to always crossing over between the main network and test network, you should always
use separate accounts for play-money and real-money. Unless you manually move use separate accounts for play and real money. Unless you manually move
accounts, `geth` will by default correctly separate the two networks and will not make any accounts, `geth` will by default correctly separate the two networks and will not make any
accounts available between them.* accounts available between them.*
@ -133,19 +132,6 @@ called [*Rinkeby*](https://www.rinkeby.io) which is operated by members of the c
$ geth --rinkeby console $ geth --rinkeby console
``` ```
### Full node on the Ropsten test network
In addition to Görli and Rinkeby, Geth also supports the ancient Ropsten testnet. The
Ropsten test network is based on the Ethash proof-of-work consensus algorithm. As such,
it has certain extra overhead and is more susceptible to reorganization attacks due to the
network's low difficulty/security.
```shell
$ geth --ropsten console
```
*Note: Older Geth configurations store the Ropsten database in the `testnet` subdirectory.*
### Configuration ### Configuration
As an alternative to passing the numerous flags to the `geth` binary, you can also pass a As an alternative to passing the numerous flags to the `geth` binary, you can also pass a
@ -155,7 +141,7 @@ configuration file via:
$ geth --config /path/to/your_config.toml $ geth --config /path/to/your_config.toml
``` ```
To get an idea how the file should look like you can use the `dumpconfig` subcommand to To get an idea of how the file should look like you can use the `dumpconfig` subcommand to
export your existing configuration: export your existing configuration:
```shell ```shell
@ -175,7 +161,7 @@ docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \
ethereum/client-go ethereum/client-go
``` ```
This will start `geth` in snap-sync mode with a DB memory allowance of 1GB just as the This will start `geth` in snap-sync mode with a DB memory allowance of 1GB, as the
above command does. It will also create a persistent volume in your home directory for above command does. It will also create a persistent volume in your home directory for
saving your blockchain as well as map the default ports. There is also an `alpine` tag saving your blockchain as well as map the default ports. There is also an `alpine` tag
available for a slim version of the image. available for a slim version of the image.
@ -209,7 +195,7 @@ HTTP based JSON-RPC API options:
* `--ws.addr` WS-RPC server listening interface (default: `localhost`) * `--ws.addr` WS-RPC server listening interface (default: `localhost`)
* `--ws.port` WS-RPC server listening port (default: `8546`) * `--ws.port` WS-RPC server listening port (default: `8546`)
* `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`) * `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`)
* `--ws.origins` Origins from which to accept websockets requests * `--ws.origins` Origins from which to accept WebSocket requests
* `--ipcdisable` Disable the IPC-RPC server * `--ipcdisable` Disable the IPC-RPC server
* `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,txpool,web3`) * `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,txpool,web3`)
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it) * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
@ -322,12 +308,8 @@ also need to configure a miner to process transactions and create new blocks for
#### Running a private miner #### Running a private miner
Mining on the public Ethereum network is a complex task as it's only feasible using GPUs,
requiring an OpenCL or CUDA enabled `ethminer` instance. For information on such a
setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/)
and the [ethminer](https://github.com/ethereum-mining/ethminer) repository.
In a private network setting, however a single CPU miner instance is more than enough for In a private network setting a single CPU miner instance is more than enough for
practical purposes as it can produce a stable stream of blocks at the correct intervals practical purposes as it can produce a stable stream of blocks at the correct intervals
without needing heavy resources (consider running on a single thread, no need for multiple without needing heavy resources (consider running on a single thread, no need for multiple
ones either). To start a `geth` instance for mining, run it with all your usual flags, extended ones either). To start a `geth` instance for mining, run it with all your usual flags, extended
@ -344,7 +326,7 @@ transactions are accepted at (`--miner.gasprice`).
## Contribution ## Contribution
Thank you for considering to help out with the source code! We welcome contributions Thank you for considering helping out with the source code! We welcome contributions
from anyone on the internet, and are grateful for even the smallest of fixes! from anyone on the internet, and are grateful for even the smallest of fixes!
If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request
@ -364,16 +346,22 @@ Please make sure your contributions adhere to our coding guidelines:
* Commit messages should be prefixed with the package(s) they modify. * Commit messages should be prefixed with the package(s) they modify.
* E.g. "eth, rpc: make trace configs optional" * E.g. "eth, rpc: make trace configs optional"
Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/devguide) Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/geth-developer/dev-guide)
for more details on configuring your environment, managing project dependencies, and for more details on configuring your environment, managing project dependencies, and
testing procedures. testing procedures.
### Contributing to geth.ethereum.org
For contributions to the [go-ethereum website](https://geth.ethereum.org), please checkout and raise pull requests against the `website` branch.
For more detailed instructions please see the `website` branch [README](https://github.com/ethereum/go-ethereum/tree/website#readme) or the
[contributing](https://geth.ethereum.org/docs/developers/geth-developer/contributing) page of the website.
## License ## License
The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the
[GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html), [GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html),
also included in our repository in the `COPYING.LESSER` file. also included in our repository in the `COPYING.LESSER` file.
The go-ethereum binaries (i.e. all code inside of the `cmd` directory) is licensed under the The go-ethereum binaries (i.e. all code inside of the `cmd` directory) are licensed under the
[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also [GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also
included in our repository in the `COPYING` file. included in our repository in the `COPYING` file.

View File

@ -87,7 +87,7 @@ func (abi ABI) getArguments(name string, data []byte) (Arguments, error) {
var args Arguments var args Arguments
if method, ok := abi.Methods[name]; ok { if method, ok := abi.Methods[name]; ok {
if len(data)%32 != 0 { if len(data)%32 != 0 {
return nil, fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(data), data) return nil, fmt.Errorf("abi: improperly formatted output: %q - Bytes: %+v", data, data)
} }
args = method.Outputs args = method.Outputs
} }

View File

@ -165,8 +165,9 @@ func TestInvalidABI(t *testing.T) {
// TestConstructor tests a constructor function. // TestConstructor tests a constructor function.
// The test is based on the following contract: // The test is based on the following contract:
// contract TestConstructor { //
// constructor(uint256 a, uint256 b) public{} // contract TestConstructor {
// constructor(uint256 a, uint256 b) public{}
// } // }
func TestConstructor(t *testing.T) { func TestConstructor(t *testing.T) {
json := `[{ "inputs": [{"internalType": "uint256","name": "a","type": "uint256" },{ "internalType": "uint256","name": "b","type": "uint256"}],"stateMutability": "nonpayable","type": "constructor"}]` json := `[{ "inputs": [{"internalType": "uint256","name": "a","type": "uint256" },{ "internalType": "uint256","name": "b","type": "uint256"}],"stateMutability": "nonpayable","type": "constructor"}]`
@ -724,16 +725,19 @@ func TestBareEvents(t *testing.T) {
} }
// TestUnpackEvent is based on this contract: // TestUnpackEvent is based on this contract:
// contract T { //
// event received(address sender, uint amount, bytes memo); // contract T {
// event receivedAddr(address sender); // event received(address sender, uint amount, bytes memo);
// function receive(bytes memo) external payable { // event receivedAddr(address sender);
// received(msg.sender, msg.value, memo); // function receive(bytes memo) external payable {
// receivedAddr(msg.sender); // received(msg.sender, msg.value, memo);
// } // receivedAddr(msg.sender);
// } // }
// }
//
// When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt: // When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]} //
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
func TestUnpackEvent(t *testing.T) { func TestUnpackEvent(t *testing.T) {
const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]` const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]`
abi, err := JSON(strings.NewReader(abiJSON)) abi, err := JSON(strings.NewReader(abiJSON))
@ -1078,8 +1082,9 @@ func TestDoubleDuplicateMethodNames(t *testing.T) {
// TestDoubleDuplicateEventNames checks that if send0 already exists, there won't be a name // TestDoubleDuplicateEventNames checks that if send0 already exists, there won't be a name
// conflict and that the second send event will be renamed send1. // conflict and that the second send event will be renamed send1.
// The test runs the abi of the following contract. // The test runs the abi of the following contract.
// contract DuplicateEvent { //
// event send(uint256 a); // contract DuplicateEvent {
// event send(uint256 a);
// event send0(); // event send0();
// event send(); // event send();
// } // }
@ -1106,7 +1111,8 @@ func TestDoubleDuplicateEventNames(t *testing.T) {
// TestUnnamedEventParam checks that an event with unnamed parameters is // TestUnnamedEventParam checks that an event with unnamed parameters is
// correctly handled. // correctly handled.
// The test runs the abi of the following contract. // The test runs the abi of the following contract.
// contract TestEvent { //
// contract TestEvent {
// event send(uint256, uint256); // event send(uint256, uint256);
// } // }
func TestUnnamedEventParam(t *testing.T) { func TestUnnamedEventParam(t *testing.T) {

View File

@ -187,6 +187,9 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
virtualArgs := 0 virtualArgs := 0
for index, arg := range nonIndexedArgs { for index, arg := range nonIndexedArgs {
marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data) marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
if err != nil {
return nil, err
}
if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) { if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) {
// If we have a static array, like [3]uint256, these are coded as // If we have a static array, like [3]uint256, these are coded as
// just like uint256,uint256,uint256. // just like uint256,uint256,uint256.
@ -204,9 +207,6 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
// coded as just like uint256,bool,uint256 // coded as just like uint256,bool,uint256
virtualArgs += getTypeSize(arg.Type)/32 - 1 virtualArgs += getTypeSize(arg.Type)/32 - 1
} }
if err != nil {
return nil, err
}
retval = append(retval, marshalledValue) retval = append(retval, marshalledValue)
} }
return retval, nil return retval, nil

View File

@ -78,9 +78,12 @@ type SimulatedBackend struct {
// and uses a simulated blockchain for testing purposes. // and uses a simulated blockchain for testing purposes.
// A simulated backend always uses chainID 1337. // A simulated backend always uses chainID 1337.
func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend { func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc} genesis := core.Genesis{
genesis.MustCommit(database) Config: params.AllEthashProtocolChanges,
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil, nil) GasLimit: gasLimit,
Alloc: alloc,
}
blockchain, _ := core.NewBlockChain(database, nil, &genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
backend := &SimulatedBackend{ backend := &SimulatedBackend{
database: database, database: database,
@ -524,7 +527,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
available := new(big.Int).Set(balance) available := new(big.Int).Set(balance)
if call.Value != nil { if call.Value != nil {
if call.Value.Cmp(available) >= 0 { if call.Value.Cmp(available) >= 0 {
return 0, errors.New("insufficient funds for transfer") return 0, core.ErrInsufficientFundsForTransfer
} }
available.Sub(available, call.Value) available.Sub(available, call.Value)
} }
@ -790,8 +793,13 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
if len(b.pendingBlock.Transactions()) != 0 { if len(b.pendingBlock.Transactions()) != 0 {
return errors.New("Could not adjust time on non-empty block") return errors.New("Could not adjust time on non-empty block")
} }
// Get the last block
block := b.blockchain.GetBlockByHash(b.pendingBlock.ParentHash())
if block == nil {
return fmt.Errorf("could not find parent")
}
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) { blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
block.OffsetTime(int64(adjustment.Seconds())) block.OffsetTime(int64(adjustment.Seconds()))
}) })
stateDB, _ := b.blockchain.State() stateDB, _ := b.blockchain.State()
@ -836,17 +844,41 @@ func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db }
func (fb *filterBackend) EventMux() *event.TypeMux { panic("not supported") } func (fb *filterBackend) EventMux() *event.TypeMux { panic("not supported") }
func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumber) (*types.Header, error) { func (fb *filterBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) {
if block == rpc.LatestBlockNumber { switch number {
case rpc.PendingBlockNumber:
if block := fb.backend.pendingBlock; block != nil {
return block.Header(), nil
}
return nil, nil
case rpc.LatestBlockNumber:
return fb.bc.CurrentHeader(), nil return fb.bc.CurrentHeader(), nil
case rpc.FinalizedBlockNumber:
if block := fb.bc.CurrentFinalizedBlock(); block != nil {
return block.Header(), nil
}
return nil, errors.New("finalized block not found")
case rpc.SafeBlockNumber:
if block := fb.bc.CurrentSafeBlock(); block != nil {
return block.Header(), nil
}
return nil, errors.New("safe block not found")
default:
return fb.bc.GetHeaderByNumber(uint64(number.Int64())), nil
} }
return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
} }
func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
return fb.bc.GetHeaderByHash(hash), nil return fb.bc.GetHeaderByHash(hash), nil
} }
func (fb *filterBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
if body := fb.bc.GetBody(hash); body != nil {
return body, nil
}
return nil, errors.New("block body not found")
}
func (fb *filterBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { func (fb *filterBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
return fb.backend.pendingBlock, fb.backend.pendingReceipts return fb.backend.pendingBlock, fb.backend.pendingReceipts
} }
@ -890,6 +922,14 @@ func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.Matche
panic("not supported") panic("not supported")
} }
func (fb *filterBackend) ChainConfig() *params.ChainConfig {
panic("not supported")
}
func (fb *filterBackend) CurrentHeader() *types.Header {
panic("not supported")
}
func nullSubscription() event.Subscription { func nullSubscription() event.Subscription {
return event.NewSubscription(func(quit <-chan struct{}) error { return event.NewSubscription(func(quit <-chan struct{}) error {
<-quit <-quit

View File

@ -93,17 +93,18 @@ func TestSimulatedBackend(t *testing.T) {
var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
// the following is based on this contract: // the following is based on this contract:
// contract T {
// event received(address sender, uint amount, bytes memo);
// event receivedAddr(address sender);
// //
// function receive(bytes calldata memo) external payable returns (string memory res) { // contract T {
// emit received(msg.sender, msg.value, memo); // event received(address sender, uint amount, bytes memo);
// emit receivedAddr(msg.sender); // event receivedAddr(address sender);
// return "hello world"; //
// } // function receive(bytes calldata memo) external payable returns (string memory res) {
// } // emit received(msg.sender, msg.value, memo);
// emit receivedAddr(msg.sender);
// return "hello world";
// }
// }
const abiJSON = `[ { "constant": false, "inputs": [ { "name": "memo", "type": "bytes" } ], "name": "receive", "outputs": [ { "name": "res", "type": "string" } ], "payable": true, "stateMutability": "payable", "type": "function" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" }, { "indexed": false, "name": "amount", "type": "uint256" }, { "indexed": false, "name": "memo", "type": "bytes" } ], "name": "received", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" } ], "name": "receivedAddr", "type": "event" } ]` const abiJSON = `[ { "constant": false, "inputs": [ { "name": "memo", "type": "bytes" } ], "name": "receive", "outputs": [ { "name": "res", "type": "string" } ], "payable": true, "stateMutability": "payable", "type": "function" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" }, { "indexed": false, "name": "amount", "type": "uint256" }, { "indexed": false, "name": "memo", "type": "bytes" } ], "name": "received", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" } ], "name": "receivedAddr", "type": "event" } ]`
const abiBin = `0x608060405234801561001057600080fd5b506102a0806100206000396000f3fe60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029` const abiBin = `0x608060405234801561001057600080fd5b506102a0806100206000396000f3fe60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029`
const deployedCode = `60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029` const deployedCode = `60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029`
@ -417,12 +418,13 @@ func TestEstimateGas(t *testing.T) {
/* /*
pragma solidity ^0.6.4; pragma solidity ^0.6.4;
contract GasEstimation { contract GasEstimation {
function PureRevert() public { revert(); } function PureRevert() public { revert(); }
function Revert() public { revert("revert reason");} function Revert() public { revert("revert reason");}
function OOG() public { for (uint i = 0; ; i++) {}} function OOG() public { for (uint i = 0; ; i++) {}}
function Assert() public { assert(false);} function Assert() public { assert(false);}
function Valid() public {} function Valid() public {}
}*/ }
*/
const contractAbi = "[{\"inputs\":[],\"name\":\"Assert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"OOG\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PureRevert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Revert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Valid\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" const contractAbi = "[{\"inputs\":[],\"name\":\"Assert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"OOG\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PureRevert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Revert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Valid\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"
const contractBin = "0x60806040523480156100115760006000fd5b50610017565b61016e806100266000396000f3fe60806040523480156100115760006000fd5b506004361061005c5760003560e01c806350f6fe3414610062578063aa8b1d301461006c578063b9b046f914610076578063d8b9839114610080578063e09fface1461008a5761005c565b60006000fd5b61006a610094565b005b6100746100ad565b005b61007e6100b5565b005b6100886100c2565b005b610092610135565b005b6000600090505b5b808060010191505061009b565b505b565b60006000fd5b565b600015156100bf57fe5b5b565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600d8152602001807f72657665727420726561736f6e0000000000000000000000000000000000000081526020015060200191505060405180910390fd5b565b5b56fea2646970667358221220345bbcbb1a5ecf22b53a78eaebf95f8ee0eceff6d10d4b9643495084d2ec934a64736f6c63430006040033" const contractBin = "0x60806040523480156100115760006000fd5b50610017565b61016e806100266000396000f3fe60806040523480156100115760006000fd5b506004361061005c5760003560e01c806350f6fe3414610062578063aa8b1d301461006c578063b9b046f914610076578063d8b9839114610080578063e09fface1461008a5761005c565b60006000fd5b61006a610094565b005b6100746100ad565b005b61007e6100b5565b005b6100886100c2565b005b610092610135565b005b6000600090505b5b808060010191505061009b565b505b565b60006000fd5b565b600015156100bf57fe5b5b565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600d8152602001807f72657665727420726561736f6e0000000000000000000000000000000000000081526020015060200191505060405180910390fd5b565b5b56fea2646970667358221220345bbcbb1a5ecf22b53a78eaebf95f8ee0eceff6d10d4b9643495084d2ec934a64736f6c63430006040033"
@ -994,7 +996,8 @@ func TestCodeAt(t *testing.T) {
} }
// When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt: // When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]} //
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
func TestPendingAndCallContract(t *testing.T) { func TestPendingAndCallContract(t *testing.T) {
testAddr := crypto.PubkeyToAddress(testKey.PublicKey) testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
sim := simTestBackend(testAddr) sim := simTestBackend(testAddr)
@ -1057,27 +1060,27 @@ func TestPendingAndCallContract(t *testing.T) {
// This test is based on the following contract: // This test is based on the following contract:
/* /*
contract Reverter { contract Reverter {
function revertString() public pure{ function revertString() public pure{
require(false, "some error"); require(false, "some error");
} }
function revertNoString() public pure { function revertNoString() public pure {
require(false, ""); require(false, "");
} }
function revertASM() public pure { function revertASM() public pure {
assembly { assembly {
revert(0x0, 0x0) revert(0x0, 0x0)
} }
} }
function noRevert() public pure { function noRevert() public pure {
assembly { assembly {
// Assembles something that looks like require(false, "some error") but is not reverted // Assembles something that looks like require(false, "some error") but is not reverted
mstore(0x0, 0x08c379a000000000000000000000000000000000000000000000000000000000) mstore(0x0, 0x08c379a000000000000000000000000000000000000000000000000000000000)
mstore(0x4, 0x0000000000000000000000000000000000000000000000000000000000000020) mstore(0x4, 0x0000000000000000000000000000000000000000000000000000000000000020)
mstore(0x24, 0x000000000000000000000000000000000000000000000000000000000000000a) mstore(0x24, 0x000000000000000000000000000000000000000000000000000000000000000a)
mstore(0x44, 0x736f6d65206572726f7200000000000000000000000000000000000000000000) mstore(0x44, 0x736f6d65206572726f7200000000000000000000000000000000000000000000)
return(0x0, 0x64) return(0x0, 0x64)
} }
} }
}*/ }*/
func TestCallContractRevert(t *testing.T) { func TestCallContractRevert(t *testing.T) {
testAddr := crypto.PubkeyToAddress(testKey.PublicKey) testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
@ -1204,11 +1207,11 @@ func TestFork(t *testing.T) {
/* /*
Example contract to test event emission: Example contract to test event emission:
pragma solidity >=0.7.0 <0.9.0; pragma solidity >=0.7.0 <0.9.0;
contract Callable { contract Callable {
event Called(); event Called();
function Call() public { emit Called(); } function Call() public { emit Called(); }
} }
*/ */
const callableAbi = "[{\"anonymous\":false,\"inputs\":[],\"name\":\"Called\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"Call\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" const callableAbi = "[{\"anonymous\":false,\"inputs\":[],\"name\":\"Called\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"Call\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"
@ -1226,7 +1229,7 @@ const callableBin = "6080604052348015600f57600080fd5b5060998061001e6000396000f3f
// 7. Mine two blocks to trigger a reorg. // 7. Mine two blocks to trigger a reorg.
// 8. Check that the event was removed. // 8. Check that the event was removed.
// 9. Re-send the transaction and mine a block. // 9. Re-send the transaction and mine a block.
// 10. Check that the event was reborn. // 10. Check that the event was reborn.
func TestForkLogsReborn(t *testing.T) { func TestForkLogsReborn(t *testing.T) {
testAddr := crypto.PubkeyToAddress(testKey.PublicKey) testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
sim := simTestBackend(testAddr) sim := simTestBackend(testAddr)
@ -1374,3 +1377,23 @@ func TestCommitReturnValue(t *testing.T) {
t.Error("Could not retrieve the just created block (side-chain)") t.Error("Could not retrieve the just created block (side-chain)")
} }
} }
// TestAdjustTimeAfterFork ensures that after a fork, AdjustTime uses the pending fork
// block's parent rather than the canonical head's parent.
func TestAdjustTimeAfterFork(t *testing.T) {
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
sim := simTestBackend(testAddr)
defer sim.Close()
sim.Commit() // h1
h1 := sim.blockchain.CurrentHeader().Hash()
sim.Commit() // h2
sim.Fork(context.Background(), h1)
sim.AdjustTime(1 * time.Second)
sim.Commit()
head := sim.blockchain.CurrentHeader()
if head.Number.Cmp(common.Big2) == 0 && head.ParentHash != h1 {
t.Errorf("failed to build block on fork")
}
}

View File

@ -32,6 +32,8 @@ import (
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
) )
const basefeeWiggleMultiplier = 2
// SignerFn is a signer function callback when a contract requires a method to // SignerFn is a signer function callback when a contract requires a method to
// sign the transaction before submission. // sign the transaction before submission.
type SignerFn func(common.Address, *types.Transaction) (*types.Transaction, error) type SignerFn func(common.Address, *types.Transaction) (*types.Transaction, error)
@ -254,7 +256,7 @@ func (c *BoundContract) createDynamicTx(opts *TransactOpts, contract *common.Add
if gasFeeCap == nil { if gasFeeCap == nil {
gasFeeCap = new(big.Int).Add( gasFeeCap = new(big.Int).Add(
gasTipCap, gasTipCap,
new(big.Int).Mul(head.BaseFee, big.NewInt(2)), new(big.Int).Mul(head.BaseFee, big.NewInt(basefeeWiggleMultiplier)),
) )
} }
if gasFeeCap.Cmp(gasTipCap) < 0 { if gasFeeCap.Cmp(gasTipCap) < 0 {
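For context on the hunk above: when TransactOpts.GasFeeCap is left nil, the bound contract now derives the cap as the suggested tip plus basefeeWiggleMultiplier (2) times the latest base fee. A rough caller-side sketch of the same default, not part of this diff; "client" is assumed to be an *ethclient.Client and "ctx" a context.Context:
// Sketch: the fee-cap default createDynamicTx applies when GasFeeCap is nil.
func defaultFeeCaps(ctx context.Context, client *ethclient.Client) (tip, feeCap *big.Int, err error) {
	tip, err = client.SuggestGasTipCap(ctx)
	if err != nil {
		return nil, nil, err
	}
	head, err := client.HeaderByNumber(ctx, nil) // latest header; BaseFee is nil pre-London
	if err != nil {
		return nil, nil, err
	}
	// The multiplier of 2 leaves headroom for the base fee to rise before inclusion.
	feeCap = new(big.Int).Add(tip, new(big.Int).Mul(head.BaseFee, big.NewInt(2)))
	return tip, feeCap, nil
}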
@ -371,6 +373,8 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
) )
if opts.GasPrice != nil { if opts.GasPrice != nil {
rawTx, err = c.createLegacyTx(opts, contract, input) rawTx, err = c.createLegacyTx(opts, contract, input)
} else if opts.GasFeeCap != nil && opts.GasTipCap != nil {
rawTx, err = c.createDynamicTx(opts, contract, input, nil)
} else { } else {
// Only query for basefee if gasPrice not specified // Only query for basefee if gasPrice not specified
if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); errHead != nil { if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); errHead != nil {

View File

@ -22,7 +22,6 @@ package bind
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"go/format" "go/format"
"regexp" "regexp"
@ -39,8 +38,6 @@ type Lang int
const ( const (
LangGo Lang = iota LangGo Lang = iota
LangJava
LangObjC
) )
func isKeyWord(arg string) bool { func isKeyWord(arg string) bool {
@ -221,11 +218,6 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
if evmABI.HasReceive() { if evmABI.HasReceive() {
receive = &tmplMethod{Original: evmABI.Receive} receive = &tmplMethod{Original: evmABI.Receive}
} }
// There is no easy way to pass arbitrary java objects to the Go side.
if len(structs) > 0 && lang == LangJava {
return "", errors.New("java binding for tuple arguments is not supported yet")
}
contracts[types[i]] = &tmplContract{ contracts[types[i]] = &tmplContract{
Type: capitalise(types[i]), Type: capitalise(types[i]),
InputABI: strings.ReplaceAll(strippedABI, "\"", "\\\""), InputABI: strings.ReplaceAll(strippedABI, "\"", "\\\""),
@ -298,8 +290,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
// bindType is a set of type binders that convert Solidity types to some supported // bindType is a set of type binders that convert Solidity types to some supported
// programming language types. // programming language types.
var bindType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{ var bindType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{
LangGo: bindTypeGo, LangGo: bindTypeGo,
LangJava: bindTypeJava,
} }
// bindBasicTypeGo converts basic solidity types(except array, slice and tuple) to Go ones. // bindBasicTypeGo converts basic solidity types(except array, slice and tuple) to Go ones.
@ -342,86 +333,10 @@ func bindTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
} }
} }
// bindBasicTypeJava converts basic solidity types(except array, slice and tuple) to Java ones.
func bindBasicTypeJava(kind abi.Type) string {
switch kind.T {
case abi.AddressTy:
return "Address"
case abi.IntTy, abi.UintTy:
// Note that uint and int (without digits) are also matched,
// these are size 256, and will translate to BigInt (the default).
parts := regexp.MustCompile(`(u)?int([0-9]*)`).FindStringSubmatch(kind.String())
if len(parts) != 3 {
return kind.String()
}
// All unsigned integers should be translated to BigInt since gomobile doesn't
// support them.
if parts[1] == "u" {
return "BigInt"
}
namedSize := map[string]string{
"8": "byte",
"16": "short",
"32": "int",
"64": "long",
}[parts[2]]
// default to BigInt
if namedSize == "" {
namedSize = "BigInt"
}
return namedSize
case abi.FixedBytesTy, abi.BytesTy:
return "byte[]"
case abi.BoolTy:
return "boolean"
case abi.StringTy:
return "String"
case abi.FunctionTy:
return "byte[24]"
default:
return kind.String()
}
}
// pluralizeJavaType explicitly converts multidimensional types to predefined
// types in go side.
func pluralizeJavaType(typ string) string {
switch typ {
case "boolean":
return "Bools"
case "String":
return "Strings"
case "Address":
return "Addresses"
case "byte[]":
return "Binaries"
case "BigInt":
return "BigInts"
}
return typ + "[]"
}
// bindTypeJava converts a Solidity type to a Java one. Since there is no clear mapping
// from all Solidity types to Java ones (e.g. uint17), those that cannot be exactly
// mapped will use an upscaled type (e.g. BigDecimal).
func bindTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
switch kind.T {
case abi.TupleTy:
return structs[kind.TupleRawName+kind.String()].Name
case abi.ArrayTy, abi.SliceTy:
return pluralizeJavaType(bindTypeJava(*kind.Elem, structs))
default:
return bindBasicTypeJava(kind)
}
}
// bindTopicType is a set of type binders that convert Solidity types to some // bindTopicType is a set of type binders that convert Solidity types to some
// supported programming language topic types. // supported programming language topic types.
var bindTopicType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{ var bindTopicType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{
LangGo: bindTopicTypeGo, LangGo: bindTopicTypeGo,
LangJava: bindTopicTypeJava,
} }
// bindTopicTypeGo converts a Solidity topic type to a Go one. It is almost the same // bindTopicTypeGo converts a Solidity topic type to a Go one. It is almost the same
@ -441,28 +356,10 @@ func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
return bound return bound
} }
// bindTopicTypeJava converts a Solidity topic type to a Java one. It is almost the same
// functionality as for simple types, but dynamic types get converted to hashes.
func bindTopicTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
bound := bindTypeJava(kind, structs)
// todo(rjl493456442) according solidity documentation, indexed event
// parameters that are not value types i.e. arrays and structs are not
// stored directly but instead a keccak256-hash of an encoding is stored.
//
// We only convert strings and bytes to hash, still need to deal with
// array(both fixed-size and dynamic-size) and struct.
if bound == "String" || bound == "byte[]" {
bound = "Hash"
}
return bound
}
// bindStructType is a set of type binders that convert Solidity tuple types to some supported // bindStructType is a set of type binders that convert Solidity tuple types to some supported
// programming language struct definition. // programming language struct definition.
var bindStructType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{ var bindStructType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{
LangGo: bindStructTypeGo, LangGo: bindStructTypeGo,
LangJava: bindStructTypeJava,
} }
// bindStructTypeGo converts a Solidity tuple type to a Go one and records the mapping // bindStructTypeGo converts a Solidity tuple type to a Go one and records the mapping
@ -511,74 +408,10 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
} }
} }
// bindStructTypeJava converts a Solidity tuple type to a Java one and records the mapping
// in the given map.
// Notably, this function will resolve and record nested struct recursively.
func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
switch kind.T {
case abi.TupleTy:
// We compose a raw struct name and a canonical parameter expression
// together here. The reason is before solidity v0.5.11, kind.TupleRawName
// is empty, so we use canonical parameter expression to distinguish
// different struct definition. From the consideration of backward
// compatibility, we concat these two together so that if kind.TupleRawName
// is not empty, it can have unique id.
id := kind.TupleRawName + kind.String()
if s, exist := structs[id]; exist {
return s.Name
}
var fields []*tmplField
for i, elem := range kind.TupleElems {
field := bindStructTypeJava(*elem, structs)
fields = append(fields, &tmplField{Type: field, Name: decapitalise(kind.TupleRawNames[i]), SolKind: *elem})
}
name := kind.TupleRawName
if name == "" {
name = fmt.Sprintf("Class%d", len(structs))
}
structs[id] = &tmplStruct{
Name: name,
Fields: fields,
}
return name
case abi.ArrayTy, abi.SliceTy:
return pluralizeJavaType(bindStructTypeJava(*kind.Elem, structs))
default:
return bindBasicTypeJava(kind)
}
}
// namedType is a set of functions that transform language specific types to // namedType is a set of functions that transform language specific types to
// named versions that may be used inside method names. // named versions that may be used inside method names.
var namedType = map[Lang]func(string, abi.Type) string{ var namedType = map[Lang]func(string, abi.Type) string{
LangGo: func(string, abi.Type) string { panic("this shouldn't be needed") }, LangGo: func(string, abi.Type) string { panic("this shouldn't be needed") },
LangJava: namedTypeJava,
}
// namedTypeJava converts some primitive data types to named variants that can
// be used as parts of method names.
func namedTypeJava(javaKind string, solKind abi.Type) string {
switch javaKind {
case "byte[]":
return "Binary"
case "boolean":
return "Bool"
default:
parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(solKind.String())
if len(parts) != 4 {
return javaKind
}
switch parts[2] {
case "8", "16", "32", "64":
if parts[3] == "" {
return capitalise(fmt.Sprintf("%sint%s", parts[1], parts[2]))
}
return capitalise(fmt.Sprintf("%sint%ss", parts[1], parts[2]))
default:
return javaKind
}
}
} }
// alias returns an alias of the given string based on the aliasing rules // alias returns an alias of the given string based on the aliasing rules
@ -593,8 +426,7 @@ func alias(aliases map[string]string, n string) string {
// methodNormalizer is a name transformer that modifies Solidity method names to // methodNormalizer is a name transformer that modifies Solidity method names to
// conform to target language naming conventions. // conform to target language naming conventions.
var methodNormalizer = map[Lang]func(string) string{ var methodNormalizer = map[Lang]func(string) string{
LangGo: abi.ToCamelCase, LangGo: abi.ToCamelCase,
LangJava: decapitalise,
} }
// capitalise makes a camel-case string which starts with an upper case character. // capitalise makes a camel-case string which starts with an upper case character.

File diff suppressed because one or more lines are too long

View File

@ -75,8 +75,7 @@ type tmplStruct struct {
// tmplSource is language to template mapping containing all the supported // tmplSource is language to template mapping containing all the supported
// programming languages the package can generate to. // programming languages the package can generate to.
var tmplSource = map[Lang]string{ var tmplSource = map[Lang]string{
LangGo: tmplSourceGo, LangGo: tmplSourceGo,
LangJava: tmplSourceJava,
} }
// tmplSourceGo is the Go source template that the generated Go contract binding // tmplSourceGo is the Go source template that the generated Go contract binding
@ -110,6 +109,7 @@ var (
_ = common.Big1 _ = common.Big1
_ = types.BloomLookup _ = types.BloomLookup
_ = event.NewSubscription _ = event.NewSubscription
_ = abi.ConvertType
) )
{{$structs := .Structs}} {{$structs := .Structs}}
@ -268,11 +268,11 @@ var (
// bind{{.Type}} binds a generic wrapper to an already deployed contract. // bind{{.Type}} binds a generic wrapper to an already deployed contract.
func bind{{.Type}}(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { func bind{{.Type}}(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
parsed, err := abi.JSON(strings.NewReader({{.Type}}ABI)) parsed, err := {{.Type}}MetaData.GetAbi()
if err != nil { if err != nil {
return nil, err return nil, err
} }
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil
} }
// Call invokes the (constant) contract method with params as input values and // Call invokes the (constant) contract method with params as input values and
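For illustration only, the template change above expands to something like the following for a hypothetical contract named Token; generated bindings now reuse the parsed ABI cached in the MetaData object instead of re-parsing the ABI JSON on every bind:
// Hypothetical expansion of the bind{{.Type}} template for a contract "Token".
func bindToken(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
	parsed, err := TokenMetaData.GetAbi() // cached *abi.ABI
	if err != nil {
		return nil, err
	}
	return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil
}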
@ -569,140 +569,3 @@ var (
{{end}} {{end}}
{{end}} {{end}}
` `
// tmplSourceJava is the Java source template that the generated Java contract binding
// is based on.
const tmplSourceJava = `
// This file is an automatically generated Java binding. Do not modify as any
// change will likely be lost upon the next re-generation!
package {{.Package}};
import org.ethereum.geth.*;
import java.util.*;
{{$structs := .Structs}}
{{range $contract := .Contracts}}
{{if not .Library}}public {{end}}class {{.Type}} {
// ABI is the input ABI used to generate the binding from.
public final static String ABI = "{{.InputABI}}";
{{if $contract.FuncSigs}}
// {{.Type}}FuncSigs maps the 4-byte function signature to its string representation.
public final static Map<String, String> {{.Type}}FuncSigs;
static {
Hashtable<String, String> temp = new Hashtable<String, String>();
{{range $strsig, $binsig := .FuncSigs}}temp.put("{{$binsig}}", "{{$strsig}}");
{{end}}
{{.Type}}FuncSigs = Collections.unmodifiableMap(temp);
}
{{end}}
{{if .InputBin}}
// BYTECODE is the compiled bytecode used for deploying new contracts.
public final static String BYTECODE = "0x{{.InputBin}}";
// deploy deploys a new Ethereum contract, binding an instance of {{.Type}} to it.
public static {{.Type}} deploy(TransactOpts auth, EthereumClient client{{range .Constructor.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception {
Interfaces args = Geth.newInterfaces({{(len .Constructor.Inputs)}});
String bytecode = BYTECODE;
{{if .Libraries}}
// "link" contract to dependent libraries by deploying them first.
{{range $pattern, $name := .Libraries}}
{{capitalise $name}} {{decapitalise $name}}Inst = {{capitalise $name}}.deploy(auth, client);
bytecode = bytecode.replace("__${{$pattern}}$__", {{decapitalise $name}}Inst.Address.getHex().substring(2));
{{end}}
{{end}}
{{range $index, $element := .Constructor.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}});
{{end}}
return new {{.Type}}(Geth.deployContract(auth, ABI, Geth.decodeFromHex(bytecode), client, args));
}
// Internal constructor used by contract deployment.
private {{.Type}}(BoundContract deployment) {
this.Address = deployment.getAddress();
this.Deployer = deployment.getDeployer();
this.Contract = deployment;
}
{{end}}
// Ethereum address where this contract is located at.
public final Address Address;
// Ethereum transaction in which this contract was deployed (if known!).
public final Transaction Deployer;
// Contract instance bound to a blockchain address.
private final BoundContract Contract;
// Creates a new instance of {{.Type}}, bound to a specific deployed contract.
public {{.Type}}(Address address, EthereumClient client) throws Exception {
this(Geth.bindContract(address, ABI, client));
}
{{range .Calls}}
{{if gt (len .Normalized.Outputs) 1}}
// {{capitalise .Normalized.Name}}Results is the output of a call to {{.Normalized.Name}}.
public class {{capitalise .Normalized.Name}}Results {
{{range $index, $item := .Normalized.Outputs}}public {{bindtype .Type $structs}} {{if ne .Name ""}}{{.Name}}{{else}}Return{{$index}}{{end}};
{{end}}
}
{{end}}
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
public {{if gt (len .Normalized.Outputs) 1}}{{capitalise .Normalized.Name}}Results{{else if eq (len .Normalized.Outputs) 0}}void{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}}{{end}}{{end}} {{.Normalized.Name}}(CallOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception {
Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}});
{{range $index, $item := .Normalized.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}});
{{end}}
Interfaces results = Geth.newInterfaces({{(len .Normalized.Outputs)}});
{{range $index, $item := .Normalized.Outputs}}Interface result{{$index}} = Geth.newInterface(); result{{$index}}.setDefault{{namedtype (bindtype .Type $structs) .Type}}(); results.set({{$index}}, result{{$index}});
{{end}}
if (opts == null) {
opts = Geth.newCallOpts();
}
this.Contract.call(opts, results, "{{.Original.Name}}", args);
{{if gt (len .Normalized.Outputs) 1}}
{{capitalise .Normalized.Name}}Results result = new {{capitalise .Normalized.Name}}Results();
{{range $index, $item := .Normalized.Outputs}}result.{{if ne .Name ""}}{{.Name}}{{else}}Return{{$index}}{{end}} = results.get({{$index}}).get{{namedtype (bindtype .Type $structs) .Type}}();
{{end}}
return result;
{{else}}{{range .Normalized.Outputs}}return results.get(0).get{{namedtype (bindtype .Type $structs) .Type}}();{{end}}
{{end}}
}
{{end}}
{{range .Transacts}}
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
public Transaction {{.Normalized.Name}}(TransactOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception {
Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}});
{{range $index, $item := .Normalized.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}});
{{end}}
return this.Contract.transact(opts, "{{.Original.Name}}" , args);
}
{{end}}
{{if .Fallback}}
// Fallback is a paid mutator transaction binding the contract fallback function.
//
// Solidity: {{.Fallback.Original.String}}
public Transaction Fallback(TransactOpts opts, byte[] calldata) throws Exception {
return this.Contract.rawTransact(opts, calldata);
}
{{end}}
{{if .Receive}}
// Receive is a paid mutator transaction binding the contract receive function.
//
// Solidity: {{.Receive.Original.String}}
public Transaction Receive(TransactOpts opts) throws Exception {
return this.Contract.rawTransact(opts, null);
}
{{end}}
}
{{end}}
`

View File

@ -23,7 +23,15 @@ import (
) )
var ( var (
errBadBool = errors.New("abi: improperly encoded boolean value") errBadBool = errors.New("abi: improperly encoded boolean value")
errBadUint8 = errors.New("abi: improperly encoded uint8 value")
errBadUint16 = errors.New("abi: improperly encoded uint16 value")
errBadUint32 = errors.New("abi: improperly encoded uint32 value")
errBadUint64 = errors.New("abi: improperly encoded uint64 value")
errBadInt8 = errors.New("abi: improperly encoded int8 value")
errBadInt16 = errors.New("abi: improperly encoded int16 value")
errBadInt32 = errors.New("abi: improperly encoded int32 value")
errBadInt64 = errors.New("abi: improperly encoded int64 value")
) )
// formatSliceString formats the reflection kind with the given slice size // formatSliceString formats the reflection kind with the given slice size

View File

@ -25,16 +25,19 @@ import (
) )
// ConvertType converts an interface of a runtime type into a interface of the // ConvertType converts an interface of a runtime type into a interface of the
// given type // given type, e.g. turn this code:
// e.g. turn //
// var fields []reflect.StructField // var fields []reflect.StructField
// fields = append(fields, reflect.StructField{ //
// Name: "X", // fields = append(fields, reflect.StructField{
// Type: reflect.TypeOf(new(big.Int)), // Name: "X",
// Tag: reflect.StructTag("json:\"" + "x" + "\""), // Type: reflect.TypeOf(new(big.Int)),
// } // Tag: reflect.StructTag("json:\"" + "x" + "\""),
// into // }
// type TupleT struct { X *big.Int } //
// into:
//
// type TupleT struct { X *big.Int }
func ConvertType(in interface{}, proto interface{}) interface{} { func ConvertType(in interface{}, proto interface{}) interface{} {
protoType := reflect.TypeOf(proto) protoType := reflect.TypeOf(proto)
if reflect.TypeOf(in).ConvertibleTo(protoType) { if reflect.TypeOf(in).ConvertibleTo(protoType) {
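A minimal usage sketch of the behaviour documented above, assuming a locally defined TupleT and a value "unpacked" whose layout matches it (both illustrative); this mirrors how generated bindings call abi.ConvertType on unpacked return values:
// Sketch: convert a runtime-typed value into a named struct via ConvertType.
type TupleT struct {
	X *big.Int
}
result := abi.ConvertType(unpacked, new(TupleT)).(*TupleT)
fmt.Println(result.X)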
@ -170,11 +173,13 @@ func setStruct(dst, src reflect.Value) error {
} }
// mapArgNamesToStructFields maps a slice of argument names to struct fields. // mapArgNamesToStructFields maps a slice of argument names to struct fields.
// first round: for each Exportable field that contains a `abi:""` tag //
// and this field name exists in the given argument name list, pair them together. // first round: for each Exportable field that contains a `abi:""` tag and this field name
// second round: for each argument name that has not been already linked, // exists in the given argument name list, pair them together.
// find what variable is expected to be mapped into, if it exists and has not been //
// used, pair them. // second round: for each argument name that has not been already linked, find what
// variable is expected to be mapped into, if it exists and has not been used, pair them.
//
// Note this function assumes the given value is a struct value. // Note this function assumes the given value is a struct value.
func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) { func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) {
typ := value.Type() typ := value.Type()
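As a small illustration of the two rounds described above (field and argument names here are assumptions, not from this diff): a field can be paired explicitly via an abi tag in the first round, or implicitly by name in the second.
// Sketch: target struct for unpacking arguments named "amount" and "to".
type swapResult struct {
	Amount *big.Int       `abi:"amount"` // round one: paired via explicit abi tag
	To     common.Address                // round two: paired by capitalised field name
}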

View File

@ -154,6 +154,9 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
if varSize == 0 { if varSize == 0 {
typ.T = BytesTy typ.T = BytesTy
} else { } else {
if varSize > 32 {
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
}
typ.T = FixedBytesTy typ.T = FixedBytesTy
typ.Size = varSize typ.Size = varSize
} }

View File

@ -366,3 +366,10 @@ func TestGetTypeSize(t *testing.T) {
} }
} }
} }
func TestNewFixedBytesOver32(t *testing.T) {
_, err := NewType("bytes4096", "", nil)
if err == nil {
t.Errorf("fixed bytes with size over 32 is not spec'd")
}
}

View File

@ -19,6 +19,7 @@ package abi
import ( import (
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"math"
"math/big" "math/big"
"reflect" "reflect"
@ -33,43 +34,72 @@ var (
) )
// ReadInteger reads the integer based on its kind and returns the appropriate value. // ReadInteger reads the integer based on its kind and returns the appropriate value.
func ReadInteger(typ Type, b []byte) interface{} { func ReadInteger(typ Type, b []byte) (interface{}, error) {
ret := new(big.Int).SetBytes(b)
if typ.T == UintTy { if typ.T == UintTy {
u64, isu64 := ret.Uint64(), ret.IsUint64()
switch typ.Size { switch typ.Size {
case 8: case 8:
return b[len(b)-1] if !isu64 || u64 > math.MaxUint8 {
return nil, errBadUint8
}
return byte(u64), nil
case 16: case 16:
return binary.BigEndian.Uint16(b[len(b)-2:]) if !isu64 || u64 > math.MaxUint16 {
return nil, errBadUint16
}
return uint16(u64), nil
case 32: case 32:
return binary.BigEndian.Uint32(b[len(b)-4:]) if !isu64 || u64 > math.MaxUint32 {
return nil, errBadUint32
}
return uint32(u64), nil
case 64: case 64:
return binary.BigEndian.Uint64(b[len(b)-8:]) if !isu64 {
return nil, errBadUint64
}
return u64, nil
default: default:
// the only case left for unsigned integer is uint256. // the only case left for unsigned integer is uint256.
return new(big.Int).SetBytes(b) return ret, nil
} }
} }
// big.SetBytes can't tell if a number is negative or positive in itself.
// On EVM, if the returned number > max int256, it is negative.
// A number is > max int256 if the bit at position 255 is set.
if ret.Bit(255) == 1 {
ret.Add(MaxUint256, new(big.Int).Neg(ret))
ret.Add(ret, common.Big1)
ret.Neg(ret)
}
i64, isi64 := ret.Int64(), ret.IsInt64()
switch typ.Size { switch typ.Size {
case 8: case 8:
return int8(b[len(b)-1]) if !isi64 || i64 < math.MinInt8 || i64 > math.MaxInt8 {
return nil, errBadInt8
}
return int8(i64), nil
case 16: case 16:
return int16(binary.BigEndian.Uint16(b[len(b)-2:])) if !isi64 || i64 < math.MinInt16 || i64 > math.MaxInt16 {
return nil, errBadInt16
}
return int16(i64), nil
case 32: case 32:
return int32(binary.BigEndian.Uint32(b[len(b)-4:])) if !isi64 || i64 < math.MinInt32 || i64 > math.MaxInt32 {
return nil, errBadInt32
}
return int32(i64), nil
case 64: case 64:
return int64(binary.BigEndian.Uint64(b[len(b)-8:])) if !isi64 {
return nil, errBadInt64
}
return i64, nil
default: default:
// the only case left for integer is int256 // the only case left for integer is int256
// big.SetBytes can't tell if a number is negative or positive in itself.
// On EVM, if the returned number > max int256, it is negative. return ret, nil
// A number is > max int256 if the bit at position 255 is set.
ret := new(big.Int).SetBytes(b)
if ret.Bit(255) == 1 {
ret.Add(MaxUint256, new(big.Int).Neg(ret))
ret.Add(ret, common.Big1)
ret.Neg(ret)
}
return ret
} }
} }
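A self-contained sketch of the sign handling above: a 32-byte word with bit 255 set is reinterpreted as a negative two's-complement value (imports assumed: bytes, fmt, math/big, common).
// Sketch: decode the 32-byte two's-complement encoding of -1 the way ReadInteger
// does for int256 values.
b := bytes.Repeat([]byte{0xff}, 32)
ret := new(big.Int).SetBytes(b) // == MaxUint256
if ret.Bit(255) == 1 {
	maxUint256 := new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 256), common.Big1)
	ret.Add(maxUint256, new(big.Int).Neg(ret))
	ret.Add(ret, common.Big1)
	ret.Neg(ret)
}
fmt.Println(ret) // -1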
@ -123,7 +153,7 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
return nil, fmt.Errorf("cannot marshal input to array, size is negative (%d)", size) return nil, fmt.Errorf("cannot marshal input to array, size is negative (%d)", size)
} }
if start+32*size > len(output) { if start+32*size > len(output) {
return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size) return nil, fmt.Errorf("abi: cannot marshal into go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size)
} }
// this value will become our slice or our array, depending on the type // this value will become our slice or our array, depending on the type
@ -162,6 +192,9 @@ func forTupleUnpack(t Type, output []byte) (interface{}, error) {
virtualArgs := 0 virtualArgs := 0
for index, elem := range t.TupleElems { for index, elem := range t.TupleElems {
marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output) marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output)
if err != nil {
return nil, err
}
if elem.T == ArrayTy && !isDynamicType(*elem) { if elem.T == ArrayTy && !isDynamicType(*elem) {
// If we have a static array, like [3]uint256, these are coded as // If we have a static array, like [3]uint256, these are coded as
// just like uint256,uint256,uint256. // just like uint256,uint256,uint256.
@ -179,9 +212,6 @@ func forTupleUnpack(t Type, output []byte) (interface{}, error) {
// coded as just like uint256,bool,uint256 // coded as just like uint256,bool,uint256
virtualArgs += getTypeSize(*elem)/32 - 1 virtualArgs += getTypeSize(*elem)/32 - 1
} }
if err != nil {
return nil, err
}
retval.Field(index).Set(reflect.ValueOf(marshalledValue)) retval.Field(index).Set(reflect.ValueOf(marshalledValue))
} }
return retval.Interface(), nil return retval.Interface(), nil
@ -234,7 +264,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
case StringTy: // variable arrays are written at the end of the return bytes case StringTy: // variable arrays are written at the end of the return bytes
return string(output[begin : begin+length]), nil return string(output[begin : begin+length]), nil
case IntTy, UintTy: case IntTy, UintTy:
return ReadInteger(t, returnOutput), nil return ReadInteger(t, returnOutput)
case BoolTy: case BoolTy:
return readBool(returnOutput) return readBool(returnOutput)
case AddressTy: case AddressTy:

View File

@ -20,6 +20,7 @@ import (
"bytes" "bytes"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"math"
"math/big" "math/big"
"reflect" "reflect"
"strconv" "strconv"
@ -943,3 +944,164 @@ func TestOOMMaliciousInput(t *testing.T) {
} }
} }
} }
func TestPackAndUnpackIncompatibleNumber(t *testing.T) {
var encodeABI Arguments
uint256Ty, err := NewType("uint256", "", nil)
if err != nil {
panic(err)
}
encodeABI = Arguments{
{Type: uint256Ty},
}
maxU64, ok := new(big.Int).SetString(strconv.FormatUint(math.MaxUint64, 10), 10)
if !ok {
panic("bug")
}
maxU64Plus1 := new(big.Int).Add(maxU64, big.NewInt(1))
cases := []struct {
decodeType string
inputValue *big.Int
err error
expectValue interface{}
}{
{
decodeType: "uint8",
inputValue: big.NewInt(math.MaxUint8 + 1),
err: errBadUint8,
},
{
decodeType: "uint8",
inputValue: big.NewInt(math.MaxUint8),
err: nil,
expectValue: uint8(math.MaxUint8),
},
{
decodeType: "uint16",
inputValue: big.NewInt(math.MaxUint16 + 1),
err: errBadUint16,
},
{
decodeType: "uint16",
inputValue: big.NewInt(math.MaxUint16),
err: nil,
expectValue: uint16(math.MaxUint16),
},
{
decodeType: "uint32",
inputValue: big.NewInt(math.MaxUint32 + 1),
err: errBadUint32,
},
{
decodeType: "uint32",
inputValue: big.NewInt(math.MaxUint32),
err: nil,
expectValue: uint32(math.MaxUint32),
},
{
decodeType: "uint64",
inputValue: maxU64Plus1,
err: errBadUint64,
},
{
decodeType: "uint64",
inputValue: maxU64,
err: nil,
expectValue: uint64(math.MaxUint64),
},
{
decodeType: "uint256",
inputValue: maxU64Plus1,
err: nil,
expectValue: maxU64Plus1,
},
{
decodeType: "int8",
inputValue: big.NewInt(math.MaxInt8 + 1),
err: errBadInt8,
},
{
decodeType: "int8",
inputValue: big.NewInt(math.MinInt8 - 1),
err: errBadInt8,
},
{
decodeType: "int8",
inputValue: big.NewInt(math.MaxInt8),
err: nil,
expectValue: int8(math.MaxInt8),
},
{
decodeType: "int16",
inputValue: big.NewInt(math.MaxInt16 + 1),
err: errBadInt16,
},
{
decodeType: "int16",
inputValue: big.NewInt(math.MinInt16 - 1),
err: errBadInt16,
},
{
decodeType: "int16",
inputValue: big.NewInt(math.MaxInt16),
err: nil,
expectValue: int16(math.MaxInt16),
},
{
decodeType: "int32",
inputValue: big.NewInt(math.MaxInt32 + 1),
err: errBadInt32,
},
{
decodeType: "int32",
inputValue: big.NewInt(math.MinInt32 - 1),
err: errBadInt32,
},
{
decodeType: "int32",
inputValue: big.NewInt(math.MaxInt32),
err: nil,
expectValue: int32(math.MaxInt32),
},
{
decodeType: "int64",
inputValue: new(big.Int).Add(big.NewInt(math.MaxInt64), big.NewInt(1)),
err: errBadInt64,
},
{
decodeType: "int64",
inputValue: new(big.Int).Sub(big.NewInt(math.MinInt64), big.NewInt(1)),
err: errBadInt64,
},
{
decodeType: "int64",
inputValue: big.NewInt(math.MaxInt64),
err: nil,
expectValue: int64(math.MaxInt64),
},
}
for i, testCase := range cases {
packed, err := encodeABI.Pack(testCase.inputValue)
if err != nil {
panic(err)
}
ty, err := NewType(testCase.decodeType, "", nil)
if err != nil {
panic(err)
}
decodeABI := Arguments{
{Type: ty},
}
decoded, err := decodeABI.Unpack(packed)
if err != testCase.err {
t.Fatalf("Expected error %v, actual error %v. case %d", testCase.err, err, i)
}
if err != nil {
continue
}
if !reflect.DeepEqual(decoded[0], testCase.expectValue) {
t.Fatalf("Expected value %v, actual value %v", testCase.expectValue, decoded[0])
}
}
}

View File

@ -21,15 +21,14 @@ import "fmt"
// ResolveNameConflict returns the next available name for a given thing. // ResolveNameConflict returns the next available name for a given thing.
// This helper can be used for lots of purposes: // This helper can be used for lots of purposes:
// //
// - In solidity function overloading is supported, this function can fix // - In solidity function overloading is supported, this function can fix
// the name conflicts of overloaded functions. // the name conflicts of overloaded functions.
// - In golang binding generation, the parameter(in function, event, error, // - In golang binding generation, the parameter(in function, event, error,
// and struct definition) name will be converted to camelcase style which // and struct definition) name will be converted to camelcase style which
// may eventually lead to name conflicts. // may eventually lead to name conflicts.
// //
// Name conflicts are mostly resolved by adding number suffix. // Name conflicts are mostly resolved by adding number suffix. e.g. if the abi contains
// e.g. if the abi contains Methods send, send1 // Methods "send" and "send1", ResolveNameConflict would return "send2" for input "send".
// ResolveNameConflict would return send2 for input send.
func ResolveNameConflict(rawName string, used func(string) bool) string { func ResolveNameConflict(rawName string, used func(string) bool) string {
name := rawName name := rawName
ok := used(name) ok := used(name)
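A short usage sketch of the helper described above; the set of taken names is illustrative, and the numeric suffixing follows whatever order the implementation probes:
// Sketch: with "send", "send0" and "send1" already taken, the next free name is "send2".
taken := map[string]bool{"send": true, "send0": true, "send1": true}
name := abi.ResolveNameConflict("send", func(s string) bool { return taken[s] })
fmt.Println(name) // send2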

View File

@ -177,7 +177,8 @@ type Backend interface {
// safely used to calculate a signature from. // safely used to calculate a signature from.
// //
// The hash is calculated as // The hash is calculated as
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}). //
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
// //
// This gives context to the signed message and prevents signing of transactions. // This gives context to the signed message and prevents signing of transactions.
func TextHash(data []byte) []byte { func TextHash(data []byte) []byte {
@ -189,7 +190,8 @@ func TextHash(data []byte) []byte {
// safely used to calculate a signature from. // safely used to calculate a signature from.
// //
// The hash is calculated as // The hash is calculated as
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}). //
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
// //
// This gives context to the signed message and prevents signing of transactions. // This gives context to the signed message and prevents signing of transactions.
func TextAndHash(data []byte) ([]byte, string) { func TextAndHash(data []byte) ([]byte, string) {
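A minimal sketch of the prefixed hash described in the comments above, computed directly with crypto.Keccak256 (illustrative only; TextHash already does exactly this):
// Sketch: keccak256("\x19Ethereum Signed Message:\n" + len(msg) + msg) by hand.
msg := []byte("hello")
prefix := fmt.Sprintf("\x19Ethereum Signed Message:\n%d", len(msg))
hash := crypto.Keccak256([]byte(prefix), msg)
fmt.Printf("%x\n", hash)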

View File

@ -46,7 +46,7 @@ var LegacyLedgerBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000
// The BIP-32 spec https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki // The BIP-32 spec https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
// defines derivation paths to be of the form: // defines derivation paths to be of the form:
// //
// m / purpose' / coin_type' / account' / change / address_index // m / purpose' / coin_type' / account' / change / address_index
// //
// The BIP-44 spec https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki // The BIP-44 spec https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki
// defines that the `purpose` be 44' (or 0x8000002C) for crypto currencies, and // defines that the `purpose` be 44' (or 0x8000002C) for crypto currencies, and
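For reference, a hedged sketch of turning the textual form above into an accounts.DerivationPath; the path string is the conventional first Ethereum account under BIP-44 and is only an example:
// Sketch: parse the m / purpose' / coin_type' / account' / change / address_index form.
path, err := accounts.ParseDerivationPath("m/44'/60'/0'/0/0")
if err != nil {
	panic(err)
}
fmt.Println(path) // m/44'/60'/0'/0/0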

View File

@ -27,7 +27,7 @@ import (
"sync" "sync"
"time" "time"
mapset "github.com/deckarep/golang-set" mapset "github.com/deckarep/golang-set/v2"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
@ -79,7 +79,7 @@ func newAccountCache(keydir string) (*accountCache, chan struct{}) {
keydir: keydir, keydir: keydir,
byAddr: make(map[common.Address][]accounts.Account), byAddr: make(map[common.Address][]accounts.Account),
notify: make(chan struct{}, 1), notify: make(chan struct{}, 1),
fileC: fileCache{all: mapset.NewThreadUnsafeSet()}, fileC: fileCache{all: mapset.NewThreadUnsafeSet[string]()},
} }
ac.watcher = newWatcher(ac) ac.watcher = newWatcher(ac)
return ac, ac.notify return ac, ac.notify
@ -146,6 +146,14 @@ func (ac *accountCache) deleteByFile(path string) {
} }
} }
// watcherStarted returns true if the watcher loop started running (even if it
// has since also ended).
func (ac *accountCache) watcherStarted() bool {
ac.mu.Lock()
defer ac.mu.Unlock()
return ac.watcher.running || ac.watcher.runEnded
}
func removeAccount(slice []accounts.Account, elem accounts.Account) []accounts.Account { func removeAccount(slice []accounts.Account, elem accounts.Account) []accounts.Account {
for i := range slice { for i := range slice {
if slice[i] == elem { if slice[i] == elem {
@ -275,16 +283,15 @@ func (ac *accountCache) scanAccounts() error {
// Process all the file diffs // Process all the file diffs
start := time.Now() start := time.Now()
for _, p := range creates.ToSlice() { for _, path := range creates.ToSlice() {
if a := readAccount(p.(string)); a != nil { if a := readAccount(path); a != nil {
ac.add(*a) ac.add(*a)
} }
} }
for _, p := range deletes.ToSlice() { for _, path := range deletes.ToSlice() {
ac.deleteByFile(p.(string)) ac.deleteByFile(path)
} }
for _, p := range updates.ToSlice() { for _, path := range updates.ToSlice() {
path := p.(string)
ac.deleteByFile(path) ac.deleteByFile(path)
if a := readAccount(path); a != nil { if a := readAccount(path); a != nil {
ac.add(*a) ac.add(*a)

View File

@ -50,6 +50,38 @@ var (
} }
) )
// waitWatcherStart waits up to 1s for the keystore watcher to start.
func waitWatcherStart(ks *KeyStore) bool {
// On systems where file watch is not supported, just return "ok".
if !ks.cache.watcher.enabled() {
return true
}
// The watcher should start, and then exit.
for t0 := time.Now(); time.Since(t0) < 1*time.Second; time.Sleep(100 * time.Millisecond) {
if ks.cache.watcherStarted() {
return true
}
}
return false
}
func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
var list []accounts.Account
for t0 := time.Now(); time.Since(t0) < 5*time.Second; time.Sleep(200 * time.Millisecond) {
list = ks.Accounts()
if reflect.DeepEqual(list, wantAccounts) {
// ks should have also received change notifications
select {
case <-ks.changes:
default:
return fmt.Errorf("wasn't notified of new accounts")
}
return nil
}
}
return fmt.Errorf("\ngot %v\nwant %v", list, wantAccounts)
}
func TestWatchNewFile(t *testing.T) { func TestWatchNewFile(t *testing.T) {
t.Parallel() t.Parallel()
@ -57,8 +89,9 @@ func TestWatchNewFile(t *testing.T) {
// Ensure the watcher is started before adding any files. // Ensure the watcher is started before adding any files.
ks.Accounts() ks.Accounts()
time.Sleep(1000 * time.Millisecond) if !waitWatcherStart(ks) {
t.Fatal("keystore watcher didn't start in time")
}
// Move in the files. // Move in the files.
wantAccounts := make([]accounts.Account, len(cachetestAccounts)) wantAccounts := make([]accounts.Account, len(cachetestAccounts))
for i := range cachetestAccounts { for i := range cachetestAccounts {
@ -72,37 +105,24 @@ func TestWatchNewFile(t *testing.T) {
} }
// ks should see the accounts. // ks should see the accounts.
var list []accounts.Account if err := waitForAccounts(wantAccounts, ks); err != nil {
for d := 200 * time.Millisecond; d < 5*time.Second; d *= 2 { t.Error(err)
list = ks.Accounts()
if reflect.DeepEqual(list, wantAccounts) {
// ks should have also received change notifications
select {
case <-ks.changes:
default:
t.Fatalf("wasn't notified of new accounts")
}
return
}
time.Sleep(d)
} }
t.Errorf("got %s, want %s", spew.Sdump(list), spew.Sdump(wantAccounts))
} }
func TestWatchNoDir(t *testing.T) { func TestWatchNoDir(t *testing.T) {
t.Parallel() t.Parallel()
// Create ks but not the directory that it watches. // Create ks but not the directory that it watches.
rand.Seed(time.Now().UnixNano())
dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watchnodir-test-%d-%d", os.Getpid(), rand.Int())) dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watchnodir-test-%d-%d", os.Getpid(), rand.Int()))
ks := NewKeyStore(dir, LightScryptN, LightScryptP) ks := NewKeyStore(dir, LightScryptN, LightScryptP)
list := ks.Accounts() list := ks.Accounts()
if len(list) > 0 { if len(list) > 0 {
t.Error("initial account list not empty:", list) t.Error("initial account list not empty:", list)
} }
time.Sleep(100 * time.Millisecond) // The watcher should start, and then exit.
if !waitWatcherStart(ks) {
t.Fatal("keystore watcher didn't start in time")
}
// Create the directory and copy a key file into it. // Create the directory and copy a key file into it.
os.MkdirAll(dir, 0700) os.MkdirAll(dir, 0700)
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
@ -295,31 +315,12 @@ func TestCacheFind(t *testing.T) {
} }
} }
func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
var list []accounts.Account
for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 {
list = ks.Accounts()
if reflect.DeepEqual(list, wantAccounts) {
// ks should have also received change notifications
select {
case <-ks.changes:
default:
return fmt.Errorf("wasn't notified of new accounts")
}
return nil
}
time.Sleep(d)
}
return fmt.Errorf("\ngot %v\nwant %v", list, wantAccounts)
}
// TestUpdatedKeyfileContents tests that updating the contents of a keystore file // TestUpdatedKeyfileContents tests that updating the contents of a keystore file
// is noticed by the watcher, and the account cache is updated accordingly // is noticed by the watcher, and the account cache is updated accordingly
func TestUpdatedKeyfileContents(t *testing.T) { func TestUpdatedKeyfileContents(t *testing.T) {
t.Parallel() t.Parallel()
// Create a temporary keystore to test with // Create a temporary keystore to test with
rand.Seed(time.Now().UnixNano())
dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int())) dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int()))
ks := NewKeyStore(dir, LightScryptN, LightScryptP) ks := NewKeyStore(dir, LightScryptN, LightScryptP)
@ -327,8 +328,9 @@ func TestUpdatedKeyfileContents(t *testing.T) {
if len(list) > 0 { if len(list) > 0 {
t.Error("initial account list not empty:", list) t.Error("initial account list not empty:", list)
} }
time.Sleep(100 * time.Millisecond) if !waitWatcherStart(ks) {
t.Fatal("keystore watcher didn't start in time")
}
// Create the directory and copy a key file into it. // Create the directory and copy a key file into it.
os.MkdirAll(dir, 0700) os.MkdirAll(dir, 0700)
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
@ -346,9 +348,8 @@ func TestUpdatedKeyfileContents(t *testing.T) {
t.Error(err) t.Error(err)
return return
} }
// needed so that modTime of `file` is different to its current value after forceCopyFile // needed so that modTime of `file` is different to its current value after forceCopyFile
time.Sleep(1000 * time.Millisecond) time.Sleep(time.Second)
// Now replace file contents // Now replace file contents
if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil { if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
@ -364,7 +365,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
} }
// needed so that modTime of `file` is different to its current value after forceCopyFile // needed so that modTime of `file` is different to its current value after forceCopyFile
time.Sleep(1000 * time.Millisecond) time.Sleep(time.Second)
// Now replace file contents again // Now replace file contents again
if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil { if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
@ -380,7 +381,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
} }
// needed so that modTime of `file` is different to its current value after os.WriteFile // needed so that modTime of `file` is different to its current value after os.WriteFile
time.Sleep(1000 * time.Millisecond) time.Sleep(time.Second)
// Now replace file contents with crap // Now replace file contents with crap
if err := os.WriteFile(file, []byte("foo"), 0600); err != nil { if err := os.WriteFile(file, []byte("foo"), 0600); err != nil {

View File

@ -23,20 +23,20 @@ import (
"sync" "sync"
"time" "time"
mapset "github.com/deckarep/golang-set" mapset "github.com/deckarep/golang-set/v2"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
// fileCache is a cache of files seen during scan of keystore. // fileCache is a cache of files seen during scan of keystore.
type fileCache struct { type fileCache struct {
all mapset.Set // Set of all files from the keystore folder all mapset.Set[string] // Set of all files from the keystore folder
lastMod time.Time // Last time instance when a file was modified lastMod time.Time // Last time instance when a file was modified
mu sync.Mutex mu sync.Mutex
} }
// scan performs a new scan on the given directory, compares against the already // scan performs a new scan on the given directory, compares against the already
// cached filenames, and returns file sets: creates, deletes, updates. // cached filenames, and returns file sets: creates, deletes, updates.
func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, error) { func (fc *fileCache) scan(keyDir string) (mapset.Set[string], mapset.Set[string], mapset.Set[string], error) {
t0 := time.Now() t0 := time.Now()
// List all the files from the keystore folder // List all the files from the keystore folder
@ -50,8 +50,8 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er
defer fc.mu.Unlock() defer fc.mu.Unlock()
// Iterate all the files and gather their metadata // Iterate all the files and gather their metadata
all := mapset.NewThreadUnsafeSet() all := mapset.NewThreadUnsafeSet[string]()
mods := mapset.NewThreadUnsafeSet() mods := mapset.NewThreadUnsafeSet[string]()
var newLastMod time.Time var newLastMod time.Time
for _, fi := range files { for _, fi := range files {

View File

@ -498,6 +498,14 @@ func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (account
return a, nil return a, nil
} }
// isUpdating returns whether the event notification loop is running.
// This method is mainly meant for tests.
func (ks *KeyStore) isUpdating() bool {
ks.mu.RLock()
defer ks.mu.RUnlock()
return ks.updating
}
// zeroKey zeroes a private key in memory. // zeroKey zeroes a private key in memory.
func zeroKey(k *ecdsa.PrivateKey) { func zeroKey(k *ecdsa.PrivateKey) {
b := k.D.Bits() b := k.D.Bits()

View File

@ -113,6 +113,7 @@ func TestSignWithPassphrase(t *testing.T) {
} }
func TestTimedUnlock(t *testing.T) { func TestTimedUnlock(t *testing.T) {
t.Parallel()
_, ks := tmpKeyStore(t, true) _, ks := tmpKeyStore(t, true)
pass := "foo" pass := "foo"
@ -147,6 +148,7 @@ func TestTimedUnlock(t *testing.T) {
} }
func TestOverrideUnlock(t *testing.T) { func TestOverrideUnlock(t *testing.T) {
t.Parallel()
_, ks := tmpKeyStore(t, false) _, ks := tmpKeyStore(t, false)
pass := "foo" pass := "foo"
@ -187,6 +189,7 @@ func TestOverrideUnlock(t *testing.T) {
// This test should fail under -race if signing races the expiration goroutine. // This test should fail under -race if signing races the expiration goroutine.
func TestSignRace(t *testing.T) { func TestSignRace(t *testing.T) {
t.Parallel()
_, ks := tmpKeyStore(t, false) _, ks := tmpKeyStore(t, false)
// Create a test account. // Create a test account.
@ -211,19 +214,33 @@ func TestSignRace(t *testing.T) {
t.Errorf("Account did not lock within the timeout") t.Errorf("Account did not lock within the timeout")
} }
// waitForKsUpdating waits until the updating-status of the ks reaches the
// desired wantStatus.
// It waits for a maximum time of maxTime, and returns false if it does not
// finish in time.
func waitForKsUpdating(t *testing.T, ks *KeyStore, wantStatus bool, maxTime time.Duration) bool {
t.Helper()
// Poll every 25ms; give up and return false once maxTime has elapsed.
for t0 := time.Now(); time.Since(t0) < maxTime; {
if ks.isUpdating() == wantStatus {
return true
}
time.Sleep(25 * time.Millisecond)
}
return false
}
// Tests that the wallet notifier loop starts and stops correctly based on the // Tests that the wallet notifier loop starts and stops correctly based on the
// addition and removal of wallet event subscriptions. // addition and removal of wallet event subscriptions.
func TestWalletNotifierLifecycle(t *testing.T) { func TestWalletNotifierLifecycle(t *testing.T) {
t.Parallel()
// Create a temporary keystore to test with // Create a temporary keystore to test with
_, ks := tmpKeyStore(t, false) _, ks := tmpKeyStore(t, false)
// Ensure that the notification updater is not running yet // Ensure that the notification updater is not running yet
time.Sleep(250 * time.Millisecond) time.Sleep(250 * time.Millisecond)
ks.mu.RLock()
updating := ks.updating
ks.mu.RUnlock()
if updating { if ks.isUpdating() {
t.Errorf("wallet notifier running without subscribers") t.Errorf("wallet notifier running without subscribers")
} }
// Subscribe to the wallet feed and ensure the updater boots up // Subscribe to the wallet feed and ensure the updater boots up
@ -233,38 +250,26 @@ func TestWalletNotifierLifecycle(t *testing.T) {
for i := 0; i < len(subs); i++ { for i := 0; i < len(subs); i++ {
// Create a new subscription // Create a new subscription
subs[i] = ks.Subscribe(updates) subs[i] = ks.Subscribe(updates)
if !waitForKsUpdating(t, ks, true, 250*time.Millisecond) {
// Ensure the notifier comes online
time.Sleep(250 * time.Millisecond)
ks.mu.RLock()
updating = ks.updating
ks.mu.RUnlock()
if !updating {
t.Errorf("sub %d: wallet notifier not running after subscription", i) t.Errorf("sub %d: wallet notifier not running after subscription", i)
} }
} }
// Unsubscribe and ensure the updater terminates eventually // Close all but one sub
for i := 0; i < len(subs); i++ { for i := 0; i < len(subs)-1; i++ {
// Close an existing subscription // Close an existing subscription
subs[i].Unsubscribe() subs[i].Unsubscribe()
// Ensure the notifier shuts down at and only at the last close
for k := 0; k < int(walletRefreshCycle/(250*time.Millisecond))+2; k++ {
ks.mu.RLock()
updating = ks.updating
ks.mu.RUnlock()
if i < len(subs)-1 && !updating {
t.Fatalf("sub %d: event notifier stopped prematurely", i)
}
if i == len(subs)-1 && !updating {
return
}
time.Sleep(250 * time.Millisecond)
}
} }
t.Errorf("wallet notifier didn't terminate after unsubscribe") // Check that it is still running
time.Sleep(250 * time.Millisecond)
if !ks.isUpdating() {
t.Fatal("event notifier stopped prematurely")
}
// Unsubscribe the last one and ensure the updater terminates eventually.
subs[len(subs)-1].Unsubscribe()
if !waitForKsUpdating(t, ks, false, 4*time.Second) {
t.Errorf("wallet notifier didn't terminate after unsubscribe")
}
} }
type walletEvent struct { type walletEvent struct {

View File

@ -23,25 +23,27 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/rjeczalik/notify" "github.com/fsnotify/fsnotify"
) )
type watcher struct { type watcher struct {
ac *accountCache ac *accountCache
starting bool running bool // set to true when runloop begins
running bool runEnded bool // set to true when runloop ends
ev chan notify.EventInfo starting bool // set to true prior to runloop starting
quit chan struct{} quit chan struct{}
} }
func newWatcher(ac *accountCache) *watcher { func newWatcher(ac *accountCache) *watcher {
return &watcher{ return &watcher{
ac: ac, ac: ac,
ev: make(chan notify.EventInfo, 10),
quit: make(chan struct{}), quit: make(chan struct{}),
} }
} }
// enabled returns false on systems not supported.
func (*watcher) enabled() bool { return true }
// starts the watcher loop in the background. // starts the watcher loop in the background.
// Start a watcher in the background if that's not already in progress. // Start a watcher in the background if that's not already in progress.
// The caller must hold w.ac.mu. // The caller must hold w.ac.mu.
@ -62,16 +64,24 @@ func (w *watcher) loop() {
w.ac.mu.Lock() w.ac.mu.Lock()
w.running = false w.running = false
w.starting = false w.starting = false
w.runEnded = true
w.ac.mu.Unlock() w.ac.mu.Unlock()
}() }()
logger := log.New("path", w.ac.keydir) logger := log.New("path", w.ac.keydir)
if err := notify.Watch(w.ac.keydir, w.ev, notify.All); err != nil { // Create new watcher.
logger.Trace("Failed to watch keystore folder", "err", err) watcher, err := fsnotify.NewWatcher()
if err != nil {
log.Error("Failed to start filesystem watcher", "err", err)
return return
} }
defer notify.Stop(w.ev) defer watcher.Close()
logger.Trace("Started watching keystore folder") if err := watcher.Add(w.ac.keydir); err != nil {
logger.Warn("Failed to watch keystore folder", "err", err)
return
}
logger.Trace("Started watching keystore folder", "folder", w.ac.keydir)
defer logger.Trace("Stopped watching keystore folder") defer logger.Trace("Stopped watching keystore folder")
w.ac.mu.Lock() w.ac.mu.Lock()
@ -95,12 +105,24 @@ func (w *watcher) loop() {
select { select {
case <-w.quit: case <-w.quit:
return return
case <-w.ev: case _, ok := <-watcher.Events:
if !ok {
return
}
// Trigger the scan (with delay), if not already triggered // Trigger the scan (with delay), if not already triggered
if !rescanTriggered { if !rescanTriggered {
debounce.Reset(debounceDuration) debounce.Reset(debounceDuration)
rescanTriggered = true rescanTriggered = true
} }
// The fsnotify library does provide more granular event info, so it
// would be possible to refresh individual affected files instead
// of scheduling a full rescan. For most cases, though, the
// full rescan is quick and simplest.
case err, ok := <-watcher.Errors:
if !ok {
return
}
log.Info("Filsystem watcher error", "err", err)
case <-debounce.C: case <-debounce.C:
w.ac.scanAccounts() w.ac.scanAccounts()
rescanTriggered = false rescanTriggered = false
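For reference, a minimal standalone sketch of the debounced-rescan pattern used above, written against the public fsnotify API; the watched path and the debounce interval are illustrative only:

package main

import (
	"log"
	"time"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	if err := watcher.Add("/tmp/keystore"); err != nil { // illustrative path
		log.Fatal(err)
	}

	const debounceDuration = 500 * time.Millisecond
	debounce := time.NewTimer(0)
	if !debounce.Stop() {
		<-debounce.C
	}
	rescanTriggered := false

	for {
		select {
		case _, ok := <-watcher.Events:
			if !ok {
				return
			}
			// Coalesce bursts of filesystem events into a single rescan.
			if !rescanTriggered {
				debounce.Reset(debounceDuration)
				rescanTriggered = true
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		case <-debounce.C:
			log.Println("rescanning keystore directory")
			rescanTriggered = false
		}
	}
}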

View File

@ -22,8 +22,14 @@
package keystore package keystore
type watcher struct{ running bool } type watcher struct {
running bool
runEnded bool
}
func newWatcher(*accountCache) *watcher { return new(watcher) } func newWatcher(*accountCache) *watcher { return new(watcher) }
func (*watcher) start() {} func (*watcher) start() {}
func (*watcher) close() {} func (*watcher) close() {}
// enabled reports whether the filesystem watcher is supported on this platform.
func (*watcher) enabled() bool { return false }

View File

@ -257,7 +257,7 @@ func merge(slice []Wallet, wallets ...Wallet) []Wallet {
return slice return slice
} }
// drop is the couterpart of merge, which looks up wallets from within the sorted // drop is the counterpart of merge, which looks up wallets from within the sorted
// cache and removes the ones specified. // cache and removes the ones specified.
func drop(slice []Wallet, wallets ...Wallet) []Wallet { func drop(slice []Wallet, wallets ...Wallet) []Wallet {
for _, wallet := range wallets { for _, wallet := range wallets {

View File

@ -99,8 +99,8 @@ const (
P1DeriveKeyFromCurrent = uint8(0x10) P1DeriveKeyFromCurrent = uint8(0x10)
statusP1WalletStatus = uint8(0x00) statusP1WalletStatus = uint8(0x00)
statusP1Path = uint8(0x01) statusP1Path = uint8(0x01)
signP1PrecomputedHash = uint8(0x01) signP1PrecomputedHash = uint8(0x00)
signP2OnlyBlock = uint8(0x81) signP2OnlyBlock = uint8(0x00)
exportP1Any = uint8(0x00) exportP1Any = uint8(0x00)
exportP2Pubkey = uint8(0x01) exportP2Pubkey = uint8(0x01)
) )
@ -879,6 +879,7 @@ func (s *Session) walletStatus() (*walletStatus, error) {
} }
// derivationPath fetches the wallet's current derivation path from the card. // derivationPath fetches the wallet's current derivation path from the card.
//
//lint:ignore U1000 needs to be added to the console interface //lint:ignore U1000 needs to be added to the console interface
func (s *Session) derivationPath() (accounts.DerivationPath, error) { func (s *Session) derivationPath() (accounts.DerivationPath, error) {
response, err := s.Channel.transmitEncrypted(claSCWallet, insStatus, statusP1Path, 0, nil) response, err := s.Channel.transmitEncrypted(claSCWallet, insStatus, statusP1Path, 0, nil)
@ -994,6 +995,7 @@ func (s *Session) derive(path accounts.DerivationPath) (accounts.Account, error)
} }
// keyExport contains information on an exported keypair. // keyExport contains information on an exported keypair.
//
//lint:ignore U1000 needs to be added to the console interface //lint:ignore U1000 needs to be added to the console interface
type keyExport struct { type keyExport struct {
PublicKey []byte `asn1:"tag:0"` PublicKey []byte `asn1:"tag:0"`
@ -1001,6 +1003,7 @@ type keyExport struct {
} }
// publicKey returns the public key for the current derivation path. // publicKey returns the public key for the current derivation path.
//
//lint:ignore U1000 needs to be added to the console interface //lint:ignore U1000 needs to be added to the console interface
func (s *Session) publicKey() ([]byte, error) { func (s *Session) publicKey() ([]byte, error) {
response, err := s.Channel.transmitEncrypted(claSCWallet, insExportKey, exportP1Any, exportP2Pubkey, nil) response, err := s.Channel.transmitEncrypted(claSCWallet, insExportKey, exportP1Any, exportP2Pubkey, nil)

View File

@ -92,10 +92,9 @@ func (u *URL) UnmarshalJSON(input []byte) error {
// Cmp compares x and y and returns: // Cmp compares x and y and returns:
// //
// -1 if x < y // -1 if x < y
// 0 if x == y // 0 if x == y
// +1 if x > y // +1 if x > y
//
func (u URL) Cmp(url URL) int { func (u URL) Cmp(url URL) int {
if u.Scheme == url.Scheme { if u.Scheme == url.Scheme {
return strings.Compare(u.Path, url.Path) return strings.Compare(u.Path, url.Path)

View File

@ -71,18 +71,28 @@ type Hub struct {
// NewLedgerHub creates a new hardware wallet manager for Ledger devices. // NewLedgerHub creates a new hardware wallet manager for Ledger devices.
func NewLedgerHub() (*Hub, error) { func NewLedgerHub() (*Hub, error) {
return newHub(LedgerScheme, 0x2c97, []uint16{ return newHub(LedgerScheme, 0x2c97, []uint16{
// Device definitions taken from
// https://github.com/LedgerHQ/ledger-live/blob/38012bc8899e0f07149ea9cfe7e64b2c146bc92b/libs/ledgerjs/packages/devices/src/index.ts
// Original product IDs // Original product IDs
0x0000, /* Ledger Blue */ 0x0000, /* Ledger Blue */
0x0001, /* Ledger Nano S */ 0x0001, /* Ledger Nano S */
0x0004, /* Ledger Nano X */ 0x0004, /* Ledger Nano X */
0x0005, /* Ledger Nano S Plus */
0x0006, /* Ledger Nano FTS */
// Upcoming product IDs: https://www.ledger.com/2019/05/17/windows-10-update-sunsetting-u2f-tunnel-transport-for-ledger-devices/
0x0015, /* HID + U2F + WebUSB Ledger Blue */ 0x0015, /* HID + U2F + WebUSB Ledger Blue */
0x1015, /* HID + U2F + WebUSB Ledger Nano S */ 0x1015, /* HID + U2F + WebUSB Ledger Nano S */
0x4015, /* HID + U2F + WebUSB Ledger Nano X */ 0x4015, /* HID + U2F + WebUSB Ledger Nano X */
0x5015, /* HID + U2F + WebUSB Ledger Nano S Plus */
0x6015, /* HID + U2F + WebUSB Ledger Nano FTS */
0x0011, /* HID + WebUSB Ledger Blue */ 0x0011, /* HID + WebUSB Ledger Blue */
0x1011, /* HID + WebUSB Ledger Nano S */ 0x1011, /* HID + WebUSB Ledger Nano S */
0x4011, /* HID + WebUSB Ledger Nano X */ 0x4011, /* HID + WebUSB Ledger Nano X */
0x5011, /* HID + WebUSB Ledger Nano S Plus */
0x6011, /* HID + WebUSB Ledger Nano FTS */
}, 0xffa0, 0, newLedgerDriver) }, 0xffa0, 0, newLedgerDriver)
} }

View File

@ -195,18 +195,18 @@ func (w *ledgerDriver) SignTypedMessage(path accounts.DerivationPath, domainHash
// //
// The version retrieval protocol is defined as follows: // The version retrieval protocol is defined as follows:
// //
// CLA | INS | P1 | P2 | Lc | Le // CLA | INS | P1 | P2 | Lc | Le
// ----+-----+----+----+----+--- // ----+-----+----+----+----+---
// E0 | 06 | 00 | 00 | 00 | 04 // E0 | 06 | 00 | 00 | 00 | 04
// //
// With no input data, and the output data being: // With no input data, and the output data being:
// //
// Description | Length // Description | Length
// ---------------------------------------------------+-------- // ---------------------------------------------------+--------
// Flags 01: arbitrary data signature enabled by user | 1 byte // Flags 01: arbitrary data signature enabled by user | 1 byte
// Application major version | 1 byte // Application major version | 1 byte
// Application minor version | 1 byte // Application minor version | 1 byte
// Application patch version | 1 byte // Application patch version | 1 byte
func (w *ledgerDriver) ledgerVersion() ([3]byte, error) { func (w *ledgerDriver) ledgerVersion() ([3]byte, error) {
// Send the request and wait for the response // Send the request and wait for the response
reply, err := w.ledgerExchange(ledgerOpGetConfiguration, 0, 0, nil) reply, err := w.ledgerExchange(ledgerOpGetConfiguration, 0, 0, nil)
@ -227,32 +227,32 @@ func (w *ledgerDriver) ledgerVersion() ([3]byte, error) {
// //
// The address derivation protocol is defined as follows: // The address derivation protocol is defined as follows:
// //
// CLA | INS | P1 | P2 | Lc | Le // CLA | INS | P1 | P2 | Lc | Le
// ----+-----+----+----+-----+--- // ----+-----+----+----+-----+---
// E0 | 02 | 00 return address // E0 | 02 | 00 return address
// 01 display address and confirm before returning // 01 display address and confirm before returning
// | 00: do not return the chain code // | 00: do not return the chain code
// | 01: return the chain code // | 01: return the chain code
// | var | 00 // | var | 00
// //
// Where the input data is: // Where the input data is:
// //
// Description | Length // Description | Length
// -------------------------------------------------+-------- // -------------------------------------------------+--------
// Number of BIP 32 derivations to perform (max 10) | 1 byte // Number of BIP 32 derivations to perform (max 10) | 1 byte
// First derivation index (big endian) | 4 bytes // First derivation index (big endian) | 4 bytes
// ... | 4 bytes // ... | 4 bytes
// Last derivation index (big endian) | 4 bytes // Last derivation index (big endian) | 4 bytes
// //
// And the output data is: // And the output data is:
// //
// Description | Length // Description | Length
// ------------------------+------------------- // ------------------------+-------------------
// Public Key length | 1 byte // Public Key length | 1 byte
// Uncompressed Public Key | arbitrary // Uncompressed Public Key | arbitrary
// Ethereum address length | 1 byte // Ethereum address length | 1 byte
// Ethereum address | 40 bytes hex ascii // Ethereum address | 40 bytes hex ascii
// Chain code if requested | 32 bytes // Chain code if requested | 32 bytes
func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, error) { func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, error) {
// Flatten the derivation path into the Ledger request // Flatten the derivation path into the Ledger request
path := make([]byte, 1+4*len(derivationPath)) path := make([]byte, 1+4*len(derivationPath))
@ -290,35 +290,35 @@ func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, er
// //
// The transaction signing protocol is defined as follows: // The transaction signing protocol is defined as follows:
// //
// CLA | INS | P1 | P2 | Lc | Le // CLA | INS | P1 | P2 | Lc | Le
// ----+-----+----+----+-----+--- // ----+-----+----+----+-----+---
// E0 | 04 | 00: first transaction data block // E0 | 04 | 00: first transaction data block
// 80: subsequent transaction data block // 80: subsequent transaction data block
// | 00 | variable | variable // | 00 | variable | variable
// //
// Where the input for the first transaction block (first 255 bytes) is: // Where the input for the first transaction block (first 255 bytes) is:
// //
// Description | Length // Description | Length
// -------------------------------------------------+---------- // -------------------------------------------------+----------
// Number of BIP 32 derivations to perform (max 10) | 1 byte // Number of BIP 32 derivations to perform (max 10) | 1 byte
// First derivation index (big endian) | 4 bytes // First derivation index (big endian) | 4 bytes
// ... | 4 bytes // ... | 4 bytes
// Last derivation index (big endian) | 4 bytes // Last derivation index (big endian) | 4 bytes
// RLP transaction chunk | arbitrary // RLP transaction chunk | arbitrary
// //
// And the input for subsequent transaction blocks (first 255 bytes) are: // And the input for subsequent transaction blocks (first 255 bytes) are:
// //
// Description | Length // Description | Length
// ----------------------+---------- // ----------------------+----------
// RLP transaction chunk | arbitrary // RLP transaction chunk | arbitrary
// //
// And the output data is: // And the output data is:
// //
// Description | Length // Description | Length
// ------------+--------- // ------------+---------
// signature V | 1 byte // signature V | 1 byte
// signature R | 32 bytes // signature R | 32 bytes
// signature S | 32 bytes // signature S | 32 bytes
func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) { func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction, chainID *big.Int) (common.Address, *types.Transaction, error) {
// Flatten the derivation path into the Ledger request // Flatten the derivation path into the Ledger request
path := make([]byte, 1+4*len(derivationPath)) path := make([]byte, 1+4*len(derivationPath))
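As an aside, the derivation-path flattening that both ledgerDerive and ledgerSign begin with follows the input layout documented above (a count byte followed by 4-byte big-endian indices); a standalone sketch with an illustrative path:

package main

import (
	"encoding/binary"
	"fmt"
)

// flattenPath encodes a BIP-32 derivation path as one count byte followed by
// each index as a 4-byte big-endian value, matching the layout above.
func flattenPath(derivationPath []uint32) []byte {
	path := make([]byte, 1+4*len(derivationPath))
	path[0] = byte(len(derivationPath))
	for i, component := range derivationPath {
		binary.BigEndian.PutUint32(path[1+4*i:], component)
	}
	return path
}

func main() {
	// m/44'/60'/0'/0/0, with 0x80000000 marking hardened components.
	fmt.Printf("% x\n", flattenPath([]uint32{0x8000002c, 0x8000003c, 0x80000000, 0, 0}))
}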
@ -392,30 +392,28 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
// //
// The signing protocol is defined as follows: // The signing protocol is defined as follows:
// //
// CLA | INS | P1 | P2 | Lc | Le // CLA | INS | P1 | P2 | Lc | Le
// ----+-----+----+-----------------------------+-----+--- // ----+-----+----+-----------------------------+-----+---
// E0 | 0C | 00 | implementation version : 00 | variable | variable // E0 | 0C | 00 | implementation version : 00 | variable | variable
// //
// Where the input is: // Where the input is:
// //
// Description | Length // Description | Length
// -------------------------------------------------+---------- // -------------------------------------------------+----------
// Number of BIP 32 derivations to perform (max 10) | 1 byte // Number of BIP 32 derivations to perform (max 10) | 1 byte
// First derivation index (big endian) | 4 bytes // First derivation index (big endian) | 4 bytes
// ... | 4 bytes // ... | 4 bytes
// Last derivation index (big endian) | 4 bytes // Last derivation index (big endian) | 4 bytes
// domain hash | 32 bytes // domain hash | 32 bytes
// message hash | 32 bytes // message hash | 32 bytes
//
//
// //
// And the output data is: // And the output data is:
// //
// Description | Length // Description | Length
// ------------+--------- // ------------+---------
// signature V | 1 byte // signature V | 1 byte
// signature R | 32 bytes // signature R | 32 bytes
// signature S | 32 bytes // signature S | 32 bytes
func (w *ledgerDriver) ledgerSignTypedMessage(derivationPath []uint32, domainHash []byte, messageHash []byte) ([]byte, error) { func (w *ledgerDriver) ledgerSignTypedMessage(derivationPath []uint32, domainHash []byte, messageHash []byte) ([]byte, error) {
// Flatten the derivation path into the Ledger request // Flatten the derivation path into the Ledger request
path := make([]byte, 1+4*len(derivationPath)) path := make([]byte, 1+4*len(derivationPath))
@ -454,12 +452,12 @@ func (w *ledgerDriver) ledgerSignTypedMessage(derivationPath []uint32, domainHas
// //
// The common transport header is defined as follows: // The common transport header is defined as follows:
// //
// Description | Length // Description | Length
// --------------------------------------+---------- // --------------------------------------+----------
// Communication channel ID (big endian) | 2 bytes // Communication channel ID (big endian) | 2 bytes
// Command tag | 1 byte // Command tag | 1 byte
// Packet sequence index (big endian) | 2 bytes // Packet sequence index (big endian) | 2 bytes
// Payload | arbitrary // Payload | arbitrary
// //
// The Communication channel ID allows commands multiplexing over the same // The Communication channel ID allows commands multiplexing over the same
// physical link. It is not used for the time being, and should be set to 0101 // physical link. It is not used for the time being, and should be set to 0101
@ -473,15 +471,15 @@ func (w *ledgerDriver) ledgerSignTypedMessage(derivationPath []uint32, domainHas
// //
// APDU Command payloads are encoded as follows: // APDU Command payloads are encoded as follows:
// //
// Description | Length // Description | Length
// ----------------------------------- // -----------------------------------
// APDU length (big endian) | 2 bytes // APDU length (big endian) | 2 bytes
// APDU CLA | 1 byte // APDU CLA | 1 byte
// APDU INS | 1 byte // APDU INS | 1 byte
// APDU P1 | 1 byte // APDU P1 | 1 byte
// APDU P2 | 1 byte // APDU P2 | 1 byte
// APDU length | 1 byte // APDU length | 1 byte
// Optional APDU data | arbitrary // Optional APDU data | arbitrary
func (w *ledgerDriver) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 ledgerParam2, data []byte) ([]byte, error) { func (w *ledgerDriver) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 ledgerParam2, data []byte) ([]byte, error) {
// Construct the message payload, possibly split into multiple chunks // Construct the message payload, possibly split into multiple chunks
apdu := make([]byte, 2, 7+len(data)) apdu := make([]byte, 2, 7+len(data))
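A standalone sketch of the framing described above, wrapping one APDU into the first transport chunk; the 0x05 command tag and the single-chunk simplification are assumptions here, since the real driver splits the message across fixed-size HID reports and reassembles the reply:

package main

import (
	"encoding/binary"
	"fmt"
)

// wrapAPDU builds the payload of the first chunk of a Ledger exchange,
// following the transport header and APDU layout documented above.
func wrapAPDU(cla, ins, p1, p2 byte, data []byte) []byte {
	// APDU command payload: 2-byte big-endian length, then CLA/INS/P1/P2,
	// a 1-byte data length and the optional data itself.
	apdu := make([]byte, 2, 7+len(data))
	binary.BigEndian.PutUint16(apdu, uint16(5+len(data)))
	apdu = append(apdu, cla, ins, p1, p2, byte(len(data)))
	apdu = append(apdu, data...)

	// Transport header: channel ID 0101, command tag 0x05 (assumed APDU tag),
	// and the big-endian packet sequence index (0 for the first chunk).
	header := []byte{0x01, 0x01, 0x05, 0x00, 0x00}
	return append(header, apdu...)
}

func main() {
	// Illustrative GET CONFIGURATION request (CLA 0xE0, INS 0x06).
	fmt.Printf("% x\n", wrapAPDU(0xE0, 0x06, 0x00, 0x00, nil))
}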

View File

@ -84,15 +84,15 @@ func (w *trezorDriver) Status() (string, error) {
// Open implements usbwallet.driver, attempting to initialize the connection to // Open implements usbwallet.driver, attempting to initialize the connection to
// the Trezor hardware wallet. Initializing the Trezor is a two or three phase operation: // the Trezor hardware wallet. Initializing the Trezor is a two or three phase operation:
// * The first phase is to initialize the connection and read the wallet's // - The first phase is to initialize the connection and read the wallet's
// features. This phase is invoked if the provided passphrase is empty. The // features. This phase is invoked if the provided passphrase is empty. The
// device will display the pinpad as a result and will return an appropriate // device will display the pinpad as a result and will return an appropriate
// error to notify the user that a second open phase is needed. // error to notify the user that a second open phase is needed.
// * The second phase is to unlock access to the Trezor, which is done by the // - The second phase is to unlock access to the Trezor, which is done by the
// user actually providing a passphrase mapping a keyboard keypad to the pin // user actually providing a passphrase mapping a keyboard keypad to the pin
// number of the user (shuffled according to the pinpad displayed). // number of the user (shuffled according to the pinpad displayed).
// * If needed the device will ask for passphrase which will require calling // - If needed the device will ask for passphrase which will require calling
// open again with the actual passphrase (3rd phase) // open again with the actual passphrase (3rd phase)
func (w *trezorDriver) Open(device io.ReadWriter, passphrase string) error { func (w *trezorDriver) Open(device io.ReadWriter, passphrase string) error {
w.device, w.failure = device, nil w.device, w.failure = device, nil

View File

@ -94,7 +94,7 @@ func (Failure_FailureType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_aaf30d059fdbc38d, []int{1, 0} return fileDescriptor_aaf30d059fdbc38d, []int{1, 0}
} }
//* // *
// Type of button request // Type of button request
type ButtonRequest_ButtonRequestType int32 type ButtonRequest_ButtonRequestType int32
@ -175,7 +175,7 @@ func (ButtonRequest_ButtonRequestType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_aaf30d059fdbc38d, []int{2, 0} return fileDescriptor_aaf30d059fdbc38d, []int{2, 0}
} }
//* // *
// Type of PIN request // Type of PIN request
type PinMatrixRequest_PinMatrixRequestType int32 type PinMatrixRequest_PinMatrixRequestType int32
@ -220,7 +220,7 @@ func (PinMatrixRequest_PinMatrixRequestType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_aaf30d059fdbc38d, []int{4, 0} return fileDescriptor_aaf30d059fdbc38d, []int{4, 0}
} }
//* // *
// Response: Success of the previous request // Response: Success of the previous request
// @end // @end
type Success struct { type Success struct {
@ -262,7 +262,7 @@ func (m *Success) GetMessage() string {
return "" return ""
} }
//* // *
// Response: Failure of the previous request // Response: Failure of the previous request
// @end // @end
type Failure struct { type Failure struct {
@ -312,7 +312,7 @@ func (m *Failure) GetMessage() string {
return "" return ""
} }
//* // *
// Response: Device is waiting for HW button press. // Response: Device is waiting for HW button press.
// @auxstart // @auxstart
// @next ButtonAck // @next ButtonAck
@ -363,7 +363,7 @@ func (m *ButtonRequest) GetData() string {
return "" return ""
} }
//* // *
// Request: Computer agrees to wait for HW button press // Request: Computer agrees to wait for HW button press
// @auxend // @auxend
type ButtonAck struct { type ButtonAck struct {
@ -397,7 +397,7 @@ func (m *ButtonAck) XXX_DiscardUnknown() {
var xxx_messageInfo_ButtonAck proto.InternalMessageInfo var xxx_messageInfo_ButtonAck proto.InternalMessageInfo
//* // *
// Response: Device is asking computer to show PIN matrix and awaits PIN encoded using this matrix scheme // Response: Device is asking computer to show PIN matrix and awaits PIN encoded using this matrix scheme
// @auxstart // @auxstart
// @next PinMatrixAck // @next PinMatrixAck
@ -440,7 +440,7 @@ func (m *PinMatrixRequest) GetType() PinMatrixRequest_PinMatrixRequestType {
return PinMatrixRequest_PinMatrixRequestType_Current return PinMatrixRequest_PinMatrixRequestType_Current
} }
//* // *
// Request: Computer responds with encoded PIN // Request: Computer responds with encoded PIN
// @auxend // @auxend
type PinMatrixAck struct { type PinMatrixAck struct {
@ -482,7 +482,7 @@ func (m *PinMatrixAck) GetPin() string {
return "" return ""
} }
//* // *
// Response: Device awaits encryption passphrase // Response: Device awaits encryption passphrase
// @auxstart // @auxstart
// @next PassphraseAck // @next PassphraseAck
@ -525,7 +525,7 @@ func (m *PassphraseRequest) GetOnDevice() bool {
return false return false
} }
//* // *
// Request: Send passphrase back // Request: Send passphrase back
// @next PassphraseStateRequest // @next PassphraseStateRequest
type PassphraseAck struct { type PassphraseAck struct {
@ -575,7 +575,7 @@ func (m *PassphraseAck) GetState() []byte {
return nil return nil
} }
//* // *
// Response: Device awaits passphrase state // Response: Device awaits passphrase state
// @next PassphraseStateAck // @next PassphraseStateAck
type PassphraseStateRequest struct { type PassphraseStateRequest struct {
@ -617,7 +617,7 @@ func (m *PassphraseStateRequest) GetState() []byte {
return nil return nil
} }
//* // *
// Request: Send passphrase state back // Request: Send passphrase state back
// @auxend // @auxend
type PassphraseStateAck struct { type PassphraseStateAck struct {
@ -651,7 +651,7 @@ func (m *PassphraseStateAck) XXX_DiscardUnknown() {
var xxx_messageInfo_PassphraseStateAck proto.InternalMessageInfo var xxx_messageInfo_PassphraseStateAck proto.InternalMessageInfo
//* // *
// Structure representing BIP32 (hierarchical deterministic) node // Structure representing BIP32 (hierarchical deterministic) node
// Used for imports of private key into the device and exporting public key out of device // Used for imports of private key into the device and exporting public key out of device
// @embed // @embed

View File

@ -21,7 +21,7 @@ var _ = math.Inf
// proto package needs to be updated. // proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
//* // *
// Request: Ask device for public key corresponding to address_n path // Request: Ask device for public key corresponding to address_n path
// @start // @start
// @next EthereumPublicKey // @next EthereumPublicKey
@ -73,7 +73,7 @@ func (m *EthereumGetPublicKey) GetShowDisplay() bool {
return false return false
} }
//* // *
// Response: Contains public key derived from device private seed // Response: Contains public key derived from device private seed
// @end // @end
type EthereumPublicKey struct { type EthereumPublicKey struct {
@ -123,7 +123,7 @@ func (m *EthereumPublicKey) GetXpub() string {
return "" return ""
} }
//* // *
// Request: Ask device for Ethereum address corresponding to address_n path // Request: Ask device for Ethereum address corresponding to address_n path
// @start // @start
// @next EthereumAddress // @next EthereumAddress
@ -175,7 +175,7 @@ func (m *EthereumGetAddress) GetShowDisplay() bool {
return false return false
} }
//* // *
// Response: Contains an Ethereum address derived from device private seed // Response: Contains an Ethereum address derived from device private seed
// @end // @end
type EthereumAddress struct { type EthereumAddress struct {
@ -225,7 +225,7 @@ func (m *EthereumAddress) GetAddressHex() string {
return "" return ""
} }
//* // *
// Request: Ask device to sign transaction // Request: Ask device to sign transaction
// All fields are optional from the protocol's point of view. Each field defaults to value `0` if missing. // All fields are optional from the protocol's point of view. Each field defaults to value `0` if missing.
// Note: the first at most 1024 bytes of data MUST be transmitted as part of this message. // Note: the first at most 1024 bytes of data MUST be transmitted as part of this message.
@ -351,7 +351,7 @@ func (m *EthereumSignTx) GetTxType() uint32 {
return 0 return 0
} }
//* // *
// Response: Device asks for more data from transaction payload, or returns the signature. // Response: Device asks for more data from transaction payload, or returns the signature.
// If data_length is set, device awaits that many more bytes of payload. // If data_length is set, device awaits that many more bytes of payload.
// Otherwise, the signature_* fields contain the computed transaction signature. All three fields will be present. // Otherwise, the signature_* fields contain the computed transaction signature. All three fields will be present.
@ -420,7 +420,7 @@ func (m *EthereumTxRequest) GetSignatureS() []byte {
return nil return nil
} }
//* // *
// Request: Transaction payload data. // Request: Transaction payload data.
// @next EthereumTxRequest // @next EthereumTxRequest
type EthereumTxAck struct { type EthereumTxAck struct {
@ -462,7 +462,7 @@ func (m *EthereumTxAck) GetDataChunk() []byte {
return nil return nil
} }
//* // *
// Request: Ask device to sign message // Request: Ask device to sign message
// @start // @start
// @next EthereumMessageSignature // @next EthereumMessageSignature
@ -514,7 +514,7 @@ func (m *EthereumSignMessage) GetMessage() []byte {
return nil return nil
} }
//* // *
// Response: Signed message // Response: Signed message
// @end // @end
type EthereumMessageSignature struct { type EthereumMessageSignature struct {
@ -572,7 +572,7 @@ func (m *EthereumMessageSignature) GetAddressHex() string {
return "" return ""
} }
//* // *
// Request: Ask device to verify message // Request: Ask device to verify message
// @start // @start
// @next Success // @next Success

View File

@ -21,7 +21,7 @@ var _ = math.Inf
// proto package needs to be updated. // proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
//* // *
// Structure representing passphrase source // Structure representing passphrase source
type ApplySettings_PassphraseSourceType int32 type ApplySettings_PassphraseSourceType int32
@ -66,7 +66,7 @@ func (ApplySettings_PassphraseSourceType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_0c720c20d27aa029, []int{4, 0} return fileDescriptor_0c720c20d27aa029, []int{4, 0}
} }
//* // *
// Type of recovery procedure. These should be used as bitmask, e.g., // Type of recovery procedure. These should be used as bitmask, e.g.,
// `RecoveryDeviceType_ScrambledWords | RecoveryDeviceType_Matrix` // `RecoveryDeviceType_ScrambledWords | RecoveryDeviceType_Matrix`
// listing every method supported by the host computer. // listing every method supported by the host computer.
@ -114,7 +114,7 @@ func (RecoveryDevice_RecoveryDeviceType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_0c720c20d27aa029, []int{17, 0} return fileDescriptor_0c720c20d27aa029, []int{17, 0}
} }
//* // *
// Type of Recovery Word request // Type of Recovery Word request
type WordRequest_WordRequestType int32 type WordRequest_WordRequestType int32
@ -159,7 +159,7 @@ func (WordRequest_WordRequestType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_0c720c20d27aa029, []int{18, 0} return fileDescriptor_0c720c20d27aa029, []int{18, 0}
} }
//* // *
// Request: Reset device to default state and ask for device details // Request: Reset device to default state and ask for device details
// @start // @start
// @next Features // @next Features
@ -210,7 +210,7 @@ func (m *Initialize) GetSkipPassphrase() bool {
return false return false
} }
//* // *
// Request: Ask for device details (no device reset) // Request: Ask for device details (no device reset)
// @start // @start
// @next Features // @next Features
@ -245,7 +245,7 @@ func (m *GetFeatures) XXX_DiscardUnknown() {
var xxx_messageInfo_GetFeatures proto.InternalMessageInfo var xxx_messageInfo_GetFeatures proto.InternalMessageInfo
//* // *
// Response: Reports various information about the device // Response: Reports various information about the device
// @end // @end
type Features struct { type Features struct {
@ -495,7 +495,7 @@ func (m *Features) GetNoBackup() bool {
return false return false
} }
//* // *
// Request: clear session (removes cached PIN, passphrase, etc). // Request: clear session (removes cached PIN, passphrase, etc).
// @start // @start
// @next Success // @next Success
@ -530,7 +530,7 @@ func (m *ClearSession) XXX_DiscardUnknown() {
var xxx_messageInfo_ClearSession proto.InternalMessageInfo var xxx_messageInfo_ClearSession proto.InternalMessageInfo
//* // *
// Request: change language and/or label of the device // Request: change language and/or label of the device
// @start // @start
// @next Success // @next Success
@ -622,7 +622,7 @@ func (m *ApplySettings) GetDisplayRotation() uint32 {
return 0 return 0
} }
//* // *
// Request: set flags of the device // Request: set flags of the device
// @start // @start
// @next Success // @next Success
@ -666,7 +666,7 @@ func (m *ApplyFlags) GetFlags() uint32 {
return 0 return 0
} }
//* // *
// Request: Starts workflow for setting/changing/removing the PIN // Request: Starts workflow for setting/changing/removing the PIN
// @start // @start
// @next Success // @next Success
@ -710,7 +710,7 @@ func (m *ChangePin) GetRemove() bool {
return false return false
} }
//* // *
// Request: Test if the device is alive, device sends back the message in Success response // Request: Test if the device is alive, device sends back the message in Success response
// @start // @start
// @next Success // @next Success
@ -777,7 +777,7 @@ func (m *Ping) GetPassphraseProtection() bool {
return false return false
} }
//* // *
// Request: Abort last operation that required user interaction // Request: Abort last operation that required user interaction
// @start // @start
// @next Failure // @next Failure
@ -812,7 +812,7 @@ func (m *Cancel) XXX_DiscardUnknown() {
var xxx_messageInfo_Cancel proto.InternalMessageInfo var xxx_messageInfo_Cancel proto.InternalMessageInfo
//* // *
// Request: Request a sample of random data generated by hardware RNG. May be used for testing. // Request: Request a sample of random data generated by hardware RNG. May be used for testing.
// @start // @start
// @next Entropy // @next Entropy
@ -856,7 +856,7 @@ func (m *GetEntropy) GetSize() uint32 {
return 0 return 0
} }
//* // *
// Response: Reply with random data generated by internal RNG // Response: Reply with random data generated by internal RNG
// @end // @end
type Entropy struct { type Entropy struct {
@ -898,7 +898,7 @@ func (m *Entropy) GetEntropy() []byte {
return nil return nil
} }
//* // *
// Request: Request device to wipe all sensitive data and settings // Request: Request device to wipe all sensitive data and settings
// @start // @start
// @next Success // @next Success
@ -934,7 +934,7 @@ func (m *WipeDevice) XXX_DiscardUnknown() {
var xxx_messageInfo_WipeDevice proto.InternalMessageInfo var xxx_messageInfo_WipeDevice proto.InternalMessageInfo
//* // *
// Request: Load seed and related internal settings from the computer // Request: Load seed and related internal settings from the computer
// @start // @start
// @next Success // @next Success
@ -1036,7 +1036,7 @@ func (m *LoadDevice) GetU2FCounter() uint32 {
return 0 return 0
} }
//* // *
// Request: Ask device to do initialization involving user interaction // Request: Ask device to do initialization involving user interaction
// @start // @start
// @next EntropyRequest // @next EntropyRequest
@ -1147,7 +1147,7 @@ func (m *ResetDevice) GetNoBackup() bool {
return false return false
} }
//* // *
// Request: Perform backup of the device seed if not backed up using ResetDevice // Request: Perform backup of the device seed if not backed up using ResetDevice
// @start // @start
// @next Success // @next Success
@ -1182,7 +1182,7 @@ func (m *BackupDevice) XXX_DiscardUnknown() {
var xxx_messageInfo_BackupDevice proto.InternalMessageInfo var xxx_messageInfo_BackupDevice proto.InternalMessageInfo
//* // *
// Response: Ask for additional entropy from host computer // Response: Ask for additional entropy from host computer
// @next EntropyAck // @next EntropyAck
type EntropyRequest struct { type EntropyRequest struct {
@ -1216,7 +1216,7 @@ func (m *EntropyRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_EntropyRequest proto.InternalMessageInfo var xxx_messageInfo_EntropyRequest proto.InternalMessageInfo
//* // *
// Request: Provide additional entropy for seed generation function // Request: Provide additional entropy for seed generation function
// @next Success // @next Success
type EntropyAck struct { type EntropyAck struct {
@ -1258,7 +1258,7 @@ func (m *EntropyAck) GetEntropy() []byte {
return nil return nil
} }
//* // *
// Request: Start recovery workflow asking user for specific words of mnemonic // Request: Start recovery workflow asking user for specific words of mnemonic
// Used to recovery device safely even on untrusted computer. // Used to recovery device safely even on untrusted computer.
// @start // @start
@ -1369,7 +1369,7 @@ func (m *RecoveryDevice) GetDryRun() bool {
return false return false
} }
//* // *
// Response: Device is waiting for user to enter word of the mnemonic // Response: Device is waiting for user to enter word of the mnemonic
// Its position is shown only on device's internal display. // Its position is shown only on device's internal display.
// @next WordAck // @next WordAck
@ -1412,7 +1412,7 @@ func (m *WordRequest) GetType() WordRequest_WordRequestType {
return WordRequest_WordRequestType_Plain return WordRequest_WordRequestType_Plain
} }
//* // *
// Request: Computer replies with word from the mnemonic // Request: Computer replies with word from the mnemonic
// @next WordRequest // @next WordRequest
// @next Success // @next Success
@ -1456,7 +1456,7 @@ func (m *WordAck) GetWord() string {
return "" return ""
} }
//* // *
// Request: Set U2F counter // Request: Set U2F counter
// @start // @start
// @next Success // @next Success

View File

@ -22,7 +22,7 @@ var _ = math.Inf
// proto package needs to be updated. // proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
//* // *
// Mapping between TREZOR wire identifier (uint) and a protobuf message // Mapping between TREZOR wire identifier (uint) and a protobuf message
type MessageType int32 type MessageType int32

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package beacon package engine
import ( import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -74,12 +74,12 @@ var (
// - newPayloadV1: if the payload was accepted, but not processed (side chain) // - newPayloadV1: if the payload was accepted, but not processed (side chain)
ACCEPTED = "ACCEPTED" ACCEPTED = "ACCEPTED"
INVALIDBLOCKHASH = "INVALID_BLOCK_HASH"
GenericServerError = &EngineAPIError{code: -32000, msg: "Server error"} GenericServerError = &EngineAPIError{code: -32000, msg: "Server error"}
UnknownPayload = &EngineAPIError{code: -38001, msg: "Unknown payload"} UnknownPayload = &EngineAPIError{code: -38001, msg: "Unknown payload"}
InvalidForkChoiceState = &EngineAPIError{code: -38002, msg: "Invalid forkchoice state"} InvalidForkChoiceState = &EngineAPIError{code: -38002, msg: "Invalid forkchoice state"}
InvalidPayloadAttributes = &EngineAPIError{code: -38003, msg: "Invalid payload attributes"} InvalidPayloadAttributes = &EngineAPIError{code: -38003, msg: "Invalid payload attributes"}
TooLargeRequest = &EngineAPIError{code: -38004, msg: "Too large request"}
InvalidParams = &EngineAPIError{code: -32602, msg: "Invalid parameters"}
STATUS_INVALID = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: INVALID}, PayloadID: nil} STATUS_INVALID = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: INVALID}, PayloadID: nil}
STATUS_SYNCING = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: SYNCING}, PayloadID: nil} STATUS_SYNCING = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: SYNCING}, PayloadID: nil}

View File

@ -0,0 +1,60 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
package engine
import (
"encoding/json"
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
var _ = (*payloadAttributesMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (p PayloadAttributes) MarshalJSON() ([]byte, error) {
type PayloadAttributes struct {
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
}
var enc PayloadAttributes
enc.Timestamp = hexutil.Uint64(p.Timestamp)
enc.Random = p.Random
enc.SuggestedFeeRecipient = p.SuggestedFeeRecipient
enc.Withdrawals = p.Withdrawals
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (p *PayloadAttributes) UnmarshalJSON(input []byte) error {
type PayloadAttributes struct {
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Random *common.Hash `json:"prevRandao" gencodec:"required"`
SuggestedFeeRecipient *common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
}
var dec PayloadAttributes
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
if dec.Timestamp == nil {
return errors.New("missing required field 'timestamp' for PayloadAttributes")
}
p.Timestamp = uint64(*dec.Timestamp)
if dec.Random == nil {
return errors.New("missing required field 'prevRandao' for PayloadAttributes")
}
p.Random = *dec.Random
if dec.SuggestedFeeRecipient == nil {
return errors.New("missing required field 'suggestedFeeRecipient' for PayloadAttributes")
}
p.SuggestedFeeRecipient = *dec.SuggestedFeeRecipient
if dec.Withdrawals != nil {
p.Withdrawals = dec.Withdrawals
}
return nil
}
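A small usage sketch of the generated decoder, assuming the package lives at beacon/engine as introduced in this change; the suggestedFeeRecipient field is deliberately omitted from the input to trigger the required-field check:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/beacon/engine"
)

func main() {
	blob := []byte(`{
		"timestamp": "0x64",
		"prevRandao": "0x0000000000000000000000000000000000000000000000000000000000000000"
	}`)
	var attrs engine.PayloadAttributes
	if err := json.Unmarshal(blob, &attrs); err != nil {
		// => missing required field 'suggestedFeeRecipient' for PayloadAttributes
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("timestamp:", attrs.Timestamp)
}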

View File

@ -1,6 +1,6 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT. // Code generated by github.com/fjl/gencodec. DO NOT EDIT.
package beacon package engine
import ( import (
"encoding/json" "encoding/json"
@ -9,29 +9,31 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
) )
var _ = (*executableDataMarshaling)(nil) var _ = (*executableDataMarshaling)(nil)
// MarshalJSON marshals as JSON. // MarshalJSON marshals as JSON.
func (e ExecutableDataV1) MarshalJSON() ([]byte, error) { func (e ExecutableData) MarshalJSON() ([]byte, error) {
type ExecutableDataV1 struct { type ExecutableData struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"` ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"` StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"` LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"` Random common.Hash `json:"prevRandao" gencodec:"required"`
Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"` Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"` ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"` BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
} }
var enc ExecutableDataV1 var enc ExecutableData
enc.ParentHash = e.ParentHash enc.ParentHash = e.ParentHash
enc.FeeRecipient = e.FeeRecipient enc.FeeRecipient = e.FeeRecipient
enc.StateRoot = e.StateRoot enc.StateRoot = e.StateRoot
@ -51,89 +53,94 @@ func (e ExecutableDataV1) MarshalJSON() ([]byte, error) {
enc.Transactions[k] = v enc.Transactions[k] = v
} }
} }
enc.Withdrawals = e.Withdrawals
return json.Marshal(&enc) return json.Marshal(&enc)
} }
// UnmarshalJSON unmarshals from JSON. // UnmarshalJSON unmarshals from JSON.
func (e *ExecutableDataV1) UnmarshalJSON(input []byte) error { func (e *ExecutableData) UnmarshalJSON(input []byte) error {
type ExecutableDataV1 struct { type ExecutableData struct {
ParentHash *common.Hash `json:"parentHash" gencodec:"required"` ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"` FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot *common.Hash `json:"stateRoot" gencodec:"required"` StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"` ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"` LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
Random *common.Hash `json:"prevRandao" gencodec:"required"` Random *common.Hash `json:"prevRandao" gencodec:"required"`
Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"` Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"` Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"` ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
BlockHash *common.Hash `json:"blockHash" gencodec:"required"` BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
} }
var dec ExecutableDataV1 var dec ExecutableData
if err := json.Unmarshal(input, &dec); err != nil { if err := json.Unmarshal(input, &dec); err != nil {
return err return err
} }
if dec.ParentHash == nil { if dec.ParentHash == nil {
return errors.New("missing required field 'parentHash' for ExecutableDataV1") return errors.New("missing required field 'parentHash' for ExecutableData")
} }
e.ParentHash = *dec.ParentHash e.ParentHash = *dec.ParentHash
if dec.FeeRecipient == nil { if dec.FeeRecipient == nil {
return errors.New("missing required field 'feeRecipient' for ExecutableDataV1") return errors.New("missing required field 'feeRecipient' for ExecutableData")
} }
e.FeeRecipient = *dec.FeeRecipient e.FeeRecipient = *dec.FeeRecipient
if dec.StateRoot == nil { if dec.StateRoot == nil {
return errors.New("missing required field 'stateRoot' for ExecutableDataV1") return errors.New("missing required field 'stateRoot' for ExecutableData")
} }
e.StateRoot = *dec.StateRoot e.StateRoot = *dec.StateRoot
if dec.ReceiptsRoot == nil { if dec.ReceiptsRoot == nil {
return errors.New("missing required field 'receiptsRoot' for ExecutableDataV1") return errors.New("missing required field 'receiptsRoot' for ExecutableData")
} }
e.ReceiptsRoot = *dec.ReceiptsRoot e.ReceiptsRoot = *dec.ReceiptsRoot
if dec.LogsBloom == nil { if dec.LogsBloom == nil {
return errors.New("missing required field 'logsBloom' for ExecutableDataV1") return errors.New("missing required field 'logsBloom' for ExecutableData")
} }
e.LogsBloom = *dec.LogsBloom e.LogsBloom = *dec.LogsBloom
if dec.Random == nil { if dec.Random == nil {
return errors.New("missing required field 'prevRandao' for ExecutableDataV1") return errors.New("missing required field 'prevRandao' for ExecutableData")
} }
e.Random = *dec.Random e.Random = *dec.Random
if dec.Number == nil { if dec.Number == nil {
return errors.New("missing required field 'blockNumber' for ExecutableDataV1") return errors.New("missing required field 'blockNumber' for ExecutableData")
} }
e.Number = uint64(*dec.Number) e.Number = uint64(*dec.Number)
if dec.GasLimit == nil { if dec.GasLimit == nil {
return errors.New("missing required field 'gasLimit' for ExecutableDataV1") return errors.New("missing required field 'gasLimit' for ExecutableData")
} }
e.GasLimit = uint64(*dec.GasLimit) e.GasLimit = uint64(*dec.GasLimit)
if dec.GasUsed == nil { if dec.GasUsed == nil {
return errors.New("missing required field 'gasUsed' for ExecutableDataV1") return errors.New("missing required field 'gasUsed' for ExecutableData")
} }
e.GasUsed = uint64(*dec.GasUsed) e.GasUsed = uint64(*dec.GasUsed)
if dec.Timestamp == nil { if dec.Timestamp == nil {
return errors.New("missing required field 'timestamp' for ExecutableDataV1") return errors.New("missing required field 'timestamp' for ExecutableData")
} }
e.Timestamp = uint64(*dec.Timestamp) e.Timestamp = uint64(*dec.Timestamp)
if dec.ExtraData == nil { if dec.ExtraData == nil {
return errors.New("missing required field 'extraData' for ExecutableDataV1") return errors.New("missing required field 'extraData' for ExecutableData")
} }
e.ExtraData = *dec.ExtraData e.ExtraData = *dec.ExtraData
if dec.BaseFeePerGas == nil { if dec.BaseFeePerGas == nil {
return errors.New("missing required field 'baseFeePerGas' for ExecutableDataV1") return errors.New("missing required field 'baseFeePerGas' for ExecutableData")
} }
e.BaseFeePerGas = (*big.Int)(dec.BaseFeePerGas) e.BaseFeePerGas = (*big.Int)(dec.BaseFeePerGas)
if dec.BlockHash == nil { if dec.BlockHash == nil {
return errors.New("missing required field 'blockHash' for ExecutableDataV1") return errors.New("missing required field 'blockHash' for ExecutableData")
} }
e.BlockHash = *dec.BlockHash e.BlockHash = *dec.BlockHash
if dec.Transactions == nil { if dec.Transactions == nil {
return errors.New("missing required field 'transactions' for ExecutableDataV1") return errors.New("missing required field 'transactions' for ExecutableData")
} }
e.Transactions = make([][]byte, len(dec.Transactions)) e.Transactions = make([][]byte, len(dec.Transactions))
for k, v := range dec.Transactions { for k, v := range dec.Transactions {
e.Transactions[k] = v e.Transactions[k] = v
} }
if dec.Withdrawals != nil {
e.Withdrawals = dec.Withdrawals
}
return nil return nil
} }

46
beacon/engine/gen_epe.go Normal file
View File

@ -0,0 +1,46 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
package engine
import (
"encoding/json"
"errors"
"math/big"
"github.com/ethereum/go-ethereum/common/hexutil"
)
var _ = (*executionPayloadEnvelopeMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (e ExecutionPayloadEnvelope) MarshalJSON() ([]byte, error) {
type ExecutionPayloadEnvelope struct {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
}
var enc ExecutionPayloadEnvelope
enc.ExecutionPayload = e.ExecutionPayload
enc.BlockValue = (*hexutil.Big)(e.BlockValue)
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
type ExecutionPayloadEnvelope struct {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
}
var dec ExecutionPayloadEnvelope
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
if dec.ExecutionPayload == nil {
return errors.New("missing required field 'executionPayload' for ExecutionPayloadEnvelope")
}
e.ExecutionPayload = dec.ExecutionPayload
if dec.BlockValue == nil {
return errors.New("missing required field 'blockValue' for ExecutionPayloadEnvelope")
}
e.BlockValue = (*big.Int)(dec.BlockValue)
return nil
}
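A minimal usage sketch of the envelope encoder above; the payload contents and fee value are placeholders:

package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/beacon/engine"
)

func main() {
	env := engine.ExecutionPayloadEnvelope{
		ExecutionPayload: &engine.ExecutableData{},  // placeholder payload
		BlockValue:       big.NewInt(1_000_000_000), // illustrative fee total, 1 gwei
	}
	out, _ := json.Marshal(&env)
	fmt.Println(string(out)) // blockValue is hex-encoded, e.g. "blockValue":"0x3b9aca00"
}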

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package beacon package engine
import ( import (
"fmt" "fmt"
@ -26,38 +26,41 @@ import (
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
) )
//go:generate go run github.com/fjl/gencodec -type PayloadAttributesV1 -field-override payloadAttributesMarshaling -out gen_blockparams.go //go:generate go run github.com/fjl/gencodec -type PayloadAttributes -field-override payloadAttributesMarshaling -out gen_blockparams.go
// PayloadAttributesV1 structure described at https://github.com/ethereum/execution-apis/pull/74 // PayloadAttributes describes the environment context in which a block should
type PayloadAttributesV1 struct { // be built.
Timestamp uint64 `json:"timestamp" gencodec:"required"` type PayloadAttributes struct {
Random common.Hash `json:"prevRandao" gencodec:"required"` Timestamp uint64 `json:"timestamp" gencodec:"required"`
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"` Random common.Hash `json:"prevRandao" gencodec:"required"`
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
} }
// JSON type overrides for PayloadAttributesV1. // JSON type overrides for PayloadAttributes.
type payloadAttributesMarshaling struct { type payloadAttributesMarshaling struct {
Timestamp hexutil.Uint64 Timestamp hexutil.Uint64
} }
//go:generate go run github.com/fjl/gencodec -type ExecutableDataV1 -field-override executableDataMarshaling -out gen_ed.go //go:generate go run github.com/fjl/gencodec -type ExecutableData -field-override executableDataMarshaling -out gen_ed.go
// ExecutableDataV1 structure described at https://github.com/ethereum/execution-apis/tree/main/src/engine/specification.md // ExecutableData is the data necessary to execute an EL payload.
type ExecutableDataV1 struct { type ExecutableData struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"` ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"` StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom []byte `json:"logsBloom" gencodec:"required"` LogsBloom []byte `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"` Random common.Hash `json:"prevRandao" gencodec:"required"`
Number uint64 `json:"blockNumber" gencodec:"required"` Number uint64 `json:"blockNumber" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"` GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"` GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Timestamp uint64 `json:"timestamp" gencodec:"required"` Timestamp uint64 `json:"timestamp" gencodec:"required"`
ExtraData []byte `json:"extraData" gencodec:"required"` ExtraData []byte `json:"extraData" gencodec:"required"`
BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"` BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"` BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions [][]byte `json:"transactions" gencodec:"required"` Transactions [][]byte `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
} }
// JSON type overrides for executableData. // JSON type overrides for executableData.
@ -72,6 +75,18 @@ type executableDataMarshaling struct {
Transactions []hexutil.Bytes Transactions []hexutil.Bytes
} }
//go:generate go run github.com/fjl/gencodec -type ExecutionPayloadEnvelope -field-override executionPayloadEnvelopeMarshaling -out gen_epe.go
type ExecutionPayloadEnvelope struct {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *big.Int `json:"blockValue" gencodec:"required"`
}
// JSON type overrides for ExecutionPayloadEnvelope.
type executionPayloadEnvelopeMarshaling struct {
BlockValue *hexutil.Big
}
type PayloadStatusV1 struct { type PayloadStatusV1 struct {
Status string `json:"status"` Status string `json:"status"`
LatestValidHash *common.Hash `json:"latestValidHash"` LatestValidHash *common.Hash `json:"latestValidHash"`
@ -136,11 +151,15 @@ func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
// ExecutableDataToBlock constructs a block from executable data. // ExecutableDataToBlock constructs a block from executable data.
// It verifies that the following fields: // It verifies that the following fields:
// len(extraData) <= 32 //
// uncleHash = emptyUncleHash // len(extraData) <= 32
// difficulty = 0 // uncleHash = emptyUncleHash
// and that the blockhash of the constructed block matches the parameters. // difficulty = 0
func ExecutableDataToBlock(params ExecutableDataV1) (*types.Block, error) { //
// and that the blockhash of the constructed block matches the parameters. A nil
// Withdrawals value propagates through to the returned block; an empty
// withdrawals list must be passed as a non-nil, zero-length slice in params.
func ExecutableDataToBlock(params ExecutableData) (*types.Block, error) {
txs, err := decodeTransactions(params.Transactions) txs, err := decodeTransactions(params.Transactions)
if err != nil { if err != nil {
return nil, err return nil, err
@ -155,34 +174,43 @@ func ExecutableDataToBlock(params ExecutableDataV1) (*types.Block, error) {
if params.BaseFeePerGas != nil && (params.BaseFeePerGas.Sign() == -1 || params.BaseFeePerGas.BitLen() > 256) { if params.BaseFeePerGas != nil && (params.BaseFeePerGas.Sign() == -1 || params.BaseFeePerGas.BitLen() > 256) {
return nil, fmt.Errorf("invalid baseFeePerGas: %v", params.BaseFeePerGas) return nil, fmt.Errorf("invalid baseFeePerGas: %v", params.BaseFeePerGas)
} }
header := &types.Header{ // Only set withdrawalsRoot if it is non-nil. This allows CLs to use
ParentHash: params.ParentHash, // ExecutableData before withdrawals are enabled by marshaling
UncleHash: types.EmptyUncleHash, // Withdrawals as the json null value.
Coinbase: params.FeeRecipient, var withdrawalsRoot *common.Hash
Root: params.StateRoot, if params.Withdrawals != nil {
TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)), h := types.DeriveSha(types.Withdrawals(params.Withdrawals), trie.NewStackTrie(nil))
ReceiptHash: params.ReceiptsRoot, withdrawalsRoot = &h
Bloom: types.BytesToBloom(params.LogsBloom),
Difficulty: common.Big0,
Number: new(big.Int).SetUint64(params.Number),
GasLimit: params.GasLimit,
GasUsed: params.GasUsed,
Time: params.Timestamp,
BaseFee: params.BaseFeePerGas,
Extra: params.ExtraData,
MixDigest: params.Random,
} }
block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */) header := &types.Header{
ParentHash: params.ParentHash,
UncleHash: types.EmptyUncleHash,
Coinbase: params.FeeRecipient,
Root: params.StateRoot,
TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
ReceiptHash: params.ReceiptsRoot,
Bloom: types.BytesToBloom(params.LogsBloom),
Difficulty: common.Big0,
Number: new(big.Int).SetUint64(params.Number),
GasLimit: params.GasLimit,
GasUsed: params.GasUsed,
Time: params.Timestamp,
BaseFee: params.BaseFeePerGas,
Extra: params.ExtraData,
MixDigest: params.Random,
WithdrawalsHash: withdrawalsRoot,
}
block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */).WithWithdrawals(params.Withdrawals)
if block.Hash() != params.BlockHash { if block.Hash() != params.BlockHash {
return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash()) return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash())
} }
return block, nil return block, nil
} }
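A minimal sketch of the nil-versus-empty Withdrawals distinction described above, assuming go-ethereum v1.11 import paths; the helper name is hypothetical. A nil slice corresponds to the JSON null a pre-Shanghai consensus client sends, so the header keeps no withdrawalsRoot, while a non-nil empty slice commits to the empty withdrawals trie root.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

// withdrawalsRootFor mirrors the logic above: nil leaves the header's
// WithdrawalsHash unset, a non-nil empty slice derives the empty trie root.
func withdrawalsRootFor(ws []*types.Withdrawal) *common.Hash {
	if ws == nil {
		return nil
	}
	h := types.DeriveSha(types.Withdrawals(ws), trie.NewStackTrie(nil))
	return &h
}

func main() {
	if withdrawalsRootFor(nil) == nil {
		fmt.Println("nil withdrawals: withdrawalsRoot left unset")
	}
	fmt.Println("empty withdrawals:", *withdrawalsRootFor([]*types.Withdrawal{}))
}
```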
// BlockToExecutableData constructs the executableDataV1 structure by filling the // BlockToExecutableData constructs the ExecutableData structure by filling the
// fields from the given block. It assumes the given block is post-merge block. // fields from the given block. It assumes the given block is post-merge block.
func BlockToExecutableData(block *types.Block) *ExecutableDataV1 { func BlockToExecutableData(block *types.Block, fees *big.Int) *ExecutionPayloadEnvelope {
return &ExecutableDataV1{ data := &ExecutableData{
BlockHash: block.Hash(), BlockHash: block.Hash(),
ParentHash: block.ParentHash(), ParentHash: block.ParentHash(),
FeeRecipient: block.Coinbase(), FeeRecipient: block.Coinbase(),
@ -197,5 +225,13 @@ func BlockToExecutableData(block *types.Block) *ExecutableDataV1 {
Transactions: encodeTransactions(block.Transactions()), Transactions: encodeTransactions(block.Transactions()),
Random: block.MixDigest(), Random: block.MixDigest(),
ExtraData: block.Extra(), ExtraData: block.Extra(),
Withdrawals: block.Withdrawals(),
} }
return &ExecutionPayloadEnvelope{ExecutionPayload: data, BlockValue: fees}
}
// ExecutionPayloadBodyV1 is used in the response to GetPayloadBodiesByHashV1 and GetPayloadBodiesByRangeV1
type ExecutionPayloadBodyV1 struct {
TransactionData []hexutil.Bytes `json:"transactions"`
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
} }

View File

@ -1,38 +1,46 @@
# This file contains sha256 checksums of optional build dependencies. # This file contains sha256 checksums of optional build dependencies.
9920d3306a1ac536cdd2c796d6cb3c54bc559c226fc3cc39c32f1e0bd7f50d2a go1.18.5.src.tar.gz b5c1a3af52c385a6d1c76aed5361cf26459023980d0320de7658bae3915831a2 go1.20.1.src.tar.gz
828eeca8b5abea3e56921df8fa4b1101380a5ebcfee10acbc8ffe7ec0bf5876b go1.18.5.darwin-amd64.tar.gz a300a45e801ab459f3008aae5bb9efbe9a6de9bcd12388f5ca9bbd14f70236de go1.20.1.darwin-amd64.tar.gz
923a377c6fc9a2c789f5db61c24b8f64133f7889056897449891f256af34065f go1.18.5.darwin-arm64.tar.gz f1a8e06c7f1ba1c008313577f3f58132eb166a41ceb95ce6e9af30bc5a3efca4 go1.20.1.darwin-arm64.tar.gz
c3d90264a706e2d88cfb44126dc6f0d008a48f00732e04bc377cea1a2b716a7c go1.18.5.freebsd-386.tar.gz 57d80349dc4fbf692f8cd85a5971f97841aedafcf211e367e59d3ae812292660 go1.20.1.freebsd-386.tar.gz
0de23843c568d388bc0f0e390a8966938cccaae0d74b698325f7175bac04e0c6 go1.18.5.freebsd-amd64.tar.gz 6e124d54d5850a15fdb15754f782986f06af23c5ddb6690849417b9c74f05f98 go1.20.1.freebsd-amd64.tar.gz
0c44f85d146c6f98c34e8ff436a42af22e90e36fe232d3d9d3101f23fd61362b go1.18.5.linux-386.tar.gz 3a7345036ebd92455b653e4b4f6aaf4f7e1f91f4ced33b23d7059159cec5f4d7 go1.20.1.linux-386.tar.gz
9e5de37f9c49942c601b191ac5fba404b868bfc21d446d6960acc12283d6e5f2 go1.18.5.linux-amd64.tar.gz 000a5b1fca4f75895f78befeb2eecf10bfff3c428597f3f1e69133b63b911b02 go1.20.1.linux-amd64.tar.gz
006f6622718212363fa1ff004a6ab4d87bbbe772ec5631bab7cac10be346e4f1 go1.18.5.linux-arm64.tar.gz 5e5e2926733595e6f3c5b5ad1089afac11c1490351855e87849d0e7702b1ec2e go1.20.1.linux-arm64.tar.gz
d5ac34ac5f060a5274319aa04b7b11e41b123bd7887d64efb5f44ead236957af go1.18.5.linux-armv6l.tar.gz e4edc05558ab3657ba3dddb909209463cee38df9c1996893dd08cde274915003 go1.20.1.linux-armv6l.tar.gz
2e37fb9c7cbaedd4e729492d658aa4cde821fc94117391a8105c13b25ca1c84b go1.18.5.linux-ppc64le.tar.gz 85cfd4b89b48c94030783b6e9e619e35557862358b846064636361421d0b0c52 go1.20.1.linux-ppc64le.tar.gz
e3d536e7873639f85353e892444f83b14cb6670603961f215986ae8e28e8e07a go1.18.5.linux-s390x.tar.gz ba3a14381ed4538216dec3ea72b35731750597edd851cece1eb120edf7d60149 go1.20.1.linux-s390x.tar.gz
7b3142ec0c5db991e7f73a231662a92429b90ee151fe47557acb566d8d9ae4d3 go1.18.5.windows-386.zip 61259b5a346193e30b7b3c3f8d108062db25bbb80cf290ee251eeb855965f6ee go1.20.1.windows-386.zip
73753620602d4b4469770040c53db55e5dd6af2ad07ecc18f71f164c3224eaad go1.18.5.windows-amd64.zip 3b493969196a6de8d9762d09f5bc5ae7a3e5814b0cfbf9cc26838c2bc1314f9c go1.20.1.windows-amd64.zip
4d154626affff12ef73ea1017af0e5b52dbc839ef92f6f9e76cf4f71278a5744 go1.18.5.windows-arm64.zip 62d14ddb44bcda27c9b1f5ad9ffd4463013374ed325d762417e2adefd59a802f go1.20.1.windows-arm64.zip
658078aaaf7608693f37c4cf1380b2af418ab8b2d23fdb33e7e2d4339328590e golangci-lint-1.46.2-darwin-amd64.tar.gz fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz
81f9b4afd62ec5e612ef8bc3b1d612a88b56ff289874831845cdad394427385f golangci-lint-1.46.2-darwin-arm64.tar.gz 75b8f0ff3a4e68147156be4161a49d4576f1be37a0b506473f8c482140c1e7f2 golangci-lint-1.51.1-darwin-arm64.tar.gz
943486e703e62ec55ecd90caeb22bcd39f8cc3962a93eec18c06b7bae12cb46f golangci-lint-1.46.2-freebsd-386.tar.gz e06b3459aaed356e1667580be00b05f41f3b2e29685d12cdee571c23e1edb414 golangci-lint-1.51.1-freebsd-386.tar.gz
a75dd9ba7e08e8315c411697171db5375c0f6a1ece9e6fbeb9e9a4386822e17d golangci-lint-1.46.2-freebsd-amd64.tar.gz 623ce2d0fa4d35cc2e8d69fa7334227ab592380962a13b4d9cdc77cf41db2008 golangci-lint-1.51.1-freebsd-amd64.tar.gz
83eedca1af72e8be055a1235177eb1b33524fbf08bec5730df2e6c3efade2b23 golangci-lint-1.46.2-freebsd-armv6.tar.gz 131365feb0584cc2736c43192fa673ca50e5b6b765456990cb379ecfb787e568 golangci-lint-1.51.1-freebsd-armv6.tar.gz
513d276c490de6f82baa01f9346d8d78b385f2ae97608f42f05d1f0f1314cd54 golangci-lint-1.46.2-freebsd-armv7.tar.gz 98fb627927cbb654f5bf85dcffc5f646666b2ce96ea0fed977c9fb28abd51532 golangci-lint-1.51.1-freebsd-armv7.tar.gz
461a60016d516c69d406dc3e2d4957b722dbe684b7085dfac4802d0f84409e27 golangci-lint-1.46.2-linux-386.tar.gz b36a99702fa762c15840261bc0fb41b4b1b16b8b19b8c0941bae98c85bb0f8b8 golangci-lint-1.51.1-linux-386.tar.gz
242cd4f2d6ac0556e315192e8555784d13da5d1874e51304711570769c4f2b9b golangci-lint-1.46.2-linux-amd64.tar.gz 17aeb26c76820c22efa0e1838b0ab93e90cfedef43fbfc9a2f33f27eb9e5e070 golangci-lint-1.51.1-linux-amd64.tar.gz
ff5448ada2b3982581984d64b0dec614dba0a3ea4cab2d6a343c77927fc89f7e golangci-lint-1.46.2-linux-arm64.tar.gz 9744bc34e7b8d82ca788b667bfb7155a39b4be9aef43bf9f10318b1372cea338 golangci-lint-1.51.1-linux-arm64.tar.gz
177f5210ef04aee282bfbc6ec519d36af5fb7d2b2c8d3f4ea5e59fdba71b0a27 golangci-lint-1.46.2-linux-armv6.tar.gz 0dda8dbeb2ff7455a044ec8e347f2fc6d655d2e99d281b3b95e88167031c673d golangci-lint-1.51.1-linux-armv6.tar.gz
10dd512a36ee978a1009edbca3ba3af410f0fda8df4d85f0e4793a24213870cc golangci-lint-1.46.2-linux-armv7.tar.gz 0512f311b11d43b8b22989d929f0fe8a2e1e5ebe497f1eb0ff73a0fc3d188fd1 golangci-lint-1.51.1-linux-armv7.tar.gz
67779fa517c688c9db1090c3c456117d95c6b92979c623fe8cce8fb84251f21e golangci-lint-1.46.2-linux-mips64.tar.gz d767108dcf84a8eaa844df3454cb0f75a492f4e7102ecc2b0a3545cfe073a566 golangci-lint-1.51.1-linux-loong64.tar.gz
c085f0f57bdccbb2c902a41b72ce210a3dfff16ca856789374745ab52004b6ee golangci-lint-1.46.2-linux-mips64le.tar.gz 3bd56c54daec16585b2668e0dfabb27af2c2b38cc0fdb46923e2521e1634846b golangci-lint-1.51.1-linux-mips64.tar.gz
abecef6421499248e58ed75d2938bc12b4b1f98b057f25060680b77bb51a881e golangci-lint-1.46.2-linux-ppc64le.tar.gz f72f5adfa2219e15d2414c9a2966f86e74556cf17a85c727a7fb7770a16cf814 golangci-lint-1.51.1-linux-mips64le.tar.gz
134843a8f5c5c182c11979ea75f5866945d54757b2a04f3e5e04a0cf4fbf3a39 golangci-lint-1.46.2-linux-riscv64.tar.gz e605521dac98096d8737e1997c954f41f1d0d8275b8731f62783d410c23574b9 golangci-lint-1.51.1-linux-ppc64le.tar.gz
9fe21a9476567aafe7a2e1a926b9641a39f920d4c0ea8eda9d968bc6136337f9 golangci-lint-1.46.2-linux-s390x.tar.gz 2f683217b814339e74d61ca700922d8407f15addd6d4c5e8b156fbab79f26a87 golangci-lint-1.51.1-linux-riscv64.tar.gz
b48a421ec12a43f8fc8f977b9cf7d4a1ea1c4b97f803a238de7d3ce4ab23a84b golangci-lint-1.46.2-windows-386.zip d98528292b65971a3594e5880530e7624597dc9806fcfccdfbe39be411713d63 golangci-lint-1.51.1-linux-s390x.tar.gz
604acc1378a566abb0eac799362f3a37b7fcb5fa2268aeb2d5d954c829367301 golangci-lint-1.46.2-windows-amd64.zip 9bb2d0fe9e692ed0aea4f2537e3e6862b2f6768fe2849a84f4a6ad09da9fd971 golangci-lint-1.51.1-netbsd-386.tar.gz
927def10db073da9687594072e6a3d9c891f67fa897105a2cfd715e018e7386c golangci-lint-1.46.2-windows-arm64.zip 34cafdcd11ae73ae88d66c33eb8449f5c976fc3e37b44774dbe9c71caa95e592 golangci-lint-1.51.1-netbsd-amd64.tar.gz
729b76ed1d8b4e2612e38772b211503cb940e00a137bbaace1aa066f7c943737 golangci-lint-1.46.2-windows-armv6.zip f8b4e1e47ac17caafe8a5f32f975a2b6a7cb14c27c0f73c1fb15c20ca91c2e03 golangci-lint-1.51.1-netbsd-armv6.tar.gz
ea27c86d91e0b245ecbcfbf6cdb4ac0522d4bc6dca56bba02ea1bc77ad2917ac golangci-lint-1.46.2-windows-armv7.zip c4f58b7e227b9fd41f0e9310dc83f4a4e7d026598e2f6e95b78761081a6d9bd2 golangci-lint-1.51.1-netbsd-armv7.tar.gz
6710e2f5375dc75521c1a17980a6cbbe6ff76c2f8b852964a8af558899a97cf5 golangci-lint-1.51.1-windows-386.zip
722d7b87b9cdda0a3835d5030b3fc5385c2eba4c107f63f6391cfb2ac35f051d golangci-lint-1.51.1-windows-amd64.zip
eb57f9bcb56646f2e3d6ccaf02ec227815fb05077b2e0b1bf9e755805acdc2b9 golangci-lint-1.51.1-windows-arm64.zip
bce02f7232723cb727755ee11f168a700a00896a25d37f87c4b173bce55596b4 golangci-lint-1.51.1-windows-armv6.zip
cf6403f84707ce8c98664736772271bc8874f2e760c2fd0f00cf3e85963507e9 golangci-lint-1.51.1-windows-armv7.zip
# This is the builder on PPA that will build Go itself (inception-y), don't modify!
d7f0013f82e6d7f862cc6cb5c8cdb48eef5f2e239b35baa97e2f1a7466043767 go1.19.6.src.tar.gz

View File

@ -24,24 +24,20 @@ Usage: go run build/ci.go <command> <command flags/arguments>
Available commands are: Available commands are:
install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
test [ -coverage ] [ packages... ] -- runs the tests test [ -coverage ] [ packages... ] -- runs the tests
lint -- runs certain pre-selected linters lint -- runs certain pre-selected linters
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives build artifacts archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives build artifacts
importkeys -- imports signing keys from env importkeys -- imports signing keys from env
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
nsis -- creates a Windows NSIS installer nsis -- creates a Windows NSIS installer
aar [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an Android archive purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore
xcode [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an iOS XCode framework
purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore
For all commands, -n prevents execution of external programs (dry run mode). For all commands, -n prevents execution of external programs (dry run mode).
*/ */
package main package main
import ( import (
"bufio"
"bytes" "bytes"
"encoding/base64" "encoding/base64"
"flag" "flag"
@ -51,7 +47,6 @@ import (
"os/exec" "os/exec"
"path" "path"
"path/filepath" "path/filepath"
"regexp"
"runtime" "runtime"
"strconv" "strconv"
"strings" "strings"
@ -78,7 +73,6 @@ var (
executablePath("bootnode"), executablePath("bootnode"),
executablePath("evm"), executablePath("evm"),
executablePath("geth"), executablePath("geth"),
executablePath("puppeth"),
executablePath("rlpdump"), executablePath("rlpdump"),
executablePath("clef"), executablePath("clef"),
} }
@ -101,10 +95,6 @@ var (
BinaryName: "geth", BinaryName: "geth",
Description: "Ethereum CLI client.", Description: "Ethereum CLI client.",
}, },
{
BinaryName: "puppeth",
Description: "Ethereum private network manager.",
},
{ {
BinaryName: "rlpdump", BinaryName: "rlpdump",
Description: "Developer utility tool that prints RLP structures.", Description: "Developer utility tool that prints RLP structures.",
@ -130,15 +120,15 @@ var (
// Distros for which packages are created. // Distros for which packages are created.
// Note: vivid is unsupported because there is no golang-1.6 package for it. // Note: vivid is unsupported because there is no golang-1.6 package for it.
// Note: the following Ubuntu releases have been officially deprecated on Launchpad: // Note: the following Ubuntu releases have been officially deprecated on Launchpad:
// wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsute // wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsute, impish
debDistroGoBoots = map[string]string{ debDistroGoBoots = map[string]string{
"trusty": "golang-1.11", // EOL: 04/2024 "trusty": "golang-1.11", // EOL: 04/2024
"xenial": "golang-go", // EOL: 04/2026 "xenial": "golang-go", // EOL: 04/2026
"bionic": "golang-go", // EOL: 04/2028 "bionic": "golang-go", // EOL: 04/2028
"focal": "golang-go", // EOL: 04/2030 "focal": "golang-go", // EOL: 04/2030
"impish": "golang-go", // EOL: 07/2022 "jammy": "golang-go", // EOL: 04/2032
"jammy": "golang-go", // EOL: 04/2032 "kinetic": "golang-go", // EOL: 07/2023
//"kinetic": "golang-go", // EOL: 07/2023 "lunar": "golang-go", // EOL: 01/2024
} }
debGoBootPaths = map[string]string{ debGoBootPaths = map[string]string{
@ -146,10 +136,18 @@ var (
"golang-go": "/usr/lib/go", "golang-go": "/usr/lib/go",
} }
// This is the version of go that will be downloaded by // This is the version of Go that will be downloaded by
// //
// go run ci.go install -dlgo // go run ci.go install -dlgo
dlgoVersion = "1.18.5" dlgoVersion = "1.20.1"
// This is the version of Go that will be used to bootstrap the PPA builder.
//
// This version is fine to be old and full of security holes; we just use it
// to build the latest Go. Don't change it. If it ever becomes insufficient,
// we need to switch over to a recursive builder to jump across supported
// versions.
gobootVersion = "1.19.6"
) )
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@ -185,10 +183,6 @@ func main() {
doDebianSource(os.Args[2:]) doDebianSource(os.Args[2:])
case "nsis": case "nsis":
doWindowsInstaller(os.Args[2:]) doWindowsInstaller(os.Args[2:])
case "aar":
doAndroidArchive(os.Args[2:])
case "xcode":
doXCodeFramework(os.Args[2:])
case "purge": case "purge":
doPurge(os.Args[2:]) doPurge(os.Args[2:])
default: default:
@ -255,8 +249,8 @@ func doInstall(cmdline []string) {
func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (flags []string) { func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (flags []string) {
var ld []string var ld []string
if env.Commit != "" { if env.Commit != "" {
ld = append(ld, "-X", "main.gitCommit="+env.Commit) ld = append(ld, "-X", "github.com/ethereum/go-ethereum/internal/version.gitCommit="+env.Commit)
ld = append(ld, "-X", "main.gitDate="+env.Date) ld = append(ld, "-X", "github.com/ethereum/go-ethereum/internal/version.gitDate="+env.Date)
} }
// Strip DWARF on darwin. This used to be required for certain things, // Strip DWARF on darwin. This used to be required for certain things,
// and there is no downside to this, so we just keep doing it. // and there is no downside to this, so we just keep doing it.
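For context on the buildFlags change above, a minimal sketch of the -X mechanism it relies on; the module path and injected values are illustrative, not geth's actual ones. The commit and date now target the internal/version package instead of each command's package main.

```go
// Hypothetical package illustrating the -X linker mechanism; the module path
// and injected values are made up for this sketch.
package version

// Plain string variables so the linker can set them, e.g.:
//
//	go build -ldflags "-X example.com/mod/version.gitCommit=abc1234 -X example.com/mod/version.gitDate=20230101"
var (
	gitCommit string
	gitDate   string
)

// Info returns whatever the build injected (empty strings for a plain `go build`).
func Info() (commit, date string) { return gitCommit, gitDate }
```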
@ -347,7 +341,7 @@ func doLint(cmdline []string) {
// downloadLinter downloads and unpacks golangci-lint. // downloadLinter downloads and unpacks golangci-lint.
func downloadLinter(cachedir string) string { func downloadLinter(cachedir string) string {
const version = "1.46.2" const version = "1.51.1"
csdb := build.MustLoadChecksums("build/checksums.txt") csdb := build.MustLoadChecksums("build/checksums.txt")
arch := runtime.GOARCH arch := runtime.GOARCH
@ -669,10 +663,11 @@ func doDebianSource(cmdline []string) {
gpg.Stdin = bytes.NewReader(key) gpg.Stdin = bytes.NewReader(key)
build.MustRun(gpg) build.MustRun(gpg)
} }
// Download and verify the Go source packages.
// Download and verify the Go source package. var (
gobundle := downloadGoSources(*cachedir) gobootbundle = downloadGoBootstrapSources(*cachedir)
gobundle = downloadGoSources(*cachedir)
)
// Download all the dependencies needed to build the sources and run the ci script // Download all the dependencies needed to build the sources and run the ci script
srcdepfetch := tc.Go("mod", "download") srcdepfetch := tc.Go("mod", "download")
srcdepfetch.Env = append(srcdepfetch.Env, "GOPATH="+filepath.Join(*workdir, "modgopath")) srcdepfetch.Env = append(srcdepfetch.Env, "GOPATH="+filepath.Join(*workdir, "modgopath"))
@ -689,12 +684,19 @@ func doDebianSource(cmdline []string) {
meta := newDebMetadata(distro, goboot, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables) meta := newDebMetadata(distro, goboot, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
pkgdir := stageDebianSource(*workdir, meta) pkgdir := stageDebianSource(*workdir, meta)
// Add Go source code // Add bootstrapper Go source code
if err := build.ExtractArchive(gobootbundle, pkgdir); err != nil {
log.Fatalf("Failed to extract bootstrapper Go sources: %v", err)
}
if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".goboot")); err != nil {
log.Fatalf("Failed to rename bootstrapper Go source folder: %v", err)
}
// Add builder Go source code
if err := build.ExtractArchive(gobundle, pkgdir); err != nil { if err := build.ExtractArchive(gobundle, pkgdir); err != nil {
log.Fatalf("Failed to extract Go sources: %v", err) log.Fatalf("Failed to extract builder Go sources: %v", err)
} }
if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".go")); err != nil { if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".go")); err != nil {
log.Fatalf("Failed to rename Go source folder: %v", err) log.Fatalf("Failed to rename builder Go source folder: %v", err)
} }
// Add all dependency modules in compressed form // Add all dependency modules in compressed form
os.MkdirAll(filepath.Join(pkgdir, ".mod", "cache"), 0755) os.MkdirAll(filepath.Join(pkgdir, ".mod", "cache"), 0755)
@ -723,6 +725,19 @@ func doDebianSource(cmdline []string) {
} }
} }
// downloadGoBootstrapSources downloads the Go source tarball that will be used
// to bootstrap the builder Go.
func downloadGoBootstrapSources(cachedir string) string {
csdb := build.MustLoadChecksums("build/checksums.txt")
file := fmt.Sprintf("go%s.src.tar.gz", gobootVersion)
url := "https://dl.google.com/go/" + file
dst := filepath.Join(cachedir, file)
if err := csdb.DownloadFile(url, dst); err != nil {
log.Fatal(err)
}
return dst
}
// downloadGoSources downloads the Go source tarball. // downloadGoSources downloads the Go source tarball.
func downloadGoSources(cachedir string) string { func downloadGoSources(cachedir string) string {
csdb := build.MustLoadChecksums("build/checksums.txt") csdb := build.MustLoadChecksums("build/checksums.txt")
@ -982,7 +997,10 @@ func doWindowsInstaller(cmdline []string) {
if env.Commit != "" { if env.Commit != "" {
version[2] += "-" + env.Commit[:8] version[2] += "-" + env.Commit[:8]
} }
installer, _ := filepath.Abs("geth-" + archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + ".exe") installer, err := filepath.Abs("geth-" + archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + ".exe")
if err != nil {
log.Fatalf("Failed to convert installer file path: %v", err)
}
build.MustRunCommand("makensis.exe", build.MustRunCommand("makensis.exe",
"/DOUTPUTFILE="+installer, "/DOUTPUTFILE="+installer,
"/DMAJORVERSION="+version[0], "/DMAJORVERSION="+version[0],
@ -997,236 +1015,6 @@ func doWindowsInstaller(cmdline []string) {
} }
} }
// Android archives
func doAndroidArchive(cmdline []string) {
var (
local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`)
signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. ANDROID_SIGNING_KEY)`)
signify = flag.String("signify", "", `Environment variable holding the signify signing key (e.g. ANDROID_SIGNIFY_KEY)`)
deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "https://oss.sonatype.org")`)
upload = flag.String("upload", "", `Destination to upload the archive (usually "gethstore/builds")`)
)
flag.CommandLine.Parse(cmdline)
env := build.Env()
tc := new(build.GoToolchain)
// Sanity check that the SDK and NDK are installed and set
if os.Getenv("ANDROID_HOME") == "" {
log.Fatal("Please ensure ANDROID_HOME points to your Android SDK")
}
// Build gomobile.
install := tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile@latest", "golang.org/x/mobile/cmd/gobind@latest")
install.Env = append(install.Env)
build.MustRun(install)
// Ensure all dependencies are available. This is required to make
// gomobile bind work because it expects go.sum to contain all checksums.
build.MustRun(tc.Go("mod", "download"))
// Build the Android archive and Maven resources
build.MustRun(gomobileTool("bind", "-ldflags", "-s -w", "--target", "android", "--javapkg", "org.ethereum", "-v", "github.com/ethereum/go-ethereum/mobile"))
if *local {
// If we're building locally, copy bundle to build dir and skip Maven
os.Rename("geth.aar", filepath.Join(GOBIN, "geth.aar"))
os.Rename("geth-sources.jar", filepath.Join(GOBIN, "geth-sources.jar"))
return
}
meta := newMavenMetadata(env)
build.Render("build/mvn.pom", meta.Package+".pom", 0755, meta)
// Skip Maven deploy and Azure upload for PR builds
maybeSkipArchive(env)
// Sign and upload the archive to Azure
archive := "geth-" + archiveBasename("android", params.ArchiveVersion(env.Commit)) + ".aar"
os.Rename("geth.aar", archive)
if err := archiveUpload(archive, *upload, *signer, *signify); err != nil {
log.Fatal(err)
}
// Sign and upload all the artifacts to Maven Central
os.Rename(archive, meta.Package+".aar")
if *signer != "" && *deploy != "" {
// Import the signing key into the local GPG instance
key := getenvBase64(*signer)
gpg := exec.Command("gpg", "--import")
gpg.Stdin = bytes.NewReader(key)
build.MustRun(gpg)
keyID, err := build.PGPKeyID(string(key))
if err != nil {
log.Fatal(err)
}
// Upload the artifacts to Sonatype and/or Maven Central
repo := *deploy + "/service/local/staging/deploy/maven2"
if meta.Develop {
repo = *deploy + "/content/repositories/snapshots"
}
build.MustRunCommand("mvn", "gpg:sign-and-deploy-file", "-e", "-X",
"-settings=build/mvn.settings", "-Durl="+repo, "-DrepositoryId=ossrh",
"-Dgpg.keyname="+keyID,
"-DpomFile="+meta.Package+".pom", "-Dfile="+meta.Package+".aar")
}
}
func gomobileTool(subcmd string, args ...string) *exec.Cmd {
cmd := exec.Command(filepath.Join(GOBIN, "gomobile"), subcmd)
cmd.Args = append(cmd.Args, args...)
cmd.Env = []string{
"PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"),
}
for _, e := range os.Environ() {
if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "PATH=") || strings.HasPrefix(e, "GOBIN=") {
continue
}
cmd.Env = append(cmd.Env, e)
}
cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
return cmd
}
type mavenMetadata struct {
Version string
Package string
Develop bool
Contributors []mavenContributor
}
type mavenContributor struct {
Name string
Email string
}
func newMavenMetadata(env build.Environment) mavenMetadata {
// Collect the list of authors from the repo root
contribs := []mavenContributor{}
if authors, err := os.Open("AUTHORS"); err == nil {
defer authors.Close()
scanner := bufio.NewScanner(authors)
for scanner.Scan() {
// Skip any whitespace from the authors list
line := strings.TrimSpace(scanner.Text())
if line == "" || line[0] == '#' {
continue
}
// Split the author and insert as a contributor
re := regexp.MustCompile("([^<]+) <(.+)>")
parts := re.FindStringSubmatch(line)
if len(parts) == 3 {
contribs = append(contribs, mavenContributor{Name: parts[1], Email: parts[2]})
}
}
}
// Render the version and package strings
version := params.Version
if isUnstableBuild(env) {
version += "-SNAPSHOT"
}
return mavenMetadata{
Version: version,
Package: "geth-" + version,
Develop: isUnstableBuild(env),
Contributors: contribs,
}
}
// XCode frameworks
func doXCodeFramework(cmdline []string) {
var (
local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`)
signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. IOS_SIGNING_KEY)`)
signify = flag.String("signify", "", `Environment variable holding the signify signing key (e.g. IOS_SIGNIFY_KEY)`)
deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "trunk")`)
upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`)
)
flag.CommandLine.Parse(cmdline)
env := build.Env()
tc := new(build.GoToolchain)
// Build gomobile.
build.MustRun(tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
// Build the iOS XCode framework
bind := gomobileTool("bind", "-ldflags", "-s -w", "--target", "ios", "-v", "github.com/ethereum/go-ethereum/mobile")
if *local {
// If we're building locally, use the build folder and stop afterwards
bind.Dir = GOBIN
build.MustRun(bind)
return
}
// Create the archive.
maybeSkipArchive(env)
archive := "geth-" + archiveBasename("ios", params.ArchiveVersion(env.Commit))
if err := os.MkdirAll(archive, 0755); err != nil {
log.Fatal(err)
}
bind.Dir, _ = filepath.Abs(archive)
build.MustRun(bind)
build.MustRunCommand("tar", "-zcvf", archive+".tar.gz", archive)
// Sign and upload the framework to Azure
if err := archiveUpload(archive+".tar.gz", *upload, *signer, *signify); err != nil {
log.Fatal(err)
}
// Prepare and upload a PodSpec to CocoaPods
if *deploy != "" {
meta := newPodMetadata(env, archive)
build.Render("build/pod.podspec", "Geth.podspec", 0755, meta)
build.MustRunCommand("pod", *deploy, "push", "Geth.podspec", "--allow-warnings")
}
}
type podMetadata struct {
Version string
Commit string
Archive string
Contributors []podContributor
}
type podContributor struct {
Name string
Email string
}
func newPodMetadata(env build.Environment, archive string) podMetadata {
// Collect the list of authors from the repo root
contribs := []podContributor{}
if authors, err := os.Open("AUTHORS"); err == nil {
defer authors.Close()
scanner := bufio.NewScanner(authors)
for scanner.Scan() {
// Skip any whitespace from the authors list
line := strings.TrimSpace(scanner.Text())
if line == "" || line[0] == '#' {
continue
}
// Split the author and insert as a contributor
re := regexp.MustCompile("([^<]+) <(.+)>")
parts := re.FindStringSubmatch(line)
if len(parts) == 3 {
contribs = append(contribs, podContributor{Name: parts[1], Email: parts[2]})
}
}
}
version := params.Version
if isUnstableBuild(env) {
version += "-unstable." + env.Buildnum
}
return podMetadata{
Archive: archive,
Version: version,
Commit: env.Commit,
Contributors: contribs,
}
}
// Binary distribution cleanups // Binary distribution cleanups
func doPurge(cmdline []string) { func doPurge(cmdline []string) {

View File

@ -16,7 +16,11 @@ override_dh_auto_build:
# We can't download a fresh Go within Launchpad, so we're shipping and building # We can't download a fresh Go within Launchpad, so we're shipping and building
# one on the fly. However, we can't build it inside the go-ethereum folder as # one on the fly. However, we can't build it inside the go-ethereum folder as
# bootstrapping clashes with go modules, so build in a sibling folder. # bootstrapping clashes with go modules, so build in a sibling folder.
(mv .go ../ && cd ../.go/src && ./make.bash) #
# We're also shipping the bootstrapper as of Go 1.20 as it had minimum version
# requirements opposed to older versions of Go.
(mv .goboot ../ && cd ../.goboot/src && ./make.bash)
(mv .go ../ && cd ../.go/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot ./make.bash)
# We can't download external go modules within Launchpad, so we're shipping the # We can't download external go modules within Launchpad, so we're shipping the
# entire dependency source cache with go-ethereum. # entire dependency source cache with go-ethereum.

View File

@ -1,57 +0,0 @@
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.ethereum</groupId>
<artifactId>geth</artifactId>
<version>{{.Version}}</version>
<packaging>aar</packaging>
<name>Android Ethereum Client</name>
<description>Android port of the go-ethereum libraries and node</description>
<url>https://github.com/ethereum/go-ethereum</url>
<inceptionYear>2015</inceptionYear>
<licenses>
<license>
<name>GNU Lesser General Public License, Version 3.0</name>
<url>https://www.gnu.org/licenses/lgpl-3.0.en.html</url>
<distribution>repo</distribution>
</license>
</licenses>
<organization>
<name>Ethereum</name>
<url>https://ethereum.org</url>
</organization>
<developers>
<developer>
<id>karalabe</id>
<name>Péter Szilágyi</name>
<email>peterke@gmail.com</email>
<url>https://github.com/karalabe</url>
<properties>
<picUrl>https://www.gravatar.com/avatar/2ecbf0f5b4b79eebf8c193e5d324357f?s=256</picUrl>
</properties>
</developer>
</developers>
<contributors>{{range .Contributors}}
<contributor>
<name>{{.Name}}</name>
<email>{{.Email}}</email>
</contributor>{{end}}
</contributors>
<issueManagement>
<system>GitHub Issues</system>
<url>https://github.com/ethereum/go-ethereum/issues/</url>
</issueManagement>
<scm>
<url>https://github.com/ethereum/go-ethereum</url>
</scm>
</project>

View File

@ -1,24 +0,0 @@
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
http://maven.apache.org/xsd/settings-1.0.0.xsd">
<servers>
<server>
<id>ossrh</id>
<username>${env.ANDROID_SONATYPE_USERNAME}</username>
<password>${env.ANDROID_SONATYPE_PASSWORD}</password>
</server>
</servers>
<profiles>
<profile>
<id>ossrh</id>
<activation>
<activeByDefault>true</activeByDefault>
</activation>
<properties>
<gpg.executable>gpg</gpg.executable>
<gpg.passphrase></gpg.passphrase>
</properties>
</profile>
</profiles>
</settings>

View File

@ -1,22 +0,0 @@
Pod::Spec.new do |spec|
spec.name = 'Geth'
spec.version = '{{.Version}}'
spec.license = { :type => 'GNU Lesser General Public License, Version 3.0' }
spec.homepage = 'https://github.com/ethereum/go-ethereum'
spec.authors = { {{range .Contributors}}
'{{.Name}}' => '{{.Email}}',{{end}}
}
spec.summary = 'iOS Ethereum Client'
spec.source = { :git => 'https://github.com/ethereum/go-ethereum.git', :commit => '{{.Commit}}' }
spec.platform = :ios
spec.ios.deployment_target = '9.0'
spec.ios.vendored_frameworks = 'Frameworks/Geth.framework'
spec.prepare_command = <<-CMD
curl https://gethstore.blob.core.windows.net/builds/{{.Archive}}.tar.gz | tar -xvz
mkdir Frameworks
mv {{.Archive}}/Geth.framework Frameworks
rm -rf {{.Archive}}
CMD
end

View File

@ -24,8 +24,4 @@ import (
_ "github.com/fjl/gencodec" _ "github.com/fjl/gencodec"
_ "github.com/golang/protobuf/protoc-gen-go" _ "github.com/golang/protobuf/protoc-gen-go"
_ "golang.org/x/tools/cmd/stringer" _ "golang.org/x/tools/cmd/stringer"
// Tool imports for mobile build.
_ "golang.org/x/mobile/cmd/gobind"
_ "golang.org/x/mobile/cmd/gomobile"
) )

View File

@ -342,7 +342,10 @@ func isGenerated(file string) bool {
} }
defer fd.Close() defer fd.Close()
buf := make([]byte, 2048) buf := make([]byte, 2048)
n, _ := fd.Read(buf) n, err := fd.Read(buf)
if err != nil {
return false
}
buf = buf[:n] buf = buf[:n]
for _, l := range bytes.Split(buf, []byte("\n")) { for _, l := range bytes.Split(buf, []byte("\n")) {
if bytes.HasPrefix(l, []byte("// Code generated")) { if bytes.HasPrefix(l, []byte("// Code generated")) {

View File

@ -33,14 +33,6 @@ import (
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
var (
// Git SHA1 commit hash of the release (set via linker flags)
gitCommit = ""
gitDate = ""
app *cli.App
)
var ( var (
// Flags needed by abigen // Flags needed by abigen
abiFlag = &cli.StringFlag{ abiFlag = &cli.StringFlag{
@ -73,7 +65,7 @@ var (
} }
langFlag = &cli.StringFlag{ langFlag = &cli.StringFlag{
Name: "lang", Name: "lang",
Usage: "Destination language for the bindings (go, java, objc)", Usage: "Destination language for the bindings (go)",
Value: "go", Value: "go",
} }
aliasFlag = &cli.StringFlag{ aliasFlag = &cli.StringFlag{
@ -82,8 +74,9 @@ var (
} }
) )
var app = flags.NewApp("Ethereum ABI wrapper code generator")
func init() { func init() {
app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool")
app.Name = "abigen" app.Name = "abigen"
app.Flags = []cli.Flag{ app.Flags = []cli.Flag{
abiFlag, abiFlag,
@ -109,11 +102,6 @@ func abigen(c *cli.Context) error {
switch c.String(langFlag.Name) { switch c.String(langFlag.Name) {
case "go": case "go":
lang = bind.LangGo lang = bind.LangGo
case "java":
lang = bind.LangJava
case "objc":
lang = bind.LangObjC
utils.Fatalf("Objc binding generation is uncompleted")
default: default:
utils.Fatalf("Unsupported destination language \"%s\" (--lang)", c.String(langFlag.Name)) utils.Fatalf("Unsupported destination language \"%s\" (--lang)", c.String(langFlag.Name))
} }
@ -161,9 +149,12 @@ func abigen(c *cli.Context) error {
types = append(types, kind) types = append(types, kind)
} else { } else {
// Generate the list of types to exclude from binding // Generate the list of types to exclude from binding
exclude := make(map[string]bool) var exclude *nameFilter
for _, kind := range strings.Split(c.String(excFlag.Name), ",") { if c.IsSet(excFlag.Name) {
exclude[strings.ToLower(kind)] = true var err error
if exclude, err = newNameFilter(strings.Split(c.String(excFlag.Name), ",")...); err != nil {
utils.Fatalf("Failed to parse excludes: %v", err)
}
} }
var contracts map[string]*compiler.Contract var contracts map[string]*compiler.Contract
@ -188,7 +179,11 @@ func abigen(c *cli.Context) error {
} }
// Gather all non-excluded contract for binding // Gather all non-excluded contract for binding
for name, contract := range contracts { for name, contract := range contracts {
if exclude[strings.ToLower(name)] { // fully qualified name is of the form <solFilePath>:<type>
nameParts := strings.Split(name, ":")
typeName := nameParts[len(nameParts)-1]
if exclude != nil && exclude.Matches(name) {
fmt.Fprintf(os.Stderr, "excluding: %v\n", name)
continue continue
} }
abi, err := json.Marshal(contract.Info.AbiDefinition) // Flatten the compiler parse abi, err := json.Marshal(contract.Info.AbiDefinition) // Flatten the compiler parse
@ -198,15 +193,14 @@ func abigen(c *cli.Context) error {
abis = append(abis, string(abi)) abis = append(abis, string(abi))
bins = append(bins, contract.Code) bins = append(bins, contract.Code)
sigs = append(sigs, contract.Hashes) sigs = append(sigs, contract.Hashes)
nameParts := strings.Split(name, ":") types = append(types, typeName)
types = append(types, nameParts[len(nameParts)-1])
// Derive the library placeholder which is a 34 character prefix of the // Derive the library placeholder which is a 34 character prefix of the
// hex encoding of the keccak256 hash of the fully qualified library name. // hex encoding of the keccak256 hash of the fully qualified library name.
// Note that the fully qualified library name is the path of its source // Note that the fully qualified library name is the path of its source
// file and the library name separated by ":". // file and the library name separated by ":".
libPattern := crypto.Keccak256Hash([]byte(name)).String()[2:36] // the first 2 chars are 0x libPattern := crypto.Keccak256Hash([]byte(name)).String()[2:36] // the first 2 chars are 0x
libs[libPattern] = nameParts[len(nameParts)-1] libs[libPattern] = typeName
} }
} }
// Extract all aliases from the flags // Extract all aliases from the flags

58
cmd/abigen/namefilter.go Normal file
View File

@ -0,0 +1,58 @@
package main
import (
"fmt"
"strings"
)
type nameFilter struct {
fulls map[string]bool // path/to/contract.sol:Type
files map[string]bool // path/to/contract.sol:*
types map[string]bool // *:Type
}
func newNameFilter(patterns ...string) (*nameFilter, error) {
f := &nameFilter{
fulls: make(map[string]bool),
files: make(map[string]bool),
types: make(map[string]bool),
}
for _, pattern := range patterns {
if err := f.add(pattern); err != nil {
return nil, err
}
}
return f, nil
}
func (f *nameFilter) add(pattern string) error {
ft := strings.Split(pattern, ":")
if len(ft) != 2 {
// filenames and types must not include ':' symbol
return fmt.Errorf("invalid pattern: %s", pattern)
}
file, typ := ft[0], ft[1]
if file == "*" {
f.types[typ] = true
return nil
} else if typ == "*" {
f.files[file] = true
return nil
}
f.fulls[pattern] = true
return nil
}
func (f *nameFilter) Matches(name string) bool {
ft := strings.Split(name, ":")
if len(ft) != 2 {
// If contract names are always of the fully-qualified form
// <filePath>:<type>, then this case will never happen.
return false
}
file, typ := ft[0], ft[1]
// full paths > file paths > types
return f.fulls[name] || f.files[file] || f.types[typ]
}
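A short usage sketch of how the three pattern kinds combine; this is a hypothetical snippet assumed to compile alongside namefilter.go (same package, standard fmt import), with invented contract paths:

```go
// Hypothetical helper in the same package as namefilter.go.
func exampleFilter() {
	// Exact "path:Type" patterns, "path:*" file wildcards and "*:Type" type
	// wildcards may be mixed; a name matches if any pattern applies.
	f, err := newNameFilter("contracts/Token.sol:*", "*:Ownable")
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Matches("contracts/Token.sol:Token"))   // true  (file wildcard)
	fmt.Println(f.Matches("contracts/Other.sol:Ownable")) // true  (type wildcard)
	fmt.Println(f.Matches("contracts/Other.sol:Other"))   // false (no pattern applies)
}
```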

View File

@ -0,0 +1,38 @@
package main
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNameFilter(t *testing.T) {
_, err := newNameFilter("Foo")
require.Error(t, err)
_, err = newNameFilter("too/many:colons:Foo")
require.Error(t, err)
f, err := newNameFilter("a/path:A", "*:B", "c/path:*")
require.NoError(t, err)
for _, tt := range []struct {
name string
match bool
}{
{"a/path:A", true},
{"unknown/path:A", false},
{"a/path:X", false},
{"unknown/path:X", false},
{"any/path:B", true},
{"c/path:X", true},
{"c/path:foo:B", false},
} {
match := f.Matches(tt.name)
if tt.match {
assert.True(t, match, "expected match")
} else {
assert.False(t, match, "expected no match")
}
}
}

View File

@ -40,7 +40,7 @@ func main() {
writeAddr = flag.Bool("writeaddress", false, "write out the node's public key and quit") writeAddr = flag.Bool("writeaddress", false, "write out the node's public key and quit")
nodeKeyFile = flag.String("nodekey", "", "private key filename") nodeKeyFile = flag.String("nodekey", "", "private key filename")
nodeKeyHex = flag.String("nodekeyhex", "", "private key as hex (for testing)") nodeKeyHex = flag.String("nodekeyhex", "", "private key as hex (for testing)")
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)") natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|pmp:<IP>|extip:<IP>)")
netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)") netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode") runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-5)") verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-5)")

View File

@ -86,7 +86,7 @@ checkpoint-admin status --rpc <NODE_RPC_ENDPOINT>
### Enable checkpoint oracle in your private network ### Enable checkpoint oracle in your private network
Currently, only the Ethereum mainnet and the default supported test networks (ropsten, rinkeby, goerli) activate this feature. If you want to activate this feature in your private network, you can overwrite the relevant checkpoint oracle settings through the configuration file after deploying the oracle contract. Currently, only the Ethereum mainnet and the default supported test networks (rinkeby, goerli) activate this feature. If you want to activate this feature in your private network, you can overwrite the relevant checkpoint oracle settings through the configuration file after deploying the oracle contract.
* Get your node configuration file `geth dumpconfig OTHER_COMMAND_LINE_OPTIONS > config.toml` * Get your node configuration file `geth dumpconfig OTHER_COMMAND_LINE_OPTIONS > config.toml`
* Edit the configuration file and add the following information * Edit the configuration file and add the following information

View File

@ -28,16 +28,9 @@ import (
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
var ( var app = flags.NewApp("ethereum checkpoint helper tool")
// Git SHA1 commit hash of the release (set via linker flags)
gitCommit = ""
gitDate = ""
app *cli.App
)
func init() { func init() {
app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool")
app.Commands = []*cli.Command{ app.Commands = []*cli.Command{
commandStatus, commandStatus,
commandDeploy, commandDeploy,

View File

@ -29,7 +29,7 @@ GLOBAL OPTIONS:
--loglevel value log level to emit to the screen (default: 4) --loglevel value log level to emit to the screen (default: 4)
--keystore value Directory for the keystore (default: "$HOME/.ethereum/keystore") --keystore value Directory for the keystore (default: "$HOME/.ethereum/keystore")
--configdir value Directory for Clef configuration (default: "$HOME/.clef") --configdir value Directory for Clef configuration (default: "$HOME/.clef")
--chainid value Chain id to use for signing (1=mainnet, 3=Ropsten, 4=Rinkeby, 5=Goerli) (default: 1) --chainid value Chain id to use for signing (1=mainnet, 4=Rinkeby, 5=Goerli) (default: 1)
--lightkdf Reduce key-derivation RAM & CPU usage at some expense of KDF strength --lightkdf Reduce key-derivation RAM & CPU usage at some expense of KDF strength
--nousb Disables monitoring for and managing USB hardware wallets --nousb Disables monitoring for and managing USB hardware wallets
--pcscdpath value Path to the smartcard daemon (pcscd) socket file (default: "/run/pcscd/pcscd.comm") --pcscdpath value Path to the smartcard daemon (pcscd) socket file (default: "/run/pcscd/pcscd.comm")

117
cmd/clef/consolecmd_test.go Normal file
View File

@ -0,0 +1,117 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
)
// TestImportRaw tests clef --importraw
func TestImportRaw(t *testing.T) {
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
t.Cleanup(func() { os.Remove(keyPath) })
t.Parallel()
t.Run("happy-path", func(t *testing.T) {
// Run clef importraw
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword").input("myverylongpassword")
if out := string(clef.Output()); !strings.Contains(out,
"Key imported:\n Address 0x9160DC9105f7De5dC5E7f3d97ef11DA47269BdA6") {
t.Logf("Output\n%v", out)
t.Error("Failure")
}
})
// tests clef --importraw with mismatched passwords.
t.Run("pw-mismatch", func(t *testing.T) {
// Run clef importraw
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword1").input("myverylongpassword2").WaitExit()
if have, want := clef.StderrText(), "Passwords do not match\n"; have != want {
t.Errorf("have %q, want %q", have, want)
}
})
// tests clef --importraw with a too short password.
t.Run("short-pw", func(t *testing.T) {
// Run clef importraw
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("shorty").input("shorty").WaitExit()
if have, want := clef.StderrText(),
"password requirements not met: password too short (<10 characters)\n"; have != want {
t.Errorf("have %q, want %q", have, want)
}
})
}
// TestListAccounts tests clef --list-accounts
func TestListAccounts(t *testing.T) {
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
t.Cleanup(func() { os.Remove(keyPath) })
t.Parallel()
t.Run("no-accounts", func(t *testing.T) {
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-accounts")
if out := string(clef.Output()); !strings.Contains(out, "The keystore is empty.") {
t.Logf("Output\n%v", out)
t.Error("Failure")
}
})
t.Run("one-account", func(t *testing.T) {
// First, we need to import
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword").input("myverylongpassword").WaitExit()
// Secondly, do a listing, using the same datadir
clef = runWithKeystore(t, clef.Datadir, "--suppress-bootwarn", "--lightkdf", "list-accounts")
if out := string(clef.Output()); !strings.Contains(out, "0x9160DC9105f7De5dC5E7f3d97ef11DA47269BdA6 (keystore:") {
t.Logf("Output\n%v", out)
t.Error("Failure")
}
})
}
// TestListWallets tests clef --list-wallets
func TestListWallets(t *testing.T) {
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
t.Cleanup(func() { os.Remove(keyPath) })
t.Parallel()
t.Run("no-accounts", func(t *testing.T) {
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-wallets")
if out := string(clef.Output()); !strings.Contains(out, "There are no wallets.") {
t.Logf("Output\n%v", out)
t.Error("Failure")
}
})
t.Run("one-account", func(t *testing.T) {
// First, we need to import
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword").input("myverylongpassword").WaitExit()
// Secondly, do a listing, using the same datadir
clef = runWithKeystore(t, clef.Datadir, "--suppress-bootwarn", "--lightkdf", "list-wallets")
if out := string(clef.Output()); !strings.Contains(out, "Account 0: 0x9160DC9105f7De5dC5E7f3d97ef11DA47269BdA6") {
t.Logf("Output\n%v", out)
t.Error("Failure")
}
})
}

View File

@ -23,6 +23,7 @@ import (
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"math/big" "math/big"
@ -74,7 +75,7 @@ PURPOSE. See the GNU General Public License for more details.
var ( var (
logLevelFlag = &cli.IntFlag{ logLevelFlag = &cli.IntFlag{
Name: "loglevel", Name: "loglevel",
Value: 4, Value: 3,
Usage: "log level to emit to the screen", Usage: "log level to emit to the screen",
} }
advancedMode = &cli.BoolFlag{ advancedMode = &cli.BoolFlag{
@ -98,7 +99,7 @@ var (
chainIdFlag = &cli.Int64Flag{ chainIdFlag = &cli.Int64Flag{
Name: "chainid", Name: "chainid",
Value: params.MainnetChainConfig.ChainID.Int64(), Value: params.MainnetChainConfig.ChainID.Int64(),
Usage: "Chain id to use for signing (1=mainnet, 3=Ropsten, 4=Rinkeby, 5=Goerli)", Usage: "Chain id to use for signing (1=mainnet, 4=Rinkeby, 5=Goerli)",
} }
rpcPortFlag = &cli.IntFlag{ rpcPortFlag = &cli.IntFlag{
Name: "http.port", Name: "http.port",
@ -203,25 +204,61 @@ The delpw command removes a password for a given address (keyfile).
}, },
Description: ` Description: `
The newaccount command creates a new keystore-backed account. It is a convenience-method The newaccount command creates a new keystore-backed account. It is a convenience-method
which can be used in lieu of an external UI.`, which can be used in lieu of an external UI.
} `}
gendocCommand = &cli.Command{ gendocCommand = &cli.Command{
Action: GenDoc, Action: GenDoc,
Name: "gendoc", Name: "gendoc",
Usage: "Generate documentation about json-rpc format", Usage: "Generate documentation about json-rpc format",
Description: ` Description: `
The gendoc generates example structures of the json-rpc communication types. The gendoc generates example structures of the json-rpc communication types.
`}
listAccountsCommand = &cli.Command{
Action: listAccounts,
Name: "list-accounts",
Usage: "List accounts in the keystore",
Flags: []cli.Flag{
logLevelFlag,
keystoreFlag,
utils.LightKDFFlag,
acceptFlag,
},
Description: `
Lists the accounts in the keystore.
`}
listWalletsCommand = &cli.Command{
Action: listWallets,
Name: "list-wallets",
Usage: "List wallets known to Clef",
Flags: []cli.Flag{
logLevelFlag,
keystoreFlag,
utils.LightKDFFlag,
acceptFlag,
},
Description: `
Lists the wallets known to Clef.
`}
importRawCommand = &cli.Command{
Action: accountImport,
Name: "importraw",
Usage: "Import a hex-encoded private key.",
ArgsUsage: "<keyfile>",
Flags: []cli.Flag{
logLevelFlag,
keystoreFlag,
utils.LightKDFFlag,
acceptFlag,
},
Description: `
Imports an unencrypted private key from <keyfile> and creates a new account.
Prints the address.
The keyfile is assumed to contain an unencrypted private key in hexadecimal format.
The account is saved in encrypted format; you are prompted for a password.
`} `}
) )
var ( var app = flags.NewApp("Manage Ethereum account operations")
// Git SHA1 commit hash of the release (set via linker flags)
gitCommit = ""
gitDate = ""
app = flags.NewApp(gitCommit, gitDate, "Manage Ethereum account operations")
)
func init() { func init() {
app.Name = "Clef" app.Name = "Clef"
@ -254,7 +291,10 @@ func init() {
setCredentialCommand, setCredentialCommand,
delCredentialCommand, delCredentialCommand,
newAccountCommand, newAccountCommand,
importRawCommand,
gendocCommand, gendocCommand,
listAccountsCommand,
listWalletsCommand,
} }
} }
@ -357,6 +397,22 @@ func attestFile(ctx *cli.Context) error {
return nil return nil
} }
func initInternalApi(c *cli.Context) (*core.UIServerAPI, core.UIClientAPI, error) {
if err := initialize(c); err != nil {
return nil, nil, err
}
var (
ui = core.NewCommandlineUI()
pwStorage storage.Storage = &storage.NoStorage{}
ksLoc = c.String(keystoreFlag.Name)
lightKdf = c.Bool(utils.LightKDFFlag.Name)
)
am := core.StartClefAccountManager(ksLoc, true, lightKdf, "")
api := core.NewSignerAPI(am, 0, true, ui, nil, false, pwStorage)
internalApi := core.NewUIServerAPI(api)
return internalApi, ui, nil
}
func setCredential(ctx *cli.Context) error { func setCredential(ctx *cli.Context) error {
if ctx.NArg() < 1 { if ctx.NArg() < 1 {
utils.Fatalf("This command requires an address to be passed as an argument") utils.Fatalf("This command requires an address to be passed as an argument")
@ -415,31 +471,6 @@ func removeCredential(ctx *cli.Context) error {
return nil return nil
} }
func newAccount(c *cli.Context) error {
if err := initialize(c); err != nil {
return err
}
// The newaccount is meant for users using the CLI, since 'real' external
// UIs can use the UI-api instead. So we'll just use the native CLI UI here.
var (
ui = core.NewCommandlineUI()
pwStorage storage.Storage = &storage.NoStorage{}
ksLoc = c.String(keystoreFlag.Name)
lightKdf = c.Bool(utils.LightKDFFlag.Name)
)
log.Info("Starting clef", "keystore", ksLoc, "light-kdf", lightKdf)
am := core.StartClefAccountManager(ksLoc, true, lightKdf, "")
// This gives is us access to the external API
apiImpl := core.NewSignerAPI(am, 0, true, ui, nil, false, pwStorage)
// This gives us access to the internal API
internalApi := core.NewUIServerAPI(apiImpl)
addr, err := internalApi.New(context.Background())
if err == nil {
fmt.Printf("Generated account %v\n", addr.String())
}
return err
}
func initialize(c *cli.Context) error { func initialize(c *cli.Context) error {
// Set up the logger to print everything // Set up the logger to print everything
logOutput := os.Stdout logOutput := os.Stdout
@ -465,6 +496,108 @@ func initialize(c *cli.Context) error {
return nil return nil
} }
func newAccount(c *cli.Context) error {
internalApi, _, err := initInternalApi(c)
if err != nil {
return err
}
addr, err := internalApi.New(context.Background())
if err == nil {
fmt.Printf("Generated account %v\n", addr.String())
}
return err
}
func listAccounts(c *cli.Context) error {
internalApi, _, err := initInternalApi(c)
if err != nil {
return err
}
accs, err := internalApi.ListAccounts(context.Background())
if err != nil {
return err
}
if len(accs) == 0 {
fmt.Println("\nThe keystore is empty.")
}
fmt.Println()
for _, account := range accs {
fmt.Printf("%v (%v)\n", account.Address, account.URL)
}
return err
}
func listWallets(c *cli.Context) error {
internalApi, _, err := initInternalApi(c)
if err != nil {
return err
}
wallets := internalApi.ListWallets()
if len(wallets) == 0 {
fmt.Println("\nThere are no wallets.")
}
fmt.Println()
for i, wallet := range wallets {
fmt.Printf("- Wallet %d at %v (%v %v)\n", i, wallet.URL, wallet.Status, wallet.Failure)
for j, acc := range wallet.Accounts {
fmt.Printf(" -Account %d: %v (%v)\n", j, acc.Address, acc.URL)
}
fmt.Println()
}
return nil
}
// accountImport imports a raw hexadecimal private key via CLI.
func accountImport(c *cli.Context) error {
if c.Args().Len() != 1 {
return errors.New("<keyfile> must be given as first argument.")
}
internalApi, ui, err := initInternalApi(c)
if err != nil {
return err
}
pKey, err := crypto.LoadECDSA(c.Args().First())
if err != nil {
return err
}
readPw := func(prompt string) (string, error) {
resp, err := ui.OnInputRequired(core.UserInputRequest{
Title: "Password",
Prompt: prompt,
IsPassword: true,
})
if err != nil {
return "", err
}
return resp.Text, nil
}
first, err := readPw("Please enter a password for the imported account")
if err != nil {
return err
}
second, err := readPw("Please repeat the password you just entered")
if err != nil {
return err
}
if first != second {
return errors.New("Passwords do not match")
}
acc, err := internalApi.ImportRawKey(hex.EncodeToString(crypto.FromECDSA(pKey)), first)
if err != nil {
return err
}
ui.ShowInfo(fmt.Sprintf(`Key imported:
Address %v
Keystore file: %v
The key is now encrypted; losing the password will result in permanently losing
access to the key and all associated funds!
Make sure to backup keystore and passwords in a safe location.`,
acc.Address, acc.URL.Path))
return nil
}
// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into // ipcEndpoint resolves an IPC endpoint based on a configured value, taking into
// account the set data folders as well as the designated platform we're currently // account the set data folders as well as the designated platform we're currently
// running on. // running on.
@ -574,6 +707,7 @@ func signer(c *cli.Context) error {
// it with the UI. // it with the UI.
ui.RegisterUIServer(core.NewUIServerAPI(apiImpl)) ui.RegisterUIServer(core.NewUIServerAPI(apiImpl))
api = apiImpl api = apiImpl
// Audit logging // Audit logging
if logfile := c.String(auditLogFlag.Name); logfile != "" { if logfile := c.String(auditLogFlag.Name); logfile != "" {
api, err = core.NewAuditLogger(logfile, api) api, err = core.NewAuditLogger(logfile, api)
@ -635,7 +769,6 @@ func signer(c *cli.Context) error {
log.Info("IPC endpoint closed", "url", ipcapiURL) log.Info("IPC endpoint closed", "url", ipcapiURL)
}() }()
} }
if c.Bool(testFlag.Name) { if c.Bool(testFlag.Name) {
log.Info("Performing UI test") log.Info("Performing UI test")
go testExternalUI(apiImpl) go testExternalUI(apiImpl)
@ -646,8 +779,7 @@ func signer(c *cli.Context) error {
"extapi_version": core.ExternalAPIVersion, "extapi_version": core.ExternalAPIVersion,
"extapi_http": extapiURL, "extapi_http": extapiURL,
"extapi_ipc": ipcapiURL, "extapi_ipc": ipcapiURL,
}, }})
})
abortChan := make(chan os.Signal, 1) abortChan := make(chan os.Signal, 1)
signal.Notify(abortChan, os.Interrupt) signal.Notify(abortChan, os.Interrupt)

109
cmd/clef/run_test.go Normal file
View File

@ -0,0 +1,109 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"fmt"
"os"
"testing"
"github.com/docker/docker/pkg/reexec"
"github.com/ethereum/go-ethereum/internal/cmdtest"
)
const registeredName = "clef-test"
type testproc struct {
*cmdtest.TestCmd
// template variables for expect
Datadir string
Etherbase string
}
func init() {
reexec.Register(registeredName, func() {
if err := app.Run(os.Args); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
os.Exit(0)
})
}
func TestMain(m *testing.M) {
// check if we have been reexec'd
if reexec.Init() {
return
}
os.Exit(m.Run())
}
// runClef spawns clef with the given command line args and adds keystore arg.
// This method creates a temporary keystore folder which will be removed after
// the test exits.
func runClef(t *testing.T, args ...string) *testproc {
ddir, err := os.MkdirTemp("", "cleftest-*")
if err != nil {
return nil
}
t.Cleanup(func() {
os.RemoveAll(ddir)
})
return runWithKeystore(t, ddir, args...)
}
// runWithKeystore spawns clef with the given command line args and adds keystore arg.
// This method does _not_ create the keystore folder, but it _does_ add the arg
// to the args.
func runWithKeystore(t *testing.T, keystore string, args ...string) *testproc {
args = append([]string{"--keystore", keystore}, args...)
tt := &testproc{Datadir: keystore}
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
// Boot "clef". This actually runs the test binary but the TestMain
// function will prevent any tests from running.
tt.Run(registeredName, args...)
return tt
}
func (proc *testproc) input(text string) *testproc {
proc.TestCmd.InputLine(text)
return proc
}
/*
// waitForEndpoint waits for the rpc endpoint to appear, or
// aborts after 3 seconds.
func (proc *testproc) waitForEndpoint(t *testing.T) *testproc {
t.Helper()
timeout := 3 * time.Second
ipc := filepath.Join(proc.Datadir, "clef.ipc")
start := time.Now()
for time.Since(start) < timeout {
if _, err := os.Stat(ipc); !errors.Is(err, os.ErrNotExist) {
t.Logf("endpoint %v opened", ipc)
return proc
}
time.Sleep(200 * time.Millisecond)
}
t.Logf("stderr: \n%v", proc.StderrText())
t.Logf("stdout: \n%v", proc.Output())
t.Fatal("endpoint", ipc, "did not open within", timeout)
return proc
}
*/

View File

@ -44,7 +44,7 @@ set to standard output. The following filters are supported:
- `-limit <N>` limits the output set to N entries, taking the top N nodes by score - `-limit <N>` limits the output set to N entries, taking the top N nodes by score
- `-ip <CIDR>` filters nodes by IP subnet - `-ip <CIDR>` filters nodes by IP subnet
- `-min-age <duration>` filters nodes by 'first seen' time - `-min-age <duration>` filters nodes by 'first seen' time
- `-eth-network <mainnet/rinkeby/goerli/ropsten>` filters nodes by "eth" ENR entry - `-eth-network <mainnet/rinkeby/goerli/sepolia>` filters nodes by "eth" ENR entry
- `-les-server` filters nodes by LES server support - `-les-server` filters nodes by LES server support
- `-snap` filters nodes by snap protocol support - `-snap` filters nodes by snap protocol support
@ -135,6 +135,6 @@ replacing `<enode>` with the enode of the geth node:
``` ```
[eth]: https://github.com/ethereum/devp2p/blob/master/caps/eth.md [eth]: https://github.com/ethereum/devp2p/blob/master/caps/eth.md
[dns-tutorial]: https://geth.ethereum.org/docs/developers/dns-discovery-setup [dns-tutorial]: https://geth.ethereum.org/docs/developers/geth-developer/dns-discovery-setup
[discv4]: https://github.com/ethereum/devp2p/tree/master/discv4.md [discv4]: https://github.com/ethereum/devp2p/tree/master/discv4.md
[discv5]: https://github.com/ethereum/devp2p/tree/master/discv5/discv5.md [discv5]: https://github.com/ethereum/devp2p/tree/master/discv5/discv5.md

View File

@ -36,6 +36,14 @@ type crawler struct {
revalidateInterval time.Duration revalidateInterval time.Duration
} }
const (
nodeRemoved = iota
nodeSkipRecent
nodeSkipIncompat
nodeAdded
nodeUpdated
)
type resolver interface { type resolver interface {
RequestENR(*enode.Node) (*enode.Node, error) RequestENR(*enode.Node) (*enode.Node, error)
} }
@ -63,19 +71,39 @@ func (c *crawler) run(timeout time.Duration) nodeSet {
var ( var (
timeoutTimer = time.NewTimer(timeout) timeoutTimer = time.NewTimer(timeout)
timeoutCh <-chan time.Time timeoutCh <-chan time.Time
statusTicker = time.NewTicker(time.Second * 8)
doneCh = make(chan enode.Iterator, len(c.iters)) doneCh = make(chan enode.Iterator, len(c.iters))
liveIters = len(c.iters) liveIters = len(c.iters)
) )
defer timeoutTimer.Stop() defer timeoutTimer.Stop()
defer statusTicker.Stop()
for _, it := range c.iters { for _, it := range c.iters {
go c.runIterator(doneCh, it) go c.runIterator(doneCh, it)
} }
var (
added int
updated int
skipped int
recent int
removed int
)
loop: loop:
for { for {
select { select {
case n := <-c.ch: case n := <-c.ch:
c.updateNode(n) switch c.updateNode(n) {
case nodeSkipIncompat:
skipped++
case nodeSkipRecent:
recent++
case nodeRemoved:
removed++
case nodeAdded:
added++
default:
updated++
}
case it := <-doneCh: case it := <-doneCh:
if it == c.inputIter { if it == c.inputIter {
// Enable timeout when we're done revalidating the input nodes. // Enable timeout when we're done revalidating the input nodes.
@ -89,6 +117,10 @@ loop:
} }
case <-timeoutCh: case <-timeoutCh:
break loop break loop
case <-statusTicker.C:
log.Info("Crawling in progress",
"added", added, "updated", updated, "removed", removed,
"ignored(recent)", recent, "ignored(incompatible)", skipped)
} }
} }
@ -113,22 +145,25 @@ func (c *crawler) runIterator(done chan<- enode.Iterator, it enode.Iterator) {
} }
} }
func (c *crawler) updateNode(n *enode.Node) { // updateNode updates the info about the given node, and returns a status
// about what changed
func (c *crawler) updateNode(n *enode.Node) int {
node, ok := c.output[n.ID()] node, ok := c.output[n.ID()]
// Skip validation of recently-seen nodes. // Skip validation of recently-seen nodes.
if ok && time.Since(node.LastCheck) < c.revalidateInterval { if ok && time.Since(node.LastCheck) < c.revalidateInterval {
return return nodeSkipRecent
} }
// Request the node record. // Request the node record.
nn, err := c.disc.RequestENR(n) nn, err := c.disc.RequestENR(n)
node.LastCheck = truncNow() node.LastCheck = truncNow()
status := nodeUpdated
if err != nil { if err != nil {
if node.Score == 0 { if node.Score == 0 {
// Node doesn't implement EIP-868. // Node doesn't implement EIP-868.
log.Debug("Skipping node", "id", n.ID()) log.Debug("Skipping node", "id", n.ID())
return return nodeSkipIncompat
} }
node.Score /= 2 node.Score /= 2
} else { } else {
@ -137,18 +172,20 @@ func (c *crawler) updateNode(n *enode.Node) {
node.Score++ node.Score++
if node.FirstResponse.IsZero() { if node.FirstResponse.IsZero() {
node.FirstResponse = node.LastCheck node.FirstResponse = node.LastCheck
status = nodeAdded
} }
node.LastResponse = node.LastCheck node.LastResponse = node.LastCheck
} }
// Store/update node in output set. // Store/update node in output set.
if node.Score <= 0 { if node.Score <= 0 {
log.Info("Removing node", "id", n.ID()) log.Debug("Removing node", "id", n.ID())
delete(c.output, n.ID()) delete(c.output, n.ID())
} else { return nodeRemoved
log.Info("Updating node", "id", n.ID(), "seq", n.Seq(), "score", node.Score)
c.output[n.ID()] = node
} }
log.Debug("Updating node", "id", n.ID(), "seq", n.Seq(), "score", node.Score)
c.output[n.ID()] = node
return status
} }
func truncNow() time.Time { func truncNow() time.Time {

View File

@ -19,6 +19,7 @@ package main
import ( import (
"fmt" "fmt"
"net" "net"
"strconv"
"strings" "strings"
"time" "time"
@ -50,34 +51,34 @@ var (
Usage: "Sends ping to a node", Usage: "Sends ping to a node",
Action: discv4Ping, Action: discv4Ping,
ArgsUsage: "<node>", ArgsUsage: "<node>",
Flags: v4NodeFlags, Flags: discoveryNodeFlags,
} }
discv4RequestRecordCommand = &cli.Command{ discv4RequestRecordCommand = &cli.Command{
Name: "requestenr", Name: "requestenr",
Usage: "Requests a node record using EIP-868 enrRequest", Usage: "Requests a node record using EIP-868 enrRequest",
Action: discv4RequestRecord, Action: discv4RequestRecord,
ArgsUsage: "<node>", ArgsUsage: "<node>",
Flags: v4NodeFlags, Flags: discoveryNodeFlags,
} }
discv4ResolveCommand = &cli.Command{ discv4ResolveCommand = &cli.Command{
Name: "resolve", Name: "resolve",
Usage: "Finds a node in the DHT", Usage: "Finds a node in the DHT",
Action: discv4Resolve, Action: discv4Resolve,
ArgsUsage: "<node>", ArgsUsage: "<node>",
Flags: v4NodeFlags, Flags: discoveryNodeFlags,
} }
discv4ResolveJSONCommand = &cli.Command{ discv4ResolveJSONCommand = &cli.Command{
Name: "resolve-json", Name: "resolve-json",
Usage: "Re-resolves nodes in a nodes.json file", Usage: "Re-resolves nodes in a nodes.json file",
Action: discv4ResolveJSON, Action: discv4ResolveJSON,
Flags: v4NodeFlags, Flags: discoveryNodeFlags,
ArgsUsage: "<nodes.json file>", ArgsUsage: "<nodes.json file>",
} }
discv4CrawlCommand = &cli.Command{ discv4CrawlCommand = &cli.Command{
Name: "crawl", Name: "crawl",
Usage: "Updates a nodes.json file with random nodes found in the DHT", Usage: "Updates a nodes.json file with random nodes found in the DHT",
Action: discv4Crawl, Action: discv4Crawl,
Flags: flags.Merge(v4NodeFlags, []cli.Flag{crawlTimeoutFlag}), Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{crawlTimeoutFlag}),
} }
discv4TestCommand = &cli.Command{ discv4TestCommand = &cli.Command{
Name: "test", Name: "test",
@ -110,6 +111,10 @@ var (
Name: "addr", Name: "addr",
Usage: "Listening address", Usage: "Listening address",
} }
extAddrFlag = &cli.StringFlag{
Name: "extaddr",
Usage: "UDP endpoint announced in ENR. You can provide a bare IP address or IP:port as the value of this flag.",
}
crawlTimeoutFlag = &cli.DurationFlag{ crawlTimeoutFlag = &cli.DurationFlag{
Name: "timeout", Name: "timeout",
Usage: "Time limit for the crawl.", Usage: "Time limit for the crawl.",
@ -122,11 +127,12 @@ var (
} }
) )
var v4NodeFlags = []cli.Flag{ var discoveryNodeFlags = []cli.Flag{
bootnodesFlag, bootnodesFlag,
nodekeyFlag, nodekeyFlag,
nodedbFlag, nodedbFlag,
listenAddrFlag, listenAddrFlag,
extAddrFlag,
} }
func discv4Ping(ctx *cli.Context) error { func discv4Ping(ctx *cli.Context) error {
@ -228,7 +234,7 @@ func discv4Test(ctx *cli.Context) error {
// startV4 starts an ephemeral discovery V4 node. // startV4 starts an ephemeral discovery V4 node.
func startV4(ctx *cli.Context) *discover.UDPv4 { func startV4(ctx *cli.Context) *discover.UDPv4 {
ln, config := makeDiscoveryConfig(ctx) ln, config := makeDiscoveryConfig(ctx)
socket := listen(ln, ctx.String(listenAddrFlag.Name)) socket := listen(ctx, ln)
disc, err := discover.ListenV4(socket, ln, config) disc, err := discover.ListenV4(socket, ln, config)
if err != nil { if err != nil {
exit(err) exit(err)
@ -266,7 +272,28 @@ func makeDiscoveryConfig(ctx *cli.Context) (*enode.LocalNode, discover.Config) {
return ln, cfg return ln, cfg
} }
func listen(ln *enode.LocalNode, addr string) *net.UDPConn { func parseExtAddr(spec string) (ip net.IP, port int, ok bool) {
ip = net.ParseIP(spec)
if ip != nil {
return ip, 0, true
}
host, portstr, err := net.SplitHostPort(spec)
if err != nil {
return nil, 0, false
}
ip = net.ParseIP(host)
if ip == nil {
return nil, 0, false
}
port, err = strconv.Atoi(portstr)
if err != nil {
return nil, 0, false
}
return ip, port, true
}
func listen(ctx *cli.Context, ln *enode.LocalNode) *net.UDPConn {
addr := ctx.String(listenAddrFlag.Name)
if addr == "" { if addr == "" {
addr = "0.0.0.0:0" addr = "0.0.0.0:0"
} }
@ -274,6 +301,8 @@ func listen(ln *enode.LocalNode, addr string) *net.UDPConn {
if err != nil { if err != nil {
exit(err) exit(err)
} }
// Configure UDP endpoint in ENR from listener address.
usocket := socket.(*net.UDPConn) usocket := socket.(*net.UDPConn)
uaddr := socket.LocalAddr().(*net.UDPAddr) uaddr := socket.LocalAddr().(*net.UDPAddr)
if uaddr.IP.IsUnspecified() { if uaddr.IP.IsUnspecified() {
@ -282,6 +311,22 @@ func listen(ln *enode.LocalNode, addr string) *net.UDPConn {
ln.SetFallbackIP(uaddr.IP) ln.SetFallbackIP(uaddr.IP)
} }
ln.SetFallbackUDP(uaddr.Port) ln.SetFallbackUDP(uaddr.Port)
// If an ENR endpoint is set explicitly on the command-line, override
// the information from the listening address. Note this is careful not
// to set the UDP port if the external address doesn't have it.
extAddr := ctx.String(extAddrFlag.Name)
if extAddr != "" {
ip, port, ok := parseExtAddr(extAddr)
if !ok {
exit(fmt.Errorf("-%s: invalid external address %q", extAddrFlag.Name, extAddr))
}
ln.SetStaticIP(ip)
if port != 0 {
ln.SetFallbackUDP(port)
}
}
return usocket return usocket
} }
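To illustrate what the new -extaddr handling above does to the node record, here is a minimal standalone sketch (example address values, in-memory node DB) that pins a static IP and fallback UDP port on an enode.LocalNode:

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	db, err := enode.OpenDB("") // empty path = in-memory node database
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	ln := enode.NewLocalNode(db, key)

	// Roughly what `-extaddr 198.51.100.7:30303` does: pin the IP and,
	// because a port was given, override the announced UDP port too.
	ln.SetStaticIP(net.ParseIP("198.51.100.7"))
	ln.SetFallbackUDP(30303)

	fmt.Println(ln.Node()) // enode://... carrying the announced endpoint
}
```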

View File

@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v5test" "github.com/ethereum/go-ethereum/cmd/devp2p/internal/v5test"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
@ -42,18 +43,21 @@ var (
Name: "ping", Name: "ping",
Usage: "Sends ping to a node", Usage: "Sends ping to a node",
Action: discv5Ping, Action: discv5Ping,
Flags: discoveryNodeFlags,
} }
discv5ResolveCommand = &cli.Command{ discv5ResolveCommand = &cli.Command{
Name: "resolve", Name: "resolve",
Usage: "Finds a node in the DHT", Usage: "Finds a node in the DHT",
Action: discv5Resolve, Action: discv5Resolve,
Flags: []cli.Flag{bootnodesFlag}, Flags: discoveryNodeFlags,
} }
discv5CrawlCommand = &cli.Command{ discv5CrawlCommand = &cli.Command{
Name: "crawl", Name: "crawl",
Usage: "Updates a nodes.json file with random nodes found in the DHT", Usage: "Updates a nodes.json file with random nodes found in the DHT",
Action: discv5Crawl, Action: discv5Crawl,
Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag}, Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{
crawlTimeoutFlag,
}),
} }
discv5TestCommand = &cli.Command{ discv5TestCommand = &cli.Command{
Name: "test", Name: "test",
@ -70,12 +74,7 @@ var (
Name: "listen", Name: "listen",
Usage: "Runs a node", Usage: "Runs a node",
Action: discv5Listen, Action: discv5Listen,
Flags: []cli.Flag{ Flags: discoveryNodeFlags,
bootnodesFlag,
nodekeyFlag,
nodedbFlag,
listenAddrFlag,
},
} }
) )
@ -137,7 +136,7 @@ func discv5Listen(ctx *cli.Context) error {
// startV5 starts an ephemeral discovery v5 node. // startV5 starts an ephemeral discovery v5 node.
func startV5(ctx *cli.Context) *discover.UDPv5 { func startV5(ctx *cli.Context) *discover.UDPv5 {
ln, config := makeDiscoveryConfig(ctx) ln, config := makeDiscoveryConfig(ctx)
socket := listen(ln, ctx.String(listenAddrFlag.Name)) socket := listen(ctx, ln)
disc, err := discover.ListenV5(socket, ln, config) disc, err := discover.ListenV5(socket, ln, config)
if err != nil { if err != nil {
exit(err) exit(err)

View File

@ -76,7 +76,7 @@ func (c *Chain) RootAt(height int) common.Hash {
// ForkID gets the fork id of the chain. // ForkID gets the fork id of the chain.
func (c *Chain) ForkID() forkid.ID { func (c *Chain) ForkID() forkid.ID {
return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len())) return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()), c.blocks[0].Time())
} }
// Shorten returns a copy chain of a desired height from the imported // Shorten returns a copy chain of a desired height from the imported

View File

@ -63,8 +63,9 @@ func (s *Suite) dial() (*Conn, error) {
conn.caps = []p2p.Cap{ conn.caps = []p2p.Cap{
{Name: "eth", Version: 66}, {Name: "eth", Version: 66},
{Name: "eth", Version: 67}, {Name: "eth", Version: 67},
{Name: "eth", Version: 68},
} }
conn.ourHighestProtoVersion = 67 conn.ourHighestProtoVersion = 68
return &conn, nil return &conn, nil
} }
@ -357,9 +358,15 @@ func (s *Suite) waitAnnounce(conn *Conn, blockAnnouncement *NewBlock) error {
return fmt.Errorf("wrong block hash in announcement: expected %v, got %v", blockAnnouncement.Block.Hash(), hashes[0].Hash) return fmt.Errorf("wrong block hash in announcement: expected %v, got %v", blockAnnouncement.Block.Hash(), hashes[0].Hash)
} }
return nil return nil
case *NewPooledTransactionHashes:
// ignore tx announcements from previous tests // ignore tx announcements from previous tests
case *NewPooledTransactionHashes66:
continue continue
case *NewPooledTransactionHashes:
continue
case *Transactions:
continue
default: default:
return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) return fmt.Errorf("unexpected: %s", pretty.Sdump(msg))
} }

View File

@ -23,6 +23,7 @@ import (
"math/rand" "math/rand"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/eth/protocols/snap"
"github.com/ethereum/go-ethereum/internal/utesting" "github.com/ethereum/go-ethereum/internal/utesting"
@ -90,7 +91,7 @@ func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
{4000, s.chain.RootAt(0), zero, ffHash, 0, zero, zero}, {4000, s.chain.RootAt(0), zero, ffHash, 0, zero, zero},
// A 127 block old stateroot, expected to be served // A 127 block old stateroot, expected to be served
{4000, s.chain.RootAt(999 - 127), zero, ffHash, 77, firstKey, common.HexToHash("0xe4c6fdef5dd4e789a2612390806ee840b8ec0fe52548f8b4efe41abb20c37aac")}, {4000, s.chain.RootAt(999 - 127), zero, ffHash, 77, firstKey, common.HexToHash("0xe4c6fdef5dd4e789a2612390806ee840b8ec0fe52548f8b4efe41abb20c37aac")},
// A root which is not actually an account root, but a storage orot // A root which is not actually an account root, but a storage root
{4000, storageRoot, zero, ffHash, 0, zero, zero}, {4000, storageRoot, zero, ffHash, 0, zero, zero},
// And some non-sensical requests // And some non-sensical requests
@ -121,7 +122,7 @@ type stRangesTest struct {
expSlots int expSlots int
} }
// TestSnapGetStorageRange various forms of GetStorageRanges requests. // TestSnapGetStorageRanges various forms of GetStorageRanges requests.
func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) { func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
var ( var (
ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
@ -210,13 +211,6 @@ type byteCodesTest struct {
expHashes int expHashes int
} }
var (
// emptyRoot is the known root hash of an empty trie.
emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
// emptyCode is the known hash of the empty EVM bytecode.
emptyCode = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
)
// TestSnapGetByteCodes various forms of GetByteCodes requests. // TestSnapGetByteCodes various forms of GetByteCodes requests.
func (s *Suite) TestSnapGetByteCodes(t *utesting.T) { func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
// The halfchain import should yield these bytecodes // The halfchain import should yield these bytecodes
@ -263,15 +257,15 @@ func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
}, },
// Empties // Empties
{ {
nBytes: 10000, hashes: []common.Hash{emptyRoot}, nBytes: 10000, hashes: []common.Hash{types.EmptyRootHash},
expHashes: 0, expHashes: 0,
}, },
{ {
nBytes: 10000, hashes: []common.Hash{emptyCode}, nBytes: 10000, hashes: []common.Hash{types.EmptyCodeHash},
expHashes: 1, expHashes: 1,
}, },
{ {
nBytes: 10000, hashes: []common.Hash{emptyCode, emptyCode, emptyCode}, nBytes: 10000, hashes: []common.Hash{types.EmptyCodeHash, types.EmptyCodeHash, types.EmptyCodeHash},
expHashes: 3, expHashes: 3,
}, },
// The existing bytecodes // The existing bytecodes
@ -363,7 +357,7 @@ func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
for i := 1; i <= 65; i++ { for i := 1; i <= 65; i++ {
accPaths = append(accPaths, pathTo(i)) accPaths = append(accPaths, pathTo(i))
} }
empty := emptyCode empty := types.EmptyCodeHash
for i, tc := range []trieNodesTest{ for i, tc := range []trieNodesTest{
{ {
root: s.chain.RootAt(999), root: s.chain.RootAt(999),
@ -406,8 +400,10 @@ func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
{[]byte{0}}, {[]byte{0}},
{[]byte{1}, []byte{0}}, {[]byte{1}, []byte{0}},
}, },
nBytes: 5000, nBytes: 5000,
expHashes: []common.Hash{}, expHashes: []common.Hash{
common.HexToHash("0x1ee1bb2fbac4d46eab331f3e8551e18a0805d084ed54647883aa552809ca968d"),
},
}, },
{ {
// The leaf is only a couple of levels down, so the continued trie traversal causes lookup failures. // The leaf is only a couple of levels down, so the continued trie traversal causes lookup failures.
@ -437,7 +433,35 @@ func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"), common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"),
}, },
}, },
} { {
/*
A test against this account, requesting trie nodes for the storage trie
{
"balance": "0",
"nonce": 1,
"root": "0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
"storage": {
"0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace": "02",
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6": "01",
"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b": "03"
},
"key": "0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844"
}
*/
root: s.chain.RootAt(999),
paths: []snap.TrieNodePathSet{
{
common.FromHex("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844"),
[]byte{0},
},
},
nBytes: 5000,
expHashes: []common.Hash{
common.HexToHash("0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790"),
},
},
}[7:] {
tc := tc tc := tc
if err := s.snapGetTrieNodes(t, &tc); err != nil { if err := s.snapGetTrieNodes(t, &tc); err != nil {
t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err) t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err)

View File

@ -510,17 +510,18 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
} }
// generate 50 txs // generate 50 txs
hashMap, _, err := generateTxs(s, 50) _, txs, err := generateTxs(s, 50)
if err != nil { if err != nil {
t.Fatalf("failed to generate transactions: %v", err) t.Fatalf("failed to generate transactions: %v", err)
} }
hashes := make([]common.Hash, len(txs))
// create new pooled tx hashes announcement types := make([]byte, len(txs))
hashes := make([]common.Hash, 0) sizes := make([]uint32, len(txs))
for _, hash := range hashMap { for i, tx := range txs {
hashes = append(hashes, hash) hashes[i] = tx.Hash()
types[i] = tx.Type()
sizes[i] = uint32(tx.Size())
} }
announce := NewPooledTransactionHashes(hashes)
// send announcement // send announcement
conn, err := s.dial() conn, err := s.dial()
@ -531,7 +532,13 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
if err = conn.peer(s.chain, nil); err != nil { if err = conn.peer(s.chain, nil); err != nil {
t.Fatalf("peering failed: %v", err) t.Fatalf("peering failed: %v", err)
} }
if err = conn.Write(announce); err != nil {
var ann Message = NewPooledTransactionHashes{Types: types, Sizes: sizes, Hashes: hashes}
if conn.negotiatedProtoVersion < eth.ETH68 {
ann = NewPooledTransactionHashes66(hashes)
}
err = conn.Write(ann)
if err != nil {
t.Fatalf("failed to write to connection: %v", err) t.Fatalf("failed to write to connection: %v", err)
} }
@ -544,9 +551,15 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket)) t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket))
} }
return return
// ignore propagated txs from previous tests // ignore propagated txs from previous tests
case *NewPooledTransactionHashes66:
continue
case *NewPooledTransactionHashes: case *NewPooledTransactionHashes:
continue continue
case *Transactions:
continue
// ignore block announcements from previous tests // ignore block announcements from previous tests
case *NewBlockHashes: case *NewBlockHashes:
continue continue

View File

@ -29,7 +29,7 @@ import (
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
) )
//var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") // var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
func (s *Suite) sendSuccessfulTxs(t *utesting.T) error { func (s *Suite) sendSuccessfulTxs(t *utesting.T) error {
@ -95,7 +95,7 @@ func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction
} }
} }
return fmt.Errorf("missing transaction: got %v missing %v", recTxs, tx.Hash()) return fmt.Errorf("missing transaction: got %v missing %v", recTxs, tx.Hash())
case *NewPooledTransactionHashes: case *NewPooledTransactionHashes66:
txHashes := *msg txHashes := *msg
// if you receive an old tx propagation, read from connection again // if you receive an old tx propagation, read from connection again
if len(txHashes) == 1 && prevTx != nil { if len(txHashes) == 1 && prevTx != nil {
@ -110,6 +110,34 @@ func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction
} }
} }
return fmt.Errorf("missing transaction announcement: got %v missing %v", txHashes, tx.Hash()) return fmt.Errorf("missing transaction announcement: got %v missing %v", txHashes, tx.Hash())
case *NewPooledTransactionHashes:
txHashes := msg.Hashes
if len(txHashes) != len(msg.Sizes) {
return fmt.Errorf("invalid msg size lengths: hashes: %v sizes: %v", len(txHashes), len(msg.Sizes))
}
if len(txHashes) != len(msg.Types) {
return fmt.Errorf("invalid msg type lengths: hashes: %v types: %v", len(txHashes), len(msg.Types))
}
// if you receive an old tx propagation, read from connection again
if len(txHashes) == 1 && prevTx != nil {
if txHashes[0] == prevTx.Hash() {
continue
}
}
for index, gotHash := range txHashes {
if gotHash == tx.Hash() {
if msg.Sizes[index] != uint32(tx.Size()) {
return fmt.Errorf("invalid tx size: got %v want %v", msg.Sizes[index], tx.Size())
}
if msg.Types[index] != tx.Type() {
return fmt.Errorf("invalid tx type: got %v want %v", msg.Types[index], tx.Type())
}
// Ok
return nil
}
}
return fmt.Errorf("missing transaction announcement: got %v missing %v", txHashes, tx.Hash())
default: default:
return fmt.Errorf("unexpected message in sendSuccessfulTx: %s", pretty.Sdump(msg)) return fmt.Errorf("unexpected message in sendSuccessfulTx: %s", pretty.Sdump(msg))
} }
@ -192,17 +220,19 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, txs []*types.Transaction
nonce = txs[len(txs)-1].Nonce() nonce = txs[len(txs)-1].Nonce()
// Wait for the transaction announcement(s) and make sure all sent txs are being propagated. // Wait for the transaction announcement(s) and make sure all sent txs are being propagated.
// all txs should be announced within 3 announcements. // all txs should be announced within a couple announcements.
recvHashes := make([]common.Hash, 0) recvHashes := make([]common.Hash, 0)
for i := 0; i < 3; i++ { for i := 0; i < 20; i++ {
switch msg := recvConn.readAndServe(s.chain, timeout).(type) { switch msg := recvConn.readAndServe(s.chain, timeout).(type) {
case *Transactions: case *Transactions:
for _, tx := range *msg { for _, tx := range *msg {
recvHashes = append(recvHashes, tx.Hash()) recvHashes = append(recvHashes, tx.Hash())
} }
case *NewPooledTransactionHashes: case *NewPooledTransactionHashes66:
recvHashes = append(recvHashes, *msg...) recvHashes = append(recvHashes, *msg...)
case *NewPooledTransactionHashes:
recvHashes = append(recvHashes, msg.Hashes...)
default: default:
if !strings.Contains(pretty.Sdump(msg), "i/o timeout") { if !strings.Contains(pretty.Sdump(msg), "i/o timeout") {
return fmt.Errorf("unexpected message while waiting to receive txs: %s", pretty.Sdump(msg)) return fmt.Errorf("unexpected message while waiting to receive txs: %s", pretty.Sdump(msg))
@ -246,11 +276,16 @@ func checkMaliciousTxPropagation(s *Suite, txs []*types.Transaction, conn *Conn)
if len(badTxs) > 0 { if len(badTxs) > 0 {
return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs) return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs)
} }
case *NewPooledTransactionHashes: case *NewPooledTransactionHashes66:
badTxs, _ := compareReceivedTxs(*msg, txs) badTxs, _ := compareReceivedTxs(*msg, txs)
if len(badTxs) > 0 { if len(badTxs) > 0 {
return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs) return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs)
} }
case *NewPooledTransactionHashes:
badTxs, _ := compareReceivedTxs(msg.Hashes, txs)
if len(badTxs) > 0 {
return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs)
}
case *Error: case *Error:
// Transaction should not be announced -> wait for timeout // Transaction should not be announced -> wait for timeout
return nil return nil

View File

@ -126,8 +126,14 @@ type NewBlock eth.NewBlockPacket
func (msg NewBlock) Code() int { return 23 } func (msg NewBlock) Code() int { return 23 }
func (msg NewBlock) ReqID() uint64 { return 0 } func (msg NewBlock) ReqID() uint64 { return 0 }
// NewPooledTransactionHashes66 is the network packet for the tx hash propagation message.
type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket66
func (msg NewPooledTransactionHashes66) Code() int { return 24 }
func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 }
// NewPooledTransactionHashes is the network packet for the tx hash propagation message. // NewPooledTransactionHashes is the network packet for the tx hash propagation message.
type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket68
func (msg NewPooledTransactionHashes) Code() int { return 24 } func (msg NewPooledTransactionHashes) Code() int { return 24 }
func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 } func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 }
@ -202,8 +208,13 @@ func (c *Conn) Read() Message {
msg = new(NewBlockHashes) msg = new(NewBlockHashes)
case (Transactions{}).Code(): case (Transactions{}).Code():
msg = new(Transactions) msg = new(Transactions)
case (NewPooledTransactionHashes{}).Code(): case (NewPooledTransactionHashes66{}).Code():
msg = new(NewPooledTransactionHashes) // Try decoding to eth68
ethMsg := new(NewPooledTransactionHashes)
if err := rlp.DecodeBytes(rawData, ethMsg); err == nil {
return ethMsg
}
msg = new(NewPooledTransactionHashes66)
case (GetPooledTransactions{}.Code()): case (GetPooledTransactions{}.Code()):
ethMsg := new(eth.GetPooledTransactionsPacket66) ethMsg := new(eth.GetPooledTransactionsPacket66)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
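The decode fallback above distinguishes the eth/68 announcement from the older hash-only form. A minimal sketch, assuming the eth/protocols/eth and core/types packages from this tree, of building the three parallel slices (types, sizes, hashes) that an eth/68 announcement carries:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

func main() {
	// A dummy, unsigned legacy transaction just to have something to announce.
	to := common.HexToAddress("0x0000000000000000000000000000000000000001")
	tx := types.NewTx(&types.LegacyTx{
		Nonce:    0,
		GasPrice: big.NewInt(1),
		Gas:      21000,
		To:       &to,
		Value:    big.NewInt(0),
	})

	// eth/68 announces the type, size and hash of every transaction as
	// parallel slices of equal length.
	var ann eth.NewPooledTransactionHashesPacket68
	for _, t := range []*types.Transaction{tx} {
		ann.Types = append(ann.Types, t.Type())
		ann.Sizes = append(ann.Sizes, uint32(t.Size()))
		ann.Hashes = append(ann.Hashes, t.Hash())
	}
	fmt.Printf("announcing %d tx(s): %x\n", len(ann.Hashes), ann.Hashes)
}
```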

View File

@ -37,9 +37,9 @@ const (
var ( var (
// Remote node under test // Remote node under test
Remote string Remote string
// IP where the first tester is listening, port will be assigned // Listen1 is the IP where the first tester is listening, port will be assigned
Listen1 string = "127.0.0.1" Listen1 string = "127.0.0.1"
// IP where the second tester is listening, port will be assigned // Listen2 is the IP where the second tester is listening, port will be assigned
// Before running the test, you may have to `sudo ifconfig lo0 add 127.0.0.2` (on MacOS at least) // Before running the test, you may have to `sudo ifconfig lo0 add 127.0.0.2` (on MacOS at least)
Listen2 string = "127.0.0.2" Listen2 string = "127.0.0.2"
) )
@ -68,7 +68,7 @@ func futureExpiration() uint64 {
return uint64(time.Now().Add(expiration).Unix()) return uint64(time.Now().Add(expiration).Unix())
} }
// This test just sends a PING packet and expects a response. // BasicPing just sends a PING packet and expects a response.
func BasicPing(t *utesting.T) { func BasicPing(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2) te := newTestEnv(Remote, Listen1, Listen2)
defer te.close() defer te.close()
@ -137,7 +137,7 @@ func (te *testenv) checkPong(reply v4wire.Packet, pingHash []byte) error {
return nil return nil
} }
// This test sends a PING packet with wrong 'to' field and expects a PONG response. // PingWrongTo sends a PING packet with wrong 'to' field and expects a PONG response.
func PingWrongTo(t *utesting.T) { func PingWrongTo(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2) te := newTestEnv(Remote, Listen1, Listen2)
defer te.close() defer te.close()
@ -154,7 +154,7 @@ func PingWrongTo(t *utesting.T) {
} }
} }
// This test sends a PING packet with wrong 'from' field and expects a PONG response. // PingWrongFrom sends a PING packet with wrong 'from' field and expects a PONG response.
func PingWrongFrom(t *utesting.T) { func PingWrongFrom(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2) te := newTestEnv(Remote, Listen1, Listen2)
defer te.close() defer te.close()
@ -172,7 +172,7 @@ func PingWrongFrom(t *utesting.T) {
} }
} }
// This test sends a PING packet with additional data at the end and expects a PONG // PingExtraData sends a PING packet with additional data at the end and expects a PONG
// response. The remote node should respond because EIP-8 mandates ignoring additional // response. The remote node should respond because EIP-8 mandates ignoring additional
// trailing data. // trailing data.
func PingExtraData(t *utesting.T) { func PingExtraData(t *utesting.T) {
@ -256,6 +256,7 @@ func WrongPacketType(t *utesting.T) {
func BondThenPingWithWrongFrom(t *utesting.T) { func BondThenPingWithWrongFrom(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2) te := newTestEnv(Remote, Listen1, Listen2)
defer te.close() defer te.close()
bond(t, te) bond(t, te)
wrongEndpoint := v4wire.Endpoint{IP: net.ParseIP("192.0.2.0")} wrongEndpoint := v4wire.Endpoint{IP: net.ParseIP("192.0.2.0")}
@ -265,10 +266,25 @@ func BondThenPingWithWrongFrom(t *utesting.T) {
To: te.remoteEndpoint(), To: te.remoteEndpoint(),
Expiration: futureExpiration(), Expiration: futureExpiration(),
}) })
if reply, _, err := te.read(te.l1); err != nil {
t.Fatal(err) waitForPong:
} else if err := te.checkPong(reply, pingHash); err != nil { for {
t.Fatal(err) reply, _, err := te.read(te.l1)
if err != nil {
t.Fatal(err)
}
switch reply.Kind() {
case v4wire.PongPacket:
if err := te.checkPong(reply, pingHash); err != nil {
t.Fatal(err)
}
break waitForPong
case v4wire.FindnodePacket:
// FINDNODE from the node is acceptable here since the endpoint
// verification was performed earlier.
default:
t.Fatalf("Expected PONG, got %v %v", reply.Name(), reply)
}
} }
} }
@ -379,7 +395,7 @@ func FindnodePastExpiration(t *utesting.T) {
// bond performs the endpoint proof with the remote node. // bond performs the endpoint proof with the remote node.
func bond(t *utesting.T, te *testenv) { func bond(t *utesting.T, te *testenv) {
te.send(te.l1, &v4wire.Ping{ pingHash := te.send(te.l1, &v4wire.Ping{
Version: 4, Version: 4,
From: te.localEndpoint(te.l1), From: te.localEndpoint(te.l1),
To: te.remoteEndpoint(), To: te.remoteEndpoint(),
@ -401,7 +417,9 @@ func bond(t *utesting.T, te *testenv) {
}) })
gotPing = true gotPing = true
case *v4wire.Pong: case *v4wire.Pong:
// TODO: maybe verify pong data here if err := te.checkPong(req, pingHash); err != nil {
t.Fatal(err)
}
gotPong = true gotPong = true
} }
} }

View File

@ -58,7 +58,7 @@ func (s *Suite) AllTests() []utesting.Test {
} }
} }
// This test sends PING and expects a PONG response. // TestPing sends PING and expects a PONG response.
func (s *Suite) TestPing(t *utesting.T) { func (s *Suite) TestPing(t *utesting.T) {
conn, l1 := s.listen1(t) conn, l1 := s.listen1(t)
defer conn.close() defer conn.close()
@ -84,7 +84,7 @@ func checkPong(t *utesting.T, pong *v5wire.Pong, ping *v5wire.Ping, c net.Packet
} }
} }
// This test sends PING with a 9-byte request ID, which isn't allowed by the spec. // TestPingLargeRequestID sends PING with a 9-byte request ID, which isn't allowed by the spec.
// The remote node should not respond. // The remote node should not respond.
func (s *Suite) TestPingLargeRequestID(t *utesting.T) { func (s *Suite) TestPingLargeRequestID(t *utesting.T) {
conn, l1 := s.listen1(t) conn, l1 := s.listen1(t)
@ -103,7 +103,7 @@ func (s *Suite) TestPingLargeRequestID(t *utesting.T) {
} }
} }
// In this test, a session is established from one IP as usual. The session is then reused // TestPingMultiIP establishes a session from one IP as usual. The session is then reused
// on another IP, which shouldn't work. The remote node should respond with WHOAREYOU for // on another IP, which shouldn't work. The remote node should respond with WHOAREYOU for
// the attempt from a different IP. // the attempt from a different IP.
func (s *Suite) TestPingMultiIP(t *utesting.T) { func (s *Suite) TestPingMultiIP(t *utesting.T) {
@ -153,7 +153,7 @@ func (s *Suite) TestPingMultiIP(t *utesting.T) {
} }
} }
// This test starts a handshake, but doesn't finish it and sends a second ordinary message // TestPingHandshakeInterrupted starts a handshake, but doesn't finish it and sends a second ordinary message
// packet instead of a handshake message packet. The remote node should respond with // packet instead of a handshake message packet. The remote node should respond with
// another WHOAREYOU challenge for the second packet. // another WHOAREYOU challenge for the second packet.
func (s *Suite) TestPingHandshakeInterrupted(t *utesting.T) { func (s *Suite) TestPingHandshakeInterrupted(t *utesting.T) {
@ -180,7 +180,7 @@ func (s *Suite) TestPingHandshakeInterrupted(t *utesting.T) {
} }
} }
// This test sends TALKREQ and expects an empty TALKRESP response. // TestTalkRequest sends TALKREQ and expects an empty TALKRESP response.
func (s *Suite) TestTalkRequest(t *utesting.T) { func (s *Suite) TestTalkRequest(t *utesting.T) {
conn, l1 := s.listen1(t) conn, l1 := s.listen1(t)
defer conn.close() defer conn.close()
@ -215,7 +215,7 @@ func (s *Suite) TestTalkRequest(t *utesting.T) {
} }
} }
// This test checks that the remote node returns itself for FINDNODE with distance zero. // TestFindnodeZeroDistance checks that the remote node returns itself for FINDNODE with distance zero.
func (s *Suite) TestFindnodeZeroDistance(t *utesting.T) { func (s *Suite) TestFindnodeZeroDistance(t *utesting.T) {
conn, l1 := s.listen1(t) conn, l1 := s.listen1(t)
defer conn.close() defer conn.close()
@ -232,7 +232,7 @@ func (s *Suite) TestFindnodeZeroDistance(t *utesting.T) {
} }
} }
// In this test, multiple nodes ping the node under test. After waiting for them to be // TestFindnodeResults pings the node under test from multiple nodes. After waiting for them to be
// accepted into the remote table, the test checks that they are returned by FINDNODE. // accepted into the remote table, the test checks that they are returned by FINDNODE.
func (s *Suite) TestFindnodeResults(t *utesting.T) { func (s *Suite) TestFindnodeResults(t *utesting.T) {
// Create bystanders. // Create bystanders.
@ -355,7 +355,7 @@ func (bn *bystander) loop() {
wasAdded = true wasAdded = true
bn.notifyAdded() bn.notifyAdded()
case *v5wire.Findnode: case *v5wire.Findnode:
bn.conn.write(bn.l, &v5wire.Nodes{ReqID: p.ReqID, Total: 1}, nil) bn.conn.write(bn.l, &v5wire.Nodes{ReqID: p.ReqID, RespCount: 1}, nil)
wasAdded = true wasAdded = true
bn.notifyAdded() bn.notifyAdded()
case *v5wire.TalkRequest: case *v5wire.TalkRequest:

View File

@ -44,6 +44,8 @@ func (p *readError) Unwrap() error { return p.err }
func (p *readError) RequestID() []byte { return nil } func (p *readError) RequestID() []byte { return nil }
func (p *readError) SetRequestID([]byte) {} func (p *readError) SetRequestID([]byte) {}
func (p *readError) AppendLogInfo(ctx []interface{}) []interface{} { return ctx }
// readErrorf creates a readError with the given text. // readErrorf creates a readError with the given text.
func readErrorf(format string, args ...interface{}) *readError { func readErrorf(format string, args ...interface{}) *readError {
return &readError{fmt.Errorf(format, args...)} return &readError{fmt.Errorf(format, args...)}
@ -86,7 +88,7 @@ func newConn(dest *enode.Node, log logger) *conn {
localNode: ln, localNode: ln,
remote: dest, remote: dest,
remoteAddr: &net.UDPAddr{IP: dest.IP(), Port: dest.UDP()}, remoteAddr: &net.UDPAddr{IP: dest.IP(), Port: dest.UDP()},
codec: v5wire.NewCodec(ln, key, mclock.System{}), codec: v5wire.NewCodec(ln, key, mclock.System{}, nil),
log: log, log: log,
} }
} }
@ -171,16 +173,16 @@ func (tc *conn) findnode(c net.PacketConn, dists []uint) ([]*enode.Node, error)
// Check total count. It should be greater than one // Check total count. It should be greater than one
// and needs to be the same across all responses. // and needs to be the same across all responses.
if first { if first {
if resp.Total == 0 || resp.Total > 6 { if resp.RespCount == 0 || resp.RespCount > 6 {
return nil, fmt.Errorf("invalid NODES response 'total' %d (not in (0,7))", resp.Total) return nil, fmt.Errorf("invalid NODES response count %d (not in (0,7))", resp.RespCount)
} }
total = resp.Total total = resp.RespCount
n = int(total) - 1 n = int(total) - 1
first = false first = false
} else { } else {
n-- n--
if resp.Total != total { if resp.RespCount != total {
return nil, fmt.Errorf("invalid NODES response 'total' %d (!= %d)", resp.Total, total) return nil, fmt.Errorf("invalid NODES response count %d (!= %d)", resp.RespCount, total)
} }
} }
// Check nodes. // Check nodes.

View File

@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
@ -31,7 +32,9 @@ var (
Usage: "Operations on node keys", Usage: "Operations on node keys",
Subcommands: []*cli.Command{ Subcommands: []*cli.Command{
keyGenerateCommand, keyGenerateCommand,
keyToIDCommand,
keyToNodeCommand, keyToNodeCommand,
keyToRecordCommand,
}, },
} }
keyGenerateCommand = &cli.Command{ keyGenerateCommand = &cli.Command{
@ -40,6 +43,13 @@ var (
ArgsUsage: "keyfile", ArgsUsage: "keyfile",
Action: genkey, Action: genkey,
} }
keyToIDCommand = &cli.Command{
Name: "to-id",
Usage: "Creates a node ID from a node key file",
ArgsUsage: "keyfile",
Action: keyToID,
Flags: []cli.Flag{},
}
keyToNodeCommand = &cli.Command{ keyToNodeCommand = &cli.Command{
Name: "to-enode", Name: "to-enode",
Usage: "Creates an enode URL from a node key file", Usage: "Creates an enode URL from a node key file",
@ -47,6 +57,13 @@ var (
Action: keyToURL, Action: keyToURL,
Flags: []cli.Flag{hostFlag, tcpPortFlag, udpPortFlag}, Flags: []cli.Flag{hostFlag, tcpPortFlag, udpPortFlag},
} }
keyToRecordCommand = &cli.Command{
Name: "to-enr",
Usage: "Creates an ENR from a node key file",
ArgsUsage: "keyfile",
Action: keyToRecord,
Flags: []cli.Flag{hostFlag, tcpPortFlag, udpPortFlag},
}
) )
var ( var (
@ -80,9 +97,36 @@ func genkey(ctx *cli.Context) error {
return crypto.SaveECDSA(file, key) return crypto.SaveECDSA(file, key)
} }
func keyToID(ctx *cli.Context) error {
n, err := makeRecord(ctx)
if err != nil {
return err
}
fmt.Println(n.ID())
return nil
}
func keyToURL(ctx *cli.Context) error { func keyToURL(ctx *cli.Context) error {
n, err := makeRecord(ctx)
if err != nil {
return err
}
fmt.Println(n.URLv4())
return nil
}
func keyToRecord(ctx *cli.Context) error {
n, err := makeRecord(ctx)
if err != nil {
return err
}
fmt.Println(n.String())
return nil
}
func makeRecord(ctx *cli.Context) (*enode.Node, error) {
if ctx.NArg() != 1 { if ctx.NArg() != 1 {
return fmt.Errorf("need key file as argument") return nil, fmt.Errorf("need key file as argument")
} }
var ( var (
@ -93,13 +137,26 @@ func keyToURL(ctx *cli.Context) error {
) )
key, err := crypto.LoadECDSA(file) key, err := crypto.LoadECDSA(file)
if err != nil { if err != nil {
return err return nil, err
} }
ip := net.ParseIP(host)
if ip == nil { var r enr.Record
return fmt.Errorf("invalid IP address %q", host) if host != "" {
ip := net.ParseIP(host)
if ip == nil {
return nil, fmt.Errorf("invalid IP address %q", host)
}
r.Set(enr.IP(ip))
} }
node := enode.NewV4(&key.PublicKey, ip, tcp, udp) if udp != 0 {
fmt.Println(node.URLv4()) r.Set(enr.UDP(udp))
return nil }
if tcp != 0 {
r.Set(enr.TCP(tcp))
}
if err := enode.SignV4(&r, key); err != nil {
return nil, err
}
return enode.New(enode.ValidSchemes, &r)
} }
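A minimal standalone sketch (example IP/ports, freshly generated key) of the three representations that the new to-id, to-enode and to-enr subcommands derive from a single node key:

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}

	// to-id / to-enode style output, using a plain v4 node.
	n4 := enode.NewV4(&key.PublicKey, net.ParseIP("203.0.113.5"), 30303, 30303)
	fmt.Println("id:   ", n4.ID())
	fmt.Println("enode:", n4.URLv4())

	// to-enr style output: sign a record and print its textual form.
	var r enr.Record
	r.Set(enr.IP(net.ParseIP("203.0.113.5")))
	r.Set(enr.TCP(30303))
	r.Set(enr.UDP(30303))
	if err := enode.SignV4(&r, key); err != nil {
		log.Fatal(err)
	}
	n, err := enode.New(enode.ValidSchemes, &r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("enr:  ", n.String())
}
```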

View File

@ -19,30 +19,17 @@ package main
import ( import (
"fmt" "fmt"
"os" "os"
"path/filepath"
"github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
var ( var app = flags.NewApp("go-ethereum devp2p tool")
// Git information set by linker when building with ci.go.
gitCommit string
gitDate string
app = &cli.App{
Name: filepath.Base(os.Args[0]),
Usage: "go-ethereum devp2p tool",
Version: params.VersionWithCommit(gitCommit, gitDate),
Writer: os.Stdout,
HideVersion: true,
}
)
func init() { func init() {
// Set up the CLI app. app.HideVersion = true
app.Flags = append(app.Flags, debug.Flags...) app.Flags = append(app.Flags, debug.Flags...)
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
flags.MigrateGlobalFlags(ctx) flags.MigrateGlobalFlags(ctx)
@ -56,6 +43,7 @@ func init() {
fmt.Fprintf(os.Stderr, "No such command: %s\n", cmd) fmt.Fprintf(os.Stderr, "No such command: %s\n", cmd)
os.Exit(1) os.Exit(1)
} }
// Add subcommands. // Add subcommands.
app.Commands = []*cli.Command{ app.Commands = []*cli.Command{
enrdumpCommand, enrdumpCommand,

View File

@ -181,7 +181,7 @@ func parseFilterLimit(args []string) (int, error) {
return limit, nil return limit, nil
} }
// andFilter parses node filters in args and and returns a single filter that requires all // andFilter parses node filters in args and returns a single filter that requires all
// of them to match. // of them to match.
func andFilter(args []string) (nodeFilter, error) { func andFilter(args []string) (nodeFilter, error) {
checks, err := parseFilters(args) checks, err := parseFilters(args)
@ -233,8 +233,6 @@ func ethFilter(args []string) (nodeFilter, error) {
filter = forkid.NewStaticFilter(params.RinkebyChainConfig, params.RinkebyGenesisHash) filter = forkid.NewStaticFilter(params.RinkebyChainConfig, params.RinkebyGenesisHash)
case "goerli": case "goerli":
filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash) filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash)
case "ropsten":
filter = forkid.NewStaticFilter(params.RopstenChainConfig, params.RopstenGenesisHash)
case "sepolia": case "sepolia":
filter = forkid.NewStaticFilter(params.SepoliaChainConfig, params.SepoliaGenesisHash) filter = forkid.NewStaticFilter(params.SepoliaChainConfig, params.SepoliaGenesisHash)
default: default:

View File

@ -28,14 +28,10 @@ const (
defaultKeyfileName = "keyfile.json" defaultKeyfileName = "keyfile.json"
) )
// Git SHA1 commit hash of the release (set via linker flags)
var gitCommit = ""
var gitDate = ""
var app *cli.App var app *cli.App
func init() { func init() {
app = flags.NewApp(gitCommit, gitDate, "an Ethereum key manager") app = flags.NewApp("Ethereum key manager")
app.Commands = []*cli.Command{ app.Commands = []*cli.Command{
commandGenerate, commandGenerate,
commandInspect, commandInspect,

View File

@ -1,56 +1,196 @@
## EVM state transition tool # EVM tool
The EVM tool provides a few useful subcommands to facilitate testing at the EVM
layer.
* transition tool (`t8n`) : a stateless state transition utility
* transaction tool (`t9n`) : a transaction validation utility
* block builder tool (`b11r`): a block assembler utility
## State transition tool (`t8n`)
The `evm t8n` tool is a stateless state transition utility. It is a utility The `evm t8n` tool is a stateless state transition utility. It is a utility
which can which can
1. Take a prestate, including 1. Take a prestate, including
- Accounts, - Accounts,
- Block context information, - Block context information,
- Previous block hashes (*optional) - Previous block hashes (*optional)
2. Apply a set of transactions, 2. Apply a set of transactions,
3. Apply a mining-reward (*optional), 3. Apply a mining-reward (*optional),
4. And generate a post-state, including 4. And generate a post-state, including
- State root, transaction root, receipt root, - State root, transaction root, receipt root,
- Information about rejected transactions, - Information about rejected transactions,
- Optionally: a full or partial post-state dump - Optionally: a full or partial post-state dump
## Specification ### Specification
The idea is to specify the behaviour of this binary very _strictly_, so that other The idea is to specify the behaviour of this binary very _strictly_, so that other
node implementors can build replicas based on their own state-machines, and the node implementors can build replicas based on their own state-machines, and the
state generators can swap between a `geth`-based implementation and a `parityvm`-based state generators can swap between a \`geth\`-based implementation and a \`parityvm\`-based
implementation. implementation.
### Command line params #### Command line params
Command line params that has to be supported are Command line params that need to be supported are
```
--trace Output full trace logs to files <txhash>.jsonl
--trace.nomemory Disable full memory dump in traces
--trace.nostack Disable stack output in traces
--trace.noreturndata Disable return data output in traces
--output.basedir value Specifies where output files are placed. Will be created if it does not exist.
--output.alloc alloc Determines where to put the alloc of the post-state.
`stdout` - into the stdout output
`stderr` - into the stderr output
--output.result result Determines where to put the result (stateroot, txroot etc) of the post-state.
`stdout` - into the stdout output
`stderr` - into the stderr output
--output.body value If set, the RLP of the transactions (block body) will be written to this file.
--input.txs stdin stdin or file name of where to find the transactions to apply. If the file prefix is '.rlp', then the data is interpreted as an RLP list of signed transactions.The '.rlp' format is identical to the output.body format. (default: "txs.json")
--state.fork value Name of ruleset to use.
--state.chainid value ChainID to use (default: 1)
--state.reward value Mining reward. Set to -1 to disable (default: 0)
``` ```
--input.alloc value (default: "alloc.json")
--input.env value (default: "env.json")
--input.txs value (default: "txs.json")
--output.alloc value (default: "alloc.json")
--output.basedir value
--output.body value
--output.result value (default: "result.json")
--state.chainid value (default: 1)
--state.fork value (default: "GrayGlacier")
--state.reward value (default: 0)
--trace.memory (default: false)
--trace.nomemory (default: true)
--trace.noreturndata (default: true)
--trace.nostack (default: false)
--trace.returndata (default: false)
```
#### Objects
### Error codes and output The transition tool uses JSON objects to read and write data related to the transition operation. The
following object definitions are required.
##### `alloc`
The `alloc` object defines the prestate that transition will begin with.
```go
// Map of address to account definition.
type Alloc map[common.Address]Account
// Genesis account. Each field is optional.
type Account struct {
Code []byte `json:"code"`
Storage map[common.Hash]common.Hash `json:"storage"`
Balance *big.Int `json:"balance"`
Nonce uint64 `json:"nonce"`
SecretKey []byte `json:"secretKey"`
}
```
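Since the `alloc` file mirrors go-ethereum's genesis allocation format, a minimal sketch (assuming the core.GenesisAlloc type from this tree; `alloc.json` is whatever prestate file you point it at) of loading one:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/core"
)

func main() {
	data, err := os.ReadFile("alloc.json")
	if err != nil {
		panic(err)
	}
	// GenesisAlloc is a map[common.Address]GenesisAccount with custom
	// JSON decoding for hex-encoded balances and storage.
	var alloc core.GenesisAlloc
	if err := json.Unmarshal(data, &alloc); err != nil {
		panic(err)
	}
	for addr, acct := range alloc {
		fmt.Printf("%s balance=%v nonce=%d\n", addr, acct.Balance, acct.Nonce)
	}
}
```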
##### `env`
The `env` object defines the environmental context in which the transition will
take place.
```go
type Env struct {
// required
CurrentCoinbase common.Address `json:"currentCoinbase"`
CurrentGasLimit uint64 `json:"currentGasLimit"`
CurrentNumber uint64 `json:"currentNumber"`
CurrentTimestamp uint64 `json:"currentTimestamp"`
Withdrawals []*Withdrawal `json:"withdrawals"`
// optional
CurrentDifficulty *big.Int `json:"currentDifficulty"`
CurrentRandom *big.Int `json:"currentRandom"`
CurrentBaseFee *big.Int `json:"currentBaseFee"`
ParentDifficulty *big.Int `json:"parentDifficulty"`
ParentGasUsed uint64 `json:"parentGasUsed"`
ParentGasLimit uint64 `json:"parentGasLimit"`
ParentTimestamp uint64 `json:"parentTimestamp"`
BlockHashes map[uint64]common.Hash `json:"blockHashes"`
ParentUncleHash common.Hash `json:"parentUncleHash"`
Ommers []Ommer `json:"ommers"`
}
type Ommer struct {
Delta uint64 `json:"delta"`
Address common.Address `json:"address"`
}
type Withdrawal struct {
Index uint64 `json:"index"`
ValidatorIndex uint64 `json:"validatorIndex"`
Recipient common.Address `json:"recipient"`
Amount *big.Int `json:"amount"`
}
```
##### `txs`
The `txs` object is an array of any of the transaction types: `LegacyTx`,
`AccessListTx`, or `DynamicFeeTx`.
```go
type LegacyTx struct {
Nonce uint64 `json:"nonce"`
GasPrice *big.Int `json:"gasPrice"`
Gas uint64 `json:"gas"`
To *common.Address `json:"to"`
Value *big.Int `json:"value"`
Data []byte `json:"data"`
V *big.Int `json:"v"`
R *big.Int `json:"r"`
S *big.Int `json:"s"`
SecretKey *common.Hash `json:"secretKey"`
}
type AccessList []AccessTuple
type AccessTuple struct {
Address common.Address `json:"address" gencodec:"required"`
StorageKeys []common.Hash `json:"storageKeys" gencodec:"required"`
}
type AccessListTx struct {
ChainID *big.Int `json:"chainId"`
Nonce uint64 `json:"nonce"`
GasPrice *big.Int `json:"gasPrice"`
Gas uint64 `json:"gas"`
To *common.Address `json:"to"`
Value *big.Int `json:"value"`
Data []byte `json:"data"`
AccessList AccessList `json:"accessList"`
V *big.Int `json:"v"`
R *big.Int `json:"r"`
S *big.Int `json:"s"`
SecretKey *common.Hash `json:"secretKey"`
}
type DynamicFeeTx struct {
ChainID *big.Int `json:"chainId"`
Nonce uint64 `json:"nonce"`
GasTipCap *big.Int `json:"maxPriorityFeePerGas"`
GasFeeCap *big.Int `json:"maxFeePerGas"`
Gas uint64 `json:"gas"`
To *common.Address `json:"to"`
Value *big.Int `json:"value"`
Data []byte `json:"data"`
AccessList AccessList `json:"accessList"`
V *big.Int `json:"v"`
R *big.Int `json:"r"`
S *big.Int `json:"s"`
SecretKey *common.Hash `json:"secretKey"`
}
```
##### `result`
The `result` object is output after a transition is executed. It includes
information about the post-transition environment.
```go
type ExecutionResult struct {
StateRoot common.Hash `json:"stateRoot"`
TxRoot common.Hash `json:"txRoot"`
ReceiptRoot common.Hash `json:"receiptsRoot"`
LogsHash common.Hash `json:"logsHash"`
Bloom types.Bloom `json:"logsBloom"`
Receipts types.Receipts `json:"receipts"`
Rejected []*rejectedTx `json:"rejected,omitempty"`
Difficulty *big.Int `json:"currentDifficulty"`
GasUsed uint64 `json:"gasUsed"`
BaseFee *big.Int `json:"currentBaseFee,omitempty"`
}
```
#### Error codes and output
All logging should happen against `stderr`.
There are a few (not many) errors that can occur, those are defined below.
##### EVM-based errors (`2` to `9`)
- Other EVM error. Exit code `2`
- Failed configuration: when a non-supported or invalid fork was specified. Exit code `3`.
- Block history is not supplied, but needed for a `BLOCKHASH` operation. If `BLOCKHASH`
is invoked targeting a block which history has not been provided for, the program will
exit with code `4`.
##### IO errors (`10`-`20`)
- Invalid input json: the supplied data could not be marshalled.
The program will exit with code `10`
- IO problems: failure to load or save files, the program will exit with code `11`
```
# This should exit with 3
./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --state.fork=Frontier+1346 2>/dev/null
exitcode:3 OK
```
#### Forks
The chain configuration to be used for a transition is specified via the
`--state.fork` CLI flag. A list of possible values and configurations can be
found in [`tests/init.go`](tests/init.go).
#### Examples
##### Basic usage
Invoking it with the provided example files
```
./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --state.fork=Berlin
```
Two resulting files:
@ -94,7 +246,7 @@
{
"stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13",
"txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d",
"receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [
@ -116,78 +268,82 @@
"index": 1,
"error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
}
],
"currentDifficulty": "0x20000",
"gasUsed": "0x5208"
}
```
We can make them spit out the data to e.g. `stdout` like this:
```
./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.result=stdout --output.alloc=stdout --state.fork=Berlin
```
Output:
```json
{
"alloc": {
"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": {
"balance": "0xfeed1a9d",
"nonce": "0x1"
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x5ffd4878be161d74",
"nonce": "0xac"
},
"0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0xa410"
}
},
"result": {
"stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13",
"txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d",
"receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [
{
"root": "0x",
"status": "0x1",
"cumulativeGasUsed": "0x5208",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null, "receipts": [
"transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673", {
"contractAddress": "0x0000000000000000000000000000000000000000", "root": "0x",
"gasUsed": "0x5208", "status": "0x1",
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "cumulativeGasUsed": "0x5208",
"transactionIndex": "0x0" "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
} "logs": null,
], "transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673",
"rejected": [ "contractAddress": "0x0000000000000000000000000000000000000000",
{ "gasUsed": "0x5208",
"index": 1, "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" "transactionIndex": "0x0"
} }
] ],
} "rejected": [
{
"index": 1,
"error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
}
],
"currentDifficulty": "0x20000",
"gasUsed": "0x5208"
}
} }
``` ```
#### About Ommers
Mining rewards and ommer rewards might need to be added. This is how those are applied:
- `block_reward` is the block mining reward for the miner (`0xaa`), of a block at height `N`.
- For each ommer (mined by `0xbb`), with blocknumber `N-delta`
- (where `delta` is the difference between the current block and the ommer)
- The account `0xbb` (ommer miner) is awarded `(8-delta)/ 8 * block_reward`
- The account `0xaa` (block miner) is awarded `block_reward / 32`
To make `t8n` apply these, the following inputs are required:
- `--state.reward`
- For ethash, it is `5000000000000000000` `wei`,
- If this is not defined, mining rewards are not applied,
- A value of `0` is valid, and causes accounts to be 'touched'.
- For each ommer, the tool needs to be given an `address` and a `delta`. This
is done via the `ommers` field in `env`.
Note: the tool does not verify that e.g. the normal uncle rules apply,
and allows e.g. two uncles at the same height, or the uncle-distance. This means that
@ -208,42 +364,38 @@ Example:
]
}
```
When applying this, using a reward of `0x80`
Output:
```json
{
"alloc": {
"0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": {
"balance": "0x88"
},
"0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": {
"balance": "0x70"
},
"0xcccccccccccccccccccccccccccccccccccccccc": {
"balance": "0x60"
}
}
}
```
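As a quick sanity check of the numbers above (a sketch, assuming all three accounts start with a zero balance): with `block_reward = 0x80` and ommers at `delta` 1 and 2, the rules yield exactly the balances shown.
```go
package main

import "fmt"

func main() {
	const blockReward = 0x80 // 128 wei, the --state.reward value used above

	// Ommer miners get (8-delta)/8 * block_reward each.
	ommerDeltas := []uint64{1, 2}
	miner := uint64(blockReward) // base reward for the block miner (0xaa)
	for _, delta := range ommerDeltas {
		ommer := (8 - delta) * blockReward / 8
		miner += blockReward / 32 // block miner gets an extra 1/32 per included ommer
		fmt.Printf("ommer at delta %d: %#x\n", delta, ommer) // 0x70, then 0x60
	}
	fmt.Printf("block miner: %#x\n", miner) // 0x88
}
```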
#### Future EIPS
It is also possible to experiment with future EIPs that are not yet defined in a hard fork.
Example, putting EIP-1344 into Frontier:
```
./evm t8n --state.fork=Frontier+1344 --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json
```
#### Block history
The `BLOCKHASH` opcode requires blockhashes to be provided by the caller, inside the `env`.
If a required blockhash is not provided, the exit code should be `4`:
Example where blockhashes are provided:
```
./evm t8n --input.alloc=./testdata/3/alloc.json --input.txs=./testdata/3/txs.json --input.env=./testdata/3/env.json --trace --state.fork=Berlin
```
@ -251,44 +403,34 @@
cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2
```
```
{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memSize":0,"stack":[],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memSize":0,"stack":["0x1"],"depth":1,"refund":0,"opName":"BLOCKHASH"}
{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"depth":1,"refund":0,"opName":"STOP"}
{"output":"","gasUsed":"0x17"}
```
In this example, the caller has not provided the required blockhash:
```
./evm t8n --input.alloc=./testdata/4/alloc.json --input.txs=./testdata/4/txs.json --input.env=./testdata/4/env.json --trace --state.fork=Berlin
ERROR(4): getHash(3) invoked, blockhash for that block not provided
```
Error code: 4
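Conceptually (a simplified sketch, not the actual implementation), `BLOCKHASH` is served from the `blockHashes` map in `env`, and a missing entry aborts the run with the error shown above:
```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// makeGetHash mirrors, in simplified form, how t8n serves BLOCKHASH from the
// env's blockHashes map; a missing entry is a fatal error (exit code 4).
func makeGetHash(blockHashes map[uint64]common.Hash) func(uint64) common.Hash {
	return func(num uint64) common.Hash {
		h, ok := blockHashes[num]
		if !ok {
			panic(fmt.Sprintf("getHash(%d) invoked, blockhash for that block not provided", num))
		}
		return h
	}
}

func main() {
	getHash := makeGetHash(map[uint64]common.Hash{
		1: common.HexToHash("0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"),
	})
	fmt.Println(getHash(1)) // present
	// getHash(3) would panic: blockhash for block 3 not provided
}
```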
#### Chaining
Another thing that can be done is to chain invocations:
```
./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --state.fork=Berlin --output.alloc=stdout | ./evm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json --state.fork=Berlin
```
What happened here is that we first applied two identical transactions, so the second one was rejected.
Then, taking the poststate alloc as the input for the next state, we tried again to include
the same two transactions: this time, both failed due to too low nonce.
In order to meaningfully chain invocations, one would need to provide meaningful new `env`, otherwise the
actual blocknumber (exposed to the EVM) would not increase.
#### Transactions in RLP form
It is possible to provide already-signed transactions as input, using an `input.txs` which ends with the `rlp` suffix.
The input format for RLP-form transactions is _identical_ to the _output_ format for block bodies. Therefore, it's fully possible
to use the evm to go from `json` input to `rlp` input.
The following command takes the **json** transactions in `./testdata/13/txs.json` and signs them. After execution, they are output to `signed_txs.rlp`:
```
./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./testdata/13/txs.json --input.env=./testdata/13/env.json --output.result=alloc_jsontx.json --output.body=signed_txs.rlp
INFO [12-27|09:25:11.102] Trie dumping started root=e4b924..6aef61
INFO [12-27|09:25:11.102] Trie dumping complete accounts=3 elapsed="275.66µs"
INFO [12-27|09:25:11.102] Wrote file file=alloc.json
INFO [12-27|09:25:11.103] Wrote file file=alloc_jsontx.json
INFO [12-27|09:25:11.103] Wrote file file=signed_txs.rlp
```
The `output.body` is the rlp-list of transactions, encoded in hex and placed in a string à la `json` encoding rules:
```
cat signed_txs.rlp
"0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9" "0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9"
```
We can use `rlpdump` to check what the contents are:
```
rlpdump -hex $(cat signed_txs.rlp | jq -r )
[
@ -319,20 +460,167 @@
02f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9, 02f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9,
]
```
Now, we can use those (or any other already signed transactions) as input, like so:
```
./evm t8n --state.fork=London --input.alloc=./testdata/13/alloc.json --input.txs=./signed_txs.rlp --input.env=./testdata/13/env.json --output.result=alloc_rlptx.json
INFO [12-27|09:25:11.187] Trie dumping started root=e4b924..6aef61
INFO [12-27|09:25:11.187] Trie dumping complete accounts=3 elapsed="123.676µs"
INFO [12-27|09:25:11.187] Wrote file file=alloc.json
INFO [12-27|09:25:11.187] Wrote file file=alloc_rlptx.json
```
You might have noticed that the results from these two invocations were stored in two separate files.
And we can now finally check that they match.
```
cat alloc_jsontx.json | jq .stateRoot && cat alloc_rlptx.json | jq .stateRoot
"0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61"
"0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61"
```
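To see that the RLP input really is a block-body transaction list, here is a small sketch (file name taken from the example above) that decodes `signed_txs.rlp` back into transactions:
```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// signed_txs.rlp contains a JSON string holding the hex-encoded RLP list.
	data, err := os.ReadFile("signed_txs.rlp")
	if err != nil {
		panic(err)
	}
	var hexStr string
	if err := json.Unmarshal(data, &hexStr); err != nil {
		panic(err)
	}
	raw, err := hexutil.Decode(hexStr)
	if err != nil {
		panic(err)
	}
	var txs types.Transactions
	if err := rlp.DecodeBytes(raw, &txs); err != nil {
		panic(err)
	}
	for i, tx := range txs {
		fmt.Printf("tx %d: hash=%s nonce=%d\n", i, tx.Hash(), tx.Nonce())
	}
}
```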
## Transaction tool
The transaction tool is used to perform static validity checks on transactions such as:
* intrinsic gas calculation
* max values on integers
* fee semantics, such as `maxFeePerGas < maxPriorityFeePerGas`
* newer tx types on old forks
### Examples
```
./evm t9n --state.fork Homestead --input.txs testdata/15/signed_txs.rlp
[
{
"error": "transaction type not supported",
"hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476"
},
{
"error": "transaction type not supported",
"hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a"
}
]
```
```
./evm t9n --state.fork London --input.txs testdata/15/signed_txs.rlp
[
{
"address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
"hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
"intrinsicGas": "0x5208"
},
{
"address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
"hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
"intrinsicGas": "0x5208"
}
]
```
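The `intrinsicGas` values above (`0x5208` = 21000) follow the standard intrinsic-gas rule. A simplified sketch of the calculation for a plain transfer, ignoring contract creation, access lists and init-code costs:
```go
package main

import "fmt"

// intrinsicGas is a simplified version of the check t9n performs:
// 21000 base cost, plus 4 gas per zero byte and 16 gas per non-zero byte
// of calldata (post-Istanbul pricing).
func intrinsicGas(data []byte) uint64 {
	gas := uint64(21000)
	for _, b := range data {
		if b == 0 {
			gas += 4
		} else {
			gas += 16
		}
	}
	return gas
}

func main() {
	fmt.Printf("%#x\n", intrinsicGas(nil))                // 0x5208 for an empty-data transfer
	fmt.Printf("%#x\n", intrinsicGas([]byte{0x00, 0x01})) // 21000 + 4 + 16
}
```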
## Block builder tool (b11r)
The `evm b11r` tool is used to assemble and seal full block rlps.
### Specification
#### Command line params
Command line params that need to be supported are:
```
--input.header value `stdin` or file name of where to find the block header to use. (default: "header.json")
--input.ommers value `stdin` or file name of where to find the list of ommer header RLPs to use.
--input.txs value `stdin` or file name of where to find the transactions list in RLP form. (default: "txs.rlp")
--output.basedir value Specifies where output files are placed. Will be created if it does not exist.
--output.block value Determines where to put the output block. (default: "block.json")
<file> - into the file <file>
`stdout` - into the stdout output
`stderr` - into the stderr output
--seal.clique value Seal block with Clique. `stdin` or file name of where to find the Clique sealing data.
--seal.ethash Seal block with ethash. (default: false)
--seal.ethash.dir value Path to ethash DAG. If none exists, a new DAG will be generated.
--seal.ethash.mode value Defines the type and amount of PoW verification an ethash engine makes. (default: "normal")
--verbosity value Sets the verbosity level. (default: 3)
```
#### Objects
##### `header`
The `header` object is a consensus header.
```go
type Header struct {
ParentHash common.Hash `json:"parentHash"`
OmmerHash *common.Hash `json:"sha3Uncles"`
Coinbase *common.Address `json:"miner"`
Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot"`
ReceiptHash *common.Hash `json:"receiptsRoot"`
Bloom types.Bloom `json:"logsBloom"`
Difficulty *big.Int `json:"difficulty"`
Number *big.Int `json:"number" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed"`
Time uint64 `json:"timestamp" gencodec:"required"`
Extra []byte `json:"extraData"`
MixDigest common.Hash `json:"mixHash"`
Nonce *types.BlockNonce `json:"nonce"`
BaseFee *big.Int `json:"baseFeePerGas"`
}
```
##### `ommers`
The `ommers` object is a list of RLP-encoded ommer blocks in hex
representation.
```go
type Ommers []string
```
##### `txs`
The `txs` object is a list of RLP-encoded transactions in hex representation.
```go
type Txs []string
```
##### `clique`
The `clique` object provides the necessary information to complete a clique
seal of the block.
```go
var CliqueInfo struct {
Key *common.Hash `json:"secretKey"`
Voted *common.Address `json:"voted"`
Authorize *bool `json:"authorize"`
Vanity common.Hash `json:"vanity"`
}
```
##### `output`
The `output` object contains two values, the block RLP and the block hash.
```go
type BlockInfo struct {
Rlp []byte `json:"rlp"`
Hash common.Hash `json:"hash"`
}
```
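A small sketch of consuming the `output` object: decode the `rlp` field back into a block and check that it hashes to the reported `hash` (field names as defined above):
```go
package b11rcheck

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

// verifyBlockInfo re-derives the block hash from the RLP and compares it
// with the hash reported by b11r.
func verifyBlockInfo(rlpHex string, reported common.Hash) error {
	raw, err := hexutil.Decode(rlpHex)
	if err != nil {
		return err
	}
	var block types.Block
	if err := rlp.DecodeBytes(raw, &block); err != nil {
		return err
	}
	if block.Hash() != reported {
		return fmt.Errorf("hash mismatch: got %s, want %s", block.Hash(), reported)
	}
	return nil
}
```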
## A Note on Encoding
The encoding of values for `evm` utility attempts to be relatively flexible. It
generally supports hex-encoded or decimal-encoded numeric values, and
hex-encoded byte values (like `common.Address`, `common.Hash`, etc). When in
doubt, the [`execution-apis`](https://github.com/ethereum/execution-apis) way
of encoding should always be accepted.
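A sketch of what "relatively flexible" means in practice for numeric values (a hand-rolled helper, not the tool's own parser): both `"0x10"` and `"16"` should be readable as the same number.
```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseUint64 accepts either hex-encoded ("0x10") or decimal ("16") input,
// mirroring the flexible number encoding the evm tools aim for.
func parseUint64(s string) (uint64, error) {
	if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") {
		return strconv.ParseUint(s[2:], 16, 64)
	}
	return strconv.ParseUint(s, 10, 64)
}

func main() {
	a, _ := parseUint64("0x10")
	b, _ := parseUint64("16")
	fmt.Println(a == b, a) // true 16
}
```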
## Testing
There are many test cases in the [`cmd/evm/testdata`](./testdata) directory.
These fixtures are used to power the `t8n` tests in
[`t8n_test.go`](./t8n_test.go). The best way to verify correctness of new `evm`
implementations is to execute these and verify the output and error codes match
the expected values.
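One way to exercise those fixtures against a new `evm` implementation is to shell out to the binary and assert on the exit code, e.g. the `Frontier+1346` misconfiguration case shown earlier (a sketch; the binary is assumed to live at `./evm`):
```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("./evm", "t8n",
		"--input.alloc=./testdata/1/alloc.json",
		"--input.txs=./testdata/1/txs.json",
		"--input.env=./testdata/1/env.json",
		"--state.fork=Frontier+1346")
	err := cmd.Run()
	// A bad fork specification is expected to fail with exit code 3.
	if exitErr, ok := err.(*exec.ExitError); ok {
		fmt.Println("exit code:", exitErr.ExitCode())
	} else {
		fmt.Println("unexpected result:", err)
	}
}
```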

61
cmd/evm/blockrunner.go Normal file
View File

@ -0,0 +1,61 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"encoding/json"
"errors"
"fmt"
"os"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/tests"
"github.com/urfave/cli/v2"
)
var blockTestCommand = &cli.Command{
Action: blockTestCmd,
Name: "blocktest",
Usage: "executes the given blockchain tests",
ArgsUsage: "<file>",
}
func blockTestCmd(ctx *cli.Context) error {
if len(ctx.Args().First()) == 0 {
return errors.New("path-to-test argument required")
}
// Configure the go-ethereum logger
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
// Load the test content from the input file
src, err := os.ReadFile(ctx.Args().First())
if err != nil {
return err
}
var tests map[string]tests.BlockTest
if err = json.Unmarshal(src, &tests); err != nil {
return err
}
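// Run every blocktest in the file, aborting on the first failure.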
for i, test := range tests {
if err := test.Run(false); err != nil {
return fmt.Errorf("test %v: %w", i, err)
}
}
return nil
}

View File

@ -38,22 +38,23 @@ import (
//go:generate go run github.com/fjl/gencodec -type header -field-override headerMarshaling -out gen_header.go //go:generate go run github.com/fjl/gencodec -type header -field-override headerMarshaling -out gen_header.go
type header struct { type header struct {
ParentHash common.Hash `json:"parentHash"` ParentHash common.Hash `json:"parentHash"`
OmmerHash *common.Hash `json:"sha3Uncles"` OmmerHash *common.Hash `json:"sha3Uncles"`
Coinbase *common.Address `json:"miner"` Coinbase *common.Address `json:"miner"`
Root common.Hash `json:"stateRoot" gencodec:"required"` Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot"` TxHash *common.Hash `json:"transactionsRoot"`
ReceiptHash *common.Hash `json:"receiptsRoot"` ReceiptHash *common.Hash `json:"receiptsRoot"`
Bloom types.Bloom `json:"logsBloom"` Bloom types.Bloom `json:"logsBloom"`
Difficulty *big.Int `json:"difficulty"` Difficulty *big.Int `json:"difficulty"`
Number *big.Int `json:"number" gencodec:"required"` Number *big.Int `json:"number" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"` GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed"` GasUsed uint64 `json:"gasUsed"`
Time uint64 `json:"timestamp" gencodec:"required"` Time uint64 `json:"timestamp" gencodec:"required"`
Extra []byte `json:"extraData"` Extra []byte `json:"extraData"`
MixDigest common.Hash `json:"mixHash"` MixDigest common.Hash `json:"mixHash"`
Nonce *types.BlockNonce `json:"nonce"` Nonce *types.BlockNonce `json:"nonce"`
BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"` BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
} }
type headerMarshaling struct { type headerMarshaling struct {
@ -67,10 +68,11 @@ type headerMarshaling struct {
} }
type bbInput struct { type bbInput struct {
Header *header `json:"header,omitempty"` Header *header `json:"header,omitempty"`
OmmersRlp []string `json:"ommers,omitempty"` OmmersRlp []string `json:"ommers,omitempty"`
TxRlp string `json:"txs,omitempty"` TxRlp string `json:"txs,omitempty"`
Clique *cliqueInput `json:"clique,omitempty"` Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
Clique *cliqueInput `json:"clique,omitempty"`
Ethash bool `json:"-"` Ethash bool `json:"-"`
EthashDir string `json:"-"` EthashDir string `json:"-"`
@ -114,21 +116,22 @@ func (c *cliqueInput) UnmarshalJSON(input []byte) error {
// ToBlock converts i into a *types.Block // ToBlock converts i into a *types.Block
func (i *bbInput) ToBlock() *types.Block { func (i *bbInput) ToBlock() *types.Block {
header := &types.Header{ header := &types.Header{
ParentHash: i.Header.ParentHash, ParentHash: i.Header.ParentHash,
UncleHash: types.EmptyUncleHash, UncleHash: types.EmptyUncleHash,
Coinbase: common.Address{}, Coinbase: common.Address{},
Root: i.Header.Root, Root: i.Header.Root,
TxHash: types.EmptyRootHash, TxHash: types.EmptyTxsHash,
ReceiptHash: types.EmptyRootHash, ReceiptHash: types.EmptyReceiptsHash,
Bloom: i.Header.Bloom, Bloom: i.Header.Bloom,
Difficulty: common.Big0, Difficulty: common.Big0,
Number: i.Header.Number, Number: i.Header.Number,
GasLimit: i.Header.GasLimit, GasLimit: i.Header.GasLimit,
GasUsed: i.Header.GasUsed, GasUsed: i.Header.GasUsed,
Time: i.Header.Time, Time: i.Header.Time,
Extra: i.Header.Extra, Extra: i.Header.Extra,
MixDigest: i.Header.MixDigest, MixDigest: i.Header.MixDigest,
BaseFee: i.Header.BaseFee, BaseFee: i.Header.BaseFee,
WithdrawalsHash: i.Header.WithdrawalsHash,
} }
// Fill optional values. // Fill optional values.
@ -153,7 +156,7 @@ func (i *bbInput) ToBlock() *types.Block {
if header.Difficulty != nil { if header.Difficulty != nil {
header.Difficulty = i.Header.Difficulty header.Difficulty = i.Header.Difficulty
} }
return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers) return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers).WithWithdrawals(i.Withdrawals)
} }
// SealBlock seals the given block using the configured engine. // SealBlock seals the given block using the configured engine.
@ -259,14 +262,15 @@ func BuildBlock(ctx *cli.Context) error {
func readInput(ctx *cli.Context) (*bbInput, error) { func readInput(ctx *cli.Context) (*bbInput, error) {
var ( var (
headerStr = ctx.String(InputHeaderFlag.Name) headerStr = ctx.String(InputHeaderFlag.Name)
ommersStr = ctx.String(InputOmmersFlag.Name) ommersStr = ctx.String(InputOmmersFlag.Name)
txsStr = ctx.String(InputTxsRlpFlag.Name) withdrawalsStr = ctx.String(InputWithdrawalsFlag.Name)
cliqueStr = ctx.String(SealCliqueFlag.Name) txsStr = ctx.String(InputTxsRlpFlag.Name)
ethashOn = ctx.Bool(SealEthashFlag.Name) cliqueStr = ctx.String(SealCliqueFlag.Name)
ethashDir = ctx.String(SealEthashDirFlag.Name) ethashOn = ctx.Bool(SealEthashFlag.Name)
ethashMode = ctx.String(SealEthashModeFlag.Name) ethashDir = ctx.String(SealEthashDirFlag.Name)
inputData = &bbInput{} ethashMode = ctx.String(SealEthashModeFlag.Name)
inputData = &bbInput{}
) )
if ethashOn && cliqueStr != "" { if ethashOn && cliqueStr != "" {
return nil, NewError(ErrorConfig, fmt.Errorf("both ethash and clique sealing specified, only one may be chosen")) return nil, NewError(ErrorConfig, fmt.Errorf("both ethash and clique sealing specified, only one may be chosen"))
@ -312,6 +316,13 @@ func readInput(ctx *cli.Context) (*bbInput, error) {
} }
inputData.OmmersRlp = ommers inputData.OmmersRlp = ommers
} }
if withdrawalsStr != stdinSelector && withdrawalsStr != "" {
var withdrawals []*types.Withdrawal
if err := readFile(withdrawalsStr, "withdrawals", &withdrawals); err != nil {
return nil, err
}
inputData.Withdrawals = withdrawals
}
if txsStr != stdinSelector { if txsStr != stdinSelector {
var txs string var txs string
if err := readFile(txsStr, "txs", &txs); err != nil { if err := readFile(txsStr, "txs", &txs); err != nil {
@ -351,15 +362,14 @@ func readInput(ctx *cli.Context) (*bbInput, error) {
// files // files
func dispatchBlock(ctx *cli.Context, baseDir string, block *types.Block) error { func dispatchBlock(ctx *cli.Context, baseDir string, block *types.Block) error {
raw, _ := rlp.EncodeToBytes(block) raw, _ := rlp.EncodeToBytes(block)
type blockInfo struct { type blockInfo struct {
Rlp hexutil.Bytes `json:"rlp"` Rlp hexutil.Bytes `json:"rlp"`
Hash common.Hash `json:"hash"` Hash common.Hash `json:"hash"`
} }
var enc blockInfo enc := blockInfo{
enc.Rlp = raw Rlp: raw,
enc.Hash = block.Hash() Hash: block.Hash(),
}
b, err := json.MarshalIndent(enc, "", " ") b, err := json.MarshalIndent(enc, "", " ")
if err != nil { if err != nil {
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))

View File

@ -47,15 +47,17 @@ type Prestate struct {
// ExecutionResult contains the execution status after running a state test, any // ExecutionResult contains the execution status after running a state test, any
// error that might have occurred and a dump of the final state if requested. // error that might have occurred and a dump of the final state if requested.
type ExecutionResult struct { type ExecutionResult struct {
StateRoot common.Hash `json:"stateRoot"` StateRoot common.Hash `json:"stateRoot"`
TxRoot common.Hash `json:"txRoot"` TxRoot common.Hash `json:"txRoot"`
ReceiptRoot common.Hash `json:"receiptsRoot"` ReceiptRoot common.Hash `json:"receiptsRoot"`
LogsHash common.Hash `json:"logsHash"` LogsHash common.Hash `json:"logsHash"`
Bloom types.Bloom `json:"logsBloom" gencodec:"required"` Bloom types.Bloom `json:"logsBloom" gencodec:"required"`
Receipts types.Receipts `json:"receipts"` Receipts types.Receipts `json:"receipts"`
Rejected []*rejectedTx `json:"rejected,omitempty"` Rejected []*rejectedTx `json:"rejected,omitempty"`
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"` GasUsed math.HexOrDecimal64 `json:"gasUsed"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
} }
type ommer struct { type ommer struct {
@ -69,12 +71,16 @@ type stEnv struct {
Difficulty *big.Int `json:"currentDifficulty"` Difficulty *big.Int `json:"currentDifficulty"`
Random *big.Int `json:"currentRandom"` Random *big.Int `json:"currentRandom"`
ParentDifficulty *big.Int `json:"parentDifficulty"` ParentDifficulty *big.Int `json:"parentDifficulty"`
ParentBaseFee *big.Int `json:"parentBaseFee,omitempty"`
ParentGasUsed uint64 `json:"parentGasUsed,omitempty"`
ParentGasLimit uint64 `json:"parentGasLimit,omitempty"`
GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
Number uint64 `json:"currentNumber" gencodec:"required"` Number uint64 `json:"currentNumber" gencodec:"required"`
Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` Timestamp uint64 `json:"currentTimestamp" gencodec:"required"`
ParentTimestamp uint64 `json:"parentTimestamp,omitempty"` ParentTimestamp uint64 `json:"parentTimestamp,omitempty"`
BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
Ommers []ommer `json:"ommers,omitempty"` Ommers []ommer `json:"ommers,omitempty"`
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
BaseFee *big.Int `json:"currentBaseFee,omitempty"` BaseFee *big.Int `json:"currentBaseFee,omitempty"`
ParentUncleHash common.Hash `json:"parentUncleHash"` ParentUncleHash common.Hash `json:"parentUncleHash"`
} }
@ -84,6 +90,9 @@ type stEnvMarshaling struct {
Difficulty *math.HexOrDecimal256 Difficulty *math.HexOrDecimal256
Random *math.HexOrDecimal256 Random *math.HexOrDecimal256
ParentDifficulty *math.HexOrDecimal256 ParentDifficulty *math.HexOrDecimal256
ParentBaseFee *math.HexOrDecimal256
ParentGasUsed math.HexOrDecimal64
ParentGasLimit math.HexOrDecimal64
GasLimit math.HexOrDecimal64 GasLimit math.HexOrDecimal64
Number math.HexOrDecimal64 Number math.HexOrDecimal64
Timestamp math.HexOrDecimal64 Timestamp math.HexOrDecimal64
@ -131,7 +140,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
Transfer: core.Transfer, Transfer: core.Transfer,
Coinbase: pre.Env.Coinbase, Coinbase: pre.Env.Coinbase,
BlockNumber: new(big.Int).SetUint64(pre.Env.Number), BlockNumber: new(big.Int).SetUint64(pre.Env.Number),
Time: new(big.Int).SetUint64(pre.Env.Timestamp), Time: pre.Env.Timestamp,
Difficulty: pre.Env.Difficulty, Difficulty: pre.Env.Difficulty,
GasLimit: pre.Env.GasLimit, GasLimit: pre.Env.GasLimit,
GetHash: getHash, GetHash: getHash,
@ -166,7 +175,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
} }
vmConfig.Tracer = tracer vmConfig.Tracer = tracer
vmConfig.Debug = (tracer != nil) vmConfig.Debug = (tracer != nil)
statedb.Prepare(tx.Hash(), txIndex) statedb.SetTxContext(tx.Hash(), txIndex)
txContext := core.NewEVMTxContext(msg) txContext := core.NewEVMTxContext(msg)
snapshot := statedb.Snapshot() snapshot := statedb.Snapshot()
evm := vm.NewEVM(vmContext, txContext, statedb, chainConfig, vmConfig) evm := vm.NewEVM(vmContext, txContext, statedb, chainConfig, vmConfig)
@ -211,7 +220,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
} }
// Set the receipt logs and create the bloom filter. // Set the receipt logs and create the bloom filter.
receipt.Logs = statedb.GetLogs(tx.Hash(), blockHash) receipt.Logs = statedb.GetLogs(tx.Hash(), vmContext.BlockNumber.Uint64(), blockHash)
receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
// These three are non-consensus fields: // These three are non-consensus fields:
//receipt.BlockHash //receipt.BlockHash
@ -223,8 +232,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
txIndex++ txIndex++
} }
statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber)) statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber))
// Add mining reward? // Add mining reward? (-1 means rewards are disabled)
if miningReward > 0 { if miningReward >= 0 {
// Add mining reward. The mining reward may be `0`, which only makes a difference in the cases // Add mining reward. The mining reward may be `0`, which only makes a difference in the cases
// where // where
// - the coinbase suicided, or // - the coinbase suicided, or
@ -247,6 +256,12 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
} }
statedb.AddBalance(pre.Env.Coinbase, minerReward) statedb.AddBalance(pre.Env.Coinbase, minerReward)
} }
// Apply withdrawals
for _, w := range pre.Env.Withdrawals {
// Amount is in gwei, turn into wei
amount := new(big.Int).Mul(new(big.Int).SetUint64(w.Amount), big.NewInt(params.GWei))
statedb.AddBalance(w.Address, amount)
}
// Commit block // Commit block
root, err := statedb.Commit(chainConfig.IsEIP158(vmContext.BlockNumber)) root, err := statedb.Commit(chainConfig.IsEIP158(vmContext.BlockNumber))
if err != nil { if err != nil {
@ -263,6 +278,11 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
Rejected: rejectedTxs, Rejected: rejectedTxs,
Difficulty: (*math.HexOrDecimal256)(vmContext.Difficulty), Difficulty: (*math.HexOrDecimal256)(vmContext.Difficulty),
GasUsed: (math.HexOrDecimal64)(gasUsed), GasUsed: (math.HexOrDecimal64)(gasUsed),
BaseFee: (*math.HexOrDecimal256)(vmContext.BaseFee),
}
if pre.Env.Withdrawals != nil {
h := types.DeriveSha(types.Withdrawals(pre.Env.Withdrawals), trie.NewStackTrie(nil))
execRs.WithdrawalsRoot = &h
} }
return statedb, execRs, nil return statedb, execRs, nil
} }

View File

@ -112,6 +112,10 @@ var (
Name: "input.ommers", Name: "input.ommers",
Usage: "`stdin` or file name of where to find the list of ommer header RLPs to use.", Usage: "`stdin` or file name of where to find the list of ommer header RLPs to use.",
} }
InputWithdrawalsFlag = &cli.StringFlag{
Name: "input.withdrawals",
Usage: "`stdin` or file name of where to find the list of withdrawals to use.",
}
InputTxsRlpFlag = &cli.StringFlag{ InputTxsRlpFlag = &cli.StringFlag{
Name: "input.txs", Name: "input.txs",
Usage: "`stdin` or file name of where to find the transactions list in RLP form.", Usage: "`stdin` or file name of where to find the transactions list in RLP form.",

View File

@ -18,22 +18,23 @@ var _ = (*headerMarshaling)(nil)
// MarshalJSON marshals as JSON. // MarshalJSON marshals as JSON.
func (h header) MarshalJSON() ([]byte, error) { func (h header) MarshalJSON() ([]byte, error) {
type header struct { type header struct {
ParentHash common.Hash `json:"parentHash"` ParentHash common.Hash `json:"parentHash"`
OmmerHash *common.Hash `json:"sha3Uncles"` OmmerHash *common.Hash `json:"sha3Uncles"`
Coinbase *common.Address `json:"miner"` Coinbase *common.Address `json:"miner"`
Root common.Hash `json:"stateRoot" gencodec:"required"` Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot"` TxHash *common.Hash `json:"transactionsRoot"`
ReceiptHash *common.Hash `json:"receiptsRoot"` ReceiptHash *common.Hash `json:"receiptsRoot"`
Bloom types.Bloom `json:"logsBloom"` Bloom types.Bloom `json:"logsBloom"`
Difficulty *math.HexOrDecimal256 `json:"difficulty"` Difficulty *math.HexOrDecimal256 `json:"difficulty"`
Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` Number *math.HexOrDecimal256 `json:"number" gencodec:"required"`
GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"` GasUsed math.HexOrDecimal64 `json:"gasUsed"`
Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"`
Extra hexutil.Bytes `json:"extraData"` Extra hexutil.Bytes `json:"extraData"`
MixDigest common.Hash `json:"mixHash"` MixDigest common.Hash `json:"mixHash"`
Nonce *types.BlockNonce `json:"nonce"` Nonce *types.BlockNonce `json:"nonce"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
} }
var enc header var enc header
enc.ParentHash = h.ParentHash enc.ParentHash = h.ParentHash
@ -52,28 +53,30 @@ func (h header) MarshalJSON() ([]byte, error) {
enc.MixDigest = h.MixDigest enc.MixDigest = h.MixDigest
enc.Nonce = h.Nonce enc.Nonce = h.Nonce
enc.BaseFee = (*math.HexOrDecimal256)(h.BaseFee) enc.BaseFee = (*math.HexOrDecimal256)(h.BaseFee)
enc.WithdrawalsHash = h.WithdrawalsHash
return json.Marshal(&enc) return json.Marshal(&enc)
} }
// UnmarshalJSON unmarshals from JSON. // UnmarshalJSON unmarshals from JSON.
func (h *header) UnmarshalJSON(input []byte) error { func (h *header) UnmarshalJSON(input []byte) error {
type header struct { type header struct {
ParentHash *common.Hash `json:"parentHash"` ParentHash *common.Hash `json:"parentHash"`
OmmerHash *common.Hash `json:"sha3Uncles"` OmmerHash *common.Hash `json:"sha3Uncles"`
Coinbase *common.Address `json:"miner"` Coinbase *common.Address `json:"miner"`
Root *common.Hash `json:"stateRoot" gencodec:"required"` Root *common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot"` TxHash *common.Hash `json:"transactionsRoot"`
ReceiptHash *common.Hash `json:"receiptsRoot"` ReceiptHash *common.Hash `json:"receiptsRoot"`
Bloom *types.Bloom `json:"logsBloom"` Bloom *types.Bloom `json:"logsBloom"`
Difficulty *math.HexOrDecimal256 `json:"difficulty"` Difficulty *math.HexOrDecimal256 `json:"difficulty"`
Number *math.HexOrDecimal256 `json:"number" gencodec:"required"` Number *math.HexOrDecimal256 `json:"number" gencodec:"required"`
GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
GasUsed *math.HexOrDecimal64 `json:"gasUsed"` GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"` Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"`
Extra *hexutil.Bytes `json:"extraData"` Extra *hexutil.Bytes `json:"extraData"`
MixDigest *common.Hash `json:"mixHash"` MixDigest *common.Hash `json:"mixHash"`
Nonce *types.BlockNonce `json:"nonce"` Nonce *types.BlockNonce `json:"nonce"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"` BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
} }
var dec header var dec header
if err := json.Unmarshal(input, &dec); err != nil { if err := json.Unmarshal(input, &dec); err != nil {
@ -131,5 +134,8 @@ func (h *header) UnmarshalJSON(input []byte) error {
if dec.BaseFee != nil { if dec.BaseFee != nil {
h.BaseFee = (*big.Int)(dec.BaseFee) h.BaseFee = (*big.Int)(dec.BaseFee)
} }
if dec.WithdrawalsHash != nil {
h.WithdrawalsHash = dec.WithdrawalsHash
}
return nil return nil
} }

View File

@ -9,6 +9,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
) )
var _ = (*stEnvMarshaling)(nil) var _ = (*stEnvMarshaling)(nil)
@ -20,12 +21,16 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
Random *math.HexOrDecimal256 `json:"currentRandom"` Random *math.HexOrDecimal256 `json:"currentRandom"`
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
ParentBaseFee *math.HexOrDecimal256 `json:"parentBaseFee,omitempty"`
ParentGasUsed math.HexOrDecimal64 `json:"parentGasUsed,omitempty"`
ParentGasLimit math.HexOrDecimal64 `json:"parentGasLimit,omitempty"`
GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"`
BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
Ommers []ommer `json:"ommers,omitempty"` Ommers []ommer `json:"ommers,omitempty"`
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
ParentUncleHash common.Hash `json:"parentUncleHash"` ParentUncleHash common.Hash `json:"parentUncleHash"`
} }
@ -34,12 +39,16 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty) enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
enc.Random = (*math.HexOrDecimal256)(s.Random) enc.Random = (*math.HexOrDecimal256)(s.Random)
enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty) enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty)
enc.ParentBaseFee = (*math.HexOrDecimal256)(s.ParentBaseFee)
enc.ParentGasUsed = math.HexOrDecimal64(s.ParentGasUsed)
enc.ParentGasLimit = math.HexOrDecimal64(s.ParentGasLimit)
enc.GasLimit = math.HexOrDecimal64(s.GasLimit) enc.GasLimit = math.HexOrDecimal64(s.GasLimit)
enc.Number = math.HexOrDecimal64(s.Number) enc.Number = math.HexOrDecimal64(s.Number)
enc.Timestamp = math.HexOrDecimal64(s.Timestamp) enc.Timestamp = math.HexOrDecimal64(s.Timestamp)
enc.ParentTimestamp = math.HexOrDecimal64(s.ParentTimestamp) enc.ParentTimestamp = math.HexOrDecimal64(s.ParentTimestamp)
enc.BlockHashes = s.BlockHashes enc.BlockHashes = s.BlockHashes
enc.Ommers = s.Ommers enc.Ommers = s.Ommers
enc.Withdrawals = s.Withdrawals
enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee) enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee)
enc.ParentUncleHash = s.ParentUncleHash enc.ParentUncleHash = s.ParentUncleHash
return json.Marshal(&enc) return json.Marshal(&enc)
@ -52,12 +61,16 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
Random *math.HexOrDecimal256 `json:"currentRandom"` Random *math.HexOrDecimal256 `json:"currentRandom"`
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
ParentBaseFee *math.HexOrDecimal256 `json:"parentBaseFee,omitempty"`
ParentGasUsed *math.HexOrDecimal64 `json:"parentGasUsed,omitempty"`
ParentGasLimit *math.HexOrDecimal64 `json:"parentGasLimit,omitempty"`
GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"`
BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
Ommers []ommer `json:"ommers,omitempty"` Ommers []ommer `json:"ommers,omitempty"`
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
ParentUncleHash *common.Hash `json:"parentUncleHash"` ParentUncleHash *common.Hash `json:"parentUncleHash"`
} }
@ -78,6 +91,15 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
if dec.ParentDifficulty != nil { if dec.ParentDifficulty != nil {
s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty) s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty)
} }
if dec.ParentBaseFee != nil {
s.ParentBaseFee = (*big.Int)(dec.ParentBaseFee)
}
if dec.ParentGasUsed != nil {
s.ParentGasUsed = uint64(*dec.ParentGasUsed)
}
if dec.ParentGasLimit != nil {
s.ParentGasLimit = uint64(*dec.ParentGasLimit)
}
if dec.GasLimit == nil { if dec.GasLimit == nil {
return errors.New("missing required field 'currentGasLimit' for stEnv") return errors.New("missing required field 'currentGasLimit' for stEnv")
} }
@ -99,6 +121,9 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
if dec.Ommers != nil { if dec.Ommers != nil {
s.Ommers = dec.Ommers s.Ommers = dec.Ommers
} }
if dec.Withdrawals != nil {
s.Withdrawals = dec.Withdrawals
}
if dec.BaseFee != nil { if dec.BaseFee != nil {
s.BaseFee = (*big.Int)(dec.BaseFee) s.BaseFee = (*big.Int)(dec.BaseFee)
} }

View File

@ -140,7 +140,7 @@ func Transaction(ctx *cli.Context) error {
} }
// Check intrinsic gas // Check intrinsic gas
if gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, if gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil,
chainConfig.IsHomestead(new(big.Int)), chainConfig.IsIstanbul(new(big.Int))); err != nil { chainConfig.IsHomestead(new(big.Int)), chainConfig.IsIstanbul(new(big.Int)), chainConfig.IsShanghai(0)); err != nil {
r.Error = err r.Error = err
results = append(results, r) results = append(results, r)
continue continue
@ -171,6 +171,10 @@ func Transaction(ctx *cli.Context) error {
case new(big.Int).Mul(tx.GasFeeCap(), new(big.Int).SetUint64(tx.Gas())).BitLen() > 256: case new(big.Int).Mul(tx.GasFeeCap(), new(big.Int).SetUint64(tx.Gas())).BitLen() > 256:
r.Error = errors.New("gas * maxFeePerGas exceeds 256 bits") r.Error = errors.New("gas * maxFeePerGas exceeds 256 bits")
} }
// Check whether the init code size has been exceeded.
if chainConfig.IsShanghai(0) && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
r.Error = errors.New("max initcode size exceeded")
}
results = append(results, r) results = append(results, r)
} }
out, err := json.MarshalIndent(results, "", " ") out, err := json.MarshalIndent(results, "", " ")
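The extra IsShanghai argument switches on the EIP-3860 initcode accounting inside core.IntrinsicGas (2 gas per 32-byte word of initcode for contract creations), while the explicit length check above rejects creations whose initcode exceeds params.MaxInitCodeSize. A hedged sketch of the surcharge, assuming IntrinsicGas keeps the six-flag form used in the call above and that InitCodeWordGas/MaxInitCodeSize live in params:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Intrinsic gas for a contract creation carrying 1000 bytes of initcode,
	// without and with the Shanghai (EIP-3860) per-word surcharge.
	initcode := make([]byte, 1000)
	pre, _ := core.IntrinsicGas(initcode, nil, true, true, true, false) // isEIP3860 = false
	post, _ := core.IntrinsicGas(initcode, nil, true, true, true, true) // isEIP3860 = true

	fmt.Println(post - pre)                                    // 64: (1000+31)/32 words * 2 gas
	fmt.Println(uint64((1000+31)/32) * params.InitCodeWordGas) // same figure, computed by hand
	fmt.Println(params.MaxInitCodeSize)                        // 49152-byte cap enforced above
}
```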

View File

@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
@ -247,10 +248,23 @@ func Transition(ctx *cli.Context) error {
} }
// Sanity check, to not `panic` in state_transition // Sanity check, to not `panic` in state_transition
if chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) { if chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) {
if prestate.Env.BaseFee == nil { if prestate.Env.BaseFee != nil {
// Already set, base fee takes precedence over parent base fee.
} else if prestate.Env.ParentBaseFee != nil {
parent := &types.Header{
Number: new(big.Int).SetUint64(prestate.Env.Number),
BaseFee: prestate.Env.ParentBaseFee,
GasUsed: prestate.Env.ParentGasUsed,
GasLimit: prestate.Env.ParentGasLimit,
}
prestate.Env.BaseFee = misc.CalcBaseFee(chainConfig, parent)
} else {
return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section")) return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
} }
} }
if chainConfig.IsShanghai(prestate.Env.Number) && prestate.Env.Withdrawals == nil {
return NewError(ErrorConfig, errors.New("Shanghai config but missing 'withdrawals' in env section"))
}
isMerged := chainConfig.TerminalTotalDifficulty != nil && chainConfig.TerminalTotalDifficulty.BitLen() == 0 isMerged := chainConfig.TerminalTotalDifficulty != nil && chainConfig.TerminalTotalDifficulty.BitLen() == 0
env := prestate.Env env := prestate.Env
if isMerged { if isMerged {
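When the env section omits currentBaseFee but supplies the parent values, the tool now reconstructs the base fee with the standard EIP-1559 rule via misc.CalcBaseFee. A minimal sketch of that calculation with illustrative numbers (a parent block that used exactly its 15M gas target leaves the base fee unchanged):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/misc"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	parent := &types.Header{
		Number:   big.NewInt(15_000_000), // safely past London on mainnet
		BaseFee:  new(big.Int).SetUint64(params.InitialBaseFee),
		GasLimit: 30_000_000,
		GasUsed:  15_000_000, // exactly the elastic target (gasLimit / 2)
	}
	// Usage matches the target, so the child base fee equals the parent's.
	fmt.Println(misc.CalcBaseFee(params.MainnetChainConfig, parent)) // 1000000000
}
```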
@ -334,8 +348,9 @@ func (t *txWithKey) UnmarshalJSON(input []byte) error {
// signUnsignedTransactions converts the input txs to canonical transactions. // signUnsignedTransactions converts the input txs to canonical transactions.
// //
// The transactions can have two forms, either // The transactions can have two forms, either
// 1. unsigned or // 1. unsigned or
// 2. signed // 2. signed
//
// For (1), r, s, v, need to be zero, and the `secretKey` needs to be set. // For (1), r, s, v, need to be zero, and the `secretKey` needs to be set.
// If so, we sign it here and now, with the given `secretKey` // If so, we sign it here and now, with the given `secretKey`
// If the condition above is not met, then it's considered a signed transaction. // If the condition above is not met, then it's considered a signed transaction.
@ -393,7 +408,7 @@ func (g Alloc) OnAccount(addr common.Address, dumpAccount state.DumpAccount) {
g[addr] = genesisAccount g[addr] = genesisAccount
} }
// saveFile marshalls the object to the given file // saveFile marshals the object to the given file
func saveFile(baseDir, filename string, data interface{}) error { func saveFile(baseDir, filename string, data interface{}) error {
b, err := json.MarshalIndent(data, "", " ") b, err := json.MarshalIndent(data, "", " ")
if err != nil { if err != nil {

View File

@ -27,13 +27,6 @@ import (
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
var (
gitCommit = "" // Git SHA1 commit hash of the release (set via linker flags)
gitDate = ""
app = flags.NewApp(gitCommit, gitDate, "the evm command line interface")
)
var ( var (
DebugFlag = &cli.BoolFlag{ DebugFlag = &cli.BoolFlag{
Name: "debug", Name: "debug",
@ -183,6 +176,7 @@ var blockBuilderCommand = &cli.Command{
t8ntool.OutputBlockFlag, t8ntool.OutputBlockFlag,
t8ntool.InputHeaderFlag, t8ntool.InputHeaderFlag,
t8ntool.InputOmmersFlag, t8ntool.InputOmmersFlag,
t8ntool.InputWithdrawalsFlag,
t8ntool.InputTxsRlpFlag, t8ntool.InputTxsRlpFlag,
t8ntool.SealCliqueFlag, t8ntool.SealCliqueFlag,
t8ntool.SealEthashFlag, t8ntool.SealEthashFlag,
@ -192,6 +186,8 @@ var blockBuilderCommand = &cli.Command{
}, },
} }
var app = flags.NewApp("the evm command line interface")
func init() { func init() {
app.Flags = []cli.Flag{ app.Flags = []cli.Flag{
BenchFlag, BenchFlag,
@ -222,6 +218,7 @@ func init() {
compileCommand, compileCommand,
disasmCommand, disasmCommand,
runCommand, runCommand,
blockTestCommand,
stateTestCommand, stateTestCommand,
stateTransitionCommand, stateTransitionCommand,
transactionCommand, transactionCommand,
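For context, the registration pattern above (package-level command vars wired into the app in init()) is plain urfave/cli/v2. A stand-alone sketch of the same shape, with illustrative names rather than the evm tool's own:

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

// blockTestCmd mimics how a subcommand such as blockTestCommand is declared.
var blockTestCmd = &cli.Command{
	Name:  "blocktest",
	Usage: "executes the given blockchain tests",
	Action: func(ctx *cli.Context) error {
		fmt.Println("running", ctx.Args().First())
		return nil
	},
}

var app = cli.NewApp()

func init() {
	app.Name = "evm-sketch"
	app.Commands = []*cli.Command{blockTestCmd}
}

func main() {
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```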

View File

@ -209,7 +209,7 @@ func runCmd(ctx *cli.Context) error {
GasPrice: flags.GlobalBig(ctx, PriceFlag.Name), GasPrice: flags.GlobalBig(ctx, PriceFlag.Name),
Value: flags.GlobalBig(ctx, ValueFlag.Name), Value: flags.GlobalBig(ctx, ValueFlag.Name),
Difficulty: genesisConfig.Difficulty, Difficulty: genesisConfig.Difficulty,
Time: new(big.Int).SetUint64(genesisConfig.Timestamp), Time: genesisConfig.Timestamp,
Coinbase: genesisConfig.Coinbase, Coinbase: genesisConfig.Coinbase,
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number), BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
EVMConfig: vm.Config{ EVMConfig: vm.Config{
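This reflects the repo-wide switch of block timestamps from *big.Int to uint64: runtime.Config.Time is now assigned the genesis timestamp directly. A minimal sketch, assuming runtime.Execute keeps its (ret, statedb, err) return shape; the bytecode simply returns the TIMESTAMP opcode's value:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/vm/runtime"
)

func main() {
	// TIMESTAMP; PUSH1 0; MSTORE; PUSH1 32; PUSH1 0; RETURN
	code := []byte{0x42, 0x60, 0x00, 0x52, 0x60, 0x20, 0x60, 0x00, 0xf3}
	ret, _, err := runtime.Execute(code, nil, &runtime.Config{Time: 1681338455})
	if err != nil {
		panic(err)
	}
	fmt.Println(new(big.Int).SetBytes(ret)) // 1681338455
}
```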

View File

@ -22,12 +22,12 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/tests" "github.com/ethereum/go-ethereum/tests"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
@ -41,11 +41,12 @@ var stateTestCommand = &cli.Command{
// StatetestResult contains the execution status after running a state test, any // StatetestResult contains the execution status after running a state test, any
// error that might have occurred and a dump of the final state if requested. // error that might have occurred and a dump of the final state if requested.
type StatetestResult struct { type StatetestResult struct {
Name string `json:"name"` Name string `json:"name"`
Pass bool `json:"pass"` Pass bool `json:"pass"`
Fork string `json:"fork"` Root *common.Hash `json:"stateRoot,omitempty"`
Error string `json:"error,omitempty"` Fork string `json:"fork"`
State *state.Dump `json:"state,omitempty"` Error string `json:"error,omitempty"`
State *state.Dump `json:"state,omitempty"`
} }
func stateTestCmd(ctx *cli.Context) error { func stateTestCmd(ctx *cli.Context) error {
@ -100,8 +101,12 @@ func stateTestCmd(ctx *cli.Context) error {
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true} result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
_, s, err := test.Run(st, cfg, false) _, s, err := test.Run(st, cfg, false)
// print state root for evmlab tracing // print state root for evmlab tracing
if ctx.Bool(MachineFlag.Name) && s != nil { if s != nil {
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", s.IntermediateRoot(false)) root := s.IntermediateRoot(false)
result.Root = &root
if ctx.Bool(MachineFlag.Name) {
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root)
}
} }
if err != nil { if err != nil {
// Test failed, mark as so and dump any state to aid debugging // Test failed, mark as so and dump any state to aid debugging
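The state root now lands in the JSON result itself rather than only in the --machine stderr trace. Making Root a *common.Hash instead of a value is what lets omitempty drop it when no root was computed, since omitempty never omits a fixed-size [32]byte array. A small illustration with a hypothetical stand-in type:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// resultSketch mirrors just the Name/Root part of StatetestResult above.
type resultSketch struct {
	Name string       `json:"name"`
	Root *common.Hash `json:"stateRoot,omitempty"`
}

func main() {
	root := common.HexToHash("0xdeadbeef")
	a, _ := json.Marshal(resultSketch{Name: "stExample"})
	b, _ := json.Marshal(resultSketch{Name: "stExample", Root: &root})
	fmt.Println(string(a)) // {"name":"stExample"}  (root omitted)
	fmt.Println(string(b)) // {"name":"stExample","stateRoot":"0x...deadbeef"}
}
```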

View File

@ -230,7 +230,7 @@ func TestT8n(t *testing.T) {
{ // Test post-merge transition { // Test post-merge transition
base: "./testdata/24", base: "./testdata/24",
input: t8nInput{ input: t8nInput{
"alloc.json", "txs.json", "env.json", "Merged", "", "alloc.json", "txs.json", "env.json", "Merge", "",
}, },
output: t8nOutput{alloc: true, result: true}, output: t8nOutput{alloc: true, result: true},
expOut: "exp.json", expOut: "exp.json",
@ -238,11 +238,27 @@ func TestT8n(t *testing.T) {
{ // Test post-merge transition where input is missing random { // Test post-merge transition where input is missing random
base: "./testdata/24", base: "./testdata/24",
input: t8nInput{ input: t8nInput{
"alloc.json", "txs.json", "env-missingrandom.json", "Merged", "", "alloc.json", "txs.json", "env-missingrandom.json", "Merge", "",
}, },
output: t8nOutput{alloc: false, result: false}, output: t8nOutput{alloc: false, result: false},
expExitCode: 3, expExitCode: 3,
}, },
{ // Test base fee calculation
base: "./testdata/25",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Merge", "",
},
output: t8nOutput{alloc: true, result: true},
expOut: "exp.json",
},
{ // Test withdrawals transition
base: "./testdata/26",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Shanghai", "",
},
output: t8nOutput{alloc: true, result: true},
expOut: "exp.json",
},
} { } {
args := []string{"t8n"} args := []string{"t8n"}
args = append(args, tc.output.get()...) args = append(args, tc.output.get()...)
@ -383,13 +399,14 @@ func TestT9n(t *testing.T) {
} }
type b11rInput struct { type b11rInput struct {
inEnv string inEnv string
inOmmersRlp string inOmmersRlp string
inTxsRlp string inWithdrawals string
inClique string inTxsRlp string
ethash bool inClique string
ethashMode string ethash bool
ethashDir string ethashMode string
ethashDir string
} }
func (args *b11rInput) get(base string) []string { func (args *b11rInput) get(base string) []string {
@ -402,6 +419,10 @@ func (args *b11rInput) get(base string) []string {
out = append(out, "--input.ommers") out = append(out, "--input.ommers")
out = append(out, fmt.Sprintf("%v/%v", base, opt)) out = append(out, fmt.Sprintf("%v/%v", base, opt))
} }
if opt := args.inWithdrawals; opt != "" {
out = append(out, "--input.withdrawals")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
if opt := args.inTxsRlp; opt != "" { if opt := args.inTxsRlp; opt != "" {
out = append(out, "--input.txs") out = append(out, "--input.txs")
out = append(out, fmt.Sprintf("%v/%v", base, opt)) out = append(out, fmt.Sprintf("%v/%v", base, opt))
@ -472,6 +493,16 @@ func TestB11r(t *testing.T) {
}, },
expOut: "exp.json", expOut: "exp.json",
}, },
{ // block with withdrawals
base: "./testdata/27",
input: b11rInput{
inEnv: "header.json",
inOmmersRlp: "ommers.json",
inWithdrawals: "withdrawals.json",
inTxsRlp: "txs.rlp",
},
expOut: "exp.json",
},
} { } {
args := []string{"b11r"} args := []string{"b11r"}
args = append(args, tc.input.get(tc.base)...) args = append(args, tc.input.get(tc.base)...)
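The new withdrawals inputs (the env section's "withdrawals" list for t8n and b11r's --input.withdrawals file, exercised by testdata/26 and testdata/27) are JSON lists of EIP-4895 withdrawal objects with amounts in Gwei. A hedged sketch of the expected shape, produced from types.Withdrawal; the concrete values are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	ws := []*types.Withdrawal{{
		Index:     0,
		Validator: 42,
		Address:   common.HexToAddress("0x8888f1f195afa192cfee860698584c030f4c9db1"),
		Amount:    1_000_000_000, // 1 ETH, expressed in Gwei per EIP-4895
	}}
	out, _ := json.MarshalIndent(ws, "", "  ")
	fmt.Println(string(out)) // fields marshal as hex: index, validatorIndex, address, amount
}
```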

View File

@ -34,6 +34,7 @@
} }
], ],
"currentDifficulty": "0x20000", "currentDifficulty": "0x20000",
"gasUsed": "0x109a0" "gasUsed": "0x109a0",
"currentBaseFee": "0x36b"
} }
} }

View File

@ -7,6 +7,7 @@
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"currentDifficulty": "0x2000020000000", "currentDifficulty": "0x2000020000000",
"receipts": [], "receipts": [],
"gasUsed": "0x0" "gasUsed": "0x0",
"currentBaseFee": "0x500"
} }
} }

View File

@ -7,6 +7,7 @@
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [], "receipts": [],
"currentDifficulty": "0x1ff8020000000", "currentDifficulty": "0x1ff8020000000",
"gasUsed": "0x0" "gasUsed": "0x0",
"currentBaseFee": "0x500"
} }
} }

View File

@ -7,6 +7,7 @@
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [], "receipts": [],
"currentDifficulty": "0x1ff9000000000", "currentDifficulty": "0x1ff9000000000",
"gasUsed": "0x0" "gasUsed": "0x0",
"currentBaseFee": "0x500"
} }
} }

View File

@ -7,6 +7,7 @@
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"currentDifficulty": "0x2000000200000", "currentDifficulty": "0x2000000200000",
"receipts": [], "receipts": [],
"gasUsed": "0x0" "gasUsed": "0x0",
"currentBaseFee": "0x500"
} }
} }

Some files were not shown because too many files have changed in this diff.