Compare commits
3 commits: v1.11.6-st ... v1.10.20-s

| Author | SHA1 | Date |
| --- | --- | --- |
|  | 8c4e4fb8de |  |
|  | 8c6cd1e66e |  |
|  | b609d4d6c2 |  |

In the file diffs below, lines prefixed with `-` come from the base tag (v1.11.6-st) and lines prefixed with `+` come from the head tag (v1.10.20-s).
.github/CODEOWNERS (vendored): 4 changes

@@ -5,14 +5,16 @@ accounts/usbwallet @karalabe
 accounts/scwallet @gballet
 accounts/abi @gballet @MariusVanDerWijden
 cmd/clef @holiman
+cmd/puppeth @karalabe
 consensus @karalabe
 core/ @karalabe @holiman @rjl493456442
 eth/ @karalabe @holiman @rjl493456442
 eth/catalyst/ @gballet
 eth/tracers/ @s1na
-graphql/ @s1na
+graphql/ @gballet @s1na
 les/ @zsfelfoldi @rjl493456442
 light/ @zsfelfoldi @rjl493456442
+mobile/ @karalabe @ligi
 node/ @fjl
 p2p/ @fjl @zsfelfoldi
 rpc/ @fjl @holiman
.github/CONTRIBUTING.md (vendored): 2 changes

@@ -35,6 +35,6 @@ and help.
 
 ## Configuration, dependencies, and tests
 
-Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/geth-developer/dev-guide)
+Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/devguide)
 for more details on configuring your environment, managing project dependencies
 and testing procedures.
.github/ISSUE_TEMPLATE/bug.md (vendored): 1 change

@@ -9,7 +9,6 @@ assignees: ''
 #### System information
 
 Geth version: `geth version`
-CL client & version: e.g. lighthouse/nimbus/prysm@v1.0.0
 OS & Version: Windows/Linux/OSX
 Commit hash : (if `develop`)
 
.github/workflows/checks.yml (vendored): 2 changes

@@ -8,7 +8,7 @@ jobs:
     steps:
       - uses: actions/setup-go@v3
         with:
-          go-version: "1.19"
+          go-version: ">=1.18.0"
          check-latest: true
       - uses: actions/checkout@v2
       - name: Run linter
.github/workflows/manual_binary_publish.yaml (vendored): 21 changes (file removed on the head side)

@@ -1,21 +0,0 @@
-name: MANUAL Override Publish geth binary to release
-on:
-  workflow_dispatch:
-    inputs:
-      giteaPublishTag:
-        description: 'Package to publish TO on gitea; e.g. v1.10.25-statediff-4.2.1-alpha'
-        required: true
-      cercContainerTag:
-        description: 'Tagged Container to extract geth binary FROM'
-        required: true
-jobs:
-  build:
-    name: Manual override publish of geth binary FROM tagged release TO TAGGED package on git.vdb.to
-    runs-on: ubuntu-latest
-    steps:
-      - name: Copy ethereum binary file
-        run: docker run --rm --entrypoint cat git.vdb.to/cerc-io/go-ethereum/go-ethereum:${{ github.event.inputs.cercContainerTag }} /usr/local/bin/geth > geth-linux-amd64
-      - name: curl
-        uses: enflo/curl-action@master
-        with:
-          curl: --user cerccicd:${{ secrets.GITEA_TOKEN }} --upload-file geth-linux-amd64 https://git.vdb.to/api/packages/cerc-io/generic/go-ethereum/${{ github.event.inputs.giteaPublishTag }}/geth-linux-amd64
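The removed workflow boils down to two shell steps; a minimal sketch of doing the same thing by hand, with the container tag, package tag, and `cerccicd:$GITEA_TOKEN` credentials standing in for the workflow inputs and secrets (values shown are placeholders, not part of the diff):

```shell
# Extract the geth binary from a tagged container image, then upload it to the
# Gitea generic package registry, mirroring the two workflow steps above.
CONTAINER_TAG=v1.10.25-statediff-4.2.1-alpha   # placeholder: image tag to pull FROM
PUBLISH_TAG=v1.10.25-statediff-4.2.1-alpha     # placeholder: package tag to publish TO

docker run --rm --entrypoint cat \
  git.vdb.to/cerc-io/go-ethereum/go-ethereum:${CONTAINER_TAG} \
  /usr/local/bin/geth > geth-linux-amd64

curl --user cerccicd:${GITEA_TOKEN} \
  --upload-file geth-linux-amd64 \
  "https://git.vdb.to/api/packages/cerc-io/generic/go-ethereum/${PUBLISH_TAG}/geth-linux-amd64"
```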
.github/workflows/manual_publish.yaml (vendored): 21 changes (file removed on the head side)

@@ -1,21 +0,0 @@
-name: MANUAL Override Publish geth binary to release
-on:
-  workflow_dispatch:
-    inputs:
-      giteaPublishTag:
-        description: 'Package to publish TO on gitea; e.g. v1.10.25-statediff-4.2.1-alpha'
-        required: true
-      cercContainerTag:
-        description: 'Tagged Container to extract geth binary FROM'
-        required: true
-jobs:
-  build:
-    name: Manual override publish of geth binary FROM tagged release TO TAGGED package on git.vdb.to
-    runs-on: ubuntu-latest
-    steps:
-      - name: Copy ethereum binary file
-        run: docker run --rm --entrypoint cat git.vdb.to/cerc-io/go-ethereum/go-ethereum:${{ github.event.inputs.cercContainerTag }} /usr/local/bin/geth > geth-linux-amd64
-      - name: curl
-        uses: enflo/curl-action@master
-        with:
-          curl: --user circcicd:${{ secrets.GITEA_TOKEN }} --upload-file geth-linux-amd64 https://git.vdb.to/api/packages/cerc-io/generic/go-ethereum/${{ github.event.inputs.giteaPublishTag }}/geth-linux-amd64
.github/workflows/publish.yaml (vendored): 55 changes

@@ -12,25 +12,50 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - name: Run docker build
-        run: docker build -t cerc-io/go-ethereum -f Dockerfile .
+        run: docker build -t vulcanize/go-ethereum -f Dockerfile .
+      - name: Get the version
+        id: vars
+        run: echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
+      - name: Tag docker image
+        run: docker tag vulcanize/go-ethereum docker.pkg.github.com/vulcanize/go-ethereum/go-ethereum:${{steps.vars.outputs.sha}}
+      - name: Docker Login
+        run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
+      - name: Docker Push
+        run: docker push docker.pkg.github.com/vulcanize/go-ethereum/go-ethereum:${{steps.vars.outputs.sha}}
+  push_to_registries:
+    name: Publish assets to Release
+    runs-on: ubuntu-latest
+    needs: build
+    steps:
       - name: Get the version
         id: vars
         run: |
           echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
           echo ::set-output name=tag::$(echo ${GITHUB_REF#refs/tags/})
-      - name: Tag docker image
-        run: docker tag cerc-io/go-ethereum git.vdb.to/cerc-io/go-ethereum/go-ethereum:${{steps.vars.outputs.sha}}
-      - name: Tag docker image
-        run: docker tag git.vdb.to/cerc-io/go-ethereum/go-ethereum:${{steps.vars.outputs.sha}} git.vdb.to/cerc-io/go-ethereum/go-ethereum:${{steps.vars.outputs.tag}}
-      - name: Docker Login
-        run: echo ${{ secrets.GITEA_PUBLISH_TOKEN }} | docker login https://git.vdb.to -u cerccicd --password-stdin
-      - name: Docker Push
-        run: docker push git.vdb.to/cerc-io/go-ethereum/go-ethereum:${{steps.vars.outputs.sha}}
-      - name: Docker Push TAGGED
-        run: docker push git.vdb.to/cerc-io/go-ethereum/go-ethereum:${{steps.vars.outputs.tag}}
+      - name: Docker Login to Github Registry
+        run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
+      - name: Docker Pull
+        run: docker pull docker.pkg.github.com/vulcanize/go-ethereum/go-ethereum:${{steps.vars.outputs.sha}}
       - name: Copy ethereum binary file
-        run: docker run --rm --entrypoint cat git.vdb.to/cerc-io/go-ethereum/go-ethereum:${{steps.vars.outputs.sha}} /usr/local/bin/geth > geth-linux-amd64
-      - name: curl
-        uses: enflo/curl-action@master
+        run: docker run --rm --entrypoint cat docker.pkg.github.com/vulcanize/go-ethereum/go-ethereum:${{steps.vars.outputs.sha}} /usr/local/bin/geth > geth-linux-amd64
+      - name: Docker Login to Docker Registry
+        run: echo ${{ secrets.VULCANIZEJENKINS_PAT }} | docker login -u vulcanizejenkins --password-stdin
+      - name: Tag docker image
+        run: docker tag docker.pkg.github.com/vulcanize/go-ethereum/go-ethereum:${{steps.vars.outputs.sha}} vulcanize/vdb-geth:${{steps.vars.outputs.tag}}
+      - name: Docker Push to Docker Hub
+        run: docker push vulcanize/vdb-geth:${{steps.vars.outputs.tag}}
+      - name: Get release
+        id: get_release
+        uses: bruceadams/get-release@v1.2.0
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Upload Release Asset
+        id: upload-release-asset
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
-          curl: --user cerccicd:${{ secrets.GITEA_PUBLISH_TOKEN }} --upload-file geth-linux-amd64 https://git.vdb.to/api/packages/cerc-io/generic/go-ethereum/${{steps.vars.outputs.tag}}/geth-linux-amd64
+          upload_url: ${{ steps.get_release.outputs.upload_url }}
+          asset_path: geth-linux-amd64
+          asset_name: geth-linux-amd64
+          asset_content_type: application/octet-stream
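As a rough local equivalent of the base-side (cerc-io) build, tag, and push steps above, with the registry names taken from the workflow and the version derivation approximated outside of Actions (the `git describe` call is an assumption standing in for `${GITHUB_REF#refs/tags/}`):

```shell
# Derive a short SHA and a tag name, then build, tag, and push the image the
# same way the workflow does; registry credentials are assumed to be configured
# already via `docker login`.
SHA=$(git rev-parse --short=7 HEAD)        # workflow uses ${GITHUB_SHA:0:7}
TAG=$(git describe --tags --exact-match)   # workflow uses ${GITHUB_REF#refs/tags/}

docker build -t cerc-io/go-ethereum -f Dockerfile .
docker tag cerc-io/go-ethereum git.vdb.to/cerc-io/go-ethereum/go-ethereum:${SHA}
docker tag git.vdb.to/cerc-io/go-ethereum/go-ethereum:${SHA} \
           git.vdb.to/cerc-io/go-ethereum/go-ethereum:${TAG}
docker push git.vdb.to/cerc-io/go-ethereum/go-ethereum:${SHA}
docker push git.vdb.to/cerc-io/go-ethereum/go-ethereum:${TAG}
```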
.github/workflows/tests.yml (vendored): 46 changes

@@ -4,8 +4,8 @@ on:
   workflow_call:
 
 env:
-  stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref || 'e62830c982d4dfc5f3c1c2b12c1754a7e9b538f1'}}
-  ipld-eth-db-ref: ${{ github.event.inputs.ipld-eth-db-ref || '1b922dbff350bfe2a9aec5fe82079e9d855ea7ed' }}
+  stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref || '382aca8e42bc5e33f301f77cdd2e09cc80602fc3'}}
+  ipld-eth-db-ref: ${{ github.event.inputs.ipld-ethcl-db-ref || '65b7bee7a6757c1fc527c8bfdc4f99ab915fcf36' }}
   GOPATH: /tmp/go
 
 jobs:

@@ -15,7 +15,7 @@ jobs:
     steps:
       - uses: actions/checkout@v2
       - name: Run docker build
-        run: docker build -t cerc-io/go-ethereum .
+        run: docker build -t vulcanize/go-ethereum .
 
   geth-unit-test:
     name: Run geth unit test

@@ -28,7 +28,7 @@ jobs:
 
       - uses: actions/setup-go@v3
         with:
-          go-version: "1.19"
+          go-version: ">=1.18.0"
           check-latest: true
 
       - name: Checkout code

@@ -47,26 +47,16 @@ jobs:
 
       - uses: actions/setup-go@v3
         with:
-          go-version: "1.19"
+          go-version: ">=1.18.0"
           check-latest: true
 
       - name: Checkout code
         uses: actions/checkout@v2
 
-      - uses: actions/checkout@v3
-        with:
-          ref: ${{ env.ipld-eth-db-ref }}
-          repository: cerc-io/ipld-eth-db
-          path: "./ipld-eth-db/"
-          fetch-depth: 0
-
-      - name: Build ipld-eth-db
-        run: |
-          docker build -f ./ipld-eth-db/Dockerfile ./ipld-eth-db/ -t cerc/ipld-eth-db:local
 
       - name: Run docker compose
         run: |
           docker-compose up -d
 
       - name: Give the migration a few seconds
         run: sleep 30;
 

@@ -82,7 +72,7 @@ jobs:
 
       - uses: actions/setup-go@v3
         with:
-          go-version: "1.19"
+          go-version: ">=1.18.0"
           check-latest: true
 
       - name: Checkout code

@@ -94,13 +84,13 @@ jobs:
         with:
           ref: ${{ env.stack-orchestrator-ref }}
           path: "./stack-orchestrator/"
-          repository: cerc-io/mshaw_stack_hack
+          repository: vulcanize/stack-orchestrator
           fetch-depth: 0
 
       - uses: actions/checkout@v3
         with:
           ref: ${{ env.ipld-eth-db-ref }}
-          repository: cerc-io/ipld-eth-db
+          repository: vulcanize/ipld-eth-db
           path: "./ipld-eth-db/"
           fetch-depth: 0
 

@@ -109,13 +99,14 @@ jobs:
           echo vulcanize_ipld_eth_db=$GITHUB_WORKSPACE/ipld-eth-db/ > $GITHUB_WORKSPACE/config.sh
           echo vulcanize_go_ethereum=$GITHUB_WORKSPACE/go-ethereum/ >> $GITHUB_WORKSPACE/config.sh
           echo db_write=true >> $GITHUB_WORKSPACE/config.sh
-          echo genesis_file_path=start-up-files/go-ethereum/genesis.json >> $GITHUB_WORKSPACE/config.sh
           cat $GITHUB_WORKSPACE/config.sh
 
       - name: Compile Geth
         run: |
           cd $GITHUB_WORKSPACE/stack-orchestrator/helper-scripts
           ./compile-geth.sh -e docker -p $GITHUB_WORKSPACE/config.sh
           cd -
 
       - name: Run docker compose
         run: |
           docker-compose \

@@ -123,28 +114,27 @@ jobs:
             -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" \
             --env-file $GITHUB_WORKSPACE/config.sh \
             up -d --build
 
       - name: Make sure the /root/transaction_info/STATEFUL_TEST_DEPLOYED_ADDRESS exists within a certain time frame.
         shell: bash
         run: |
           COUNT=0
           ATTEMPTS=15
-          docker logs local_go-ethereum_1
-          docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" exec go-ethereum ps aux
-          until $(docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" cp go-ethereum:/root/transaction_info/STATEFUL_TEST_DEPLOYED_ADDRESS ./STATEFUL_TEST_DEPLOYED_ADDRESS) || [[ $COUNT -eq $ATTEMPTS ]]; do echo -e "$(( COUNT++ ))... \c"; sleep 10; done
+          until $(docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" cp go-ethereum:/root/transaction_info/STATEFUL_TEST_DEPLOYED_ADDRESS ./STATEFUL_TEST_DEPLOYED_ADDRESS) || [[ $COUNT -eq $ATTEMPTS ]]; do echo -e "$(( COUNT++ ))... \c"; sleep 10; done
           [[ $COUNT -eq $ATTEMPTS ]] && echo "Could not find the successful contract deployment" && (exit 1)
           cat ./STATEFUL_TEST_DEPLOYED_ADDRESS
-          echo "Address length: `wc ./STATEFUL_TEST_DEPLOYED_ADDRESS`"
           sleep 15;
 
       - name: Create a new transaction.
         shell: bash
         run: |
-          docker logs local_go-ethereum_1
-          docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" exec go-ethereum /bin/bash /root/transaction_info/NEW_TRANSACTION
+          docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" exec go-ethereum /bin/bash /root/transaction_info/NEW_TRANSACTION
           echo $?
 
       - name: Make sure we see entries in the header table
         shell: bash
         run: |
-          rows=$(docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" exec ipld-eth-db psql -U vdbm -d vulcanize_testing -AXqtc "SELECT COUNT(*) FROM eth.header_cids")
+          rows=$(docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" exec ipld-eth-db psql -U vdbm -d vulcanize_testing -AXqtc "SELECT COUNT(*) FROM eth.header_cids")
           [[ "$rows" -lt "1" ]] && echo "We could not find any rows in postgres table." && (exit 1)
           echo $rows
-          docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" exec ipld-eth-db psql -U vdbm -d vulcanize_testing -AXqtc "SELECT * FROM eth.header_cids"
+          docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" exec ipld-eth-db psql -U vdbm -d vulcanize_testing -AXqtc "SELECT * FROM eth.header_cids"
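The integration job's wait step relies on a simple poll-until-success pattern; a stripped-down sketch of that loop, with the compose file path shortened for readability (the real workflow passes the full `$GITHUB_WORKSPACE/stack-orchestrator/...` paths):

```shell
# Poll up to ATTEMPTS times for the deployed-address file to become copyable
# out of the go-ethereum container, then fail if it never appeared.
COUNT=0
ATTEMPTS=15
until docker compose -f docker-compose-go-ethereum.yml \
        cp go-ethereum:/root/transaction_info/STATEFUL_TEST_DEPLOYED_ADDRESS \
        ./STATEFUL_TEST_DEPLOYED_ADDRESS || [[ $COUNT -eq $ATTEMPTS ]]; do
  echo -e "$(( COUNT++ ))... \c"
  sleep 10
done
[[ $COUNT -eq $ATTEMPTS ]] && echo "Could not find the successful contract deployment" && exit 1
cat ./STATEFUL_TEST_DEPLOYED_ADDRESS
```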
.gitignore (vendored): 8 changes

@@ -47,7 +47,6 @@ profile.cov
 /dashboard/assets/package-lock.json
 
 **/yarn-error.log
-logs/
 foundry/deployments/local-private-network/geth-linux-amd64
 foundry/projects/local-private-network/geth-linux-amd64
 

@@ -55,11 +54,8 @@ foundry/projects/local-private-network/geth-linux-amd64
 related-repositories/foundry-test/**
 related-repositories/hive/**
 related-repositories/ipld-eth-db/**
-related-repositories/foundry-test/
-related-repositories/ipld-eth-db/
-
-# files generated by statediffing tests
 statediff/indexer/database/sql/statediffing_test_file.sql
 statediff/statediffing_test_file.sql
 statediff/known_gaps.sql
-statediff/indexer/database/file/statediffing_test
+related-repositories/foundry-test/
+related-repositories/ipld-eth-db/
.golangci.yml

@@ -12,6 +12,7 @@ run:
 linters:
   disable-all: true
   enable:
+    - deadcode
     - goconst
     - goimports
     - gosimple

@@ -19,16 +20,17 @@ linters:
     - ineffassign
     - misspell
     - unconvert
+    - varcheck
     - typecheck
     - unused
     - staticcheck
     - bidichk
     - durationcheck
     - exportloopref
-    - whitespace
+    - gosec
 
-  # - structcheck # lots of false positives
-  # - errcheck #lot of false positives
+  #- structcheck # lots of false positives
+  #- errcheck #lot of false positives
   # - contextcheck
   # - errchkjson # lots of false positives
   # - errorlint # this check crashes

@@ -42,6 +44,11 @@ linters-settings:
   goconst:
     min-len: 3 # minimum length of string constant
     min-occurrences: 6 # minimum number of occurrences
+  gosec:
+    excludes:
+      - G404 # Use of weak random number generator - lots of FP
+      - G107 # Potential http request -- those are intentional
+      - G306 # G306: Expect WriteFile permissions to be 0600 or less
 
 issues:
   exclude-rules:

@@ -50,15 +57,16 @@ issues:
         - deadcode
         - staticcheck
     - path: internal/build/pgp.go
-      text: 'SA1019: "golang.org/x/crypto/openpgp" is deprecated: this package is unmaintained except for security fixes.'
+      text: 'SA1019: package golang.org/x/crypto/openpgp is deprecated'
     - path: core/vm/contracts.go
-      text: 'SA1019: "golang.org/x/crypto/ripemd160" is deprecated: RIPEMD-160 is a legacy hash and should not be used for new applications.'
+      text: 'SA1019: package golang.org/x/crypto/ripemd160 is deprecated'
     - path: accounts/usbwallet/trezor.go
-      text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
+      text: 'SA1019: package github.com/golang/protobuf/proto is deprecated'
     - path: accounts/usbwallet/trezor/
-      text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
+      text: 'SA1019: package github.com/golang/protobuf/proto is deprecated'
   exclude:
     - 'SA1019: event.TypeMux is deprecated: use Feed'
     - 'SA1019: strings.Title is deprecated'
     - 'SA1019: strings.Title has been deprecated since Go 1.18 and an alternative has been available since Go 1.0: The rule Title uses for word boundaries does not handle Unicode punctuation properly. Use golang.org/x/text/cases instead.'
     - 'SA1029: should not use built-in type string as key for value'
+    - 'G306: Expect WriteFile permissions to be 0600 or less'
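The lint configuration above is picked up automatically when the linter is run from the repository root, for example:

```shell
# Run the configured linters locally; golangci-lint reads .golangci.yml from
# the working directory by default.
golangci-lint run ./...
```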
.travis.yml: 136 changes

@@ -5,15 +5,18 @@ jobs:
   allow_failures:
     - stage: build
       os: osx
+      go: 1.17.x
       env:
         - azure-osx
+        - azure-ios
+        - cocoapods-ios
 
   include:
     # This builder only tests code linters on latest version of Go
     - stage: lint
       os: linux
       dist: bionic
-      go: 1.20.x
+      go: 1.18.x
       env:
         - lint
       git:

@@ -28,7 +31,7 @@ jobs:
       os: linux
       arch: amd64
       dist: bionic
-      go: 1.20.x
+      go: 1.18.x
       env:
         - docker
       services:

@@ -45,7 +48,7 @@ jobs:
       os: linux
       arch: arm64
       dist: bionic
-      go: 1.20.x
+      go: 1.18.x
       env:
         - docker
       services:

@@ -57,13 +60,37 @@ jobs:
       script:
         - go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go
 
+    # This builder does the Ubuntu PPA upload
+    - stage: build
+      if: type = push
+      os: linux
+      dist: bionic
+      go: 1.18.x
+      env:
+        - ubuntu-ppa
+        - GO111MODULE=on
+      git:
+        submodules: false # avoid cloning ethereum/tests
+      addons:
+        apt:
+          packages:
+            - devscripts
+            - debhelper
+            - dput
+            - fakeroot
+            - python-bzrlib
+            - python-paramiko
+      script:
+        - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
+        - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"
+
     # This builder does the Linux Azure uploads
     - stage: build
       if: type = push
       os: linux
       dist: bionic
       sudo: required
-      go: 1.20.x
+      go: 1.18.x
       env:
         - azure-linux
         - GO111MODULE=on

@@ -93,13 +120,53 @@ jobs:
         - go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc
         - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
 
-    # This builder does the OSX Azure uploads
+    # This builder does the Android Maven and Azure uploads
+    - stage: build
+      if: type = push
+      os: linux
+      dist: bionic
+      addons:
+        apt:
+          packages:
+            - openjdk-8-jdk
+      env:
+        - azure-android
+        - maven-android
+        - GO111MODULE=on
+      git:
+        submodules: false # avoid cloning ethereum/tests
+      before_install:
+        # Install Android and it's dependencies manually, Travis is stale
+        - export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
+        - curl https://dl.google.com/android/repository/commandlinetools-linux-6858069_latest.zip -o android.zip
+        - unzip -q android.zip -d $HOME/sdk && rm android.zip
+        - mv $HOME/sdk/cmdline-tools $HOME/sdk/latest && mkdir $HOME/sdk/cmdline-tools && mv $HOME/sdk/latest $HOME/sdk/cmdline-tools
+        - export PATH=$PATH:$HOME/sdk/cmdline-tools/latest/bin
+        - export ANDROID_HOME=$HOME/sdk
+
+        - yes | sdkmanager --licenses >/dev/null
+        - sdkmanager "platform-tools" "platforms;android-15" "platforms;android-19" "platforms;android-24" "ndk-bundle"
+
+        # Install Go to allow building with
+        - curl https://dl.google.com/go/go1.18.linux-amd64.tar.gz | tar -xz
+        - export PATH=`pwd`/go/bin:$PATH
+        - export GOROOT=`pwd`/go
+        - export GOPATH=$HOME/go
+      script:
+        # Build the Android archive and upload it to Maven Central and Azure
+        - mkdir -p $GOPATH/src/github.com/ethereum
+        - ln -s `pwd` $GOPATH/src/github.com/ethereum/go-ethereum
+        - go run build/ci.go aar -signer ANDROID_SIGNING_KEY -signify SIGNIFY_KEY -deploy https://oss.sonatype.org -upload gethstore/builds
+
+    # This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
     - stage: build
       if: type = push
       os: osx
-      go: 1.20.x
+      go: 1.18.x
       env:
         - azure-osx
+        - azure-ios
+        - cocoapods-ios
         - GO111MODULE=on
       git:
         submodules: false # avoid cloning ethereum/tests

@@ -107,67 +174,58 @@ jobs:
         - go run build/ci.go install -dlgo
         - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
 
+        # Build the iOS framework and upload it to CocoaPods and Azure
+        - gem uninstall cocoapods -a -x
+        - gem install cocoapods
+
+        - mv ~/.cocoapods/repos/master ~/.cocoapods/repos/master.bak
+        - sed -i '.bak' 's/repo.join/!repo.join/g' $(dirname `gem which cocoapods`)/cocoapods/sources_manager.rb
+        - if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then git clone --depth=1 https://github.com/CocoaPods/Specs.git ~/.cocoapods/repos/master && pod setup --verbose; fi
+
+        - xctool -version
+        - xcrun simctl list
+
+        # Workaround for https://github.com/golang/go/issues/23749
+        - export CGO_CFLAGS_ALLOW='-fmodules|-fblocks|-fobjc-arc'
+        - go run build/ci.go xcode -signer IOS_SIGNING_KEY -signify SIGNIFY_KEY -deploy trunk -upload gethstore/builds
+
     # These builders run the tests
     - stage: build
       os: linux
       arch: amd64
       dist: bionic
-      go: 1.20.x
+      go: 1.18.x
       env:
         - GO111MODULE=on
       script:
-        - go run build/ci.go test $TEST_PACKAGES
+        - go run build/ci.go test -coverage $TEST_PACKAGES
 
     - stage: build
       if: type = pull_request
       os: linux
       arch: arm64
       dist: bionic
-      go: 1.19.x
+      go: 1.18.x
       env:
         - GO111MODULE=on
       script:
-        - go run build/ci.go test $TEST_PACKAGES
+        - go run build/ci.go test -coverage $TEST_PACKAGES
 
     - stage: build
       os: linux
       dist: bionic
-      go: 1.19.x
+      go: 1.17.x
       env:
         - GO111MODULE=on
       script:
-        - go run build/ci.go test $TEST_PACKAGES
+        - go run build/ci.go test -coverage $TEST_PACKAGES
 
-    # This builder does the Ubuntu PPA nightly uploads
-    - stage: build
-      if: type = cron || (type = push && tag ~= /^v[0-9]/)
-      os: linux
-      dist: bionic
-      go: 1.20.x
-      env:
-        - ubuntu-ppa
-        - GO111MODULE=on
-      git:
-        submodules: false # avoid cloning ethereum/tests
-      addons:
-        apt:
-          packages:
-            - devscripts
-            - debhelper
-            - dput
-            - fakeroot
-            - python-bzrlib
-            - python-paramiko
-      script:
-        - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
-        - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"
-
     # This builder does the Azure archive purges to avoid accumulating junk
     - stage: build
       if: type = cron
       os: linux
       dist: bionic
-      go: 1.20.x
+      go: 1.18.x
       env:
         - azure-purge
        - GO111MODULE=on

@@ -181,9 +239,9 @@ jobs:
       if: type = cron
       os: linux
       dist: bionic
-      go: 1.20.x
+      go: 1.18.x
       env:
         - GO111MODULE=on
       script:
-        - go run build/ci.go test -race $TEST_PACKAGES
+        - go run build/ci.go test -race -coverage $TEST_PACKAGES
 
Dockerfile

@@ -4,7 +4,7 @@ ARG VERSION=""
 ARG BUILDNUM=""
 
 # Build Geth in a stock Go builder container
-FROM golang:1.20-alpine as builder
+FROM golang:1.18-alpine as builder
 
 RUN apk add --no-cache gcc musl-dev linux-headers git
 

@@ -14,7 +14,7 @@ COPY go.sum /go-ethereum/
 RUN cd /go-ethereum && go mod download
 
 ADD . /go-ethereum
-RUN cd /go-ethereum && go run build/ci.go install -static ./cmd/geth
+RUN cd /go-ethereum && go run build/ci.go install ./cmd/geth
 
 # Pull Geth into a second stage deploy alpine container
 FROM alpine:latest
Dockerfile.alltools

@@ -4,7 +4,7 @@ ARG VERSION=""
 ARG BUILDNUM=""
 
 # Build Geth in a stock Go builder container
-FROM golang:1.20-alpine as builder
+FROM golang:1.18-alpine as builder
 
 RUN apk add --no-cache gcc musl-dev linux-headers git
 

@@ -14,7 +14,7 @@ COPY go.sum /go-ethereum/
 RUN cd /go-ethereum && go mod download
 
 ADD . /go-ethereum
-RUN cd /go-ethereum && go run build/ci.go install -static
+RUN cd /go-ethereum && go run build/ci.go install
 
 # Pull all binaries into a second stage deploy alpine container
 FROM alpine:latest
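Either Dockerfile can be exercised locally the same way the CI workflows do. A small sketch, assuming the second variant is named Dockerfile.alltools as upstream and that the built image keeps geth at /usr/local/bin/geth:

```shell
# Build the single-binary image and sanity-check the geth it produced.
docker build -t cerc-io/go-ethereum -f Dockerfile .
docker run --rm --entrypoint geth cerc-io/go-ethereum version

# The alltools variant bundles every cmd/ binary instead of just geth.
docker build -t cerc-io/go-ethereum-alltools -f Dockerfile.alltools .
```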
Jenkinsfile (vendored): 50 changes (file removed on the head side)

@@ -1,50 +0,0 @@
-pipeline {
-    agent any
-
-    stages {
-        stage('Build') {
-            steps {
-                script{
-                    docker.withRegistry('https://git.vdb.to'){
-                        echo 'Building geth image...'
-                        //def geth_image = docker.build("cerc-io/go-ethereum:jenkinscicd")
-                        echo 'built geth image'
-                    }
-                }
-            }
-        }
-        stage('Test') {
-            agent {
-                docker {
-                    image 'cerc-io/foundation:jenkinscicd'
-                    //image 'cerc-io/foundation_alpine:jenkinscicd'
-                }
-            }
-
-            environment {
-                GO111MODULE = "on"
-                CGO_ENABLED = 1
-                //GOPATH = "${JENKINS_HOME}/jobs/${JOB_NAME}/builds/${BUILD_ID}"
-                //GOPATH = "/go"
-                GOPATH = "/tmp/go"
-                //GOMODCACHE = "/go/pkg/mod"
-                GOCACHE = "${WORKSPACE}/.cache/go-build"
-                GOENV = "${WORKSPACE}/.config/go/env"
-                GOMODCACHE = "/tmp/go/pkg/mod"
-                GOWORK=""
-                //GOFLAGS=""
-
-            }
-            steps {
-                echo 'Testing ...'
-                //sh '/usr/local/go/bin/go test -p 1 -v ./...'
-                sh 'make test'
-            }
-        }
-        stage('Packaging') {
-            steps {
-                echo 'Packaging ...'
-            }
-        }
-    }
-}
Makefile: 14 changes

@@ -26,7 +26,7 @@ PASSWORD = password
 export PGPASSWORD=$(PASSWORD)
 
 #Test
-TEST_DB = cerc_testing
+TEST_DB = vulcanize_public
 TEST_CONNECT_STRING = postgresql://$(USER):$(PASSWORD)@$(HOST_NAME):$(PORT)/$(TEST_DB)?sslmode=disable
 
 geth:

@@ -37,6 +37,18 @@ geth:
 all:
 	$(GORUN) build/ci.go install
 
+android:
+	$(GORUN) build/ci.go aar --local
+	@echo "Done building."
+	@echo "Import \"$(GOBIN)/geth.aar\" to use the library."
+	@echo "Import \"$(GOBIN)/geth-sources.jar\" to add javadocs"
+	@echo "For more info see https://stackoverflow.com/questions/20994336/android-studio-how-to-attach-javadoc"
+
+ios:
+	$(GORUN) build/ci.go xcode --local
+	@echo "Done building."
+	@echo "Import \"$(GOBIN)/Geth.framework\" to use the library."
+
 test: all
 	$(GORUN) build/ci.go test
 
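Typical usage of the targets touched here; the binary location assumes the upstream convention of GOBIN pointing at build/bin:

```shell
# Build geth via the Makefile wrapper around build/ci.go, then run the tests.
make geth
./build/bin/geth version   # assumes GOBIN=build/bin as in the upstream Makefile

# "make test" builds everything first (test: all) and then runs the Go test suite.
make test
```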
README.md: 85 changes

@@ -1,6 +1,6 @@
 ## Go Ethereum
 
-Official Golang execution layer implementation of the Ethereum protocol.
+Official Golang implementation of the Ethereum protocol.
 
 [](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc)
 [](https://goreportcard.com/report/github.com/ethereum/go-ethereum)

@@ -27,9 +27,9 @@ This process is subject to change.
 
 ## Building the source
 
-For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/getting-started/installing-geth).
+For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/install-and-build/installing-geth).
 
-Building `geth` requires both a Go (version 1.19 or later) and a C compiler. You can install
+Building `geth` requires both a Go (version 1.16 or later) and a C compiler. You can install
 them using your favourite package manager. Once the dependencies are installed, run
 
 ```shell

@@ -49,18 +49,19 @@ directory.
 
 | Command | Description |
 | :--------: | -------------------------------------------------------------------------------------------------- |
-| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/fundamentals/command-line-options) for command line options. |
+| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options. |
 | `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. |
 | `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. |
-| `abigen` | Source code generator to convert Ethereum contract definitions into easy-to-use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/developers/dapp-developer/native-bindings) page for details. |
+| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. |
 | `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
 | `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). |
 | `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
+| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |
 
 ## Running `geth`
 
 Going through all the possible command line flags is out of scope here (please consult our
-[CLI Wiki page](https://geth.ethereum.org/docs/fundamentals/command-line-options)),
+[CLI Wiki page](https://geth.ethereum.org/docs/interface/command-line-options)),
 but we've enumerated a few common parameter combos to get you up to speed quickly
 on how you can run your own `geth` instance.
 

@@ -75,16 +76,16 @@ Minimum:
 
 Recommended:
 
-* Fast CPU with 4+ cores
-* 16GB+ RAM
-* High-performance SSD with at least 1TB of free space
-* 25+ MBit/sec download Internet service
+- Fast CPU with 4+ cores
+- 16GB+ RAM
+- High Performance SSD with at least 1TB free space
+- 25+ MBit/sec download Internet service
 
 ### Full node on the main Ethereum network
 
 By far the most common scenario is people wanting to simply interact with the Ethereum
 network: create accounts; transfer funds; deploy and interact with contracts. For this
-particular use case, the user doesn't care about years-old historical data, so we can
+particular use-case the user doesn't care about years-old historical data, so we can
 sync quickly to the current state of the network. To do so:
 
 ```shell

@@ -92,14 +93,15 @@ $ geth console
 ```
 
 This command will:
-* Start `geth` in snap sync mode (default, can be changed with the `--syncmode` flag),
+
+- Start `geth` in snap sync mode (default, can be changed with the `--syncmode` flag),
   causing it to download more data in exchange for avoiding processing the entire history
   of the Ethereum network, which is very CPU intensive.
-* Start the built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interacting-with-geth/javascript-console),
+- Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console),
   (via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://github.com/ChainSafe/web3.js/blob/0.20.7/DOCUMENTATION.md)
   (note: the `web3` version bundled within `geth` is very old, and not up to date with official docs),
-  as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/interacting-with-geth/rpc).
+  as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server).
-  This tool is optional and if you leave it out you can always attach it to an already running
+  This tool is optional and if you leave it out you can always attach to an already running
   `geth` instance with `geth attach`.
 
 ### A Full node on the Görli test network

@@ -114,15 +116,15 @@ the main network, but with play-Ether only.
 $ geth --goerli console
 ```
 
-The `console` subcommand has the same meaning as above and is equally
-useful on the testnet too.
+The `console` subcommand has the exact same meaning as above and they are equally
+useful on the testnet too. Please, see above for their explanations if you've skipped here.
 
 Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit:
 
-* Instead of connecting to the main Ethereum network, the client will connect to the Görli
+- Instead of connecting the main Ethereum network, the client will connect to the Görli
   test network, which uses different P2P bootnodes, different network IDs and genesis
   states.
-* Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth`
+- Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth`
   will nest itself one level deeper into a `goerli` subfolder (`~/.ethereum/goerli` on
   Linux). Note, on OSX and Linux this also means that attaching to a running testnet node
   requires the use of a custom endpoint since `geth attach` will try to attach to a

@@ -130,9 +132,9 @@ Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a
 `geth attach <datadir>/goerli/geth.ipc`. Windows users are not affected by
 this.
 
-*Note: Although some internal protective measures prevent transactions from
-crossing over between the main network and test network, you should always
-use separate accounts for play and real money. Unless you manually move
+_Note: Although there are some internal protective measures to prevent transactions from
+crossing over between the main network and test network, you should make sure to always
+use separate accounts for play-money and real-money. Unless you manually move
 accounts, `geth` will by default correctly separate the two networks and will not make any
 accounts available between them._
 

@@ -145,6 +147,19 @@ called [_Rinkeby_](https://www.rinkeby.io) which is operated by members of the c
 $ geth --rinkeby console
 ```
 
+### Full node on the Ropsten test network
+
+In addition to Görli and Rinkeby, Geth also supports the ancient Ropsten testnet. The
+Ropsten test network is based on the Ethash proof-of-work consensus algorithm. As such,
+it has certain extra overhead and is more susceptible to reorganization attacks due to the
+network's low difficulty/security.
+
+```shell
+$ geth --ropsten console
+```
+
+_Note: Older Geth configurations store the Ropsten database in the `testnet` subdirectory._
+
 ### Configuration
 
 As an alternative to passing the numerous flags to the `geth` binary, you can also pass a

@@ -154,7 +169,7 @@ configuration file via:
 $ geth --config /path/to/your_config.toml
 ```
 
-To get an idea of how the file should look like you can use the `dumpconfig` subcommand to
+To get an idea how the file should look like you can use the `dumpconfig` subcommand to
 export your existing configuration:
 
 ```shell
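For reference, the dumpconfig/--config round trip described in the hunk above looks like the following in practice; the choice of flags is illustrative, any set you normally pass works the same way:

```shell
# Export the effective configuration produced by a set of flags, then start
# geth from that file instead of repeating the flags on every run.
geth --goerli dumpconfig > goerli.toml
geth --config goerli.toml
```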
@ -174,7 +189,7 @@ docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \
|
|||||||
ethereum/client-go
|
ethereum/client-go
|
||||||
```
|
```
|
||||||
|
|
||||||
This will start `geth` in snap-sync mode with a DB memory allowance of 1GB, as the
|
This will start `geth` in snap-sync mode with a DB memory allowance of 1GB just as the
|
||||||
above command does. It will also create a persistent volume in your home directory for
|
above command does. It will also create a persistent volume in your home directory for
|
||||||
saving your blockchain as well as map the default ports. There is also an `alpine` tag
|
saving your blockchain as well as map the default ports. There is also an `alpine` tag
|
||||||
available for a slim version of the image.
|
available for a slim version of the image.
|
||||||
@@ -188,7 +203,7 @@ accessible from the outside.
 As a developer, sooner rather than later you'll want to start interacting with `geth` and the
 Ethereum network via your own programs and not manually through the console. To aid
 this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://ethereum.github.io/execution-apis/api-documentation/)
-and [`geth` specific APIs](https://geth.ethereum.org/docs/interacting-with-geth/rpc)).
+and [`geth` specific APIs](https://geth.ethereum.org/docs/rpc/server)).
 These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based
 platforms, and named pipes on Windows).
 
@@ -208,7 +223,7 @@ HTTP based JSON-RPC API options:
 * `--ws.addr` WS-RPC server listening interface (default: `localhost`)
 * `--ws.port` WS-RPC server listening port (default: `8546`)
 * `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`)
-* `--ws.origins` Origins from which to accept WebSocket requests
+* `--ws.origins` Origins from which to accept websockets requests
 * `--ipcdisable` Disable the IPC-RPC server
 * `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,txpool,web3`)
 * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
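For orientation only (not part of this changeset): the flags above merely expose the transports. A minimal sketch of attaching to those endpoints from Go with the `ethclient` package shipped in this repository — the URLs and the IPC path are assumptions matching the default flag values, and a node must be running for the call to succeed:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Any of the transports enabled by the flags above is dialed the same way:
	//   HTTP:      "http://localhost:8545"
	//   WebSocket: "ws://localhost:8546"
	//   IPC:       "/path/to/datadir/geth.ipc" (assumed default location)
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer client.Close()

	// A trivial call over the eth namespace to confirm the connection works.
	head, err := client.BlockNumber(context.Background())
	if err != nil {
		log.Fatalf("eth_blockNumber: %v", err)
	}
	fmt.Println("current head:", head)
}
```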
@@ -321,8 +336,12 @@ also need to configure a miner to process transactions and create new blocks for
 
 #### Running a private miner
 
+Mining on the public Ethereum network is a complex task as it's only feasible using GPUs,
+requiring an OpenCL or CUDA enabled `ethminer` instance. For information on such a
+setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/)
+and the [ethminer](https://github.com/ethereum-mining/ethminer) repository.
 
-In a private network setting a single CPU miner instance is more than enough for
+In a private network setting, however a single CPU miner instance is more than enough for
 practical purposes as it can produce a stable stream of blocks at the correct intervals
 without needing heavy resources (consider running on a single thread, no need for multiple
 ones either). To start a `geth` instance for mining, run it with all your usual flags, extended
@@ -339,7 +358,7 @@ transactions are accepted at (`--miner.gasprice`).
 
 ## Contribution
 
-Thank you for considering helping out with the source code! We welcome contributions
+Thank you for considering to help out with the source code! We welcome contributions
 from anyone on the internet, and are grateful for even the smallest of fixes!
 
 If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request
@@ -359,22 +378,16 @@ Please make sure your contributions adhere to our coding guidelines:
 - Commit messages should be prefixed with the package(s) they modify.
   - E.g. "eth, rpc: make trace configs optional"
 
-Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/geth-developer/dev-guide)
+Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/devguide)
 for more details on configuring your environment, managing project dependencies, and
 testing procedures.
 
-### Contributing to geth.ethereum.org
-
-For contributions to the [go-ethereum website](https://geth.ethereum.org), please checkout and raise pull requests against the `website` branch.
-For more detailed instructions please see the `website` branch [README](https://github.com/ethereum/go-ethereum/tree/website#readme) or the
-[contributing](https://geth.ethereum.org/docs/developers/geth-developer/contributing) page of the website.
-
 ## License
 
 The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the
 [GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html),
 also included in our repository in the `COPYING.LESSER` file.
 
-The go-ethereum binaries (i.e. all code inside of the `cmd` directory) are licensed under the
+The go-ethereum binaries (i.e. all code inside of the `cmd` directory) is licensed under the
 [GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also
 included in our repository in the `COPYING` file.
@@ -87,7 +87,7 @@ func (abi ABI) getArguments(name string, data []byte) (Arguments, error) {
 	var args Arguments
 	if method, ok := abi.Methods[name]; ok {
 		if len(data)%32 != 0 {
-			return nil, fmt.Errorf("abi: improperly formatted output: %q - Bytes: %+v", data, data)
+			return nil, fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(data), data)
 		}
 		args = method.Outputs
 	}
@@ -95,7 +95,7 @@ func (abi ABI) getArguments(name string, data []byte) (Arguments, error) {
 		args = event.Inputs
 	}
 	if args == nil {
-		return nil, fmt.Errorf("abi: could not locate named method or event: %s", name)
+		return nil, errors.New("abi: could not locate named method or event")
 	}
 	return args, nil
 }
@@ -246,10 +246,7 @@ func UnpackRevert(data []byte) (string, error) {
 	if !bytes.Equal(data[:4], revertSelector) {
 		return "", errors.New("invalid data for unpacking")
 	}
-	typ, err := NewType("string", "", nil)
-	if err != nil {
-		return "", err
-	}
+	typ, _ := NewType("string", "", nil)
 	unpacked, err := (Arguments{{Type: typ}}).Unpack(data[4:])
 	if err != nil {
 		return "", err
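As a quick illustration of the helper touched above (a sketch, not part of the changeset): `UnpackRevert` decodes the standard `Error(string)` revert payload, i.e. the four-byte selector followed by an ABI-encoded string. The example builds such a payload itself rather than hard-coding bytes, and the revert reason is an arbitrary assumption:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Assemble a payload equivalent to what the EVM returns for revert("out of tokens"):
	// the 4-byte selector of Error(string) followed by the ABI-encoded string.
	stringTy, err := abi.NewType("string", "", nil)
	if err != nil {
		log.Fatal(err)
	}
	encoded, err := abi.Arguments{{Type: stringTy}}.Pack("out of tokens")
	if err != nil {
		log.Fatal(err)
	}
	selector := crypto.Keccak256([]byte("Error(string)"))[:4]
	payload := append(selector, encoded...)

	// UnpackRevert (the function modified in the hunk above) recovers the reason string.
	reason, err := abi.UnpackRevert(payload)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(reason) // out of tokens
}
```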
@@ -165,7 +165,6 @@ func TestInvalidABI(t *testing.T) {
 
 // TestConstructor tests a constructor function.
 // The test is based on the following contract:
-//
 // contract TestConstructor {
 // constructor(uint256 a, uint256 b) public{}
 // }
@@ -725,7 +724,6 @@ func TestBareEvents(t *testing.T) {
 }
 
 // TestUnpackEvent is based on this contract:
-//
 // contract T {
 // event received(address sender, uint amount, bytes memo);
 // event receivedAddr(address sender);
@@ -734,9 +732,7 @@ func TestBareEvents(t *testing.T) {
 // receivedAddr(msg.sender);
 // }
 // }
-//
 // When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
-//
 // receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
 func TestUnpackEvent(t *testing.T) {
 	const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]`
@@ -1082,7 +1078,6 @@ func TestDoubleDuplicateMethodNames(t *testing.T) {
 // TestDoubleDuplicateEventNames checks that if send0 already exists, there won't be a name
 // conflict and that the second send event will be renamed send1.
 // The test runs the abi of the following contract.
-//
 // contract DuplicateEvent {
 // event send(uint256 a);
 // event send0();
@@ -1111,7 +1106,6 @@ func TestDoubleDuplicateEventNames(t *testing.T) {
 // TestUnnamedEventParam checks that an event with unnamed parameters is
 // correctly handled.
 // The test runs the abi of the following contract.
-//
 // contract TestEvent {
 // event send(uint256, uint256);
 // }
@@ -18,7 +18,6 @@ package abi
 
 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"reflect"
 	"strings"
@@ -80,7 +79,7 @@ func (arguments Arguments) isTuple() bool {
 func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
 	if len(data) == 0 {
 		if len(arguments.NonIndexed()) != 0 {
-			return nil, errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
+			return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
 		}
 		return make([]interface{}, 0), nil
 	}
@@ -91,11 +90,11 @@ func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
 func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error {
 	// Make sure map is not nil
 	if v == nil {
-		return errors.New("abi: cannot unpack into a nil map")
+		return fmt.Errorf("abi: cannot unpack into a nil map")
 	}
 	if len(data) == 0 {
 		if len(arguments.NonIndexed()) != 0 {
-			return errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
+			return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
 		}
 		return nil // Nothing to unmarshal, return
 	}
@@ -117,7 +116,7 @@ func (arguments Arguments) Copy(v interface{}, values []interface{}) error {
 	}
 	if len(values) == 0 {
 		if len(arguments.NonIndexed()) != 0 {
-			return errors.New("abi: attempting to copy no values while arguments are expected")
+			return fmt.Errorf("abi: attempting to copy no values while arguments are expected")
 		}
 		return nil // Nothing to copy, return
 	}
@@ -187,9 +186,6 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
 	virtualArgs := 0
 	for index, arg := range nonIndexedArgs {
 		marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
-		if err != nil {
-			return nil, err
-		}
 		if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) {
 			// If we have a static array, like [3]uint256, these are coded as
 			// just like uint256,uint256,uint256.
@@ -207,6 +203,9 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
 			// coded as just like uint256,bool,uint256
 			virtualArgs += getTypeSize(arg.Type)/32 - 1
 		}
+		if err != nil {
+			return nil, err
+		}
 		retval = append(retval, marshalledValue)
 	}
 	return retval, nil
@@ -68,8 +68,7 @@ type SimulatedBackend struct {
 	pendingState    *state.StateDB // Currently pending state that will be the active on request
 	pendingReceipts types.Receipts // Currently receipts for the pending block
 
-	events       *filters.EventSystem  // for filtering log events live
-	filterSystem *filters.FilterSystem // for filtering database logs
+	events *filters.EventSystem // Event system for filtering log events live
 
 	config *params.ChainConfig
 }
@@ -78,27 +77,17 @@ type SimulatedBackend struct {
 // and uses a simulated blockchain for testing purposes.
 // A simulated backend always uses chainID 1337.
 func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
-	genesis := core.Genesis{
-		Config:   params.AllEthashProtocolChanges,
-		GasLimit: gasLimit,
-		Alloc:    alloc,
-	}
-	blockchain, _ := core.NewBlockChain(database, nil, &genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+	genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
+	genesis.MustCommit(database)
+	blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
 
 	backend := &SimulatedBackend{
 		database:   database,
 		blockchain: blockchain,
 		config:     genesis.Config,
 	}
-	filterBackend := &filterBackend{database, blockchain, backend}
-	backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{})
-	backend.events = filters.NewEventSystem(backend.filterSystem, false)
-
-	header := backend.blockchain.CurrentBlock()
-	block := backend.blockchain.GetBlock(header.Hash(), header.Number.Uint64())
-
-	backend.rollback(block)
+	backend.events = filters.NewEventSystem(&filterBackend{database, blockchain, backend}, false)
+	backend.rollback(blockchain.CurrentBlock())
 	return backend
 }
 
@@ -117,20 +106,16 @@ func (b *SimulatedBackend) Close() error {
 
 // Commit imports all the pending transactions as a single block and starts a
 // fresh new state.
-func (b *SimulatedBackend) Commit() common.Hash {
+func (b *SimulatedBackend) Commit() {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 
 	if _, err := b.blockchain.InsertChain([]*types.Block{b.pendingBlock}); err != nil {
 		panic(err) // This cannot happen unless the simulator is wrong, fail in that case
 	}
-	blockHash := b.pendingBlock.Hash()
 
 	// Using the last inserted block here makes it possible to build on a side
 	// chain after a fork.
 	b.rollback(b.pendingBlock)
-
-	return blockHash
 }
 
 // Rollback aborts all pending transactions, reverting to the last committed state.
@@ -138,10 +123,7 @@ func (b *SimulatedBackend) Rollback() {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 
-	header := b.blockchain.CurrentBlock()
-	block := b.blockchain.GetBlock(header.Hash(), header.Number.Uint64())
-
-	b.rollback(block)
+	b.rollback(b.blockchain.CurrentBlock())
 }
 
 func (b *SimulatedBackend) rollback(parent *types.Block) {
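For context on the signature change above (an illustrative sketch against the newer side of the diff, not part of the changeset): on that side `Commit` hands back the hash of the block it just sealed, which callers can use to look the block up or to `Fork` back to later. The key and allocation below are arbitrary assumptions:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Fund one throwaway account in the genesis allocation (assumed values).
	key, _ := crypto.GenerateKey()
	addr := crypto.PubkeyToAddress(key.PublicKey)
	alloc := core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000000000000)}}

	sim := backends.NewSimulatedBackend(alloc, 8_000_000)
	defer sim.Close()

	// On the newer side of this hunk, Commit returns the sealed block's hash.
	hash := sim.Commit()
	fmt.Println("sealed block:", hash.Hex())
	fmt.Println("matches head:", hash == sim.Blockchain().CurrentBlock().Hash())
}
```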
@@ -180,7 +162,7 @@ func (b *SimulatedBackend) Fork(ctx context.Context, parent common.Hash) error {
 
 // stateByBlockNumber retrieves a state by a given blocknumber.
 func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber *big.Int) (*state.StateDB, error) {
-	if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number) == 0 {
+	if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) == 0 {
 		return b.blockchain.State()
 	}
 	block, err := b.blockByNumber(ctx, blockNumber)
@@ -309,7 +291,7 @@ func (b *SimulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) (
 // (associated with its hash) if found without Lock.
 func (b *SimulatedBackend) blockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
 	if number == nil || number.Cmp(b.pendingBlock.Number()) == 0 {
-		return b.blockByHash(ctx, b.blockchain.CurrentBlock().Hash())
+		return b.blockchain.CurrentBlock(), nil
 	}
 
 	block := b.blockchain.GetBlockByNumber(uint64(number.Int64()))
@@ -437,7 +419,7 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallM
 	b.mu.Lock()
 	defer b.mu.Unlock()
 
-	if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number) != 0 {
+	if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
 		return nil, errBlockNumberUnsupported
 	}
 	stateDB, err := b.blockchain.State()
@@ -461,7 +443,7 @@ func (b *SimulatedBackend) PendingCallContract(ctx context.Context, call ethereu
 	defer b.mu.Unlock()
 	defer b.pendingState.RevertToSnapshot(b.pendingState.Snapshot())
 
-	res, err := b.callContract(ctx, call, b.pendingBlock.Header(), b.pendingState)
+	res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
 	if err != nil {
 		return nil, err
 	}
@@ -533,7 +515,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
 	available := new(big.Int).Set(balance)
 	if call.Value != nil {
 		if call.Value.Cmp(available) >= 0 {
-			return 0, core.ErrInsufficientFundsForTransfer
+			return 0, errors.New("insufficient funds for transfer")
 		}
 		available.Sub(available, call.Value)
 	}
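To make the guard above concrete (a hedged sketch, not part of the changeset): asking the simulated backend for an estimate with a non-zero gas price and a value larger than the sender's balance trips exactly this branch on both sides of the diff; only the error value differs. The addresses and amounts are arbitrary assumptions:

```go
package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	sender := crypto.PubkeyToAddress(key.PublicKey)

	// Give the sender just 1 wei so any non-trivial value overshoots the balance.
	sim := backends.NewSimulatedBackend(core.GenesisAlloc{sender: {Balance: big.NewInt(1)}}, 8_000_000)
	defer sim.Close()

	recipient := common.HexToAddress("0x00000000000000000000000000000000deadbeef")
	_, err := sim.EstimateGas(context.Background(), ethereum.CallMsg{
		From:     sender,
		To:       &recipient,
		GasPrice: big.NewInt(1),         // a non-zero fee cap routes the call through the balance check
		Value:    big.NewInt(1_000_000), // far more than the 1 wei balance
	})
	fmt.Println("estimate error:", err) // the insufficient-funds path shown in the hunk above
}
```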
@@ -555,7 +537,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
 	call.Gas = gas
 
 	snapshot := b.pendingState.Snapshot()
-	res, err := b.callContract(ctx, call, b.pendingBlock.Header(), b.pendingState)
+	res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
 	b.pendingState.RevertToSnapshot(snapshot)
 
 	if err != nil {
@@ -605,7 +587,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
 
 // callContract implements common code between normal and pending contract calls.
 // state is modified during execution, make sure to copy it if necessary.
-func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, header *types.Header, stateDB *state.StateDB) (*core.ExecutionResult, error) {
+func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, stateDB *state.StateDB) (*core.ExecutionResult, error) {
 	// Gas prices post 1559 need to be initialized
 	if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) {
 		return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
@@ -623,7 +605,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
 		// User specified the legacy gas field, convert to 1559 gas typing
 		call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice
 	} else {
-		// User specified 1559 gas fields (or none), use those
+		// User specified 1559 gas feilds (or none), use those
 		if call.GasFeeCap == nil {
 			call.GasFeeCap = new(big.Int)
 		}
@@ -644,33 +626,20 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
 	if call.Value == nil {
 		call.Value = new(big.Int)
 	}
 
 	// Set infinite balance to the fake caller account.
 	from := stateDB.GetOrNewStateObject(call.From)
 	from.SetBalance(math.MaxBig256)
 
 	// Execute the call.
-	msg := &core.Message{
-		From:              call.From,
-		To:                call.To,
-		Value:             call.Value,
-		GasLimit:          call.Gas,
-		GasPrice:          call.GasPrice,
-		GasFeeCap:         call.GasFeeCap,
-		GasTipCap:         call.GasTipCap,
-		Data:              call.Data,
-		AccessList:        call.AccessList,
-		SkipAccountChecks: true,
-	}
+	msg := callMsg{call}
 
+	txContext := core.NewEVMTxContext(msg)
+	evmContext := core.NewEVMBlockContext(block.Header(), b.blockchain, nil)
 	// Create a new environment which holds all relevant information
 	// about the transaction and calling mechanisms.
-	txContext := core.NewEVMTxContext(msg)
-	evmContext := core.NewEVMBlockContext(header, b.blockchain, nil)
 	vmEnv := vm.NewEVM(evmContext, txContext, stateDB, b.config, vm.Config{NoBaseFee: true})
 	gasPool := new(core.GasPool).AddGas(math.MaxUint64)
 
-	return core.ApplyMessage(vmEnv, msg, gasPool)
+	return core.NewStateTransition(vmEnv, msg, gasPool).TransitionDb()
 }
 
 // SendTransaction updates the pending block to include the given transaction.
@@ -716,7 +685,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter
 	var filter *filters.Filter
 	if query.BlockHash != nil {
 		// Block filter requested, construct a single-shot filter
-		filter = b.filterSystem.NewBlockFilter(*query.BlockHash, query.Addresses, query.Topics)
+		filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain, b}, *query.BlockHash, query.Addresses, query.Topics)
 	} else {
 		// Initialize unset filter boundaries to run from genesis to chain head
 		from := int64(0)
@@ -728,7 +697,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter
 			to = query.ToBlock.Int64()
 		}
 		// Construct the range filter
-		filter = b.filterSystem.NewRangeFilter(from, to, query.Addresses, query.Topics)
+		filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain, b}, from, to, query.Addresses, query.Topics)
 	}
 	// Run the filter and return all the logs
 	logs, err := filter.Logs(ctx)
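Whichever constructor is used internally, callers drive this through the standard `FilterLogs` interface. A brief sketch of a range query against the simulated backend (not part of the changeset; the contract address is a placeholder assumption):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, 8_000_000)
	defer sim.Close()
	sim.Commit() // seal an (empty) block so the range below is non-trivial

	// A range filter from genesis to head for one hypothetical contract address.
	contract := common.HexToAddress("0x00000000000000000000000000000000c0ffee00")
	logs, err := sim.FilterLogs(context.Background(), ethereum.FilterQuery{
		FromBlock: big.NewInt(0),
		ToBlock:   nil, // nil means "up to the current head"
		Addresses: []common.Address{contract},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("matching logs:", len(logs)) // 0 here, since nothing has emitted events
}
```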
@@ -812,13 +781,8 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
 	if len(b.pendingBlock.Transactions()) != 0 {
 		return errors.New("Could not adjust time on non-empty block")
 	}
-	// Get the last block
-	block := b.blockchain.GetBlockByHash(b.pendingBlock.ParentHash())
-	if block == nil {
-		return fmt.Errorf("could not find parent")
-	}
 
-	blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
+	blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
 		block.OffsetTime(int64(adjustment.Seconds()))
 	})
 	stateDB, _ := b.blockchain.State()
@@ -834,6 +798,23 @@ func (b *SimulatedBackend) Blockchain() *core.BlockChain {
 	return b.blockchain
 }
 
+// callMsg implements core.Message to allow passing it as a transaction simulator.
+type callMsg struct {
+	ethereum.CallMsg
+}
+
+func (m callMsg) From() common.Address         { return m.CallMsg.From }
+func (m callMsg) Nonce() uint64                { return 0 }
+func (m callMsg) IsFake() bool                 { return true }
+func (m callMsg) To() *common.Address          { return m.CallMsg.To }
+func (m callMsg) GasPrice() *big.Int           { return m.CallMsg.GasPrice }
+func (m callMsg) GasFeeCap() *big.Int          { return m.CallMsg.GasFeeCap }
+func (m callMsg) GasTipCap() *big.Int          { return m.CallMsg.GasTipCap }
+func (m callMsg) Gas() uint64                  { return m.CallMsg.Gas }
+func (m callMsg) Value() *big.Int              { return m.CallMsg.Value }
+func (m callMsg) Data() []byte                 { return m.CallMsg.Data }
+func (m callMsg) AccessList() types.AccessList { return m.CallMsg.AccessList }
+
 // filterBackend implements filters.Backend to support filtering for logs without
 // taking bloom-bits acceleration structures into account.
 type filterBackend struct {
@@ -843,38 +824,19 @@ type filterBackend struct {
 }
 
 func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db }
 
 func (fb *filterBackend) EventMux() *event.TypeMux { panic("not supported") }
 
-func (fb *filterBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) {
-	switch number {
-	case rpc.PendingBlockNumber:
-		if block := fb.backend.pendingBlock; block != nil {
-			return block.Header(), nil
-		}
-		return nil, nil
-	case rpc.LatestBlockNumber:
+func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumber) (*types.Header, error) {
+	if block == rpc.LatestBlockNumber {
 		return fb.bc.CurrentHeader(), nil
-	case rpc.FinalizedBlockNumber:
-		return fb.bc.CurrentFinalBlock(), nil
-	case rpc.SafeBlockNumber:
-		return fb.bc.CurrentSafeBlock(), nil
-	default:
-		return fb.bc.GetHeaderByNumber(uint64(number.Int64())), nil
 	}
+	return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
 }
 
 func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
 	return fb.bc.GetHeaderByHash(hash), nil
 }
 
-func (fb *filterBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
-	if body := fb.bc.GetBody(hash); body != nil {
-		return body, nil
-	}
-	return nil, errors.New("block body not found")
-}
-
 func (fb *filterBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
 	return fb.backend.pendingBlock, fb.backend.pendingReceipts
 }
@@ -887,8 +849,19 @@ func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (typ
 	return rawdb.ReadReceipts(fb.db, hash, *number, fb.bc.Config()), nil
 }
 
-func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
-	logs := rawdb.ReadLogs(fb.db, hash, number, fb.bc.Config())
+func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
+	number := rawdb.ReadHeaderNumber(fb.db, hash)
+	if number == nil {
+		return nil, nil
+	}
+	receipts := rawdb.ReadReceipts(fb.db, hash, *number, fb.bc.Config())
+	if receipts == nil {
+		return nil, nil
+	}
+	logs := make([][]*types.Log, len(receipts))
+	for i, receipt := range receipts {
+		logs[i] = receipt.Logs
+	}
 	return logs, nil
 }
@@ -918,14 +891,6 @@ func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.Matche
 	panic("not supported")
 }
 
-func (fb *filterBackend) ChainConfig() *params.ChainConfig {
-	panic("not supported")
-}
-
-func (fb *filterBackend) CurrentHeader() *types.Header {
-	panic("not supported")
-}
-
 func nullSubscription() event.Subscription {
 	return event.NewSubscription(func(quit <-chan struct{}) error {
 		<-quit
@@ -94,7 +94,6 @@ func TestSimulatedBackend(t *testing.T) {
 var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 
 // the following is based on this contract:
-//
 // contract T {
 // event received(address sender, uint amount, bytes memo);
 // event receivedAddr(address sender);
@@ -423,8 +422,7 @@ func TestEstimateGas(t *testing.T) {
 	function OOG() public { for (uint i = 0; ; i++) {}}
 	function Assert() public { assert(false);}
 	function Valid() public {}
-}
-*/
+}*/
 
 const contractAbi = "[{\"inputs\":[],\"name\":\"Assert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"OOG\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PureRevert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Revert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Valid\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"
 const contractBin = "0x60806040523480156100115760006000fd5b50610017565b61016e806100266000396000f3fe60806040523480156100115760006000fd5b506004361061005c5760003560e01c806350f6fe3414610062578063aa8b1d301461006c578063b9b046f914610076578063d8b9839114610080578063e09fface1461008a5761005c565b60006000fd5b61006a610094565b005b6100746100ad565b005b61007e6100b5565b005b6100886100c2565b005b610092610135565b005b6000600090505b5b808060010191505061009b565b505b565b60006000fd5b565b600015156100bf57fe5b5b565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600d8152602001807f72657665727420726561736f6e0000000000000000000000000000000000000081526020015060200191505060405180910390fd5b565b5b56fea2646970667358221220345bbcbb1a5ecf22b53a78eaebf95f8ee0eceff6d10d4b9643495084d2ec934a64736f6c63430006040033"
 
@@ -996,7 +994,6 @@ func TestCodeAt(t *testing.T) {
 }
 
 // When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
-//
 // receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
 func TestPendingAndCallContract(t *testing.T) {
 	testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
@@ -1189,7 +1186,7 @@ func TestFork(t *testing.T) {
 		sim.Commit()
 	}
 	// 3.
-	if sim.blockchain.CurrentBlock().Number.Uint64() != uint64(n) {
+	if sim.blockchain.CurrentBlock().NumberU64() != uint64(n) {
 		t.Error("wrong chain length")
 	}
 	// 4.
@@ -1199,7 +1196,7 @@ func TestFork(t *testing.T) {
 		sim.Commit()
 	}
 	// 6.
-	if sim.blockchain.CurrentBlock().Number.Uint64() != uint64(n+1) {
+	if sim.blockchain.CurrentBlock().NumberU64() != uint64(n+1) {
 		t.Error("wrong chain length")
 	}
 }
@@ -1207,11 +1204,11 @@ func TestFork(t *testing.T) {
 /*
 Example contract to test event emission:
 
 pragma solidity >=0.7.0 <0.9.0;
 contract Callable {
 	event Called();
 	function Call() public { emit Called(); }
 }
 */
 const callableAbi = "[{\"anonymous\":false,\"inputs\":[],\"name\":\"Called\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"Call\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"
 
@@ -1338,62 +1335,3 @@ func TestForkResendTx(t *testing.T) {
 		t.Errorf("TX included in wrong block: %d", h)
 	}
 }
-
-func TestCommitReturnValue(t *testing.T) {
-	testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
-	sim := simTestBackend(testAddr)
-	defer sim.Close()
-
-	startBlockHeight := sim.blockchain.CurrentBlock().Number.Uint64()
-
-	// Test if Commit returns the correct block hash
-	h1 := sim.Commit()
-	if h1 != sim.blockchain.CurrentBlock().Hash() {
-		t.Error("Commit did not return the hash of the last block.")
-	}
-
-	// Create a block in the original chain (containing a transaction to force different block hashes)
-	head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough
-	gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))
-	_tx := types.NewTransaction(0, testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil)
-	tx, _ := types.SignTx(_tx, types.HomesteadSigner{}, testKey)
-	sim.SendTransaction(context.Background(), tx)
-	h2 := sim.Commit()
-
-	// Create another block in the original chain
-	sim.Commit()
-
-	// Fork at the first bock
-	if err := sim.Fork(context.Background(), h1); err != nil {
-		t.Errorf("forking: %v", err)
-	}
-
-	// Test if Commit returns the correct block hash after the reorg
-	h2fork := sim.Commit()
-	if h2 == h2fork {
-		t.Error("The block in the fork and the original block are the same block!")
-	}
-	if sim.blockchain.GetHeader(h2fork, startBlockHeight+2) == nil {
-		t.Error("Could not retrieve the just created block (side-chain)")
-	}
-}
-
-// TestAdjustTimeAfterFork ensures that after a fork, AdjustTime uses the pending fork
-// block's parent rather than the canonical head's parent.
-func TestAdjustTimeAfterFork(t *testing.T) {
-	testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
-	sim := simTestBackend(testAddr)
-	defer sim.Close()
-
-	sim.Commit() // h1
-	h1 := sim.blockchain.CurrentHeader().Hash()
-	sim.Commit() // h2
-	sim.Fork(context.Background(), h1)
-	sim.AdjustTime(1 * time.Second)
-	sim.Commit()
-
-	head := sim.blockchain.CurrentHeader()
-	if head.Number == common.Big2 && head.ParentHash != h1 {
-		t.Errorf("failed to build block on fork")
-	}
-}
@@ -32,13 +32,6 @@ import (
 	"github.com/ethereum/go-ethereum/event"
 )
 
-const basefeeWiggleMultiplier = 2
-
-var (
-	errNoEventSignature       = errors.New("no event signature")
-	errEventSignatureMismatch = errors.New("event signature mismatch")
-)
-
 // SignerFn is a signer function callback when a contract requires a method to
 // sign the transaction before submission.
 type SignerFn func(common.Address, *types.Transaction) (*types.Transaction, error)
@@ -261,7 +254,7 @@ func (c *BoundContract) createDynamicTx(opts *TransactOpts, contract *common.Add
 	if gasFeeCap == nil {
 		gasFeeCap = new(big.Int).Add(
 			gasTipCap,
-			new(big.Int).Mul(head.BaseFee, big.NewInt(basefeeWiggleMultiplier)),
+			new(big.Int).Mul(head.BaseFee, big.NewInt(2)),
 		)
 	}
 	if gasFeeCap.Cmp(gasTipCap) < 0 {
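On both sides of this hunk the derived cap works out to `gasTipCap + 2*baseFee`; only the literal gained a name. As a quick, hedged illustration of when that path runs (not part of the changeset; key and chain ID are assumptions), a transactor that leaves the fee fields unset lets `createDynamicTx` fill them in:

```go
package main

import (
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// A throwaway key and the simulated backend's chain ID (1337), assumed for illustration.
	key, _ := crypto.GenerateKey()
	opts, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
	if err != nil {
		log.Fatal(err)
	}

	// Leaving GasFeeCap and GasTipCap nil means a bound-contract call built with these
	// opts goes through the branch above: the tip is suggested by the backend and the
	// cap becomes tip + 2*baseFee.
	fmt.Println("fee cap preset:", opts.GasFeeCap, "tip cap preset:", opts.GasTipCap) // both <nil>
}
```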
@@ -378,8 +371,6 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
 	)
 	if opts.GasPrice != nil {
 		rawTx, err = c.createLegacyTx(opts, contract, input)
-	} else if opts.GasFeeCap != nil && opts.GasTipCap != nil {
-		rawTx, err = c.createDynamicTx(opts, contract, input, nil)
 	} else {
 		// Only query for basefee if gasPrice not specified
 		if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); errHead != nil {
@@ -493,12 +484,8 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter
 
 // UnpackLog unpacks a retrieved log into the provided output structure.
 func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error {
-	// Anonymous events are not supported.
-	if len(log.Topics) == 0 {
-		return errNoEventSignature
-	}
 	if log.Topics[0] != c.abi.Events[event].ID {
-		return errEventSignatureMismatch
+		return fmt.Errorf("event signature mismatch")
 	}
 	if len(log.Data) > 0 {
 		if err := c.abi.UnpackIntoInterface(out, event, log.Data); err != nil {
@@ -516,12 +503,8 @@ func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log)
 
 // UnpackLogIntoMap unpacks a retrieved log into the provided map.
 func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event string, log types.Log) error {
-	// Anonymous events are not supported.
-	if len(log.Topics) == 0 {
-		return errNoEventSignature
-	}
 	if log.Topics[0] != c.abi.Events[event].ID {
-		return errEventSignatureMismatch
+		return fmt.Errorf("event signature mismatch")
 	}
 	if len(log.Data) > 0 {
 		if err := c.abi.UnpackIntoMap(out, event, log.Data); err != nil {
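The practical effect of the added guard is that a log with no topics (an anonymous event) is rejected up front instead of indexing `log.Topics[0]` and panicking. A hedged sketch of what a caller sees on the newer side, mirroring the `TestUnpackAnonymousLogIntoMap` test that appears in the next hunk (the one-event ABI below is assumed for illustration):

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// A minimal one-event ABI, assumed for the sake of the example.
	const abiJSON = `[{"anonymous":false,"inputs":[{"indexed":false,"name":"amount","type":"uint256"}],"name":"received","type":"event"}]`
	parsed, err := abi.JSON(strings.NewReader(abiJSON))
	if err != nil {
		log.Fatal(err)
	}
	bc := bind.NewBoundContract(common.Address{}, parsed, nil, nil, nil)

	// A log with no topics at all, as an anonymous event would produce.
	anon := types.Log{Topics: nil, Data: nil}

	out := map[string]interface{}{}
	err = bc.UnpackLogIntoMap(out, "received", anon)
	fmt.Println(err) // newer side: "no event signature"; the older side would panic on Topics[0]
}
```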
@@ -115,6 +115,7 @@ func (mc *mockPendingCaller) PendingCallContract(ctx context.Context, call ether
 }
 
 func TestPassingBlockNumber(t *testing.T) {
+
 	mc := &mockPendingCaller{
 		mockCaller: &mockCaller{
 			codeAtBytes: []byte{1, 2, 3},
@@ -186,23 +187,6 @@ func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) {
 	unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
 }
 
-func TestUnpackAnonymousLogIntoMap(t *testing.T) {
-	mockLog := newMockLog(nil, common.HexToHash("0x0"))
-
-	abiString := `[{"anonymous":false,"inputs":[{"indexed":false,"name":"amount","type":"uint256"}],"name":"received","type":"event"}]`
-	parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
-	bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)
-
-	var received map[string]interface{}
-	err := bc.UnpackLogIntoMap(received, "received", mockLog)
-	if err == nil {
-		t.Error("unpacking anonymous event is not supported")
-	}
-	if err.Error() != "no event signature" {
-		t.Errorf("expected error 'no event signature', got '%s'", err)
-	}
-}
-
 func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
 	sliceBytes, err := rlp.EncodeToBytes([]string{"name1", "name2", "name3", "name4"})
 	if err != nil {
@ -22,6 +22,7 @@ package bind
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"go/format"
|
"go/format"
|
||||||
"regexp"
|
"regexp"
|
||||||
@ -38,45 +39,10 @@ type Lang int
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
LangGo Lang = iota
|
LangGo Lang = iota
|
||||||
|
LangJava
|
||||||
|
LangObjC
|
||||||
)
|
)
|
||||||
|
|
||||||
func isKeyWord(arg string) bool {
|
|
||||||
switch arg {
|
|
||||||
case "break":
|
|
||||||
case "case":
|
|
||||||
case "chan":
|
|
||||||
case "const":
|
|
||||||
case "continue":
|
|
||||||
case "default":
|
|
||||||
case "defer":
|
|
||||||
case "else":
|
|
||||||
case "fallthrough":
|
|
||||||
case "for":
|
|
||||||
case "func":
|
|
||||||
case "go":
|
|
||||||
case "goto":
|
|
||||||
case "if":
|
|
||||||
case "import":
|
|
||||||
case "interface":
|
|
||||||
case "iota":
|
|
||||||
case "map":
|
|
||||||
case "make":
|
|
||||||
case "new":
|
|
||||||
case "package":
|
|
||||||
case "range":
|
|
||||||
case "return":
|
|
||||||
case "select":
|
|
||||||
case "struct":
|
|
||||||
case "switch":
|
|
||||||
case "type":
|
|
||||||
case "var":
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bind generates a Go wrapper around a contract ABI. This wrapper isn't meant
|
// Bind generates a Go wrapper around a contract ABI. This wrapper isn't meant
|
||||||
// to be used as is in client code, but rather as an intermediate struct which
|
// to be used as is in client code, but rather as an intermediate struct which
|
||||||
// enforces compile time type safety and naming convention opposed to having to
|
// enforces compile time type safety and naming convention opposed to having to
|
||||||
@@ -148,7 +114,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
 			normalized.Inputs = make([]abi.Argument, len(original.Inputs))
 			copy(normalized.Inputs, original.Inputs)
 			for j, input := range normalized.Inputs {
-				if input.Name == "" || isKeyWord(input.Name) {
+				if input.Name == "" {
 					normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
 				}
 				if hasStruct(input.Type) {
@@ -192,7 +158,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
 			normalized.Inputs = make([]abi.Argument, len(original.Inputs))
 			copy(normalized.Inputs, original.Inputs)
 			for j, input := range normalized.Inputs {
-				if input.Name == "" || isKeyWord(input.Name) {
+				if input.Name == "" {
 					normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
 				}
 				// Event is a bit special, we need to define event struct in binding,
@@ -218,6 +184,11 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
 		if evmABI.HasReceive() {
 			receive = &tmplMethod{Original: evmABI.Receive}
 		}
+		// There is no easy way to pass arbitrary java objects to the Go side.
+		if len(structs) > 0 && lang == LangJava {
+			return "", errors.New("java binding for tuple arguments is not supported yet")
+		}
 
 		contracts[types[i]] = &tmplContract{
 			Type:     capitalise(types[i]),
 			InputABI: strings.ReplaceAll(strippedABI, "\"", "\\\""),
@@ -291,6 +262,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
 // programming language types.
 var bindType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{
 	LangGo: bindTypeGo,
+	LangJava: bindTypeJava,
 }
 
 // bindBasicTypeGo converts basic solidity types(except array, slice and tuple) to Go ones.
@@ -333,10 +305,86 @@ func bindTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 	}
 }
 
+// bindBasicTypeJava converts basic solidity types(except array, slice and tuple) to Java ones.
+func bindBasicTypeJava(kind abi.Type) string {
+	switch kind.T {
+	case abi.AddressTy:
+		return "Address"
+	case abi.IntTy, abi.UintTy:
+		// Note that uint and int (without digits) are also matched,
+		// these are size 256, and will translate to BigInt (the default).
+		parts := regexp.MustCompile(`(u)?int([0-9]*)`).FindStringSubmatch(kind.String())
+		if len(parts) != 3 {
+			return kind.String()
+		}
+		// All unsigned integers should be translated to BigInt since gomobile doesn't
+		// support them.
+		if parts[1] == "u" {
+			return "BigInt"
+		}
+
+		namedSize := map[string]string{
+			"8":  "byte",
+			"16": "short",
+			"32": "int",
+			"64": "long",
+		}[parts[2]]
+
+		// default to BigInt
+		if namedSize == "" {
+			namedSize = "BigInt"
+		}
+		return namedSize
+	case abi.FixedBytesTy, abi.BytesTy:
+		return "byte[]"
+	case abi.BoolTy:
+		return "boolean"
+	case abi.StringTy:
+		return "String"
+	case abi.FunctionTy:
+		return "byte[24]"
+	default:
+		return kind.String()
+	}
+}
+
+// pluralizeJavaType explicitly converts multidimensional types to predefined
+// types in go side.
+func pluralizeJavaType(typ string) string {
+	switch typ {
+	case "boolean":
+		return "Bools"
+	case "String":
+		return "Strings"
+	case "Address":
+		return "Addresses"
+	case "byte[]":
+		return "Binaries"
+	case "BigInt":
+		return "BigInts"
+	}
+	return typ + "[]"
+}
+
+// bindTypeJava converts a Solidity type to a Java one. Since there is no clear mapping
+// from all Solidity types to Java ones (e.g. uint17), those that cannot be exactly
+// mapped will use an upscaled type (e.g. BigDecimal).
+func bindTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
+	switch kind.T {
+	case abi.TupleTy:
+		return structs[kind.TupleRawName+kind.String()].Name
+	case abi.ArrayTy, abi.SliceTy:
+		return pluralizeJavaType(bindTypeJava(*kind.Elem, structs))
+	default:
+		return bindBasicTypeJava(kind)
+	}
+}
+
 // bindTopicType is a set of type binders that convert Solidity types to some
 // supported programming language topic types.
 var bindTopicType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{
 	LangGo: bindTopicTypeGo,
+	LangJava: bindTopicTypeJava,
 }
 
 // bindTopicTypeGo converts a Solidity topic type to a Go one. It is almost the same
@@ -356,10 +404,28 @@ func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 	return bound
 }
 
+// bindTopicTypeJava converts a Solidity topic type to a Java one. It is almost the same
+// functionality as for simple types, but dynamic types get converted to hashes.
+func bindTopicTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
+	bound := bindTypeJava(kind, structs)
+
+	// todo(rjl493456442) according solidity documentation, indexed event
+	// parameters that are not value types i.e. arrays and structs are not
+	// stored directly but instead a keccak256-hash of an encoding is stored.
+	//
+	// We only convert strings and bytes to hash, still need to deal with
+	// array(both fixed-size and dynamic-size) and struct.
+	if bound == "String" || bound == "byte[]" {
+		bound = "Hash"
+	}
+	return bound
+}
+
 // bindStructType is a set of type binders that convert Solidity tuple types to some supported
 // programming language struct definition.
 var bindStructType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{
 	LangGo: bindStructTypeGo,
+	LangJava: bindStructTypeJava,
 }
 
 // bindStructTypeGo converts a Solidity tuple type to a Go one and records the mapping
@@ -408,10 +474,74 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 	}
 }
 
+// bindStructTypeJava converts a Solidity tuple type to a Java one and records the mapping
+// in the given map.
+// Notably, this function will resolve and record nested struct recursively.
+func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
+	switch kind.T {
+	case abi.TupleTy:
+		// We compose a raw struct name and a canonical parameter expression
+		// together here. The reason is before solidity v0.5.11, kind.TupleRawName
+		// is empty, so we use canonical parameter expression to distinguish
+		// different struct definition. From the consideration of backward
+		// compatibility, we concat these two together so that if kind.TupleRawName
+		// is not empty, it can have unique id.
+		id := kind.TupleRawName + kind.String()
+		if s, exist := structs[id]; exist {
+			return s.Name
+		}
+		var fields []*tmplField
+		for i, elem := range kind.TupleElems {
+			field := bindStructTypeJava(*elem, structs)
+			fields = append(fields, &tmplField{Type: field, Name: decapitalise(kind.TupleRawNames[i]), SolKind: *elem})
+		}
+		name := kind.TupleRawName
+		if name == "" {
+			name = fmt.Sprintf("Class%d", len(structs))
+		}
+		structs[id] = &tmplStruct{
+			Name:   name,
+			Fields: fields,
+		}
+		return name
+	case abi.ArrayTy, abi.SliceTy:
+		return pluralizeJavaType(bindStructTypeJava(*kind.Elem, structs))
+	default:
+		return bindBasicTypeJava(kind)
+	}
+}
+
 // namedType is a set of functions that transform language specific types to
 // named versions that may be used inside method names.
 var namedType = map[Lang]func(string, abi.Type) string{
 	LangGo: func(string, abi.Type) string { panic("this shouldn't be needed") },
+	LangJava: namedTypeJava,
+}
+
+// namedTypeJava converts some primitive data types to named variants that can
+// be used as parts of method names.
+func namedTypeJava(javaKind string, solKind abi.Type) string {
+	switch javaKind {
+	case "byte[]":
+		return "Binary"
+	case "boolean":
+		return "Bool"
+	default:
+		parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(solKind.String())
+		if len(parts) != 4 {
+			return javaKind
+		}
+		switch parts[2] {
+		case "8", "16", "32", "64":
+			if parts[3] == "" {
+				return capitalise(fmt.Sprintf("%sint%s", parts[1], parts[2]))
+			}
+			return capitalise(fmt.Sprintf("%sint%ss", parts[1], parts[2]))
+
+		default:
+			return javaKind
+		}
+	}
 }
 
 // alias returns an alias of the given string based on the aliasing rules
@@ -427,6 +557,7 @@ func alias(aliases map[string]string, n string) string {
 // conform to target language naming conventions.
 var methodNormalizer = map[Lang]func(string) string{
 	LangGo: abi.ToCamelCase,
+	LangJava: decapitalise,
 }
 
 // capitalise makes a camel-case string which starts with an upper case character.
File diff suppressed because one or more lines are too long
@@ -76,6 +76,7 @@ type tmplStruct struct {
 // programming languages the package can generate to.
 var tmplSource = map[Lang]string{
 	LangGo: tmplSourceGo,
+	LangJava: tmplSourceJava,
 }
 
 // tmplSourceGo is the Go source template that the generated Go contract binding
@@ -109,7 +110,6 @@ var (
 	_ = common.Big1
 	_ = types.BloomLookup
 	_ = event.NewSubscription
-	_ = abi.ConvertType
 )
 
 {{$structs := .Structs}}
@@ -268,11 +268,11 @@ var (
 
 // bind{{.Type}} binds a generic wrapper to an already deployed contract.
 func bind{{.Type}}(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
-	parsed, err := {{.Type}}MetaData.GetAbi()
+	parsed, err := abi.JSON(strings.NewReader({{.Type}}ABI))
 	if err != nil {
 		return nil, err
 	}
-	return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil
+	return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
 }
 
 // Call invokes the (constant) contract method with params as input values and
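The hunk above changes how generated bindings obtain their parsed ABI: the newer template reads it from the contract's MetaData, the older one re-parses the raw ABI string. A sketch of the older shape as plain Go, with a hypothetical StorageABI constant standing in for the templated {{.Type}}ABI:

// Sketch only: not generated code; StorageABI and bindStorage are illustrative names.
package storage

import (
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
)

const StorageABI = `[{"inputs":[],"name":"retrieve","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"}]`

// bindStorage parses the ABI string on every call and wraps it in a BoundContract,
// mirroring the right-hand side of the diff above.
func bindStorage(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
	parsed, err := abi.JSON(strings.NewReader(StorageABI))
	if err != nil {
		return nil, err
	}
	return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
}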
@@ -569,3 +569,140 @@ var (
 {{end}}
 {{end}}
 `
+
+// tmplSourceJava is the Java source template that the generated Java contract binding
+// is based on.
+const tmplSourceJava = `
+// This file is an automatically generated Java binding. Do not modify as any
+// change will likely be lost upon the next re-generation!
+
+package {{.Package}};
+
+import org.ethereum.geth.*;
+import java.util.*;
+
+{{$structs := .Structs}}
+{{range $contract := .Contracts}}
+{{if not .Library}}public {{end}}class {{.Type}} {
+	// ABI is the input ABI used to generate the binding from.
+	public final static String ABI = "{{.InputABI}}";
+	{{if $contract.FuncSigs}}
+		// {{.Type}}FuncSigs maps the 4-byte function signature to its string representation.
+		public final static Map<String, String> {{.Type}}FuncSigs;
+		static {
+			Hashtable<String, String> temp = new Hashtable<String, String>();
+			{{range $strsig, $binsig := .FuncSigs}}temp.put("{{$binsig}}", "{{$strsig}}");
+			{{end}}
+			{{.Type}}FuncSigs = Collections.unmodifiableMap(temp);
+		}
+	{{end}}
+	{{if .InputBin}}
+	// BYTECODE is the compiled bytecode used for deploying new contracts.
+	public final static String BYTECODE = "0x{{.InputBin}}";
+
+	// deploy deploys a new Ethereum contract, binding an instance of {{.Type}} to it.
+	public static {{.Type}} deploy(TransactOpts auth, EthereumClient client{{range .Constructor.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception {
+		Interfaces args = Geth.newInterfaces({{(len .Constructor.Inputs)}});
+		String bytecode = BYTECODE;
+		{{if .Libraries}}
+
+		// "link" contract to dependent libraries by deploying them first.
+		{{range $pattern, $name := .Libraries}}
+		{{capitalise $name}} {{decapitalise $name}}Inst = {{capitalise $name}}.deploy(auth, client);
+		bytecode = bytecode.replace("__${{$pattern}}$__", {{decapitalise $name}}Inst.Address.getHex().substring(2));
+		{{end}}
+		{{end}}
+		{{range $index, $element := .Constructor.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}});
+		{{end}}
+		return new {{.Type}}(Geth.deployContract(auth, ABI, Geth.decodeFromHex(bytecode), client, args));
+	}
+
+	// Internal constructor used by contract deployment.
+	private {{.Type}}(BoundContract deployment) {
+		this.Address  = deployment.getAddress();
+		this.Deployer = deployment.getDeployer();
+		this.Contract = deployment;
+	}
+	{{end}}
+
+	// Ethereum address where this contract is located at.
+	public final Address Address;
+
+	// Ethereum transaction in which this contract was deployed (if known!).
+	public final Transaction Deployer;
+
+	// Contract instance bound to a blockchain address.
+	private final BoundContract Contract;
+
+	// Creates a new instance of {{.Type}}, bound to a specific deployed contract.
+	public {{.Type}}(Address address, EthereumClient client) throws Exception {
+		this(Geth.bindContract(address, ABI, client));
+	}
+
+	{{range .Calls}}
+	{{if gt (len .Normalized.Outputs) 1}}
+	// {{capitalise .Normalized.Name}}Results is the output of a call to {{.Normalized.Name}}.
+	public class {{capitalise .Normalized.Name}}Results {
+		{{range $index, $item := .Normalized.Outputs}}public {{bindtype .Type $structs}} {{if ne .Name ""}}{{.Name}}{{else}}Return{{$index}}{{end}};
		{{end}}
+	}
+	{{end}}
+
+	// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
+	//
+	// Solidity: {{.Original.String}}
+	public {{if gt (len .Normalized.Outputs) 1}}{{capitalise .Normalized.Name}}Results{{else if eq (len .Normalized.Outputs) 0}}void{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}}{{end}}{{end}} {{.Normalized.Name}}(CallOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception {
+		Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}});
+		{{range $index, $item := .Normalized.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}});
+		{{end}}
+
+		Interfaces results = Geth.newInterfaces({{(len .Normalized.Outputs)}});
+		{{range $index, $item := .Normalized.Outputs}}Interface result{{$index}} = Geth.newInterface(); result{{$index}}.setDefault{{namedtype (bindtype .Type $structs) .Type}}(); results.set({{$index}}, result{{$index}});
+		{{end}}
+
+		if (opts == null) {
+			opts = Geth.newCallOpts();
+		}
+		this.Contract.call(opts, results, "{{.Original.Name}}", args);
+		{{if gt (len .Normalized.Outputs) 1}}
+			{{capitalise .Normalized.Name}}Results result = new {{capitalise .Normalized.Name}}Results();
+			{{range $index, $item := .Normalized.Outputs}}result.{{if ne .Name ""}}{{.Name}}{{else}}Return{{$index}}{{end}} = results.get({{$index}}).get{{namedtype (bindtype .Type $structs) .Type}}();
+			{{end}}
+			return result;
+		{{else}}{{range .Normalized.Outputs}}return results.get(0).get{{namedtype (bindtype .Type $structs) .Type}}();{{end}}
+		{{end}}
+	}
+	{{end}}
+
+	{{range .Transacts}}
+	// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
+	//
+	// Solidity: {{.Original.String}}
+	public Transaction {{.Normalized.Name}}(TransactOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception {
+		Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}});
+		{{range $index, $item := .Normalized.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}});
+		{{end}}
+		return this.Contract.transact(opts, "{{.Original.Name}}"	, args);
+	}
+	{{end}}
+
+	{{if .Fallback}}
+	// Fallback is a paid mutator transaction binding the contract fallback function.
+	//
+	// Solidity: {{.Fallback.Original.String}}
+	public Transaction Fallback(TransactOpts opts, byte[] calldata) throws Exception {
+		return this.Contract.rawTransact(opts, calldata);
+	}
+	{{end}}
+
+	{{if .Receive}}
+	// Receive is a paid mutator transaction binding the contract receive function.
+	//
+	// Solidity: {{.Receive.Original.String}}
+	public Transaction Receive(TransactOpts opts) throws Exception {
+		return this.Contract.rawTransact(opts, null);
+	}
+	{{end}}
+}
+{{end}}
+`
@@ -24,14 +24,6 @@ import (
 
 var (
 	errBadBool = errors.New("abi: improperly encoded boolean value")
-	errBadUint8  = errors.New("abi: improperly encoded uint8 value")
-	errBadUint16 = errors.New("abi: improperly encoded uint16 value")
-	errBadUint32 = errors.New("abi: improperly encoded uint32 value")
-	errBadUint64 = errors.New("abi: improperly encoded uint64 value")
-	errBadInt8   = errors.New("abi: improperly encoded int8 value")
-	errBadInt16  = errors.New("abi: improperly encoded int16 value")
-	errBadInt32  = errors.New("abi: improperly encoded int32 value")
-	errBadInt64  = errors.New("abi: improperly encoded int64 value")
 )
 
 // formatSliceString formats the reflection kind with the given slice size
@@ -81,6 +73,7 @@ func typeCheck(t Type, value reflect.Value) error {
 	} else {
 		return nil
 	}
 
 }
 
 // typeErr returns a formatted type casting error.
@@ -161,6 +161,7 @@ func TestEventMultiValueWithArrayUnpack(t *testing.T) {
 }
 
 func TestEventTupleUnpack(t *testing.T) {
 
 	type EventTransfer struct {
 		Value *big.Int
 	}
@@ -25,18 +25,15 @@ import (
 )
 
 // ConvertType converts an interface of a runtime type into a interface of the
-// given type, e.g. turn this code:
-//
+// given type
+// e.g. turn
 //	var fields []reflect.StructField
-//
 //	fields = append(fields, reflect.StructField{
 //		Name: "X",
 //		Type: reflect.TypeOf(new(big.Int)),
 //		Tag:  reflect.StructTag("json:\"" + "x" + "\""),
 //	}
-//
-// into:
-//
+// into
 //	type TupleT struct { X *big.Int }
 func ConvertType(in interface{}, proto interface{}) interface{} {
 	protoType := reflect.TypeOf(proto)
@@ -102,7 +99,7 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
 func set(dst, src reflect.Value) error {
 	dstType, srcType := dst.Type(), src.Type()
 	switch {
-	case dstType.Kind() == reflect.Interface && dst.Elem().IsValid() && (dst.Elem().Type().Kind() == reflect.Ptr || dst.Elem().CanSet()):
+	case dstType.Kind() == reflect.Interface && dst.Elem().IsValid():
 		return set(dst.Elem(), src)
 	case dstType.Kind() == reflect.Ptr && dstType.Elem() != reflect.TypeOf(big.Int{}):
 		return set(dst.Elem(), src)
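A short sketch of the conversion the ConvertType comment above describes, converting an anonymous struct into the named tuple type; the TupleT type and values here are illustrative, not taken from the repository:

// Sketch only: exercises abi.ConvertType on the pattern its doc comment describes.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

type TupleT struct {
	X *big.Int
}

func main() {
	// An anonymous struct with the same field layout as TupleT ...
	in := struct {
		X *big.Int
	}{X: big.NewInt(42)}

	// ... is converted into a TupleT value using the prototype's type.
	out := abi.ConvertType(in, TupleT{}).(TupleT)
	fmt.Println(out.X) // 42
}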
@@ -173,13 +170,11 @@ func setStruct(dst, src reflect.Value) error {
 }
 
 // mapArgNamesToStructFields maps a slice of argument names to struct fields.
-//
-// first round: for each Exportable field that contains a `abi:""` tag and this field name
-// exists in the given argument name list, pair them together.
-//
-// second round: for each argument name that has not been already linked, find what
-// variable is expected to be mapped into, if it exists and has not been used, pair them.
-//
+// first round: for each Exportable field that contains a `abi:""` tag
+// and this field name exists in the given argument name list, pair them together.
+// second round: for each argument name that has not been already linked,
+// find what variable is expected to be mapped into, if it exists and has not been
+// used, pair them.
 // Note this function assumes the given value is a struct value.
 func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) {
 	typ := value.Type()
@@ -225,6 +220,7 @@ func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[stri
 
 	// second round ~~~
 	for _, argName := range argNames {
 
 		structFieldName := ToCamelCase(argName)
 
 		if structFieldName == "" {
@@ -32,7 +32,7 @@ type reflectTest struct {
 
 var reflectTests = []reflectTest{
 	{
-		name: "OneToOneCorrespondence",
+		name: "OneToOneCorrespondance",
 		args: []string{"fieldA"},
 		struc: struct {
 			FieldA int `abi:"fieldA"`
@@ -166,7 +166,7 @@ func ParseSelector(unescapedSelector string) (SelectorMarshaling, error) {
 		return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': unexpected string '%s'", unescapedSelector, rest)
 	}
 
-	// Reassemble the fake ABI and construct the JSON
+	// Reassemble the fake ABI and constuct the JSON
 	fakeArgs, err := assembleArgs(args)
 	if err != nil {
 		return SelectorMarshaling{}, fmt.Errorf("failed to parse selector: %v", err)
@@ -154,9 +154,6 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
 		if varSize == 0 {
 			typ.T = BytesTy
 		} else {
-			if varSize > 32 {
-				return Type{}, fmt.Errorf("unsupported arg type: %s", t)
-			}
 			typ.T = FixedBytesTy
 			typ.Size = varSize
 		}
@@ -366,10 +366,3 @@ func TestGetTypeSize(t *testing.T) {
 		}
 	}
 }
-
-func TestNewFixedBytesOver32(t *testing.T) {
-	_, err := NewType("bytes4096", "", nil)
-	if err == nil {
-		t.Errorf("fixed bytes with size over 32 is not spec'd")
-	}
-}
@@ -19,7 +19,6 @@ package abi
 import (
 	"encoding/binary"
 	"fmt"
-	"math"
 	"math/big"
 	"reflect"
 
@@ -34,72 +33,43 @@ var (
 )
 
 // ReadInteger reads the integer based on its kind and returns the appropriate value.
-func ReadInteger(typ Type, b []byte) (interface{}, error) {
-	ret := new(big.Int).SetBytes(b)
-
+func ReadInteger(typ Type, b []byte) interface{} {
 	if typ.T == UintTy {
-		u64, isu64 := ret.Uint64(), ret.IsUint64()
 		switch typ.Size {
 		case 8:
-			if !isu64 || u64 > math.MaxUint8 {
-				return nil, errBadUint8
-			}
-			return byte(u64), nil
+			return b[len(b)-1]
 		case 16:
-			if !isu64 || u64 > math.MaxUint16 {
-				return nil, errBadUint16
-			}
-			return uint16(u64), nil
+			return binary.BigEndian.Uint16(b[len(b)-2:])
 		case 32:
-			if !isu64 || u64 > math.MaxUint32 {
-				return nil, errBadUint32
-			}
-			return uint32(u64), nil
+			return binary.BigEndian.Uint32(b[len(b)-4:])
 		case 64:
-			if !isu64 {
-				return nil, errBadUint64
-			}
-			return u64, nil
+			return binary.BigEndian.Uint64(b[len(b)-8:])
 		default:
 			// the only case left for unsigned integer is uint256.
-			return ret, nil
+			return new(big.Int).SetBytes(b)
 		}
 	}
+	switch typ.Size {
+	case 8:
+		return int8(b[len(b)-1])
+	case 16:
+		return int16(binary.BigEndian.Uint16(b[len(b)-2:]))
+	case 32:
+		return int32(binary.BigEndian.Uint32(b[len(b)-4:]))
+	case 64:
+		return int64(binary.BigEndian.Uint64(b[len(b)-8:]))
+	default:
+		// the only case left for integer is int256
 		// big.SetBytes can't tell if a number is negative or positive in itself.
 		// On EVM, if the returned number > max int256, it is negative.
 		// A number is > max int256 if the bit at position 255 is set.
+		ret := new(big.Int).SetBytes(b)
 		if ret.Bit(255) == 1 {
 			ret.Add(MaxUint256, new(big.Int).Neg(ret))
 			ret.Add(ret, common.Big1)
 			ret.Neg(ret)
 		}
-	i64, isi64 := ret.Int64(), ret.IsInt64()
-	switch typ.Size {
-	case 8:
-		if !isi64 || i64 < math.MinInt8 || i64 > math.MaxInt8 {
-			return nil, errBadInt8
-		}
-		return int8(i64), nil
-	case 16:
-		if !isi64 || i64 < math.MinInt16 || i64 > math.MaxInt16 {
-			return nil, errBadInt16
-		}
-		return int16(i64), nil
-	case 32:
-		if !isi64 || i64 < math.MinInt32 || i64 > math.MaxInt32 {
-			return nil, errBadInt32
-		}
-		return int32(i64), nil
-	case 64:
-		if !isi64 {
-			return nil, errBadInt64
-		}
-		return i64, nil
-	default:
-		// the only case left for integer is int256
-
-		return ret, nil
+		return ret
 	}
 }
 
@@ -145,6 +115,7 @@ func ReadFixedBytes(t Type, word []byte) (interface{}, error) {
 
 	reflect.Copy(array, reflect.ValueOf(word[0:t.Size]))
 	return array.Interface(), nil
 
 }
 
 // forEachUnpack iteratively unpack elements.
@@ -153,7 +124,7 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
 		return nil, fmt.Errorf("cannot marshal input to array, size is negative (%d)", size)
 	}
 	if start+32*size > len(output) {
-		return nil, fmt.Errorf("abi: cannot marshal into go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size)
+		return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size)
 	}
 
 	// this value will become our slice or our array, depending on the type
@@ -192,9 +163,6 @@ func forTupleUnpack(t Type, output []byte) (interface{}, error) {
 	virtualArgs := 0
 	for index, elem := range t.TupleElems {
 		marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output)
-		if err != nil {
-			return nil, err
-		}
 		if elem.T == ArrayTy && !isDynamicType(*elem) {
 			// If we have a static array, like [3]uint256, these are coded as
 			// just like uint256,uint256,uint256.
@@ -212,6 +180,9 @@ func forTupleUnpack(t Type, output []byte) (interface{}, error) {
 			// coded as just like uint256,bool,uint256
 			virtualArgs += getTypeSize(*elem)/32 - 1
 		}
+		if err != nil {
+			return nil, err
+		}
 		retval.Field(index).Set(reflect.ValueOf(marshalledValue))
 	}
 	return retval.Interface(), nil
@@ -264,7 +235,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
 	case StringTy: // variable arrays are written at the end of the return bytes
 		return string(output[begin : begin+length]), nil
 	case IntTy, UintTy:
-		return ReadInteger(t, returnOutput)
+		return ReadInteger(t, returnOutput), nil
 	case BoolTy:
 		return readBool(returnOutput)
 	case AddressTy:
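The ReadInteger hunk above is the core behavioural difference in this diff: the left-hand variant range-checks the decoded big.Int before narrowing it to a small Go integer and returns a typed error on overflow, while the right-hand variant truncates the word directly. A self-contained sketch of that check for the uint8 case only, not taken from the repository:

// Sketch only: demonstrates the overflow check, not the library function itself.
package main

import (
	"errors"
	"fmt"
	"math"
	"math/big"
)

var errBadUint8 = errors.New("abi: improperly encoded uint8 value")

// readUint8 narrows a big-endian 32-byte ABI word to uint8, rejecting words
// that encode a value outside the uint8 range.
func readUint8(word []byte) (uint8, error) {
	ret := new(big.Int).SetBytes(word)
	if !ret.IsUint64() || ret.Uint64() > math.MaxUint8 {
		return 0, errBadUint8
	}
	return uint8(ret.Uint64()), nil
}

func main() {
	word := make([]byte, 32)
	word[31] = 0xff
	fmt.Println(readUint8(word)) // 255 <nil>

	word[30] = 0x01 // now encodes 511, which overflows uint8
	fmt.Println(readUint8(word)) // 0 abi: improperly encoded uint8 value
}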
@@ -20,7 +20,6 @@ import (
 	"bytes"
 	"encoding/hex"
 	"fmt"
-	"math"
 	"math/big"
 	"reflect"
 	"strconv"
@@ -353,11 +352,6 @@ func TestMethodMultiReturn(t *testing.T) {
 		&[]interface{}{&expected.Int, &expected.String},
 		"",
 		"Can unpack into a slice",
-	}, {
-		&[]interface{}{&bigint, ""},
-		&[]interface{}{&expected.Int, expected.String},
-		"",
-		"Can unpack into a slice without indirection",
 	}, {
 		&[2]interface{}{&bigint, new(string)},
 		&[2]interface{}{&expected.Int, &expected.String},
@@ -944,164 +938,3 @@ func TestOOMMaliciousInput(t *testing.T) {
 		}
 	}
 }
-
-func TestPackAndUnpackIncompatibleNumber(t *testing.T) {
-	var encodeABI Arguments
-	uint256Ty, err := NewType("uint256", "", nil)
-	if err != nil {
-		panic(err)
-	}
-	encodeABI = Arguments{
-		{Type: uint256Ty},
-	}
-
-	maxU64, ok := new(big.Int).SetString(strconv.FormatUint(math.MaxUint64, 10), 10)
-	if !ok {
-		panic("bug")
-	}
-	maxU64Plus1 := new(big.Int).Add(maxU64, big.NewInt(1))
-	cases := []struct {
-		decodeType  string
-		inputValue  *big.Int
-		err         error
-		expectValue interface{}
-	}{
-		{
-			decodeType: "uint8",
-			inputValue: big.NewInt(math.MaxUint8 + 1),
-			err:        errBadUint8,
-		},
-		{
-			decodeType: "uint8",
-			inputValue: big.NewInt(math.MaxUint8),
-			err:        nil,
-			expectValue: uint8(math.MaxUint8),
-		},
-		{
-			decodeType: "uint16",
-			inputValue: big.NewInt(math.MaxUint16 + 1),
-			err:        errBadUint16,
-		},
-		{
-			decodeType: "uint16",
-			inputValue: big.NewInt(math.MaxUint16),
-			err:        nil,
-			expectValue: uint16(math.MaxUint16),
-		},
-		{
-			decodeType: "uint32",
-			inputValue: big.NewInt(math.MaxUint32 + 1),
-			err:        errBadUint32,
-		},
-		{
-			decodeType: "uint32",
-			inputValue: big.NewInt(math.MaxUint32),
-			err:        nil,
-			expectValue: uint32(math.MaxUint32),
-		},
-		{
-			decodeType: "uint64",
-			inputValue: maxU64Plus1,
-			err:        errBadUint64,
-		},
-		{
-			decodeType: "uint64",
-			inputValue: maxU64,
-			err:        nil,
-			expectValue: uint64(math.MaxUint64),
-		},
-		{
-			decodeType: "uint256",
-			inputValue: maxU64Plus1,
-			err:        nil,
-			expectValue: maxU64Plus1,
-		},
-		{
-			decodeType: "int8",
-			inputValue: big.NewInt(math.MaxInt8 + 1),
-			err:        errBadInt8,
-		},
-		{
-			decodeType: "int8",
-			inputValue: big.NewInt(math.MinInt8 - 1),
-			err:        errBadInt8,
-		},
-		{
-			decodeType: "int8",
-			inputValue: big.NewInt(math.MaxInt8),
-			err:        nil,
-			expectValue: int8(math.MaxInt8),
-		},
-		{
-			decodeType: "int16",
-			inputValue: big.NewInt(math.MaxInt16 + 1),
-			err:        errBadInt16,
-		},
-		{
-			decodeType: "int16",
-			inputValue: big.NewInt(math.MinInt16 - 1),
-			err:        errBadInt16,
-		},
-		{
-			decodeType: "int16",
-			inputValue: big.NewInt(math.MaxInt16),
-			err:        nil,
-			expectValue: int16(math.MaxInt16),
-		},
-		{
-			decodeType: "int32",
-			inputValue: big.NewInt(math.MaxInt32 + 1),
-			err:        errBadInt32,
-		},
-		{
-			decodeType: "int32",
-			inputValue: big.NewInt(math.MinInt32 - 1),
-			err:        errBadInt32,
-		},
-		{
-			decodeType: "int32",
-			inputValue: big.NewInt(math.MaxInt32),
-			err:        nil,
-			expectValue: int32(math.MaxInt32),
-		},
-		{
-			decodeType: "int64",
-			inputValue: new(big.Int).Add(big.NewInt(math.MaxInt64), big.NewInt(1)),
-			err:        errBadInt64,
-		},
-		{
-			decodeType: "int64",
-			inputValue: new(big.Int).Sub(big.NewInt(math.MinInt64), big.NewInt(1)),
-			err:        errBadInt64,
-		},
-		{
-			decodeType: "int64",
-			inputValue: big.NewInt(math.MaxInt64),
-			err:        nil,
-			expectValue: int64(math.MaxInt64),
-		},
-	}
-	for i, testCase := range cases {
-		packed, err := encodeABI.Pack(testCase.inputValue)
-		if err != nil {
-			panic(err)
-		}
-		ty, err := NewType(testCase.decodeType, "", nil)
-		if err != nil {
-			panic(err)
-		}
-		decodeABI := Arguments{
-			{Type: ty},
-		}
-		decoded, err := decodeABI.Unpack(packed)
-		if err != testCase.err {
-			t.Fatalf("Expected error %v, actual error %v. case %d", testCase.err, err, i)
-		}
-		if err != nil {
-			continue
-		}
-		if !reflect.DeepEqual(decoded[0], testCase.expectValue) {
-			t.Fatalf("Expected value %v, actual value %v", testCase.expectValue, decoded[0])
-		}
-	}
-}
@@ -27,8 +27,9 @@ import "fmt"
 // and struct definition) name will be converted to camelcase style which
 // may eventually lead to name conflicts.
 //
-// Name conflicts are mostly resolved by adding number suffix. e.g. if the abi contains
-// Methods "send" and "send1", ResolveNameConflict would return "send2" for input "send".
+// Name conflicts are mostly resolved by adding number suffix.
+// e.g. if the abi contains Methods send, send1
+// ResolveNameConflict would return send2 for input send.
 func ResolveNameConflict(rawName string, used func(string) bool) string {
 	name := rawName
 	ok := used(name)
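The suffixing idea the comment above describes, written as a tiny standalone helper rather than the library function itself; the names in the example are made up:

// Sketch only: a minimal "append a number until the name is free" loop.
package main

import "fmt"

func resolve(raw string, used func(string) bool) string {
	name := raw
	for i := 0; used(name); i++ {
		name = fmt.Sprintf("%s%d", raw, i)
	}
	return name
}

func main() {
	taken := map[string]bool{"send": true, "send0": true, "send1": true}
	fmt.Println(resolve("send", func(s string) bool { return taken[s] })) // send2
}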
@@ -177,7 +177,6 @@ type Backend interface {
 // safely used to calculate a signature from.
 //
 // The hash is calculated as
-//
 //	keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
 //
 // This gives context to the signed message and prevents signing of transactions.
@@ -190,7 +189,6 @@ func TextHash(data []byte) []byte {
 // safely used to calculate a signature from.
 //
 // The hash is calculated as
-//
 //	keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
 //
 // This gives context to the signed message and prevents signing of transactions.
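A quick sketch of computing the prefixed hash documented above for a short message; the input string is arbitrary and the printed value is not reproduced here:

// Sketch only: exercises accounts.TextHash on an example message.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
)

func main() {
	msg := []byte("hello")
	// TextHash hashes keccak256("\x19Ethereum Signed Message:\n5hello").
	fmt.Printf("0x%x\n", accounts.TextHash(msg))
}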
@@ -27,7 +27,7 @@ import (
 	"sync"
 	"time"
 
-	mapset "github.com/deckarep/golang-set/v2"
+	mapset "github.com/deckarep/golang-set"
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
@@ -79,7 +79,7 @@ func newAccountCache(keydir string) (*accountCache, chan struct{}) {
 		keydir: keydir,
 		byAddr: make(map[common.Address][]accounts.Account),
 		notify: make(chan struct{}, 1),
-		fileC:  fileCache{all: mapset.NewThreadUnsafeSet[string]()},
+		fileC:  fileCache{all: mapset.NewThreadUnsafeSet()},
 	}
 	ac.watcher = newWatcher(ac)
 	return ac, ac.notify
@@ -146,14 +146,6 @@ func (ac *accountCache) deleteByFile(path string) {
 		}
 	}
 }
 
-// watcherStarted returns true if the watcher loop started running (even if it
-// has since also ended).
-func (ac *accountCache) watcherStarted() bool {
-	ac.mu.Lock()
-	defer ac.mu.Unlock()
-	return ac.watcher.running || ac.watcher.runEnded
-}
-
 func removeAccount(slice []accounts.Account, elem accounts.Account) []accounts.Account {
 	for i := range slice {
 		if slice[i] == elem {
@@ -283,15 +275,16 @@ func (ac *accountCache) scanAccounts() error {
 	// Process all the file diffs
 	start := time.Now()
 
-	for _, path := range creates.ToSlice() {
-		if a := readAccount(path); a != nil {
+	for _, p := range creates.ToSlice() {
+		if a := readAccount(p.(string)); a != nil {
 			ac.add(*a)
 		}
 	}
-	for _, path := range deletes.ToSlice() {
-		ac.deleteByFile(path)
+	for _, p := range deletes.ToSlice() {
+		ac.deleteByFile(p.(string))
 	}
-	for _, path := range updates.ToSlice() {
+	for _, p := range updates.ToSlice() {
+		path := p.(string)
 		ac.deleteByFile(path)
 		if a := readAccount(path); a != nil {
 			ac.add(*a)
@@ -50,38 +50,6 @@ var (
 	}
 )
 
-// waitWatcherStarts waits up to 1s for the keystore watcher to start.
-func waitWatcherStart(ks *KeyStore) bool {
-	// On systems where file watch is not supported, just return "ok".
-	if !ks.cache.watcher.enabled() {
-		return true
-	}
-	// The watcher should start, and then exit.
-	for t0 := time.Now(); time.Since(t0) < 1*time.Second; time.Sleep(100 * time.Millisecond) {
-		if ks.cache.watcherStarted() {
-			return true
-		}
-	}
-	return false
-}
-
-func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
-	var list []accounts.Account
-	for t0 := time.Now(); time.Since(t0) < 5*time.Second; time.Sleep(200 * time.Millisecond) {
-		list = ks.Accounts()
-		if reflect.DeepEqual(list, wantAccounts) {
-			// ks should have also received change notifications
-			select {
-			case <-ks.changes:
-			default:
-				return fmt.Errorf("wasn't notified of new accounts")
-			}
-			return nil
-		}
-	}
-	return fmt.Errorf("\ngot %v\nwant %v", list, wantAccounts)
-}
-
 func TestWatchNewFile(t *testing.T) {
 	t.Parallel()
 
@@ -89,9 +57,8 @@ func TestWatchNewFile(t *testing.T) {
 
 	// Ensure the watcher is started before adding any files.
 	ks.Accounts()
-	if !waitWatcherStart(ks) {
-		t.Fatal("keystore watcher didn't start in time")
-	}
+	time.Sleep(1000 * time.Millisecond)
 	// Move in the files.
 	wantAccounts := make([]accounts.Account, len(cachetestAccounts))
 	for i := range cachetestAccounts {
@@ -105,24 +72,37 @@ func TestWatchNewFile(t *testing.T) {
 	}
 
 	// ks should see the accounts.
-	if err := waitForAccounts(wantAccounts, ks); err != nil {
-		t.Error(err)
+	var list []accounts.Account
+	for d := 200 * time.Millisecond; d < 5*time.Second; d *= 2 {
+		list = ks.Accounts()
+		if reflect.DeepEqual(list, wantAccounts) {
+			// ks should have also received change notifications
+			select {
+			case <-ks.changes:
+			default:
+				t.Fatalf("wasn't notified of new accounts")
+			}
+			return
+		}
+		time.Sleep(d)
 	}
+	t.Errorf("got %s, want %s", spew.Sdump(list), spew.Sdump(wantAccounts))
 }
 
 func TestWatchNoDir(t *testing.T) {
 	t.Parallel()
 
 	// Create ks but not the directory that it watches.
+	rand.Seed(time.Now().UnixNano())
 	dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watchnodir-test-%d-%d", os.Getpid(), rand.Int()))
 	ks := NewKeyStore(dir, LightScryptN, LightScryptP)
 
 	list := ks.Accounts()
 	if len(list) > 0 {
 		t.Error("initial account list not empty:", list)
 	}
-	// The watcher should start, and then exit.
-	if !waitWatcherStart(ks) {
-		t.Fatal("keystore watcher didn't start in time")
-	}
+	time.Sleep(100 * time.Millisecond)
 	// Create the directory and copy a key file into it.
 	os.MkdirAll(dir, 0700)
 	defer os.RemoveAll(dir)
@@ -315,12 +295,31 @@ func TestCacheFind(t *testing.T) {
 		}
 	}
 }
 
+func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
+	var list []accounts.Account
+	for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 {
+		list = ks.Accounts()
+		if reflect.DeepEqual(list, wantAccounts) {
+			// ks should have also received change notifications
+			select {
+			case <-ks.changes:
+			default:
+				return fmt.Errorf("wasn't notified of new accounts")
+			}
+			return nil
+		}
+		time.Sleep(d)
+	}
+	return fmt.Errorf("\ngot %v\nwant %v", list, wantAccounts)
+}
+
 // TestUpdatedKeyfileContents tests that updating the contents of a keystore file
 // is noticed by the watcher, and the account cache is updated accordingly
 func TestUpdatedKeyfileContents(t *testing.T) {
 	t.Parallel()
 
-	// Create a temporary keystore to test with
+	// Create a temporary kesytore to test with
+	rand.Seed(time.Now().UnixNano())
 	dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int()))
 	ks := NewKeyStore(dir, LightScryptN, LightScryptP)
 
@@ -328,9 +327,8 @@ func TestUpdatedKeyfileContents(t *testing.T) {
 	if len(list) > 0 {
 		t.Error("initial account list not empty:", list)
 	}
-	if !waitWatcherStart(ks) {
-		t.Fatal("keystore watcher didn't start in time")
-	}
+	time.Sleep(100 * time.Millisecond)
 	// Create the directory and copy a key file into it.
 	os.MkdirAll(dir, 0700)
 	defer os.RemoveAll(dir)
@@ -348,8 +346,9 @@ func TestUpdatedKeyfileContents(t *testing.T) {
 		t.Error(err)
 		return
 	}
 
 	// needed so that modTime of `file` is different to its current value after forceCopyFile
-	time.Sleep(time.Second)
+	time.Sleep(1000 * time.Millisecond)
 
 	// Now replace file contents
 	if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
@@ -365,7 +364,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
 	}
 
 	// needed so that modTime of `file` is different to its current value after forceCopyFile
-	time.Sleep(time.Second)
+	time.Sleep(1000 * time.Millisecond)
 
 	// Now replace file contents again
 	if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
@@ -381,7 +380,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
 	}
 
 	// needed so that modTime of `file` is different to its current value after os.WriteFile
-	time.Sleep(time.Second)
+	time.Sleep(1000 * time.Millisecond)
 
 	// Now replace file contents with crap
 	if err := os.WriteFile(file, []byte("foo"), 0600); err != nil {
@@ -23,23 +23,23 @@ import (
 	"sync"
 	"time"
 
-	mapset "github.com/deckarep/golang-set/v2"
+	mapset "github.com/deckarep/golang-set"
 	"github.com/ethereum/go-ethereum/log"
 )
 
 // fileCache is a cache of files seen during scan of keystore.
 type fileCache struct {
-	all     mapset.Set[string] // Set of all files from the keystore folder
+	all     mapset.Set // Set of all files from the keystore folder
 	lastMod time.Time // Last time instance when a file was modified
 	mu      sync.Mutex
 }
 
 // scan performs a new scan on the given directory, compares against the already
 // cached filenames, and returns file sets: creates, deletes, updates.
-func (fc *fileCache) scan(keyDir string) (mapset.Set[string], mapset.Set[string], mapset.Set[string], error) {
+func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, error) {
 	t0 := time.Now()
 
-	// List all the files from the keystore folder
+	// List all the failes from the keystore folder
 	files, err := os.ReadDir(keyDir)
 	if err != nil {
 		return nil, nil, nil, err
@@ -50,8 +50,8 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set[string], mapset.Set[string]
 	defer fc.mu.Unlock()
 
 	// Iterate all the files and gather their metadata
-	all := mapset.NewThreadUnsafeSet[string]()
-	mods := mapset.NewThreadUnsafeSet[string]()
+	all := mapset.NewThreadUnsafeSet()
+	mods := mapset.NewThreadUnsafeSet()
 
 	var newLastMod time.Time
 	for _, fi := range files {
@@ -61,7 +61,7 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set[string], mapset.Set[string]
 			log.Trace("Ignoring file on account scan", "path", path)
 			continue
 		}
-		// Gather the set of all and freshly modified files
+		// Gather the set of all and fresly modified files
 		all.Add(path)
 
 		info, err := fi.Info()
|
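For context on the import swap in this hunk: github.com/deckarep/golang-set/v2 is generic, while the pre-v2 module stores untyped values, which is why the side without the [string] type parameters uses the older import path. A small sketch under that assumption:

package main

import mapset "github.com/deckarep/golang-set/v2"

func main() {
	// v2 sets carry their element type, so only strings can be added here.
	files := mapset.NewThreadUnsafeSet[string]()
	files.Add("keyfile-a.json") // hypothetical file name
	_ = files.Contains("keyfile-a.json")
	// The pre-v2 mapset.NewThreadUnsafeSet() returns an interface{}-based set,
	// so callers must type-assert when reading values back out.
}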
@@ -498,14 +498,6 @@ func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (account
 	return a, nil
 }
 
-// isUpdating returns whether the event notification loop is running.
-// This method is mainly meant for tests.
-func (ks *KeyStore) isUpdating() bool {
-	ks.mu.RLock()
-	defer ks.mu.RUnlock()
-	return ks.updating
-}
-
 // zeroKey zeroes a private key in memory.
 func zeroKey(k *ecdsa.PrivateKey) {
 	b := k.D.Bits()
@@ -113,7 +113,6 @@ func TestSignWithPassphrase(t *testing.T) {
 }
 
 func TestTimedUnlock(t *testing.T) {
-	t.Parallel()
 	_, ks := tmpKeyStore(t, true)
 
 	pass := "foo"
@@ -148,7 +147,6 @@ func TestTimedUnlock(t *testing.T) {
 }
 
 func TestOverrideUnlock(t *testing.T) {
-	t.Parallel()
 	_, ks := tmpKeyStore(t, false)
 
 	pass := "foo"
@@ -189,7 +187,6 @@ func TestOverrideUnlock(t *testing.T) {
 
 // This test should fail under -race if signing races the expiration goroutine.
 func TestSignRace(t *testing.T) {
-	t.Parallel()
 	_, ks := tmpKeyStore(t, false)
 
 	// Create a test account.
@@ -214,33 +211,19 @@ func TestSignRace(t *testing.T) {
 		t.Errorf("Account did not lock within the timeout")
 	}
 
-// waitForKsUpdating waits until the updating-status of the ks reaches the
-// desired wantStatus.
-// It waits for a maximum time of maxTime, and returns false if it does not
-// finish in time
-func waitForKsUpdating(t *testing.T, ks *KeyStore, wantStatus bool, maxTime time.Duration) bool {
-	t.Helper()
-	// Wait max 250 ms, then return false
-	for t0 := time.Now(); time.Since(t0) < maxTime; {
-		if ks.isUpdating() == wantStatus {
-			return true
-		}
-		time.Sleep(25 * time.Millisecond)
-	}
-	return false
-}
-
 // Tests that the wallet notifier loop starts and stops correctly based on the
 // addition and removal of wallet event subscriptions.
 func TestWalletNotifierLifecycle(t *testing.T) {
-	t.Parallel()
-	// Create a temporary keystore to test with
+	// Create a temporary kesytore to test with
 	_, ks := tmpKeyStore(t, false)
 
 	// Ensure that the notification updater is not running yet
 	time.Sleep(250 * time.Millisecond)
+	ks.mu.RLock()
+	updating := ks.updating
+	ks.mu.RUnlock()
 
-	if ks.isUpdating() {
+	if updating {
 		t.Errorf("wallet notifier running without subscribers")
 	}
 	// Subscribe to the wallet feed and ensure the updater boots up
@@ -250,26 +233,38 @@ func TestWalletNotifierLifecycle(t *testing.T) {
 	for i := 0; i < len(subs); i++ {
 		// Create a new subscription
 		subs[i] = ks.Subscribe(updates)
-		if !waitForKsUpdating(t, ks, true, 250*time.Millisecond) {
+
+		// Ensure the notifier comes online
+		time.Sleep(250 * time.Millisecond)
+		ks.mu.RLock()
+		updating = ks.updating
+		ks.mu.RUnlock()
+
+		if !updating {
 			t.Errorf("sub %d: wallet notifier not running after subscription", i)
 		}
 	}
-	// Close all but one sub
-	for i := 0; i < len(subs)-1; i++ {
+	// Unsubscribe and ensure the updater terminates eventually
+	for i := 0; i < len(subs); i++ {
 		// Close an existing subscription
 		subs[i].Unsubscribe()
-	}
-	// Check that it is still running
-	time.Sleep(250 * time.Millisecond)
 
-	if !ks.isUpdating() {
-		t.Fatal("event notifier stopped prematurely")
+		// Ensure the notifier shuts down at and only at the last close
+		for k := 0; k < int(walletRefreshCycle/(250*time.Millisecond))+2; k++ {
+			ks.mu.RLock()
+			updating = ks.updating
+			ks.mu.RUnlock()
+
+			if i < len(subs)-1 && !updating {
+				t.Fatalf("sub %d: event notifier stopped prematurely", i)
+			}
+			if i == len(subs)-1 && !updating {
+				return
+			}
+			time.Sleep(250 * time.Millisecond)
+		}
 	}
-	// Unsubscribe the last one and ensure the updater terminates eventually.
-	subs[len(subs)-1].Unsubscribe()
-	if !waitForKsUpdating(t, ks, false, 4*time.Second) {
 	t.Errorf("wallet notifier didn't terminate after unsubscribe")
-	}
 }
 
 type walletEvent struct {
@@ -382,6 +377,7 @@ func TestImportExport(t *testing.T) {
 	if _, err = ks2.Import(json, "new", "new"); err == nil {
 		t.Errorf("importing a key twice succeeded")
 	}
+
 }
 
 // TestImportRace tests the keystore on races.
@@ -406,6 +402,7 @@ func TestImportRace(t *testing.T) {
 			if _, err := ks2.Import(json, "new", "new"); err != nil {
 				atomic.AddUint32(&atom, 1)
 			}
+
 		}()
 	}
 	wg.Wait()
@@ -138,6 +138,7 @@ func (ks keyStorePassphrase) JoinPath(filename string) string {
 
 // Encryptdata encrypts the data given as 'data' with the password 'auth'.
 func EncryptDataV3(data, auth []byte, scryptN, scryptP int) (CryptoJSON, error) {
+
 	salt := make([]byte, 32)
 	if _, err := io.ReadFull(rand.Reader, salt); err != nil {
 		panic("reading from crypto/rand failed: " + err.Error())
@@ -340,6 +341,7 @@ func getKDFKey(cryptoJSON CryptoJSON, auth string) ([]byte, error) {
 		r := ensureInt(cryptoJSON.KDFParams["r"])
 		p := ensureInt(cryptoJSON.KDFParams["p"])
 		return scrypt.Key(authArray, salt, n, r, p, dkLen)
+
 	} else if cryptoJSON.KDF == "pbkdf2" {
 		c := ensureInt(cryptoJSON.KDFParams["c"])
 		prf := cryptoJSON.KDFParams["prf"].(string)
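The scrypt branch of getKDFKey above is a single call into golang.org/x/crypto/scrypt with parameters read from the key file. A stand-alone sketch of that call (the parameter values below are illustrative, not the keystore's configured defaults):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/scrypt"
)

func main() {
	salt := make([]byte, 32)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}
	// n, r, p and the key length normally come from CryptoJSON.KDFParams.
	key, err := scrypt.Key([]byte("password"), salt, 1<<18, 8, 1, 32)
	if err != nil {
		panic(err)
	}
	fmt.Printf("derived %d-byte key\n", len(key))
}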
@@ -23,27 +23,25 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/fsnotify/fsnotify"
+	"github.com/rjeczalik/notify"
 )
 
 type watcher struct {
 	ac *accountCache
-	running bool // set to true when runloop begins
-	runEnded bool // set to true when runloop ends
-	starting bool // set to true prior to runloop starting
+	starting bool
+	running bool
+	ev chan notify.EventInfo
 	quit chan struct{}
 }
 
 func newWatcher(ac *accountCache) *watcher {
 	return &watcher{
 		ac: ac,
+		ev: make(chan notify.EventInfo, 10),
 		quit: make(chan struct{}),
 	}
 }
 
-// enabled returns false on systems not supported.
-func (*watcher) enabled() bool { return true }
-
 // starts the watcher loop in the background.
 // Start a watcher in the background if that's not already in progress.
 // The caller must hold w.ac.mu.
@@ -64,24 +62,16 @@ func (w *watcher) loop() {
 		w.ac.mu.Lock()
 		w.running = false
 		w.starting = false
-		w.runEnded = true
 		w.ac.mu.Unlock()
 	}()
 	logger := log.New("path", w.ac.keydir)
 
-	// Create new watcher.
-	watcher, err := fsnotify.NewWatcher()
-	if err != nil {
-		log.Error("Failed to start filesystem watcher", "err", err)
+	if err := notify.Watch(w.ac.keydir, w.ev, notify.All); err != nil {
+		logger.Trace("Failed to watch keystore folder", "err", err)
 		return
 	}
-	defer watcher.Close()
-	if err := watcher.Add(w.ac.keydir); err != nil {
-		logger.Warn("Failed to watch keystore folder", "err", err)
-		return
-	}
-
-	logger.Trace("Started watching keystore folder", "folder", w.ac.keydir)
+	defer notify.Stop(w.ev)
+	logger.Trace("Started watching keystore folder")
 	defer logger.Trace("Stopped watching keystore folder")
 
 	w.ac.mu.Lock()
@@ -105,24 +95,12 @@ func (w *watcher) loop() {
 		select {
 		case <-w.quit:
 			return
-		case _, ok := <-watcher.Events:
-			if !ok {
-				return
-			}
+		case <-w.ev:
 			// Trigger the scan (with delay), if not already triggered
 			if !rescanTriggered {
 				debounce.Reset(debounceDuration)
 				rescanTriggered = true
 			}
-			// The fsnotify library does provide more granular event-info, it
-			// would be possible to refresh individual affected files instead
-			// of scheduling a full rescan. For most cases though, the
-			// full rescan is quick and obviously simplest.
-		case err, ok := <-watcher.Errors:
-			if !ok {
-				return
-			}
-			log.Info("Filsystem watcher error", "err", err)
		case <-debounce.C:
 			w.ac.scanAccounts()
 			rescanTriggered = false
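The removed (-) lines in this file are the fsnotify-based loop; the pattern it implements — coalescing a burst of filesystem events into one delayed rescan — can be sketched on its own as below. The directory path and delay are made up for illustration.

package main

import (
	"log"
	"time"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	if err := w.Add("/tmp/keystore-demo"); err != nil { // hypothetical folder
		log.Fatal(err)
	}
	const debounceDuration = 500 * time.Millisecond
	debounce := time.NewTimer(0)
	if !debounce.Stop() {
		<-debounce.C
	}
	rescanTriggered := false
	for {
		select {
		case _, ok := <-w.Events:
			if !ok {
				return
			}
			if !rescanTriggered { // arm the timer once per burst of events
				debounce.Reset(debounceDuration)
				rescanTriggered = true
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watcher error:", err)
		case <-debounce.C:
			log.Println("rescan the folder here")
			rescanTriggered = false
		}
	}
}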
@@ -22,14 +22,8 @@
 
 package keystore
 
-type watcher struct {
-	running bool
-	runEnded bool
-}
+type watcher struct{ running bool }
 
 func newWatcher(*accountCache) *watcher { return new(watcher) }
 func (*watcher) start() {}
 func (*watcher) close() {}
-
-// enabled returns false on systems not supported.
-func (*watcher) enabled() bool { return false }
@@ -257,7 +257,7 @@ func merge(slice []Wallet, wallets ...Wallet) []Wallet {
 	return slice
 }
 
-// drop is the counterpart of merge, which looks up wallets from within the sorted
+// drop is the couterpart of merge, which looks up wallets from within the sorted
 // cache and removes the ones specified.
 func drop(slice []Wallet, wallets ...Wallet) []Wallet {
 	for _, wallet := range wallets {
@@ -178,7 +178,7 @@ func (s *SecureChannelSession) mutuallyAuthenticate() error {
 		return err
 	}
 	if response.Sw1 != 0x90 || response.Sw2 != 0x00 {
-		return fmt.Errorf("got unexpected response from MUTUALLY_AUTHENTICATE: %#x%x", response.Sw1, response.Sw2)
+		return fmt.Errorf("got unexpected response from MUTUALLY_AUTHENTICATE: 0x%x%x", response.Sw1, response.Sw2)
 	}
 
 	if len(response.Data) != scSecretLength {
@@ -261,7 +261,7 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
 	rapdu.deserialize(plainData)
 
 	if rapdu.Sw1 != sw1Ok {
-		return nil, fmt.Errorf("unexpected response status Cla=%#x, Ins=%#x, Sw=%#x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
+		return nil, fmt.Errorf("unexpected response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
 	}
 
 	return rapdu, nil
@@ -99,8 +99,8 @@ const (
 	P1DeriveKeyFromCurrent = uint8(0x10)
 	statusP1WalletStatus = uint8(0x00)
 	statusP1Path = uint8(0x01)
-	signP1PrecomputedHash = uint8(0x00)
-	signP2OnlyBlock = uint8(0x00)
+	signP1PrecomputedHash = uint8(0x01)
+	signP2OnlyBlock = uint8(0x81)
 	exportP1Any = uint8(0x00)
 	exportP2Pubkey = uint8(0x01)
 )
@@ -167,7 +167,7 @@ func transmit(card *pcsc.Card, command *commandAPDU) (*responseAPDU, error) {
 	}
 
 	if response.Sw1 != sw1Ok {
-		return nil, fmt.Errorf("unexpected insecure response status Cla=%#x, Ins=%#x, Sw=%#x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
+		return nil, fmt.Errorf("unexpected insecure response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
 	}
 
 	return response, nil
@@ -879,7 +879,6 @@ func (s *Session) walletStatus() (*walletStatus, error) {
 }
 
 // derivationPath fetches the wallet's current derivation path from the card.
-//
 //lint:ignore U1000 needs to be added to the console interface
 func (s *Session) derivationPath() (accounts.DerivationPath, error) {
 	response, err := s.Channel.transmitEncrypted(claSCWallet, insStatus, statusP1Path, 0, nil)
@@ -995,7 +994,6 @@ func (s *Session) derive(path accounts.DerivationPath) (accounts.Account, error)
 }
 
 // keyExport contains information on an exported keypair.
-//
 //lint:ignore U1000 needs to be added to the console interface
 type keyExport struct {
 	PublicKey []byte `asn1:"tag:0"`
@@ -1003,7 +1001,6 @@ type keyExport struct {
 }
 
 // publicKey returns the public key for the current derivation path.
-//
 //lint:ignore U1000 needs to be added to the console interface
 func (s *Session) publicKey() ([]byte, error) {
 	response, err := s.Channel.transmitEncrypted(claSCWallet, insExportKey, exportP1Any, exportP2Pubkey, nil)
@@ -95,6 +95,7 @@ func (u *URL) UnmarshalJSON(input []byte) error {
 // -1 if x < y
 //  0 if x == y
 // +1 if x > y
+//
 func (u URL) Cmp(url URL) int {
 	if u.Scheme == url.Scheme {
 		return strings.Compare(u.Path, url.Path)
@@ -71,28 +71,18 @@ type Hub struct {
 // NewLedgerHub creates a new hardware wallet manager for Ledger devices.
 func NewLedgerHub() (*Hub, error) {
 	return newHub(LedgerScheme, 0x2c97, []uint16{
-
-		// Device definitions taken from
-		// https://github.com/LedgerHQ/ledger-live/blob/38012bc8899e0f07149ea9cfe7e64b2c146bc92b/libs/ledgerjs/packages/devices/src/index.ts
-
 		// Original product IDs
 		0x0000, /* Ledger Blue */
 		0x0001, /* Ledger Nano S */
 		0x0004, /* Ledger Nano X */
-		0x0005, /* Ledger Nano S Plus */
-		0x0006, /* Ledger Nano FTS */
 
+		// Upcoming product IDs: https://www.ledger.com/2019/05/17/windows-10-update-sunsetting-u2f-tunnel-transport-for-ledger-devices/
 		0x0015, /* HID + U2F + WebUSB Ledger Blue */
 		0x1015, /* HID + U2F + WebUSB Ledger Nano S */
 		0x4015, /* HID + U2F + WebUSB Ledger Nano X */
-		0x5015, /* HID + U2F + WebUSB Ledger Nano S Plus */
-		0x6015, /* HID + U2F + WebUSB Ledger Nano FTS */
 
 		0x0011, /* HID + WebUSB Ledger Blue */
 		0x1011, /* HID + WebUSB Ledger Nano S */
 		0x4011, /* HID + WebUSB Ledger Nano X */
-		0x5011, /* HID + WebUSB Ledger Nano S Plus */
-		0x6011, /* HID + WebUSB Ledger Nano FTS */
 	}, 0xffa0, 0, newLedgerDriver)
 }
 
@@ -59,8 +59,6 @@ const (
 	ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing
 	ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing
 	ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address
-
-	ledgerEip155Size int = 3 // Size of the EIP-155 chain_id,r,s in unsigned transactions
 )
 
 // errLedgerReplyInvalidHeader is the error message returned by a Ledger data exchange
@@ -349,15 +347,9 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
 		op    = ledgerP1InitTransactionData
 		reply []byte
 	)
 
-	// Chunk size selection to mitigate an underlying RLP deserialization issue on the ledger app.
-	// https://github.com/LedgerHQ/app-ethereum/issues/409
-	chunk := 255
-	for ; len(payload)%chunk <= ledgerEip155Size; chunk-- {
-	}
-
 	for len(payload) > 0 {
 		// Calculate the size of the next data chunk
+		chunk := 255
 		if chunk > len(payload) {
 			chunk = len(payload)
 		}
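The chunk-selection loop on the "-" side of the hunk above shrinks the APDU chunk size until the final fragment of the RLP payload is longer than the 3-byte EIP-155 suffix, so that suffix never travels alone. A small hedged sketch of that arithmetic (the payload length is made up):

package main

import "fmt"

func main() {
	const ledgerEip155Size = 3   // chain_id, r, s placeholders in the unsigned tx
	payload := make([]byte, 513) // illustrative payload length

	chunk := 255 // APDU data maximum
	for ; len(payload)%chunk <= ledgerEip155Size; chunk-- {
	}
	// With 513 bytes this settles on 254-byte chunks and a 5-byte final fragment.
	fmt.Println("chunk size:", chunk, "final fragment:", len(payload)%chunk)
}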
@@ -415,6 +407,8 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
 // domain hash | 32 bytes
 // message hash | 32 bytes
 //
+//
+//
 // And the output data is:
 //
 // Description | Length
@@ -84,14 +84,14 @@ func (w *trezorDriver) Status() (string, error) {
 
 // Open implements usbwallet.driver, attempting to initialize the connection to
 // the Trezor hardware wallet. Initializing the Trezor is a two or three phase operation:
-// - The first phase is to initialize the connection and read the wallet's
+// * The first phase is to initialize the connection and read the wallet's
 //   features. This phase is invoked if the provided passphrase is empty. The
 //   device will display the pinpad as a result and will return an appropriate
 //   error to notify the user that a second open phase is needed.
-// - The second phase is to unlock access to the Trezor, which is done by the
+// * The second phase is to unlock access to the Trezor, which is done by the
 //   user actually providing a passphrase mapping a keyboard keypad to the pin
 //   number of the user (shuffled according to the pinpad displayed).
-// - If needed the device will ask for passphrase which will require calling
+// * If needed the device will ask for passphrase which will require calling
 //   open again with the actual passphrase (3rd phase)
 func (w *trezorDriver) Open(device io.ReadWriter, passphrase string) error {
 	w.device, w.failure = device, nil
@@ -196,10 +196,10 @@ func (w *trezorDriver) trezorDerive(derivationPath []uint32) (common.Address, er
 	if _, err := w.trezorExchange(&trezor.EthereumGetAddress{AddressN: derivationPath}, address); err != nil {
 		return common.Address{}, err
 	}
-	if addr := address.GetAddressBin(); len(addr) > 0 { // Older firmwares use binary formats
+	if addr := address.GetAddressBin(); len(addr) > 0 { // Older firmwares use binary fomats
 		return common.BytesToAddress(addr), nil
 	}
-	if addr := address.GetAddressHex(); len(addr) > 0 { // Newer firmwares use hexadecimal formats
+	if addr := address.GetAddressHex(); len(addr) > 0 { // Newer firmwares use hexadecimal fomats
 		return common.HexToAddress(addr), nil
 	}
 	return common.Address{}, errors.New("missing derived address")
@@ -94,7 +94,7 @@ func (Failure_FailureType) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptor_aaf30d059fdbc38d, []int{1, 0}
 }
 
-// *
+//*
 // Type of button request
 type ButtonRequest_ButtonRequestType int32
 
@@ -175,7 +175,7 @@ func (ButtonRequest_ButtonRequestType) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptor_aaf30d059fdbc38d, []int{2, 0}
 }
 
-// *
+//*
 // Type of PIN request
 type PinMatrixRequest_PinMatrixRequestType int32
 
@@ -220,7 +220,7 @@ func (PinMatrixRequest_PinMatrixRequestType) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptor_aaf30d059fdbc38d, []int{4, 0}
 }
 
-// *
+//*
 // Response: Success of the previous request
 // @end
 type Success struct {
@@ -262,7 +262,7 @@ func (m *Success) GetMessage() string {
 	return ""
 }
 
-// *
+//*
 // Response: Failure of the previous request
 // @end
 type Failure struct {
@@ -312,7 +312,7 @@ func (m *Failure) GetMessage() string {
 	return ""
 }
 
-// *
+//*
 // Response: Device is waiting for HW button press.
 // @auxstart
 // @next ButtonAck
@@ -363,7 +363,7 @@ func (m *ButtonRequest) GetData() string {
 	return ""
 }
 
-// *
+//*
 // Request: Computer agrees to wait for HW button press
 // @auxend
 type ButtonAck struct {
@@ -397,7 +397,7 @@ func (m *ButtonAck) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_ButtonAck proto.InternalMessageInfo
 
-// *
+//*
 // Response: Device is asking computer to show PIN matrix and awaits PIN encoded using this matrix scheme
 // @auxstart
 // @next PinMatrixAck
@@ -440,7 +440,7 @@ func (m *PinMatrixRequest) GetType() PinMatrixRequest_PinMatrixRequestType {
 	return PinMatrixRequest_PinMatrixRequestType_Current
 }
 
-// *
+//*
 // Request: Computer responds with encoded PIN
 // @auxend
 type PinMatrixAck struct {
@@ -482,7 +482,7 @@ func (m *PinMatrixAck) GetPin() string {
 	return ""
 }
 
-// *
+//*
 // Response: Device awaits encryption passphrase
 // @auxstart
 // @next PassphraseAck
@@ -525,7 +525,7 @@ func (m *PassphraseRequest) GetOnDevice() bool {
 	return false
 }
 
-// *
+//*
 // Request: Send passphrase back
 // @next PassphraseStateRequest
 type PassphraseAck struct {
@@ -575,7 +575,7 @@ func (m *PassphraseAck) GetState() []byte {
 	return nil
 }
 
-// *
+//*
 // Response: Device awaits passphrase state
 // @next PassphraseStateAck
 type PassphraseStateRequest struct {
@@ -617,7 +617,7 @@ func (m *PassphraseStateRequest) GetState() []byte {
 	return nil
 }
 
-// *
+//*
 // Request: Send passphrase state back
 // @auxend
 type PassphraseStateAck struct {
@@ -651,7 +651,7 @@ func (m *PassphraseStateAck) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_PassphraseStateAck proto.InternalMessageInfo
 
-// *
+//*
 // Structure representing BIP32 (hierarchical deterministic) node
 // Used for imports of private key into the device and exporting public key out of device
 // @embed
@@ -21,7 +21,7 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 
-// *
+//*
 // Request: Ask device for public key corresponding to address_n path
 // @start
 // @next EthereumPublicKey
@@ -73,7 +73,7 @@ func (m *EthereumGetPublicKey) GetShowDisplay() bool {
 	return false
 }
 
-// *
+//*
 // Response: Contains public key derived from device private seed
 // @end
 type EthereumPublicKey struct {
@@ -123,7 +123,7 @@ func (m *EthereumPublicKey) GetXpub() string {
 	return ""
 }
 
-// *
+//*
 // Request: Ask device for Ethereum address corresponding to address_n path
 // @start
 // @next EthereumAddress
@@ -175,7 +175,7 @@ func (m *EthereumGetAddress) GetShowDisplay() bool {
 	return false
 }
 
-// *
+//*
 // Response: Contains an Ethereum address derived from device private seed
 // @end
 type EthereumAddress struct {
@@ -225,7 +225,7 @@ func (m *EthereumAddress) GetAddressHex() string {
 	return ""
 }
 
-// *
+//*
 // Request: Ask device to sign transaction
 // All fields are optional from the protocol's point of view. Each field defaults to value `0` if missing.
 // Note: the first at most 1024 bytes of data MUST be transmitted as part of this message.
@@ -351,7 +351,7 @@ func (m *EthereumSignTx) GetTxType() uint32 {
 	return 0
 }
 
-// *
+//*
 // Response: Device asks for more data from transaction payload, or returns the signature.
 // If data_length is set, device awaits that many more bytes of payload.
 // Otherwise, the signature_* fields contain the computed transaction signature. All three fields will be present.
@@ -420,7 +420,7 @@ func (m *EthereumTxRequest) GetSignatureS() []byte {
 	return nil
 }
 
-// *
+//*
 // Request: Transaction payload data.
 // @next EthereumTxRequest
 type EthereumTxAck struct {
@@ -462,7 +462,7 @@ func (m *EthereumTxAck) GetDataChunk() []byte {
 	return nil
 }
 
-// *
+//*
 // Request: Ask device to sign message
 // @start
 // @next EthereumMessageSignature
@@ -514,7 +514,7 @@ func (m *EthereumSignMessage) GetMessage() []byte {
 	return nil
 }
 
-// *
+//*
 // Response: Signed message
 // @end
 type EthereumMessageSignature struct {
@@ -572,7 +572,7 @@ func (m *EthereumMessageSignature) GetAddressHex() string {
 	return ""
 }
 
-// *
+//*
 // Request: Ask device to verify message
 // @start
 // @next Success
@@ -21,7 +21,7 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 
-// *
+//*
 // Structure representing passphrase source
 type ApplySettings_PassphraseSourceType int32
 
@@ -66,7 +66,7 @@ func (ApplySettings_PassphraseSourceType) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptor_0c720c20d27aa029, []int{4, 0}
 }
 
-// *
+//*
 // Type of recovery procedure. These should be used as bitmask, e.g.,
 // `RecoveryDeviceType_ScrambledWords | RecoveryDeviceType_Matrix`
 // listing every method supported by the host computer.
@@ -114,7 +114,7 @@ func (RecoveryDevice_RecoveryDeviceType) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptor_0c720c20d27aa029, []int{17, 0}
 }
 
-// *
+//*
 // Type of Recovery Word request
 type WordRequest_WordRequestType int32
 
@@ -159,7 +159,7 @@ func (WordRequest_WordRequestType) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptor_0c720c20d27aa029, []int{18, 0}
 }
 
-// *
+//*
 // Request: Reset device to default state and ask for device details
 // @start
 // @next Features
@@ -210,7 +210,7 @@ func (m *Initialize) GetSkipPassphrase() bool {
 	return false
 }
 
-// *
+//*
 // Request: Ask for device details (no device reset)
 // @start
 // @next Features
@@ -245,7 +245,7 @@ func (m *GetFeatures) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_GetFeatures proto.InternalMessageInfo
 
-// *
+//*
 // Response: Reports various information about the device
 // @end
 type Features struct {
@@ -495,7 +495,7 @@ func (m *Features) GetNoBackup() bool {
 	return false
 }
 
-// *
+//*
 // Request: clear session (removes cached PIN, passphrase, etc).
 // @start
 // @next Success
@@ -530,7 +530,7 @@ func (m *ClearSession) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_ClearSession proto.InternalMessageInfo
 
-// *
+//*
 // Request: change language and/or label of the device
 // @start
 // @next Success
@@ -622,7 +622,7 @@ func (m *ApplySettings) GetDisplayRotation() uint32 {
 	return 0
 }
 
-// *
+//*
 // Request: set flags of the device
 // @start
 // @next Success
@@ -666,7 +666,7 @@ func (m *ApplyFlags) GetFlags() uint32 {
 	return 0
 }
 
-// *
+//*
 // Request: Starts workflow for setting/changing/removing the PIN
 // @start
 // @next Success
@@ -710,7 +710,7 @@ func (m *ChangePin) GetRemove() bool {
 	return false
 }
 
-// *
+//*
 // Request: Test if the device is alive, device sends back the message in Success response
 // @start
 // @next Success
@@ -777,7 +777,7 @@ func (m *Ping) GetPassphraseProtection() bool {
 	return false
 }
 
-// *
+//*
 // Request: Abort last operation that required user interaction
 // @start
 // @next Failure
@@ -812,7 +812,7 @@ func (m *Cancel) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_Cancel proto.InternalMessageInfo
 
-// *
+//*
 // Request: Request a sample of random data generated by hardware RNG. May be used for testing.
 // @start
 // @next Entropy
@@ -856,7 +856,7 @@ func (m *GetEntropy) GetSize() uint32 {
 	return 0
 }
 
-// *
+//*
 // Response: Reply with random data generated by internal RNG
 // @end
 type Entropy struct {
@@ -898,7 +898,7 @@ func (m *Entropy) GetEntropy() []byte {
 	return nil
 }
 
-// *
+//*
 // Request: Request device to wipe all sensitive data and settings
 // @start
 // @next Success
@@ -934,7 +934,7 @@ func (m *WipeDevice) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_WipeDevice proto.InternalMessageInfo
 
-// *
+//*
 // Request: Load seed and related internal settings from the computer
 // @start
 // @next Success
@@ -1036,7 +1036,7 @@ func (m *LoadDevice) GetU2FCounter() uint32 {
 	return 0
 }
 
-// *
+//*
 // Request: Ask device to do initialization involving user interaction
 // @start
 // @next EntropyRequest
@@ -1147,7 +1147,7 @@ func (m *ResetDevice) GetNoBackup() bool {
 	return false
 }
 
-// *
+//*
 // Request: Perform backup of the device seed if not backed up using ResetDevice
 // @start
 // @next Success
@@ -1182,7 +1182,7 @@ func (m *BackupDevice) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_BackupDevice proto.InternalMessageInfo
 
-// *
+//*
 // Response: Ask for additional entropy from host computer
 // @next EntropyAck
 type EntropyRequest struct {
@@ -1216,7 +1216,7 @@ func (m *EntropyRequest) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_EntropyRequest proto.InternalMessageInfo
 
-// *
+//*
 // Request: Provide additional entropy for seed generation function
 // @next Success
 type EntropyAck struct {
@@ -1258,7 +1258,7 @@ func (m *EntropyAck) GetEntropy() []byte {
 	return nil
 }
 
-// *
+//*
 // Request: Start recovery workflow asking user for specific words of mnemonic
 // Used to recovery device safely even on untrusted computer.
 // @start
@@ -1369,7 +1369,7 @@ func (m *RecoveryDevice) GetDryRun() bool {
 	return false
 }
 
-// *
+//*
 // Response: Device is waiting for user to enter word of the mnemonic
 // Its position is shown only on device's internal display.
 // @next WordAck
@@ -1412,7 +1412,7 @@ func (m *WordRequest) GetType() WordRequest_WordRequestType {
 	return WordRequest_WordRequestType_Plain
 }
 
-// *
+//*
 // Request: Computer replies with word from the mnemonic
 // @next WordRequest
 // @next Success
@@ -1456,7 +1456,7 @@ func (m *WordAck) GetWord() string {
 	return ""
 }
 
-// *
+//*
 // Request: Set U2F counter
 // @start
 // @next Success
@@ -22,7 +22,7 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 
-// *
+//*
 // Mapping between TREZOR wire identifier (uint) and a protobuf message
 type MessageType int32
 
@@ -380,7 +380,7 @@ func (w *wallet) selfDerive() {
 			// of legacy-ledger, the first account on the legacy-path will
 			// be shown to the user, even if we don't actively track it
 			if i < len(nextAddrs)-1 {
-				w.log.Info("Skipping tracking first account on legacy path, use personal.deriveAccount(<url>,<path>, false) to track",
+				w.log.Info("Skipping trakcking first account on legacy path, use personal.deriveAccount(<url>,<path>, false) to track",
 					"path", path, "address", nextAddrs[i])
 				break
 			}
@@ -526,6 +526,7 @@ func (w *wallet) signHash(account accounts.Account, hash []byte) ([]byte, error)
 
 // SignData signs keccak256(data). The mimetype parameter describes the type of data being signed
 func (w *wallet) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) {
+
 	// Unless we are doing 712 signing, simply dispatch to signHash
 	if !(mimeType == accounts.MimetypeTypedData && len(data) == 66 && data[0] == 0x19 && data[1] == 0x01) {
 		return w.signHash(account, crypto.Keccak256(data))
@@ -26,7 +26,7 @@ for:
 - go run build/ci.go lint
 - go run build/ci.go install -dlgo
 test_script:
-- go run build/ci.go test -dlgo
+- go run build/ci.go test -dlgo -coverage
 
 # linux/386 is disabled.
 - matrix:
@@ -54,4 +54,4 @@ for:
 - go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
 - go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
 test_script:
-- go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC%
+- go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage
@@ -1,60 +0,0 @@
-// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
-
-package engine
-
-import (
-	"encoding/json"
-	"errors"
-
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/common/hexutil"
-	"github.com/ethereum/go-ethereum/core/types"
-)
-
-var _ = (*payloadAttributesMarshaling)(nil)
-
-// MarshalJSON marshals as JSON.
-func (p PayloadAttributes) MarshalJSON() ([]byte, error) {
-	type PayloadAttributes struct {
-		Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
-		Random common.Hash `json:"prevRandao" gencodec:"required"`
-		SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
-		Withdrawals []*types.Withdrawal `json:"withdrawals"`
-	}
-	var enc PayloadAttributes
-	enc.Timestamp = hexutil.Uint64(p.Timestamp)
-	enc.Random = p.Random
-	enc.SuggestedFeeRecipient = p.SuggestedFeeRecipient
-	enc.Withdrawals = p.Withdrawals
-	return json.Marshal(&enc)
-}
-
-// UnmarshalJSON unmarshals from JSON.
-func (p *PayloadAttributes) UnmarshalJSON(input []byte) error {
-	type PayloadAttributes struct {
-		Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
-		Random *common.Hash `json:"prevRandao" gencodec:"required"`
-		SuggestedFeeRecipient *common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
-		Withdrawals []*types.Withdrawal `json:"withdrawals"`
-	}
-	var dec PayloadAttributes
-	if err := json.Unmarshal(input, &dec); err != nil {
-		return err
-	}
-	if dec.Timestamp == nil {
-		return errors.New("missing required field 'timestamp' for PayloadAttributes")
-	}
-	p.Timestamp = uint64(*dec.Timestamp)
-	if dec.Random == nil {
-		return errors.New("missing required field 'prevRandao' for PayloadAttributes")
-	}
-	p.Random = *dec.Random
-	if dec.SuggestedFeeRecipient == nil {
-		return errors.New("missing required field 'suggestedFeeRecipient' for PayloadAttributes")
-	}
-	p.SuggestedFeeRecipient = *dec.SuggestedFeeRecipient
-	if dec.Withdrawals != nil {
-		p.Withdrawals = dec.Withdrawals
-	}
-	return nil
-}
@@ -1,46 +0,0 @@
-// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
-
-package engine
-
-import (
-	"encoding/json"
-	"errors"
-	"math/big"
-
-	"github.com/ethereum/go-ethereum/common/hexutil"
-)
-
-var _ = (*executionPayloadEnvelopeMarshaling)(nil)
-
-// MarshalJSON marshals as JSON.
-func (e ExecutionPayloadEnvelope) MarshalJSON() ([]byte, error) {
-	type ExecutionPayloadEnvelope struct {
-		ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
-		BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
-	}
-	var enc ExecutionPayloadEnvelope
-	enc.ExecutionPayload = e.ExecutionPayload
-	enc.BlockValue = (*hexutil.Big)(e.BlockValue)
-	return json.Marshal(&enc)
-}
-
-// UnmarshalJSON unmarshals from JSON.
-func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
-	type ExecutionPayloadEnvelope struct {
-		ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
-		BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
-	}
-	var dec ExecutionPayloadEnvelope
-	if err := json.Unmarshal(input, &dec); err != nil {
-		return err
-	}
-	if dec.ExecutionPayload == nil {
-		return errors.New("missing required field 'executionPayload' for ExecutionPayloadEnvelope")
-	}
-	e.ExecutionPayload = dec.ExecutionPayload
-	if dec.BlockValue == nil {
-		return errors.New("missing required field 'blockValue' for ExecutionPayloadEnvelope")
-	}
-	e.BlockValue = (*big.Int)(dec.BlockValue)
-	return nil
-}
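Both deleted files above follow the same gencodec pattern: decode into a shadow struct whose fields are pointers, then reject a nil pointer for every field tagged gencodec:"required". A stripped-down illustration of that pattern with a made-up type:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// envelope stands in for the generated types above; field and key names are illustrative.
type envelope struct {
	BlockValue uint64
}

func (e *envelope) UnmarshalJSON(input []byte) error {
	type shadow struct {
		BlockValue *uint64 `json:"blockValue"`
	}
	var dec shadow
	if err := json.Unmarshal(input, &dec); err != nil {
		return err
	}
	if dec.BlockValue == nil {
		return errors.New("missing required field 'blockValue' for envelope")
	}
	e.BlockValue = *dec.BlockValue
	return nil
}

func main() {
	var e envelope
	fmt.Println(json.Unmarshal([]byte(`{}`), &e))               // reports the missing field
	fmt.Println(json.Unmarshal([]byte(`{"blockValue":7}`), &e)) // <nil>
}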
@@ -1,46 +1,38 @@
 # This file contains sha256 checksums of optional build dependencies.
 
-e447b498cde50215c4f7619e5124b0fc4e25fb5d16ea47271c47f278e7aa763a go1.20.3.src.tar.gz
+efd43e0f1402e083b73a03d444b7b6576bb4c539ac46208b63a916b69aca4088 go1.18.1.src.tar.gz
-c1e1161d6d859deb576e6cfabeb40e3d042ceb1c6f444f617c3c9d76269c3565 go1.20.3.darwin-amd64.tar.gz
+3703e9a0db1000f18c0c7b524f3d378aac71219b4715a6a4c5683eb639f41a4d go1.18.1.darwin-amd64.tar.gz
-86b0ed0f2b2df50fa8036eea875d1cf2d76cefdacf247c44639a1464b7e36b95 go1.20.3.darwin-arm64.tar.gz
+6d5641a06edba8cd6d425fb0adad06bad80e2afe0fa91b4aa0e5aed1bc78f58e go1.18.1.darwin-arm64.tar.gz
-340e80abd047c597fdc0f50a6cc59617f06c297d62f7fc77f4a0164e2da6f7aa go1.20.3.freebsd-386.tar.gz
+b9a9063d4265d8ccc046c9b314194d6eadc47e56d0d637db81e98e68aad45035 go1.18.1.freebsd-386.tar.gz
-2169fcd8b6c94c5fbe07c0b470ccfb6001d343f6548ad49f3d9ab78e3b5753c7 go1.20.3.freebsd-amd64.tar.gz
+2bc1c138d645e37dbbc63517dd1cf1bf33fc4cb95f442a6384df0418b5134e9f go1.18.1.freebsd-amd64.tar.gz
-e12384311403f1389d14cc1c1295bfb4e0dd5ab919403b80da429f671a223507 go1.20.3.linux-386.tar.gz
+9a8df5dde9058f08ac01ecfaae42534610db398e487138788c01da26a0d41ff9 go1.18.1.linux-386.tar.gz
-979694c2c25c735755bf26f4f45e19e64e4811d661dd07b8c010f7a8e18adfca go1.20.3.linux-amd64.tar.gz
+b3b815f47ababac13810fc6021eb73d65478e0b2db4b09d348eefad9581a2334 go1.18.1.linux-amd64.tar.gz
-eb186529f13f901e7a2c4438a05c2cd90d74706aaa0a888469b2a4a617b6ee54 go1.20.3.linux-arm64.tar.gz
+56a91851c97fb4697077abbca38860f735c32b38993ff79b088dac46e4735633 go1.18.1.linux-arm64.tar.gz
-b421e90469a83671641f81b6e20df6500f033e9523e89cbe7b7223704dd1035c go1.20.3.linux-armv6l.tar.gz
+9edc01c8e7db64e9ceeffc8258359e027812886ceca3444e83c4eb96ddb068ee go1.18.1.linux-armv6l.tar.gz
-943c89aa1624ea544a022b31e3d6e16a037200e436370bdd5fd67f3fa60be282 go1.20.3.linux-ppc64le.tar.gz
+33db623d1eecf362fe365107c12efc90eff0b9609e0b3345e258388019cb552a go1.18.1.linux-ppc64le.tar.gz
-126cf823a5634ef2544b866db107b9d351d3ea70d9e240b0bdcfb46f4dcae54b go1.20.3.linux-s390x.tar.gz
+5d9301324148ed4dbfaa0800da43a843ffd65c834ee73fcf087255697c925f74 go1.18.1.linux-s390x.tar.gz
-37e9146e1f9d681cfcaa6fee6c7b890c44c64bc50228c9588f3c4231346d33bd go1.20.3.windows-386.zip
+49ae65551acbfaa57b52fbefa0350b2072512ae3103b8cf1a919a02626dbc743 go1.18.1.windows-386.zip
-143a2837821c7dbacf7744cbb1a8421c1f48307c6fdfaeffc5f8c2f69e1b7932 go1.20.3.windows-amd64.zip
+c30bc3f1f7314a953fe208bd9cd5e24bd9403392a6c556ced3677f9f70f71fe1 go1.18.1.windows-amd64.zip
-158cb159e00bc979f473e0f5b5a561613129c5e51067967b72b8e072e5a4db81 go1.20.3.windows-arm64.zip
+2c4a8265030eac37f906634f5c13c22c3d0ea725f2488e1bca005c6b981653d7 go1.18.1.windows-arm64.zip
 
-fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz
+658078aaaf7608693f37c4cf1380b2af418ab8b2d23fdb33e7e2d4339328590e golangci-lint-1.46.2-darwin-amd64.tar.gz
-75b8f0ff3a4e68147156be4161a49d4576f1be37a0b506473f8c482140c1e7f2 golangci-lint-1.51.1-darwin-arm64.tar.gz
+81f9b4afd62ec5e612ef8bc3b1d612a88b56ff289874831845cdad394427385f golangci-lint-1.46.2-darwin-arm64.tar.gz
-e06b3459aaed356e1667580be00b05f41f3b2e29685d12cdee571c23e1edb414 golangci-lint-1.51.1-freebsd-386.tar.gz
+943486e703e62ec55ecd90caeb22bcd39f8cc3962a93eec18c06b7bae12cb46f golangci-lint-1.46.2-freebsd-386.tar.gz
-623ce2d0fa4d35cc2e8d69fa7334227ab592380962a13b4d9cdc77cf41db2008 golangci-lint-1.51.1-freebsd-amd64.tar.gz
+a75dd9ba7e08e8315c411697171db5375c0f6a1ece9e6fbeb9e9a4386822e17d golangci-lint-1.46.2-freebsd-amd64.tar.gz
-131365feb0584cc2736c43192fa673ca50e5b6b765456990cb379ecfb787e568 golangci-lint-1.51.1-freebsd-armv6.tar.gz
+83eedca1af72e8be055a1235177eb1b33524fbf08bec5730df2e6c3efade2b23 golangci-lint-1.46.2-freebsd-armv6.tar.gz
-98fb627927cbb654f5bf85dcffc5f646666b2ce96ea0fed977c9fb28abd51532 golangci-lint-1.51.1-freebsd-armv7.tar.gz
+513d276c490de6f82baa01f9346d8d78b385f2ae97608f42f05d1f0f1314cd54 golangci-lint-1.46.2-freebsd-armv7.tar.gz
-b36a99702fa762c15840261bc0fb41b4b1b16b8b19b8c0941bae98c85bb0f8b8 golangci-lint-1.51.1-linux-386.tar.gz
+461a60016d516c69d406dc3e2d4957b722dbe684b7085dfac4802d0f84409e27 golangci-lint-1.46.2-linux-386.tar.gz
-17aeb26c76820c22efa0e1838b0ab93e90cfedef43fbfc9a2f33f27eb9e5e070 golangci-lint-1.51.1-linux-amd64.tar.gz
|
242cd4f2d6ac0556e315192e8555784d13da5d1874e51304711570769c4f2b9b golangci-lint-1.46.2-linux-amd64.tar.gz
|
||||||
9744bc34e7b8d82ca788b667bfb7155a39b4be9aef43bf9f10318b1372cea338 golangci-lint-1.51.1-linux-arm64.tar.gz
|
ff5448ada2b3982581984d64b0dec614dba0a3ea4cab2d6a343c77927fc89f7e golangci-lint-1.46.2-linux-arm64.tar.gz
|
||||||
0dda8dbeb2ff7455a044ec8e347f2fc6d655d2e99d281b3b95e88167031c673d golangci-lint-1.51.1-linux-armv6.tar.gz
|
177f5210ef04aee282bfbc6ec519d36af5fb7d2b2c8d3f4ea5e59fdba71b0a27 golangci-lint-1.46.2-linux-armv6.tar.gz
|
||||||
0512f311b11d43b8b22989d929f0fe8a2e1e5ebe497f1eb0ff73a0fc3d188fd1 golangci-lint-1.51.1-linux-armv7.tar.gz
|
10dd512a36ee978a1009edbca3ba3af410f0fda8df4d85f0e4793a24213870cc golangci-lint-1.46.2-linux-armv7.tar.gz
|
||||||
d767108dcf84a8eaa844df3454cb0f75a492f4e7102ecc2b0a3545cfe073a566 golangci-lint-1.51.1-linux-loong64.tar.gz
|
67779fa517c688c9db1090c3c456117d95c6b92979c623fe8cce8fb84251f21e golangci-lint-1.46.2-linux-mips64.tar.gz
|
||||||
3bd56c54daec16585b2668e0dfabb27af2c2b38cc0fdb46923e2521e1634846b golangci-lint-1.51.1-linux-mips64.tar.gz
|
c085f0f57bdccbb2c902a41b72ce210a3dfff16ca856789374745ab52004b6ee golangci-lint-1.46.2-linux-mips64le.tar.gz
|
||||||
f72f5adfa2219e15d2414c9a2966f86e74556cf17a85c727a7fb7770a16cf814 golangci-lint-1.51.1-linux-mips64le.tar.gz
|
abecef6421499248e58ed75d2938bc12b4b1f98b057f25060680b77bb51a881e golangci-lint-1.46.2-linux-ppc64le.tar.gz
|
||||||
e605521dac98096d8737e1997c954f41f1d0d8275b8731f62783d410c23574b9 golangci-lint-1.51.1-linux-ppc64le.tar.gz
|
134843a8f5c5c182c11979ea75f5866945d54757b2a04f3e5e04a0cf4fbf3a39 golangci-lint-1.46.2-linux-riscv64.tar.gz
|
||||||
2f683217b814339e74d61ca700922d8407f15addd6d4c5e8b156fbab79f26a87 golangci-lint-1.51.1-linux-riscv64.tar.gz
|
9fe21a9476567aafe7a2e1a926b9641a39f920d4c0ea8eda9d968bc6136337f9 golangci-lint-1.46.2-linux-s390x.tar.gz
|
||||||
d98528292b65971a3594e5880530e7624597dc9806fcfccdfbe39be411713d63 golangci-lint-1.51.1-linux-s390x.tar.gz
|
b48a421ec12a43f8fc8f977b9cf7d4a1ea1c4b97f803a238de7d3ce4ab23a84b golangci-lint-1.46.2-windows-386.zip
|
||||||
9bb2d0fe9e692ed0aea4f2537e3e6862b2f6768fe2849a84f4a6ad09da9fd971 golangci-lint-1.51.1-netbsd-386.tar.gz
|
604acc1378a566abb0eac799362f3a37b7fcb5fa2268aeb2d5d954c829367301 golangci-lint-1.46.2-windows-amd64.zip
|
||||||
34cafdcd11ae73ae88d66c33eb8449f5c976fc3e37b44774dbe9c71caa95e592 golangci-lint-1.51.1-netbsd-amd64.tar.gz
|
927def10db073da9687594072e6a3d9c891f67fa897105a2cfd715e018e7386c golangci-lint-1.46.2-windows-arm64.zip
|
||||||
f8b4e1e47ac17caafe8a5f32f975a2b6a7cb14c27c0f73c1fb15c20ca91c2e03 golangci-lint-1.51.1-netbsd-armv6.tar.gz
|
729b76ed1d8b4e2612e38772b211503cb940e00a137bbaace1aa066f7c943737 golangci-lint-1.46.2-windows-armv6.zip
|
||||||
c4f58b7e227b9fd41f0e9310dc83f4a4e7d026598e2f6e95b78761081a6d9bd2 golangci-lint-1.51.1-netbsd-armv7.tar.gz
|
ea27c86d91e0b245ecbcfbf6cdb4ac0522d4bc6dca56bba02ea1bc77ad2917ac golangci-lint-1.46.2-windows-armv7.zip
|
||||||
6710e2f5375dc75521c1a17980a6cbbe6ff76c2f8b852964a8af558899a97cf5 golangci-lint-1.51.1-windows-386.zip
|
|
||||||
722d7b87b9cdda0a3835d5030b3fc5385c2eba4c107f63f6391cfb2ac35f051d golangci-lint-1.51.1-windows-amd64.zip
|
|
||||||
eb57f9bcb56646f2e3d6ccaf02ec227815fb05077b2e0b1bf9e755805acdc2b9 golangci-lint-1.51.1-windows-arm64.zip
|
|
||||||
bce02f7232723cb727755ee11f168a700a00896a25d37f87c4b173bce55596b4 golangci-lint-1.51.1-windows-armv6.zip
|
|
||||||
cf6403f84707ce8c98664736772271bc8874f2e760c2fd0f00cf3e85963507e9 golangci-lint-1.51.1-windows-armv7.zip
|
|
||||||
|
|
||||||
# This is the builder on PPA that will build Go itself (inception-y), don't modify!
|
|
||||||
d7f0013f82e6d7f862cc6cb5c8cdb48eef5f2e239b35baa97e2f1a7466043767 go1.19.6.src.tar.gz
|
|
||||||
|
339
build/ci.go
339
build/ci.go
@ -31,13 +31,17 @@ Available commands are:
|
|||||||
importkeys -- imports signing keys from env
|
importkeys -- imports signing keys from env
|
||||||
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
|
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
|
||||||
nsis -- creates a Windows NSIS installer
|
nsis -- creates a Windows NSIS installer
|
||||||
|
aar [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an Android archive
|
||||||
|
xcode [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an iOS XCode framework
|
||||||
purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore
|
purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore
|
||||||
|
|
||||||
For all commands, -n prevents execution of external programs (dry run mode).
|
For all commands, -n prevents execution of external programs (dry run mode).
|
||||||
|
|
||||||
*/
|
*/
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"flag"
|
"flag"
|
||||||
@ -47,6 +51,7 @@ import (
|
|||||||
"os/exec"
|
"os/exec"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@ -73,6 +78,7 @@ var (
|
|||||||
executablePath("bootnode"),
|
executablePath("bootnode"),
|
||||||
executablePath("evm"),
|
executablePath("evm"),
|
||||||
executablePath("geth"),
|
executablePath("geth"),
|
||||||
|
executablePath("puppeth"),
|
||||||
executablePath("rlpdump"),
|
executablePath("rlpdump"),
|
||||||
executablePath("clef"),
|
executablePath("clef"),
|
||||||
}
|
}
|
||||||
@ -95,6 +101,10 @@ var (
|
|||||||
BinaryName: "geth",
|
BinaryName: "geth",
|
||||||
Description: "Ethereum CLI client.",
|
Description: "Ethereum CLI client.",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
BinaryName: "puppeth",
|
||||||
|
Description: "Ethereum private network manager.",
|
||||||
|
},
|
||||||
{
|
{
|
||||||
BinaryName: "rlpdump",
|
BinaryName: "rlpdump",
|
||||||
Description: "Developer utility tool that prints RLP structures.",
|
Description: "Developer utility tool that prints RLP structures.",
|
||||||
@ -120,15 +130,15 @@ var (
|
|||||||
// Distros for which packages are created.
|
// Distros for which packages are created.
|
||||||
// Note: vivid is unsupported because there is no golang-1.6 package for it.
|
// Note: vivid is unsupported because there is no golang-1.6 package for it.
|
||||||
// Note: the following Ubuntu releases have been officially deprecated on Launchpad:
|
// Note: the following Ubuntu releases have been officially deprecated on Launchpad:
|
||||||
// wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsuite, impish
|
// wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsuite
|
||||||
debDistroGoBoots = map[string]string{
|
debDistroGoBoots = map[string]string{
|
||||||
"trusty": "golang-1.11", // EOL: 04/2024
|
"trusty": "golang-1.11", // EOL: 04/2024
|
||||||
"xenial": "golang-go", // EOL: 04/2026
|
"xenial": "golang-go", // EOL: 04/2026
|
||||||
"bionic": "golang-go", // EOL: 04/2028
|
"bionic": "golang-go", // EOL: 04/2028
|
||||||
"focal": "golang-go", // EOL: 04/2030
|
"focal": "golang-go", // EOL: 04/2030
|
||||||
|
"impish": "golang-go", // EOL: 07/2022
|
||||||
"jammy": "golang-go", // EOL: 04/2032
|
"jammy": "golang-go", // EOL: 04/2032
|
||||||
"kinetic": "golang-go", // EOL: 07/2023
|
//"kinetic": "golang-go", // EOL: 07/2023
|
||||||
"lunar": "golang-go", // EOL: 01/2024
|
|
||||||
}
|
}
|
||||||
|
|
||||||
debGoBootPaths = map[string]string{
|
debGoBootPaths = map[string]string{
|
||||||
@ -136,18 +146,10 @@ var (
|
|||||||
"golang-go": "/usr/lib/go",
|
"golang-go": "/usr/lib/go",
|
||||||
}
|
}
|
||||||
|
|
||||||
// This is the version of Go that will be downloaded by
|
// This is the version of go that will be downloaded by
|
||||||
//
|
//
|
||||||
// go run ci.go install -dlgo
|
// go run ci.go install -dlgo
|
||||||
dlgoVersion = "1.20.3"
|
dlgoVersion = "1.18.1"
|
||||||
|
|
||||||
// This is the version of Go that will be used to bootstrap the PPA builder.
|
|
||||||
//
|
|
||||||
// This version is fine to be old and full of security holes, we just use it
|
|
||||||
// to build the latest Go. Don't change it. If it ever becomes insufficient,
|
|
||||||
// we need to switch over to a recursive builder to jumpt across supported
|
|
||||||
// versions.
|
|
||||||
gobootVersion = "1.19.6"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
|
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
|
||||||
@ -183,6 +185,10 @@ func main() {
|
|||||||
doDebianSource(os.Args[2:])
|
doDebianSource(os.Args[2:])
|
||||||
case "nsis":
|
case "nsis":
|
||||||
doWindowsInstaller(os.Args[2:])
|
doWindowsInstaller(os.Args[2:])
|
||||||
|
case "aar":
|
||||||
|
doAndroidArchive(os.Args[2:])
|
||||||
|
case "xcode":
|
||||||
|
doXCodeFramework(os.Args[2:])
|
||||||
case "purge":
|
case "purge":
|
||||||
doPurge(os.Args[2:])
|
doPurge(os.Args[2:])
|
||||||
default:
|
default:
|
||||||
@ -197,7 +203,6 @@ func doInstall(cmdline []string) {
|
|||||||
dlgo = flag.Bool("dlgo", false, "Download Go and build with it")
|
dlgo = flag.Bool("dlgo", false, "Download Go and build with it")
|
||||||
arch = flag.String("arch", "", "Architecture to cross build for")
|
arch = flag.String("arch", "", "Architecture to cross build for")
|
||||||
cc = flag.String("cc", "", "C compiler to cross build with")
|
cc = flag.String("cc", "", "C compiler to cross build with")
|
||||||
staticlink = flag.Bool("static", false, "Create statically-linked executable")
|
|
||||||
)
|
)
|
||||||
flag.CommandLine.Parse(cmdline)
|
flag.CommandLine.Parse(cmdline)
|
||||||
|
|
||||||
@ -208,12 +213,9 @@ func doInstall(cmdline []string) {
|
|||||||
tc.Root = build.DownloadGo(csdb, dlgoVersion)
|
tc.Root = build.DownloadGo(csdb, dlgoVersion)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Disable CLI markdown doc generation in release builds.
|
|
||||||
buildTags := []string{"urfave_cli_no_docs"}
|
|
||||||
|
|
||||||
// Configure the build.
|
// Configure the build.
|
||||||
env := build.Env()
|
env := build.Env()
|
||||||
gobuild := tc.Go("build", buildFlags(env, *staticlink, buildTags)...)
|
gobuild := tc.Go("build", buildFlags(env)...)
|
||||||
|
|
||||||
// arm64 CI builders are memory-constrained and can't handle concurrent builds,
|
// arm64 CI builders are memory-constrained and can't handle concurrent builds,
|
||||||
// better disable it. This check isn't the best, it should probably
|
// better disable it. This check isn't the best, it should probably
|
||||||
@ -222,6 +224,9 @@ func doInstall(cmdline []string) {
|
|||||||
gobuild.Args = append(gobuild.Args, "-p", "1")
|
gobuild.Args = append(gobuild.Args, "-p", "1")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Disable CLI markdown doc generation in release builds.
|
||||||
|
gobuild.Args = append(gobuild.Args, "-tags", "urfave_cli_no_docs")
|
||||||
|
|
||||||
// We use -trimpath to avoid leaking local paths into the built executables.
|
// We use -trimpath to avoid leaking local paths into the built executables.
|
||||||
gobuild.Args = append(gobuild.Args, "-trimpath")
|
gobuild.Args = append(gobuild.Args, "-trimpath")
|
||||||
|
|
||||||
@ -246,35 +251,25 @@ func doInstall(cmdline []string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// buildFlags returns the go tool flags for building.
|
// buildFlags returns the go tool flags for building.
|
||||||
func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (flags []string) {
|
func buildFlags(env build.Environment) (flags []string) {
|
||||||
var ld []string
|
var ld []string
|
||||||
if env.Commit != "" {
|
if env.Commit != "" {
|
||||||
ld = append(ld, "-X", "github.com/ethereum/go-ethereum/internal/version.gitCommit="+env.Commit)
|
ld = append(ld, "-X", "main.gitCommit="+env.Commit)
|
||||||
ld = append(ld, "-X", "github.com/ethereum/go-ethereum/internal/version.gitDate="+env.Date)
|
ld = append(ld, "-X", "main.gitDate="+env.Date)
|
||||||
}
|
}
|
||||||
// Strip DWARF on darwin. This used to be required for certain things,
|
// Strip DWARF on darwin. This used to be required for certain things,
|
||||||
// and there is no downside to this, so we just keep doing it.
|
// and there is no downside to this, so we just keep doing it.
|
||||||
if runtime.GOOS == "darwin" {
|
if runtime.GOOS == "darwin" {
|
||||||
ld = append(ld, "-s")
|
ld = append(ld, "-s")
|
||||||
}
|
}
|
||||||
if runtime.GOOS == "linux" {
|
|
||||||
// Enforce the stacksize to 8M, which is the case on most platforms apart from
|
// Enforce the stacksize to 8M, which is the case on most platforms apart from
|
||||||
// alpine Linux.
|
// alpine Linux.
|
||||||
extld := []string{"-Wl,-z,stack-size=0x800000"}
|
if runtime.GOOS == "linux" {
|
||||||
if staticLinking {
|
ld = append(ld, "-extldflags", "-Wl,-z,stack-size=0x800000")
|
||||||
extld = append(extld, "-static")
|
|
||||||
// Under static linking, use of certain glibc features must be
|
|
||||||
// disabled to avoid shared library dependencies.
|
|
||||||
buildTags = append(buildTags, "osusergo", "netgo")
|
|
||||||
}
|
|
||||||
ld = append(ld, "-extldflags", "'"+strings.Join(extld, " ")+"'")
|
|
||||||
}
|
}
|
||||||
if len(ld) > 0 {
|
if len(ld) > 0 {
|
||||||
flags = append(flags, "-ldflags", strings.Join(ld, " "))
|
flags = append(flags, "-ldflags", strings.Join(ld, " "))
|
||||||
}
|
}
|
||||||
if len(buildTags) > 0 {
|
|
||||||
flags = append(flags, "-tags", strings.Join(buildTags, ","))
|
|
||||||
}
|
|
||||||
return flags
|
return flags
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -341,7 +336,7 @@ func doLint(cmdline []string) {
|
|||||||
|
|
||||||
// downloadLinter downloads and unpacks golangci-lint.
|
// downloadLinter downloads and unpacks golangci-lint.
|
||||||
func downloadLinter(cachedir string) string {
|
func downloadLinter(cachedir string) string {
|
||||||
const version = "1.51.1"
|
const version = "1.46.2"
|
||||||
|
|
||||||
csdb := build.MustLoadChecksums("build/checksums.txt")
|
csdb := build.MustLoadChecksums("build/checksums.txt")
|
||||||
arch := runtime.GOARCH
|
arch := runtime.GOARCH
|
||||||
@ -465,6 +460,10 @@ func maybeSkipArchive(env build.Environment) {
|
|||||||
log.Printf("skipping archive creation because this is a PR build")
|
log.Printf("skipping archive creation because this is a PR build")
|
||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
|
if env.IsCronJob {
|
||||||
|
log.Printf("skipping archive creation because this is a cron job")
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
if env.Branch != "master" && !strings.HasPrefix(env.Tag, "v1.") {
|
if env.Branch != "master" && !strings.HasPrefix(env.Tag, "v1.") {
|
||||||
log.Printf("skipping archive creation because branch %q, tag %q is not on the inclusion list", env.Branch, env.Tag)
|
log.Printf("skipping archive creation because branch %q, tag %q is not on the inclusion list", env.Branch, env.Tag)
|
||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
@ -598,7 +597,7 @@ func doDocker(cmdline []string) {
|
|||||||
}
|
}
|
||||||
if mismatch {
|
if mismatch {
|
||||||
// Build numbers mismatching, retry in a short time to
|
// Build numbers mismatching, retry in a short time to
|
||||||
// avoid concurrent fails in both publisher images. If
|
// avoid concurrent failes in both publisher images. If
|
||||||
// however the retry failed too, it means the concurrent
|
// however the retry failed too, it means the concurrent
|
||||||
// builder is still crunching, let that do the publish.
|
// builder is still crunching, let that do the publish.
|
||||||
if i == 0 {
|
if i == 0 {
|
||||||
@ -659,11 +658,10 @@ func doDebianSource(cmdline []string) {
|
|||||||
gpg.Stdin = bytes.NewReader(key)
|
gpg.Stdin = bytes.NewReader(key)
|
||||||
build.MustRun(gpg)
|
build.MustRun(gpg)
|
||||||
}
|
}
|
||||||
// Download and verify the Go source packages.
|
|
||||||
var (
|
// Download and verify the Go source package.
|
||||||
gobootbundle = downloadGoBootstrapSources(*cachedir)
|
gobundle := downloadGoSources(*cachedir)
|
||||||
gobundle = downloadGoSources(*cachedir)
|
|
||||||
)
|
|
||||||
// Download all the dependencies needed to build the sources and run the ci script
|
// Download all the dependencies needed to build the sources and run the ci script
|
||||||
srcdepfetch := tc.Go("mod", "download")
|
srcdepfetch := tc.Go("mod", "download")
|
||||||
srcdepfetch.Env = append(srcdepfetch.Env, "GOPATH="+filepath.Join(*workdir, "modgopath"))
|
srcdepfetch.Env = append(srcdepfetch.Env, "GOPATH="+filepath.Join(*workdir, "modgopath"))
|
||||||
@ -680,19 +678,12 @@ func doDebianSource(cmdline []string) {
|
|||||||
meta := newDebMetadata(distro, goboot, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
|
meta := newDebMetadata(distro, goboot, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
|
||||||
pkgdir := stageDebianSource(*workdir, meta)
|
pkgdir := stageDebianSource(*workdir, meta)
|
||||||
|
|
||||||
// Add bootstrapper Go source code
|
// Add Go source code
|
||||||
if err := build.ExtractArchive(gobootbundle, pkgdir); err != nil {
|
|
||||||
log.Fatalf("Failed to extract bootstrapper Go sources: %v", err)
|
|
||||||
}
|
|
||||||
if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".goboot")); err != nil {
|
|
||||||
log.Fatalf("Failed to rename bootstrapper Go source folder: %v", err)
|
|
||||||
}
|
|
||||||
// Add builder Go source code
|
|
||||||
if err := build.ExtractArchive(gobundle, pkgdir); err != nil {
|
if err := build.ExtractArchive(gobundle, pkgdir); err != nil {
|
||||||
log.Fatalf("Failed to extract builder Go sources: %v", err)
|
log.Fatalf("Failed to extract Go sources: %v", err)
|
||||||
}
|
}
|
||||||
if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".go")); err != nil {
|
if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".go")); err != nil {
|
||||||
log.Fatalf("Failed to rename builder Go source folder: %v", err)
|
log.Fatalf("Failed to rename Go source folder: %v", err)
|
||||||
}
|
}
|
||||||
// Add all dependency modules in compressed form
|
// Add all dependency modules in compressed form
|
||||||
os.MkdirAll(filepath.Join(pkgdir, ".mod", "cache"), 0755)
|
os.MkdirAll(filepath.Join(pkgdir, ".mod", "cache"), 0755)
|
||||||
@ -721,19 +712,6 @@ func doDebianSource(cmdline []string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// downloadGoBootstrapSources downloads the Go source tarball that will be used
|
|
||||||
// to bootstrap the builder Go.
|
|
||||||
func downloadGoBootstrapSources(cachedir string) string {
|
|
||||||
csdb := build.MustLoadChecksums("build/checksums.txt")
|
|
||||||
file := fmt.Sprintf("go%s.src.tar.gz", gobootVersion)
|
|
||||||
url := "https://dl.google.com/go/" + file
|
|
||||||
dst := filepath.Join(cachedir, file)
|
|
||||||
if err := csdb.DownloadFile(url, dst); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
||||||
// downloadGoSources downloads the Go source tarball.
|
// downloadGoSources downloads the Go source tarball.
|
||||||
func downloadGoSources(cachedir string) string {
|
func downloadGoSources(cachedir string) string {
|
||||||
csdb := build.MustLoadChecksums("build/checksums.txt")
|
csdb := build.MustLoadChecksums("build/checksums.txt")
|
||||||
@ -993,10 +971,7 @@ func doWindowsInstaller(cmdline []string) {
|
|||||||
if env.Commit != "" {
|
if env.Commit != "" {
|
||||||
version[2] += "-" + env.Commit[:8]
|
version[2] += "-" + env.Commit[:8]
|
||||||
}
|
}
|
||||||
installer, err := filepath.Abs("geth-" + archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + ".exe")
|
installer, _ := filepath.Abs("geth-" + archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + ".exe")
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to convert installer file path: %v", err)
|
|
||||||
}
|
|
||||||
build.MustRunCommand("makensis.exe",
|
build.MustRunCommand("makensis.exe",
|
||||||
"/DOUTPUTFILE="+installer,
|
"/DOUTPUTFILE="+installer,
|
||||||
"/DMAJORVERSION="+version[0],
|
"/DMAJORVERSION="+version[0],
|
||||||
@ -1011,6 +986,236 @@ func doWindowsInstaller(cmdline []string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Android archives
|
||||||
|
|
||||||
|
func doAndroidArchive(cmdline []string) {
|
||||||
|
var (
|
||||||
|
local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`)
|
||||||
|
signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. ANDROID_SIGNING_KEY)`)
|
||||||
|
signify = flag.String("signify", "", `Environment variable holding the signify signing key (e.g. ANDROID_SIGNIFY_KEY)`)
|
||||||
|
deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "https://oss.sonatype.org")`)
|
||||||
|
upload = flag.String("upload", "", `Destination to upload the archive (usually "gethstore/builds")`)
|
||||||
|
)
|
||||||
|
flag.CommandLine.Parse(cmdline)
|
||||||
|
env := build.Env()
|
||||||
|
tc := new(build.GoToolchain)
|
||||||
|
|
||||||
|
// Sanity check that the SDK and NDK are installed and set
|
||||||
|
if os.Getenv("ANDROID_HOME") == "" {
|
||||||
|
log.Fatal("Please ensure ANDROID_HOME points to your Android SDK")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build gomobile.
|
||||||
|
install := tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile@latest", "golang.org/x/mobile/cmd/gobind@latest")
|
||||||
|
install.Env = append(install.Env)
|
||||||
|
build.MustRun(install)
|
||||||
|
|
||||||
|
// Ensure all dependencies are available. This is required to make
|
||||||
|
// gomobile bind work because it expects go.sum to contain all checksums.
|
||||||
|
build.MustRun(tc.Go("mod", "download"))
|
||||||
|
|
||||||
|
// Build the Android archive and Maven resources
|
||||||
|
build.MustRun(gomobileTool("bind", "-ldflags", "-s -w", "--target", "android", "--javapkg", "org.ethereum", "-v", "github.com/ethereum/go-ethereum/mobile"))
|
||||||
|
|
||||||
|
if *local {
|
||||||
|
// If we're building locally, copy bundle to build dir and skip Maven
|
||||||
|
os.Rename("geth.aar", filepath.Join(GOBIN, "geth.aar"))
|
||||||
|
os.Rename("geth-sources.jar", filepath.Join(GOBIN, "geth-sources.jar"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
meta := newMavenMetadata(env)
|
||||||
|
build.Render("build/mvn.pom", meta.Package+".pom", 0755, meta)
|
||||||
|
|
||||||
|
// Skip Maven deploy and Azure upload for PR builds
|
||||||
|
maybeSkipArchive(env)
|
||||||
|
|
||||||
|
// Sign and upload the archive to Azure
|
||||||
|
archive := "geth-" + archiveBasename("android", params.ArchiveVersion(env.Commit)) + ".aar"
|
||||||
|
os.Rename("geth.aar", archive)
|
||||||
|
|
||||||
|
if err := archiveUpload(archive, *upload, *signer, *signify); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
// Sign and upload all the artifacts to Maven Central
|
||||||
|
os.Rename(archive, meta.Package+".aar")
|
||||||
|
if *signer != "" && *deploy != "" {
|
||||||
|
// Import the signing key into the local GPG instance
|
||||||
|
key := getenvBase64(*signer)
|
||||||
|
gpg := exec.Command("gpg", "--import")
|
||||||
|
gpg.Stdin = bytes.NewReader(key)
|
||||||
|
build.MustRun(gpg)
|
||||||
|
keyID, err := build.PGPKeyID(string(key))
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
// Upload the artifacts to Sonatype and/or Maven Central
|
||||||
|
repo := *deploy + "/service/local/staging/deploy/maven2"
|
||||||
|
if meta.Develop {
|
||||||
|
repo = *deploy + "/content/repositories/snapshots"
|
||||||
|
}
|
||||||
|
build.MustRunCommand("mvn", "gpg:sign-and-deploy-file", "-e", "-X",
|
||||||
|
"-settings=build/mvn.settings", "-Durl="+repo, "-DrepositoryId=ossrh",
|
||||||
|
"-Dgpg.keyname="+keyID,
|
||||||
|
"-DpomFile="+meta.Package+".pom", "-Dfile="+meta.Package+".aar")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func gomobileTool(subcmd string, args ...string) *exec.Cmd {
|
||||||
|
cmd := exec.Command(filepath.Join(GOBIN, "gomobile"), subcmd)
|
||||||
|
cmd.Args = append(cmd.Args, args...)
|
||||||
|
cmd.Env = []string{
|
||||||
|
"PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"),
|
||||||
|
}
|
||||||
|
for _, e := range os.Environ() {
|
||||||
|
if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "PATH=") || strings.HasPrefix(e, "GOBIN=") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
cmd.Env = append(cmd.Env, e)
|
||||||
|
}
|
||||||
|
cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
|
|
||||||
|
type mavenMetadata struct {
|
||||||
|
Version string
|
||||||
|
Package string
|
||||||
|
Develop bool
|
||||||
|
Contributors []mavenContributor
|
||||||
|
}
|
||||||
|
|
||||||
|
type mavenContributor struct {
|
||||||
|
Name string
|
||||||
|
Email string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMavenMetadata(env build.Environment) mavenMetadata {
|
||||||
|
// Collect the list of authors from the repo root
|
||||||
|
contribs := []mavenContributor{}
|
||||||
|
if authors, err := os.Open("AUTHORS"); err == nil {
|
||||||
|
defer authors.Close()
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(authors)
|
||||||
|
for scanner.Scan() {
|
||||||
|
// Skip any whitespace from the authors list
|
||||||
|
line := strings.TrimSpace(scanner.Text())
|
||||||
|
if line == "" || line[0] == '#' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Split the author and insert as a contributor
|
||||||
|
re := regexp.MustCompile("([^<]+) <(.+)>")
|
||||||
|
parts := re.FindStringSubmatch(line)
|
||||||
|
if len(parts) == 3 {
|
||||||
|
contribs = append(contribs, mavenContributor{Name: parts[1], Email: parts[2]})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Render the version and package strings
|
||||||
|
version := params.Version
|
||||||
|
if isUnstableBuild(env) {
|
||||||
|
version += "-SNAPSHOT"
|
||||||
|
}
|
||||||
|
return mavenMetadata{
|
||||||
|
Version: version,
|
||||||
|
Package: "geth-" + version,
|
||||||
|
Develop: isUnstableBuild(env),
|
||||||
|
Contributors: contribs,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// XCode frameworks
|
||||||
|
|
||||||
|
func doXCodeFramework(cmdline []string) {
|
||||||
|
var (
|
||||||
|
local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`)
|
||||||
|
signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. IOS_SIGNING_KEY)`)
|
||||||
|
signify = flag.String("signify", "", `Environment variable holding the signify signing key (e.g. IOS_SIGNIFY_KEY)`)
|
||||||
|
deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "trunk")`)
|
||||||
|
upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`)
|
||||||
|
)
|
||||||
|
flag.CommandLine.Parse(cmdline)
|
||||||
|
env := build.Env()
|
||||||
|
tc := new(build.GoToolchain)
|
||||||
|
|
||||||
|
// Build gomobile.
|
||||||
|
build.MustRun(tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
|
||||||
|
|
||||||
|
// Build the iOS XCode framework
|
||||||
|
bind := gomobileTool("bind", "-ldflags", "-s -w", "--target", "ios", "-v", "github.com/ethereum/go-ethereum/mobile")
|
||||||
|
|
||||||
|
if *local {
|
||||||
|
// If we're building locally, use the build folder and stop afterwards
|
||||||
|
bind.Dir = GOBIN
|
||||||
|
build.MustRun(bind)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the archive.
|
||||||
|
maybeSkipArchive(env)
|
||||||
|
archive := "geth-" + archiveBasename("ios", params.ArchiveVersion(env.Commit))
|
||||||
|
if err := os.MkdirAll(archive, 0755); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
bind.Dir, _ = filepath.Abs(archive)
|
||||||
|
build.MustRun(bind)
|
||||||
|
build.MustRunCommand("tar", "-zcvf", archive+".tar.gz", archive)
|
||||||
|
|
||||||
|
// Sign and upload the framework to Azure
|
||||||
|
if err := archiveUpload(archive+".tar.gz", *upload, *signer, *signify); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
// Prepare and upload a PodSpec to CocoaPods
|
||||||
|
if *deploy != "" {
|
||||||
|
meta := newPodMetadata(env, archive)
|
||||||
|
build.Render("build/pod.podspec", "Geth.podspec", 0755, meta)
|
||||||
|
build.MustRunCommand("pod", *deploy, "push", "Geth.podspec", "--allow-warnings")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type podMetadata struct {
|
||||||
|
Version string
|
||||||
|
Commit string
|
||||||
|
Archive string
|
||||||
|
Contributors []podContributor
|
||||||
|
}
|
||||||
|
|
||||||
|
type podContributor struct {
|
||||||
|
Name string
|
||||||
|
Email string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newPodMetadata(env build.Environment, archive string) podMetadata {
|
||||||
|
// Collect the list of authors from the repo root
|
||||||
|
contribs := []podContributor{}
|
||||||
|
if authors, err := os.Open("AUTHORS"); err == nil {
|
||||||
|
defer authors.Close()
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(authors)
|
||||||
|
for scanner.Scan() {
|
||||||
|
// Skip any whitespace from the authors list
|
||||||
|
line := strings.TrimSpace(scanner.Text())
|
||||||
|
if line == "" || line[0] == '#' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Split the author and insert as a contributor
|
||||||
|
re := regexp.MustCompile("([^<]+) <(.+)>")
|
||||||
|
parts := re.FindStringSubmatch(line)
|
||||||
|
if len(parts) == 3 {
|
||||||
|
contribs = append(contribs, podContributor{Name: parts[1], Email: parts[2]})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
version := params.Version
|
||||||
|
if isUnstableBuild(env) {
|
||||||
|
version += "-unstable." + env.Buildnum
|
||||||
|
}
|
||||||
|
return podMetadata{
|
||||||
|
Archive: archive,
|
||||||
|
Version: version,
|
||||||
|
Commit: env.Commit,
|
||||||
|
Contributors: contribs,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Binary distribution cleanups
|
// Binary distribution cleanups
|
||||||
|
|
||||||
func doPurge(cmdline []string) {
|
func doPurge(cmdline []string) {
|
||||||
|
@ -16,11 +16,7 @@ override_dh_auto_build:
|
|||||||
# We can't download a fresh Go within Launchpad, so we're shipping and building
|
# We can't download a fresh Go within Launchpad, so we're shipping and building
|
||||||
# one on the fly. However, we can't build it inside the go-ethereum folder as
|
# one on the fly. However, we can't build it inside the go-ethereum folder as
|
||||||
# bootstrapping clashes with go modules, so build in a sibling folder.
|
# bootstrapping clashes with go modules, so build in a sibling folder.
|
||||||
#
|
(mv .go ../ && cd ../.go/src && ./make.bash)
|
||||||
# We're also shipping the bootstrapper as of Go 1.20 as it had minimum version
|
|
||||||
# requirements opposed to older versions of Go.
|
|
||||||
(mv .goboot ../ && cd ../.goboot/src && ./make.bash)
|
|
||||||
(mv .go ../ && cd ../.go/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot ./make.bash)
|
|
||||||
|
|
||||||
# We can't download external go modules within Launchpad, so we're shipping the
|
# We can't download external go modules within Launchpad, so we're shipping the
|
||||||
# entire dependency source cache with go-ethereum.
|
# entire dependency source cache with go-ethereum.
|
||||||
|
57
build/mvn.pom
Normal file
57
build/mvn.pom
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||||
|
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
|
||||||
|
http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||||
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
|
||||||
|
<groupId>org.ethereum</groupId>
|
||||||
|
<artifactId>geth</artifactId>
|
||||||
|
<version>{{.Version}}</version>
|
||||||
|
<packaging>aar</packaging>
|
||||||
|
|
||||||
|
<name>Android Ethereum Client</name>
|
||||||
|
<description>Android port of the go-ethereum libraries and node</description>
|
||||||
|
<url>https://github.com/ethereum/go-ethereum</url>
|
||||||
|
<inceptionYear>2015</inceptionYear>
|
||||||
|
|
||||||
|
<licenses>
|
||||||
|
<license>
|
||||||
|
<name>GNU Lesser General Public License, Version 3.0</name>
|
||||||
|
<url>https://www.gnu.org/licenses/lgpl-3.0.en.html</url>
|
||||||
|
<distribution>repo</distribution>
|
||||||
|
</license>
|
||||||
|
</licenses>
|
||||||
|
|
||||||
|
<organization>
|
||||||
|
<name>Ethereum</name>
|
||||||
|
<url>https://ethereum.org</url>
|
||||||
|
</organization>
|
||||||
|
|
||||||
|
<developers>
|
||||||
|
<developer>
|
||||||
|
<id>karalabe</id>
|
||||||
|
<name>Péter Szilágyi</name>
|
||||||
|
<email>peterke@gmail.com</email>
|
||||||
|
<url>https://github.com/karalabe</url>
|
||||||
|
<properties>
|
||||||
|
<picUrl>https://www.gravatar.com/avatar/2ecbf0f5b4b79eebf8c193e5d324357f?s=256</picUrl>
|
||||||
|
</properties>
|
||||||
|
</developer>
|
||||||
|
</developers>
|
||||||
|
|
||||||
|
<contributors>{{range .Contributors}}
|
||||||
|
<contributor>
|
||||||
|
<name>{{.Name}}</name>
|
||||||
|
<email>{{.Email}}</email>
|
||||||
|
</contributor>{{end}}
|
||||||
|
</contributors>
|
||||||
|
|
||||||
|
<issueManagement>
|
||||||
|
<system>GitHub Issues</system>
|
||||||
|
<url>https://github.com/ethereum/go-ethereum/issues/</url>
|
||||||
|
</issueManagement>
|
||||||
|
|
||||||
|
<scm>
|
||||||
|
<url>https://github.com/ethereum/go-ethereum</url>
|
||||||
|
</scm>
|
||||||
|
</project>
|
24
build/mvn.settings
Normal file
24
build/mvn.settings
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
|
||||||
|
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
|
||||||
|
http://maven.apache.org/xsd/settings-1.0.0.xsd">
|
||||||
|
<servers>
|
||||||
|
<server>
|
||||||
|
<id>ossrh</id>
|
||||||
|
<username>${env.ANDROID_SONATYPE_USERNAME}</username>
|
||||||
|
<password>${env.ANDROID_SONATYPE_PASSWORD}</password>
|
||||||
|
</server>
|
||||||
|
</servers>
|
||||||
|
<profiles>
|
||||||
|
<profile>
|
||||||
|
<id>ossrh</id>
|
||||||
|
<activation>
|
||||||
|
<activeByDefault>true</activeByDefault>
|
||||||
|
</activation>
|
||||||
|
<properties>
|
||||||
|
<gpg.executable>gpg</gpg.executable>
|
||||||
|
<gpg.passphrase></gpg.passphrase>
|
||||||
|
</properties>
|
||||||
|
</profile>
|
||||||
|
</profiles>
|
||||||
|
</settings>
|
22
build/pod.podspec
Normal file
22
build/pod.podspec
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
Pod::Spec.new do |spec|
|
||||||
|
spec.name = 'Geth'
|
||||||
|
spec.version = '{{.Version}}'
|
||||||
|
spec.license = { :type => 'GNU Lesser General Public License, Version 3.0' }
|
||||||
|
spec.homepage = 'https://github.com/ethereum/go-ethereum'
|
||||||
|
spec.authors = { {{range .Contributors}}
|
||||||
|
'{{.Name}}' => '{{.Email}}',{{end}}
|
||||||
|
}
|
||||||
|
spec.summary = 'iOS Ethereum Client'
|
||||||
|
spec.source = { :git => 'https://github.com/ethereum/go-ethereum.git', :commit => '{{.Commit}}' }
|
||||||
|
|
||||||
|
spec.platform = :ios
|
||||||
|
spec.ios.deployment_target = '9.0'
|
||||||
|
spec.ios.vendored_frameworks = 'Frameworks/Geth.framework'
|
||||||
|
|
||||||
|
spec.prepare_command = <<-CMD
|
||||||
|
curl https://gethstore.blob.core.windows.net/builds/{{.Archive}}.tar.gz | tar -xvz
|
||||||
|
mkdir Frameworks
|
||||||
|
mv {{.Archive}}/Geth.framework Frameworks
|
||||||
|
rm -rf {{.Archive}}
|
||||||
|
CMD
|
||||||
|
end
|
@ -24,4 +24,8 @@ import (
|
|||||||
_ "github.com/fjl/gencodec"
|
_ "github.com/fjl/gencodec"
|
||||||
_ "github.com/golang/protobuf/protoc-gen-go"
|
_ "github.com/golang/protobuf/protoc-gen-go"
|
||||||
_ "golang.org/x/tools/cmd/stringer"
|
_ "golang.org/x/tools/cmd/stringer"
|
||||||
|
|
||||||
|
// Tool imports for mobile build.
|
||||||
|
_ "golang.org/x/mobile/cmd/gobind"
|
||||||
|
_ "golang.org/x/mobile/cmd/gomobile"
|
||||||
)
|
)
|
||||||
|
@ -342,10 +342,7 @@ func isGenerated(file string) bool {
|
|||||||
}
|
}
|
||||||
defer fd.Close()
|
defer fd.Close()
|
||||||
buf := make([]byte, 2048)
|
buf := make([]byte, 2048)
|
||||||
n, err := fd.Read(buf)
|
n, _ := fd.Read(buf)
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
buf = buf[:n]
|
buf = buf[:n]
|
||||||
for _, l := range bytes.Split(buf, []byte("\n")) {
|
for _, l := range bytes.Split(buf, []byte("\n")) {
|
||||||
if bytes.HasPrefix(l, []byte("// Code generated")) {
|
if bytes.HasPrefix(l, []byte("// Code generated")) {
|
||||||
|
@ -33,6 +33,14 @@ import (
|
|||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Git SHA1 commit hash of the release (set via linker flags)
|
||||||
|
gitCommit = ""
|
||||||
|
gitDate = ""
|
||||||
|
|
||||||
|
app *cli.App
|
||||||
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// Flags needed by abigen
|
// Flags needed by abigen
|
||||||
abiFlag = &cli.StringFlag{
|
abiFlag = &cli.StringFlag{
|
||||||
@ -65,7 +73,7 @@ var (
|
|||||||
}
|
}
|
||||||
langFlag = &cli.StringFlag{
|
langFlag = &cli.StringFlag{
|
||||||
Name: "lang",
|
Name: "lang",
|
||||||
Usage: "Destination language for the bindings (go)",
|
Usage: "Destination language for the bindings (go, java, objc)",
|
||||||
Value: "go",
|
Value: "go",
|
||||||
}
|
}
|
||||||
aliasFlag = &cli.StringFlag{
|
aliasFlag = &cli.StringFlag{
|
||||||
@ -74,9 +82,8 @@ var (
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
var app = flags.NewApp("Ethereum ABI wrapper code generator")
|
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool")
|
||||||
app.Name = "abigen"
|
app.Name = "abigen"
|
||||||
app.Flags = []cli.Flag{
|
app.Flags = []cli.Flag{
|
||||||
abiFlag,
|
abiFlag,
|
||||||
@ -102,6 +109,11 @@ func abigen(c *cli.Context) error {
|
|||||||
switch c.String(langFlag.Name) {
|
switch c.String(langFlag.Name) {
|
||||||
case "go":
|
case "go":
|
||||||
lang = bind.LangGo
|
lang = bind.LangGo
|
||||||
|
case "java":
|
||||||
|
lang = bind.LangJava
|
||||||
|
case "objc":
|
||||||
|
lang = bind.LangObjC
|
||||||
|
utils.Fatalf("Objc binding generation is uncompleted")
|
||||||
default:
|
default:
|
||||||
utils.Fatalf("Unsupported destination language \"%s\" (--lang)", c.String(langFlag.Name))
|
utils.Fatalf("Unsupported destination language \"%s\" (--lang)", c.String(langFlag.Name))
|
||||||
}
|
}
|
||||||
@ -149,12 +161,9 @@ func abigen(c *cli.Context) error {
|
|||||||
types = append(types, kind)
|
types = append(types, kind)
|
||||||
} else {
|
} else {
|
||||||
// Generate the list of types to exclude from binding
|
// Generate the list of types to exclude from binding
|
||||||
var exclude *nameFilter
|
exclude := make(map[string]bool)
|
||||||
if c.IsSet(excFlag.Name) {
|
for _, kind := range strings.Split(c.String(excFlag.Name), ",") {
|
||||||
var err error
|
exclude[strings.ToLower(kind)] = true
|
||||||
if exclude, err = newNameFilter(strings.Split(c.String(excFlag.Name), ",")...); err != nil {
|
|
||||||
utils.Fatalf("Failed to parse excludes: %v", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
var contracts map[string]*compiler.Contract
|
var contracts map[string]*compiler.Contract
|
||||||
|
|
||||||
@ -179,11 +188,7 @@ func abigen(c *cli.Context) error {
|
|||||||
}
|
}
|
||||||
// Gather all non-excluded contract for binding
|
// Gather all non-excluded contract for binding
|
||||||
for name, contract := range contracts {
|
for name, contract := range contracts {
|
||||||
// fully qualified name is of the form <solFilePath>:<type>
|
if exclude[strings.ToLower(name)] {
|
||||||
nameParts := strings.Split(name, ":")
|
|
||||||
typeName := nameParts[len(nameParts)-1]
|
|
||||||
if exclude != nil && exclude.Matches(name) {
|
|
||||||
fmt.Fprintf(os.Stderr, "excluding: %v\n", name)
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
abi, err := json.Marshal(contract.Info.AbiDefinition) // Flatten the compiler parse
|
abi, err := json.Marshal(contract.Info.AbiDefinition) // Flatten the compiler parse
|
||||||
@ -193,14 +198,15 @@ func abigen(c *cli.Context) error {
|
|||||||
abis = append(abis, string(abi))
|
abis = append(abis, string(abi))
|
||||||
bins = append(bins, contract.Code)
|
bins = append(bins, contract.Code)
|
||||||
sigs = append(sigs, contract.Hashes)
|
sigs = append(sigs, contract.Hashes)
|
||||||
types = append(types, typeName)
|
nameParts := strings.Split(name, ":")
|
||||||
|
types = append(types, nameParts[len(nameParts)-1])
|
||||||
|
|
||||||
// Derive the library placeholder which is a 34 character prefix of the
|
// Derive the library placeholder which is a 34 character prefix of the
|
||||||
// hex encoding of the keccak256 hash of the fully qualified library name.
|
// hex encoding of the keccak256 hash of the fully qualified library name.
|
||||||
// Note that the fully qualified library name is the path of its source
|
// Note that the fully qualified library name is the path of its source
|
||||||
// file and the library name separated by ":".
|
// file and the library name separated by ":".
|
||||||
libPattern := crypto.Keccak256Hash([]byte(name)).String()[2:36] // the first 2 chars are 0x
|
libPattern := crypto.Keccak256Hash([]byte(name)).String()[2:36] // the first 2 chars are 0x
|
||||||
libs[libPattern] = typeName
|
libs[libPattern] = nameParts[len(nameParts)-1]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Extract all aliases from the flags
|
// Extract all aliases from the flags
|
||||||
|
@ -1,58 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type nameFilter struct {
|
|
||||||
fulls map[string]bool // path/to/contract.sol:Type
|
|
||||||
files map[string]bool // path/to/contract.sol:*
|
|
||||||
types map[string]bool // *:Type
|
|
||||||
}
|
|
||||||
|
|
||||||
func newNameFilter(patterns ...string) (*nameFilter, error) {
|
|
||||||
f := &nameFilter{
|
|
||||||
fulls: make(map[string]bool),
|
|
||||||
files: make(map[string]bool),
|
|
||||||
types: make(map[string]bool),
|
|
||||||
}
|
|
||||||
for _, pattern := range patterns {
|
|
||||||
if err := f.add(pattern); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *nameFilter) add(pattern string) error {
|
|
||||||
ft := strings.Split(pattern, ":")
|
|
||||||
if len(ft) != 2 {
|
|
||||||
// filenames and types must not include ':' symbol
|
|
||||||
return fmt.Errorf("invalid pattern: %s", pattern)
|
|
||||||
}
|
|
||||||
|
|
||||||
file, typ := ft[0], ft[1]
|
|
||||||
if file == "*" {
|
|
||||||
f.types[typ] = true
|
|
||||||
return nil
|
|
||||||
} else if typ == "*" {
|
|
||||||
f.files[file] = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
f.fulls[pattern] = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *nameFilter) Matches(name string) bool {
|
|
||||||
ft := strings.Split(name, ":")
|
|
||||||
if len(ft) != 2 {
|
|
||||||
// If contract names are always of the fully-qualified form
|
|
||||||
// <filePath>:<type>, then this case will never happen.
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
file, typ := ft[0], ft[1]
|
|
||||||
// full paths > file paths > types
|
|
||||||
return f.fulls[name] || f.files[file] || f.types[typ]
|
|
||||||
}
|
|
@ -1,38 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestNameFilter(t *testing.T) {
|
|
||||||
_, err := newNameFilter("Foo")
|
|
||||||
require.Error(t, err)
|
|
||||||
_, err = newNameFilter("too/many:colons:Foo")
|
|
||||||
require.Error(t, err)
|
|
||||||
|
|
||||||
f, err := newNameFilter("a/path:A", "*:B", "c/path:*")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
for _, tt := range []struct {
|
|
||||||
name string
|
|
||||||
match bool
|
|
||||||
}{
|
|
||||||
{"a/path:A", true},
|
|
||||||
{"unknown/path:A", false},
|
|
||||||
{"a/path:X", false},
|
|
||||||
{"unknown/path:X", false},
|
|
||||||
{"any/path:B", true},
|
|
||||||
{"c/path:X", true},
|
|
||||||
{"c/path:foo:B", false},
|
|
||||||
} {
|
|
||||||
match := f.Matches(tt.name)
|
|
||||||
if tt.match {
|
|
||||||
assert.True(t, match, "expected match")
|
|
||||||
} else {
|
|
||||||
assert.False(t, match, "expected no match")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -40,7 +40,7 @@ func main() {
|
|||||||
writeAddr = flag.Bool("writeaddress", false, "write out the node's public key and quit")
|
writeAddr = flag.Bool("writeaddress", false, "write out the node's public key and quit")
|
||||||
nodeKeyFile = flag.String("nodekey", "", "private key filename")
|
nodeKeyFile = flag.String("nodekey", "", "private key filename")
|
||||||
nodeKeyHex = flag.String("nodekeyhex", "", "private key as hex (for testing)")
|
nodeKeyHex = flag.String("nodekeyhex", "", "private key as hex (for testing)")
|
||||||
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|pmp:<IP>|extip:<IP>)")
|
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")
|
||||||
netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
|
netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
|
||||||
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
|
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
|
||||||
verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-5)")
|
verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-5)")
|
||||||
|
@ -46,7 +46,7 @@ Deploy checkpoint oracle contract. `--signers` indicates the specified trusted s
|
|||||||
checkpoint-admin deploy --rpc <NODE_RPC_ENDPOINT> --clef <CLEF_ENDPOINT> --signer <SIGNER_TO_SIGN_TX> --signers <TRUSTED_SIGNER_LIST> --threshold 1
|
checkpoint-admin deploy --rpc <NODE_RPC_ENDPOINT> --clef <CLEF_ENDPOINT> --signer <SIGNER_TO_SIGN_TX> --signers <TRUSTED_SIGNER_LIST> --threshold 1
|
||||||
```
|
```
|
||||||
|
|
||||||
It is worth noting that checkpoint-admin only supports clef as a signer for transactions and plain text(checkpoint). For more clef usage, please see the clef [tutorial](https://geth.ethereum.org/docs/tools/clef/tutorial) .
|
It is worth noting that checkpoint-admin only supports clef as a signer for transactions and plain text(checkpoint). For more clef usage, please see the clef [tutorial](https://geth.ethereum.org/docs/clef/tutorial) .
|
||||||
|
|
||||||
#### Sign
|
#### Sign
|
||||||
|
|
||||||
@ -86,7 +86,7 @@ checkpoint-admin status --rpc <NODE_RPC_ENDPOINT>
|
|||||||
|
|
||||||
### Enable checkpoint oracle in your private network
|
### Enable checkpoint oracle in your private network
|
||||||
|
|
||||||
Currently, only the Ethereum mainnet and the default supported test networks (rinkeby, goerli) activate this feature. If you want to activate this feature in your private network, you can overwrite the relevant checkpoint oracle settings through the configuration file after deploying the oracle contract.
|
Currently, only the Ethereum mainnet and the default supported test networks (ropsten, rinkeby, goerli) activate this feature. If you want to activate this feature in your private network, you can overwrite the relevant checkpoint oracle settings through the configuration file after deploying the oracle contract.
|
||||||
|
|
||||||
* Get your node configuration file `geth dumpconfig OTHER_COMMAND_LINE_OPTIONS > config.toml`
|
* Get your node configuration file `geth dumpconfig OTHER_COMMAND_LINE_OPTIONS > config.toml`
|
||||||
* Edit the configuration file and add the following information
|
* Edit the configuration file and add the following information
|
||||||
|
@ -28,9 +28,16 @@ import (
|
|||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
var app = flags.NewApp("ethereum checkpoint helper tool")
|
var (
|
||||||
|
// Git SHA1 commit hash of the release (set via linker flags)
|
||||||
|
gitCommit = ""
|
||||||
|
gitDate = ""
|
||||||
|
|
||||||
|
app *cli.App
|
||||||
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool")
|
||||||
app.Commands = []*cli.Command{
|
app.Commands = []*cli.Command{
|
||||||
commandStatus,
|
commandStatus,
|
||||||
commandDeploy,
|
commandDeploy,
|
||||||
|
@ -29,7 +29,7 @@ GLOBAL OPTIONS:
|
|||||||
--loglevel value log level to emit to the screen (default: 4)
|
--loglevel value log level to emit to the screen (default: 4)
|
||||||
--keystore value Directory for the keystore (default: "$HOME/.ethereum/keystore")
|
--keystore value Directory for the keystore (default: "$HOME/.ethereum/keystore")
|
||||||
--configdir value Directory for Clef configuration (default: "$HOME/.clef")
|
--configdir value Directory for Clef configuration (default: "$HOME/.clef")
|
||||||
--chainid value Chain id to use for signing (1=mainnet, 4=Rinkeby, 5=Goerli) (default: 1)
|
--chainid value Chain id to use for signing (1=mainnet, 3=Ropsten, 4=Rinkeby, 5=Goerli) (default: 1)
|
||||||
--lightkdf Reduce key-derivation RAM & CPU usage at some expense of KDF strength
|
--lightkdf Reduce key-derivation RAM & CPU usage at some expense of KDF strength
|
||||||
--nousb Disables monitoring for and managing USB hardware wallets
|
--nousb Disables monitoring for and managing USB hardware wallets
|
||||||
--pcscdpath value Path to the smartcard daemon (pcscd) socket file (default: "/run/pcscd/pcscd.comm")
|
--pcscdpath value Path to the smartcard daemon (pcscd) socket file (default: "/run/pcscd/pcscd.comm")
|
||||||
|
@ -1,117 +0,0 @@
|
|||||||
// Copyright 2022 The go-ethereum Authors
|
|
||||||
// This file is part of go-ethereum.
|
|
||||||
//
|
|
||||||
// go-ethereum is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// go-ethereum is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU General Public License
|
|
||||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestImportRaw tests clef --importraw
|
|
||||||
func TestImportRaw(t *testing.T) {
|
|
||||||
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
|
|
||||||
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
|
|
||||||
t.Cleanup(func() { os.Remove(keyPath) })
|
|
||||||
|
|
||||||
t.Parallel()
|
|
||||||
t.Run("happy-path", func(t *testing.T) {
|
|
||||||
// Run clef importraw
|
|
||||||
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
|
|
||||||
clef.input("myverylongpassword").input("myverylongpassword")
|
|
||||||
if out := string(clef.Output()); !strings.Contains(out,
|
|
||||||
"Key imported:\n Address 0x9160DC9105f7De5dC5E7f3d97ef11DA47269BdA6") {
|
|
||||||
t.Logf("Output\n%v", out)
|
|
||||||
t.Error("Failure")
|
|
||||||
}
|
|
||||||
})
|
|
||||||
// tests clef --importraw with mismatched passwords.
|
|
||||||
t.Run("pw-mismatch", func(t *testing.T) {
|
|
||||||
// Run clef importraw
|
|
||||||
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
|
|
||||||
clef.input("myverylongpassword1").input("myverylongpassword2").WaitExit()
|
|
||||||
if have, want := clef.StderrText(), "Passwords do not match\n"; have != want {
|
|
||||||
t.Errorf("have %q, want %q", have, want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
// tests clef --importraw with a too short password.
|
|
||||||
t.Run("short-pw", func(t *testing.T) {
|
|
||||||
// Run clef importraw
|
|
||||||
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
|
|
||||||
clef.input("shorty").input("shorty").WaitExit()
|
|
||||||
if have, want := clef.StderrText(),
|
|
||||||
"password requirements not met: password too short (<10 characters)\n"; have != want {
|
|
||||||
t.Errorf("have %q, want %q", have, want)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestListAccounts tests clef --list-accounts
|
|
||||||
func TestListAccounts(t *testing.T) {
|
|
||||||
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
|
|
||||||
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
|
|
||||||
t.Cleanup(func() { os.Remove(keyPath) })
|
|
||||||
|
|
||||||
t.Parallel()
|
|
||||||
t.Run("no-accounts", func(t *testing.T) {
|
|
||||||
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-accounts")
|
|
||||||
if out := string(clef.Output()); !strings.Contains(out, "The keystore is empty.") {
|
|
||||||
t.Logf("Output\n%v", out)
|
|
||||||
t.Error("Failure")
|
|
||||||
}
|
|
||||||
})
|
|
||||||
t.Run("one-account", func(t *testing.T) {
|
|
||||||
// First, we need to import
|
|
||||||
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
|
|
||||||
clef.input("myverylongpassword").input("myverylongpassword").WaitExit()
|
|
||||||
// Secondly, do a listing, using the same datadir
|
|
||||||
clef = runWithKeystore(t, clef.Datadir, "--suppress-bootwarn", "--lightkdf", "list-accounts")
|
|
||||||
if out := string(clef.Output()); !strings.Contains(out, "0x9160DC9105f7De5dC5E7f3d97ef11DA47269BdA6 (keystore:") {
|
|
||||||
t.Logf("Output\n%v", out)
|
|
||||||
t.Error("Failure")
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestListWallets tests clef --list-wallets
|
|
||||||
func TestListWallets(t *testing.T) {
|
|
||||||
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
|
|
||||||
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
|
|
||||||
t.Cleanup(func() { os.Remove(keyPath) })
|
|
||||||
|
|
||||||
t.Parallel()
|
|
||||||
t.Run("no-accounts", func(t *testing.T) {
|
|
||||||
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-wallets")
|
|
||||||
if out := string(clef.Output()); !strings.Contains(out, "There are no wallets.") {
|
|
||||||
t.Logf("Output\n%v", out)
|
|
||||||
t.Error("Failure")
|
|
||||||
}
|
|
||||||
})
|
|
||||||
t.Run("one-account", func(t *testing.T) {
|
|
||||||
// First, we need to import
|
|
||||||
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
|
|
||||||
clef.input("myverylongpassword").input("myverylongpassword").WaitExit()
|
|
||||||
// Secondly, do a listing, using the same datadir
|
|
||||||
clef = runWithKeystore(t, clef.Datadir, "--suppress-bootwarn", "--lightkdf", "list-wallets")
|
|
||||||
if out := string(clef.Output()); !strings.Contains(out, "Account 0: 0x9160DC9105f7De5dC5E7f3d97ef11DA47269BdA6") {
|
|
||||||
t.Logf("Output\n%v", out)
|
|
||||||
t.Error("Failure")
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
219
cmd/clef/main.go
219
cmd/clef/main.go
@ -23,7 +23,6 @@ import (
|
|||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math/big"
|
"math/big"
|
||||||
@ -75,7 +74,7 @@ PURPOSE. See the GNU General Public License for more details.
|
|||||||
var (
|
var (
|
||||||
logLevelFlag = &cli.IntFlag{
|
logLevelFlag = &cli.IntFlag{
|
||||||
Name: "loglevel",
|
Name: "loglevel",
|
||||||
Value: 3,
|
Value: 4,
|
||||||
Usage: "log level to emit to the screen",
|
Usage: "log level to emit to the screen",
|
||||||
}
|
}
|
||||||
advancedMode = &cli.BoolFlag{
|
advancedMode = &cli.BoolFlag{
|
||||||
@ -99,7 +98,7 @@ var (
|
|||||||
chainIdFlag = &cli.Int64Flag{
|
chainIdFlag = &cli.Int64Flag{
|
||||||
Name: "chainid",
|
Name: "chainid",
|
||||||
Value: params.MainnetChainConfig.ChainID.Int64(),
|
Value: params.MainnetChainConfig.ChainID.Int64(),
|
||||||
Usage: "Chain id to use for signing (1=mainnet, 4=Rinkeby, 5=Goerli)",
|
Usage: "Chain id to use for signing (1=mainnet, 3=Ropsten, 4=Rinkeby, 5=Goerli)",
|
||||||
}
|
}
|
||||||
rpcPortFlag = &cli.IntFlag{
|
rpcPortFlag = &cli.IntFlag{
|
||||||
Name: "http.port",
|
Name: "http.port",
|
||||||
@ -204,61 +203,25 @@ The delpw command removes a password for a given address (keyfile).
|
|||||||
},
|
},
|
||||||
Description: `
|
Description: `
|
||||||
The newaccount command creates a new keystore-backed account. It is a convenience-method
|
The newaccount command creates a new keystore-backed account. It is a convenience-method
|
||||||
which can be used in lieu of an external UI.
|
which can be used in lieu of an external UI.`,
|
||||||
`}
|
}
|
||||||
|
|
||||||
gendocCommand = &cli.Command{
|
gendocCommand = &cli.Command{
|
||||||
Action: GenDoc,
|
Action: GenDoc,
|
||||||
Name: "gendoc",
|
Name: "gendoc",
|
||||||
Usage: "Generate documentation about json-rpc format",
|
Usage: "Generate documentation about json-rpc format",
|
||||||
Description: `
|
Description: `
|
||||||
The gendoc generates example structures of the json-rpc communication types.
|
The gendoc generates example structures of the json-rpc communication types.
|
||||||
`}
|
|
||||||
listAccountsCommand = &cli.Command{
|
|
||||||
Action: listAccounts,
|
|
||||||
Name: "list-accounts",
|
|
||||||
Usage: "List accounts in the keystore",
|
|
||||||
Flags: []cli.Flag{
|
|
||||||
logLevelFlag,
|
|
||||||
keystoreFlag,
|
|
||||||
utils.LightKDFFlag,
|
|
||||||
acceptFlag,
|
|
||||||
},
|
|
||||||
Description: `
|
|
||||||
Lists the accounts in the keystore.
|
|
||||||
`}
|
|
||||||
listWalletsCommand = &cli.Command{
|
|
||||||
Action: listWallets,
|
|
||||||
Name: "list-wallets",
|
|
||||||
Usage: "List wallets known to Clef",
|
|
||||||
Flags: []cli.Flag{
|
|
||||||
logLevelFlag,
|
|
||||||
keystoreFlag,
|
|
||||||
utils.LightKDFFlag,
|
|
||||||
acceptFlag,
|
|
||||||
},
|
|
||||||
Description: `
|
|
||||||
Lists the wallets known to Clef.
|
|
||||||
`}
|
|
||||||
importRawCommand = &cli.Command{
|
|
||||||
Action: accountImport,
|
|
||||||
Name: "importraw",
|
|
||||||
Usage: "Import a hex-encoded private key.",
|
|
||||||
ArgsUsage: "<keyfile>",
|
|
||||||
Flags: []cli.Flag{
|
|
||||||
logLevelFlag,
|
|
||||||
keystoreFlag,
|
|
||||||
utils.LightKDFFlag,
|
|
||||||
acceptFlag,
|
|
||||||
},
|
|
||||||
Description: `
|
|
||||||
Imports an unencrypted private key from <keyfile> and creates a new account.
|
|
||||||
Prints the address.
|
|
||||||
The keyfile is assumed to contain an unencrypted private key in hexadecimal format.
|
|
||||||
The account is saved in encrypted format, you are prompted for a password.
|
|
||||||
`}
|
`}
|
||||||
)
|
)
|
||||||
|
|
||||||
var app = flags.NewApp("Manage Ethereum account operations")
|
var (
|
||||||
|
// Git SHA1 commit hash of the release (set via linker flags)
|
||||||
|
gitCommit = ""
|
||||||
|
gitDate = ""
|
||||||
|
|
||||||
|
app = flags.NewApp(gitCommit, gitDate, "Manage Ethereum account operations")
|
||||||
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
app.Name = "Clef"
|
app.Name = "Clef"
|
||||||
@ -291,10 +254,7 @@ func init() {
|
|||||||
setCredentialCommand,
|
setCredentialCommand,
|
||||||
delCredentialCommand,
|
delCredentialCommand,
|
||||||
newAccountCommand,
|
newAccountCommand,
|
||||||
importRawCommand,
|
|
||||||
gendocCommand,
|
gendocCommand,
|
||||||
listAccountsCommand,
|
|
||||||
listWalletsCommand,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -397,22 +357,6 @@ func attestFile(ctx *cli.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func initInternalApi(c *cli.Context) (*core.UIServerAPI, core.UIClientAPI, error) {
|
|
||||||
if err := initialize(c); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
var (
|
|
||||||
ui = core.NewCommandlineUI()
|
|
||||||
pwStorage storage.Storage = &storage.NoStorage{}
|
|
||||||
ksLoc = c.String(keystoreFlag.Name)
|
|
||||||
lightKdf = c.Bool(utils.LightKDFFlag.Name)
|
|
||||||
)
|
|
||||||
am := core.StartClefAccountManager(ksLoc, true, lightKdf, "")
|
|
||||||
api := core.NewSignerAPI(am, 0, true, ui, nil, false, pwStorage)
|
|
||||||
internalApi := core.NewUIServerAPI(api)
|
|
||||||
return internalApi, ui, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func setCredential(ctx *cli.Context) error {
|
func setCredential(ctx *cli.Context) error {
|
||||||
if ctx.NArg() < 1 {
|
if ctx.NArg() < 1 {
|
||||||
utils.Fatalf("This command requires an address to be passed as an argument")
|
utils.Fatalf("This command requires an address to be passed as an argument")
|
||||||
@ -471,6 +415,31 @@ func removeCredential(ctx *cli.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func newAccount(c *cli.Context) error {
|
||||||
|
if err := initialize(c); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// The newaccount is meant for users using the CLI, since 'real' external
|
||||||
|
// UIs can use the UI-api instead. So we'll just use the native CLI UI here.
|
||||||
|
var (
|
||||||
|
ui = core.NewCommandlineUI()
|
||||||
|
pwStorage storage.Storage = &storage.NoStorage{}
|
||||||
|
ksLoc = c.String(keystoreFlag.Name)
|
||||||
|
lightKdf = c.Bool(utils.LightKDFFlag.Name)
|
||||||
|
)
|
||||||
|
log.Info("Starting clef", "keystore", ksLoc, "light-kdf", lightKdf)
|
||||||
|
am := core.StartClefAccountManager(ksLoc, true, lightKdf, "")
|
||||||
|
// This gives is us access to the external API
|
||||||
|
apiImpl := core.NewSignerAPI(am, 0, true, ui, nil, false, pwStorage)
|
||||||
|
// This gives us access to the internal API
|
||||||
|
internalApi := core.NewUIServerAPI(apiImpl)
|
||||||
|
addr, err := internalApi.New(context.Background())
|
||||||
|
if err == nil {
|
||||||
|
fmt.Printf("Generated account %v\n", addr.String())
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
func initialize(c *cli.Context) error {
|
func initialize(c *cli.Context) error {
|
||||||
// Set up the logger to print everything
|
// Set up the logger to print everything
|
||||||
logOutput := os.Stdout
|
logOutput := os.Stdout
|
||||||
@ -496,108 +465,6 @@ func initialize(c *cli.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func newAccount(c *cli.Context) error {
|
|
||||||
internalApi, _, err := initInternalApi(c)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
addr, err := internalApi.New(context.Background())
|
|
||||||
if err == nil {
|
|
||||||
fmt.Printf("Generated account %v\n", addr.String())
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func listAccounts(c *cli.Context) error {
|
|
||||||
internalApi, _, err := initInternalApi(c)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
accs, err := internalApi.ListAccounts(context.Background())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(accs) == 0 {
|
|
||||||
fmt.Println("\nThe keystore is empty.")
|
|
||||||
}
|
|
||||||
fmt.Println()
|
|
||||||
for _, account := range accs {
|
|
||||||
fmt.Printf("%v (%v)\n", account.Address, account.URL)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func listWallets(c *cli.Context) error {
|
|
||||||
internalApi, _, err := initInternalApi(c)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
wallets := internalApi.ListWallets()
|
|
||||||
if len(wallets) == 0 {
|
|
||||||
fmt.Println("\nThere are no wallets.")
|
|
||||||
}
|
|
||||||
fmt.Println()
|
|
||||||
for i, wallet := range wallets {
|
|
||||||
fmt.Printf("- Wallet %d at %v (%v %v)\n", i, wallet.URL, wallet.Status, wallet.Failure)
|
|
||||||
for j, acc := range wallet.Accounts {
|
|
||||||
fmt.Printf(" -Account %d: %v (%v)\n", j, acc.Address, acc.URL)
|
|
||||||
}
|
|
||||||
fmt.Println()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// accountImport imports a raw hexadecimal private key via CLI.
|
|
||||||
func accountImport(c *cli.Context) error {
|
|
||||||
if c.Args().Len() != 1 {
|
|
||||||
return errors.New("<keyfile> must be given as first argument.")
|
|
||||||
}
|
|
||||||
internalApi, ui, err := initInternalApi(c)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
pKey, err := crypto.LoadECDSA(c.Args().First())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
readPw := func(prompt string) (string, error) {
|
|
||||||
resp, err := ui.OnInputRequired(core.UserInputRequest{
|
|
||||||
Title: "Password",
|
|
||||||
Prompt: prompt,
|
|
||||||
IsPassword: true,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return resp.Text, nil
|
|
||||||
}
|
|
||||||
first, err := readPw("Please enter a password for the imported account")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
second, err := readPw("Please repeat the password you just entered")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if first != second {
|
|
||||||
return errors.New("Passwords do not match")
|
|
||||||
}
|
|
||||||
acc, err := internalApi.ImportRawKey(hex.EncodeToString(crypto.FromECDSA(pKey)), first)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
ui.ShowInfo(fmt.Sprintf(`Key imported:
|
|
||||||
Address %v
|
|
||||||
Keystore file: %v
|
|
||||||
|
|
||||||
The key is now encrypted; losing the password will result in permanently losing
|
|
||||||
access to the key and all associated funds!
|
|
||||||
|
|
||||||
Make sure to backup keystore and passwords in a safe location.`,
|
|
||||||
acc.Address, acc.URL.Path))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into
|
// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into
|
||||||
// account the set data folders as well as the designated platform we're currently
|
// account the set data folders as well as the designated platform we're currently
|
||||||
// running on.
|
// running on.
|
||||||
@ -707,7 +574,6 @@ func signer(c *cli.Context) error {
|
|||||||
// it with the UI.
|
// it with the UI.
|
||||||
ui.RegisterUIServer(core.NewUIServerAPI(apiImpl))
|
ui.RegisterUIServer(core.NewUIServerAPI(apiImpl))
|
||||||
api = apiImpl
|
api = apiImpl
|
||||||
|
|
||||||
// Audit logging
|
// Audit logging
|
||||||
if logfile := c.String(auditLogFlag.Name); logfile != "" {
|
if logfile := c.String(auditLogFlag.Name); logfile != "" {
|
||||||
api, err = core.NewAuditLogger(logfile, api)
|
api, err = core.NewAuditLogger(logfile, api)
|
||||||
@ -725,7 +591,7 @@ func signer(c *cli.Context) error {
|
|||||||
{
|
{
|
||||||
Namespace: "account",
|
Namespace: "account",
|
||||||
Service: api,
|
Service: api,
|
||||||
},
|
Version: "1.0"},
|
||||||
}
|
}
|
||||||
if c.Bool(utils.HTTPEnabledFlag.Name) {
|
if c.Bool(utils.HTTPEnabledFlag.Name) {
|
||||||
vhosts := utils.SplitAndTrim(c.String(utils.HTTPVirtualHostsFlag.Name))
|
vhosts := utils.SplitAndTrim(c.String(utils.HTTPVirtualHostsFlag.Name))
|
||||||
@ -769,6 +635,7 @@ func signer(c *cli.Context) error {
|
|||||||
log.Info("IPC endpoint closed", "url", ipcapiURL)
|
log.Info("IPC endpoint closed", "url", ipcapiURL)
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.Bool(testFlag.Name) {
|
if c.Bool(testFlag.Name) {
|
||||||
log.Info("Performing UI test")
|
log.Info("Performing UI test")
|
||||||
go testExternalUI(apiImpl)
|
go testExternalUI(apiImpl)
|
||||||
@ -779,7 +646,8 @@ func signer(c *cli.Context) error {
|
|||||||
"extapi_version": core.ExternalAPIVersion,
|
"extapi_version": core.ExternalAPIVersion,
|
||||||
"extapi_http": extapiURL,
|
"extapi_http": extapiURL,
|
||||||
"extapi_ipc": ipcapiURL,
|
"extapi_ipc": ipcapiURL,
|
||||||
}})
|
},
|
||||||
|
})
|
||||||
|
|
||||||
abortChan := make(chan os.Signal, 1)
|
abortChan := make(chan os.Signal, 1)
|
||||||
signal.Notify(abortChan, os.Interrupt)
|
signal.Notify(abortChan, os.Interrupt)
|
||||||
@ -891,6 +759,7 @@ func confirm(text string) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func testExternalUI(api *core.SignerAPI) {
|
func testExternalUI(api *core.SignerAPI) {
|
||||||
|
|
||||||
ctx := context.WithValue(context.Background(), "remote", "clef binary")
|
ctx := context.WithValue(context.Background(), "remote", "clef binary")
|
||||||
ctx = context.WithValue(ctx, "scheme", "in-proc")
|
ctx = context.WithValue(ctx, "scheme", "in-proc")
|
||||||
ctx = context.WithValue(ctx, "local", "main")
|
ctx = context.WithValue(ctx, "local", "main")
|
||||||
@ -990,6 +859,7 @@ func testExternalUI(api *core.SignerAPI) {
|
|||||||
expectDeny("signdata - text", err)
|
expectDeny("signdata - text", err)
|
||||||
}
|
}
|
||||||
{ // Sign transaction
|
{ // Sign transaction
|
||||||
|
|
||||||
api.UI.ShowInfo("Please reject next transaction")
|
api.UI.ShowInfo("Please reject next transaction")
|
||||||
time.Sleep(delay)
|
time.Sleep(delay)
|
||||||
data := hexutil.Bytes([]byte{})
|
data := hexutil.Bytes([]byte{})
|
||||||
@ -1032,6 +902,7 @@ func testExternalUI(api *core.SignerAPI) {
|
|||||||
}
|
}
|
||||||
result := fmt.Sprintf("Tests completed. %d errors:\n%s\n", len(errs), strings.Join(errs, "\n"))
|
result := fmt.Sprintf("Tests completed. %d errors:\n%s\n", len(errs), strings.Join(errs, "\n"))
|
||||||
api.UI.ShowInfo(result)
|
api.UI.ShowInfo(result)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type encryptedSeedStorage struct {
|
type encryptedSeedStorage struct {
|
||||||
@ -1068,6 +939,7 @@ func decryptSeed(keyjson []byte, auth string) ([]byte, error) {
|
|||||||
|
|
||||||
// GenDoc outputs examples of all structures used in json-rpc communication
|
// GenDoc outputs examples of all structures used in json-rpc communication
|
||||||
func GenDoc(ctx *cli.Context) error {
|
func GenDoc(ctx *cli.Context) error {
|
||||||
|
|
||||||
var (
|
var (
|
||||||
a = common.HexToAddress("0xdeadbeef000000000000000000000000deadbeef")
|
a = common.HexToAddress("0xdeadbeef000000000000000000000000deadbeef")
|
||||||
b = common.HexToAddress("0x1111111122222222222233333333334444444444")
|
b = common.HexToAddress("0x1111111122222222222233333333334444444444")
|
||||||
@ -1177,6 +1049,7 @@ func GenDoc(ctx *cli.Context) error {
|
|||||||
var tx types.Transaction
|
var tx types.Transaction
|
||||||
tx.UnmarshalBinary(rlpdata)
|
tx.UnmarshalBinary(rlpdata)
|
||||||
add("OnApproved - SignTransactionResult", desc, ðapi.SignTransactionResult{Raw: rlpdata, Tx: &tx})
|
add("OnApproved - SignTransactionResult", desc, ðapi.SignTransactionResult{Raw: rlpdata, Tx: &tx})
|
||||||
|
|
||||||
}
|
}
|
||||||
{ // User input
|
{ // User input
|
||||||
add("UserInputRequest", "Sent when clef needs the user to provide data. If 'password' is true, the input field should be treated accordingly (echo-free)",
|
add("UserInputRequest", "Sent when clef needs the user to provide data. If 'password' is true, the input field should be treated accordingly (echo-free)",
|
||||||
|
@ -1,109 +0,0 @@
|
|||||||
// Copyright 2022 The go-ethereum Authors
|
|
||||||
// This file is part of go-ethereum.
|
|
||||||
//
|
|
||||||
// go-ethereum is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// go-ethereum is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU General Public License
|
|
||||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/reexec"
|
|
||||||
"github.com/ethereum/go-ethereum/internal/cmdtest"
|
|
||||||
)
|
|
||||||
|
|
||||||
const registeredName = "clef-test"
|
|
||||||
|
|
||||||
type testproc struct {
|
|
||||||
*cmdtest.TestCmd
|
|
||||||
|
|
||||||
// template variables for expect
|
|
||||||
Datadir string
|
|
||||||
Etherbase string
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
reexec.Register(registeredName, func() {
|
|
||||||
if err := app.Run(os.Args); err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
os.Exit(0)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMain(m *testing.M) {
|
|
||||||
// check if we have been reexec'd
|
|
||||||
if reexec.Init() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
os.Exit(m.Run())
|
|
||||||
}
|
|
||||||
|
|
||||||
// runClef spawns clef with the given command line args and adds keystore arg.
|
|
||||||
// This method creates a temporary keystore folder which will be removed after
|
|
||||||
// the test exits.
|
|
||||||
func runClef(t *testing.T, args ...string) *testproc {
|
|
||||||
ddir, err := os.MkdirTemp("", "cleftest-*")
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
t.Cleanup(func() {
|
|
||||||
os.RemoveAll(ddir)
|
|
||||||
})
|
|
||||||
return runWithKeystore(t, ddir, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// runWithKeystore spawns clef with the given command line args and adds keystore arg.
|
|
||||||
// This method does _not_ create the keystore folder, but it _does_ add the arg
|
|
||||||
// to the args.
|
|
||||||
func runWithKeystore(t *testing.T, keystore string, args ...string) *testproc {
|
|
||||||
args = append([]string{"--keystore", keystore}, args...)
|
|
||||||
tt := &testproc{Datadir: keystore}
|
|
||||||
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
|
|
||||||
// Boot "clef". This actually runs the test binary but the TestMain
|
|
||||||
// function will prevent any tests from running.
|
|
||||||
tt.Run(registeredName, args...)
|
|
||||||
return tt
|
|
||||||
}
|
|
||||||
|
|
||||||
func (proc *testproc) input(text string) *testproc {
|
|
||||||
proc.TestCmd.InputLine(text)
|
|
||||||
return proc
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
// waitForEndpoint waits for the rpc endpoint to appear, or
|
|
||||||
// aborts after 3 seconds.
|
|
||||||
func (proc *testproc) waitForEndpoint(t *testing.T) *testproc {
|
|
||||||
t.Helper()
|
|
||||||
timeout := 3 * time.Second
|
|
||||||
ipc := filepath.Join(proc.Datadir, "clef.ipc")
|
|
||||||
|
|
||||||
start := time.Now()
|
|
||||||
for time.Since(start) < timeout {
|
|
||||||
if _, err := os.Stat(ipc); !errors.Is(err, os.ErrNotExist) {
|
|
||||||
t.Logf("endpoint %v opened", ipc)
|
|
||||||
return proc
|
|
||||||
}
|
|
||||||
time.Sleep(200 * time.Millisecond)
|
|
||||||
}
|
|
||||||
t.Logf("stderr: \n%v", proc.StderrText())
|
|
||||||
t.Logf("stdout: \n%v", proc.Output())
|
|
||||||
t.Fatal("endpoint", ipc, "did not open within", timeout)
|
|
||||||
return proc
|
|
||||||
}
|
|
||||||
*/
|
|
@ -44,7 +44,7 @@ set to standard output. The following filters are supported:
|
|||||||
- `-limit <N>` limits the output set to N entries, taking the top N nodes by score
|
- `-limit <N>` limits the output set to N entries, taking the top N nodes by score
|
||||||
- `-ip <CIDR>` filters nodes by IP subnet
|
- `-ip <CIDR>` filters nodes by IP subnet
|
||||||
- `-min-age <duration>` filters nodes by 'first seen' time
|
- `-min-age <duration>` filters nodes by 'first seen' time
|
||||||
- `-eth-network <mainnet/rinkeby/goerli/sepolia>` filters nodes by "eth" ENR entry
|
- `-eth-network <mainnet/rinkeby/goerli/ropsten>` filters nodes by "eth" ENR entry
|
||||||
- `-les-server` filters nodes by LES server support
|
- `-les-server` filters nodes by LES server support
|
||||||
- `-snap` filters nodes by snap protocol support
|
- `-snap` filters nodes by snap protocol support
|
||||||
|
|
||||||
@ -135,6 +135,6 @@ replacing `<enode>` with the enode of the geth node:
|
|||||||
```
|
```
|
||||||
|
|
||||||
[eth]: https://github.com/ethereum/devp2p/blob/master/caps/eth.md
|
[eth]: https://github.com/ethereum/devp2p/blob/master/caps/eth.md
|
||||||
[dns-tutorial]: https://geth.ethereum.org/docs/developers/geth-developer/dns-discovery-setup
|
[dns-tutorial]: https://geth.ethereum.org/docs/developers/dns-discovery-setup
|
||||||
[discv4]: https://github.com/ethereum/devp2p/tree/master/discv4.md
|
[discv4]: https://github.com/ethereum/devp2p/tree/master/discv4.md
|
||||||
[discv5]: https://github.com/ethereum/devp2p/tree/master/discv5/discv5.md
|
[discv5]: https://github.com/ethereum/devp2p/tree/master/discv5/discv5.md
|
||||||
|
@ -17,8 +17,6 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
@ -36,17 +34,8 @@ type crawler struct {
|
|||||||
|
|
||||||
// settings
|
// settings
|
||||||
revalidateInterval time.Duration
|
revalidateInterval time.Duration
|
||||||
mu sync.RWMutex
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
|
||||||
nodeRemoved = iota
|
|
||||||
nodeSkipRecent
|
|
||||||
nodeSkipIncompat
|
|
||||||
nodeAdded
|
|
||||||
nodeUpdated
|
|
||||||
)
|
|
||||||
|
|
||||||
type resolver interface {
|
type resolver interface {
|
||||||
RequestENR(*enode.Node) (*enode.Node, error)
|
RequestENR(*enode.Node) (*enode.Node, error)
|
||||||
}
|
}
|
||||||
@ -70,59 +59,23 @@ func newCrawler(input nodeSet, disc resolver, iters ...enode.Iterator) *crawler
|
|||||||
return c
|
return c
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *crawler) run(timeout time.Duration, nthreads int) nodeSet {
|
func (c *crawler) run(timeout time.Duration) nodeSet {
|
||||||
var (
|
var (
|
||||||
timeoutTimer = time.NewTimer(timeout)
|
timeoutTimer = time.NewTimer(timeout)
|
||||||
timeoutCh <-chan time.Time
|
timeoutCh <-chan time.Time
|
||||||
statusTicker = time.NewTicker(time.Second * 8)
|
|
||||||
doneCh = make(chan enode.Iterator, len(c.iters))
|
doneCh = make(chan enode.Iterator, len(c.iters))
|
||||||
liveIters = len(c.iters)
|
liveIters = len(c.iters)
|
||||||
)
|
)
|
||||||
if nthreads < 1 {
|
|
||||||
nthreads = 1
|
|
||||||
}
|
|
||||||
defer timeoutTimer.Stop()
|
defer timeoutTimer.Stop()
|
||||||
defer statusTicker.Stop()
|
|
||||||
for _, it := range c.iters {
|
for _, it := range c.iters {
|
||||||
go c.runIterator(doneCh, it)
|
go c.runIterator(doneCh, it)
|
||||||
}
|
}
|
||||||
var (
|
|
||||||
added uint64
|
|
||||||
updated uint64
|
|
||||||
skipped uint64
|
|
||||||
recent uint64
|
|
||||||
removed uint64
|
|
||||||
wg sync.WaitGroup
|
|
||||||
)
|
|
||||||
wg.Add(nthreads)
|
|
||||||
for i := 0; i < nthreads; i++ {
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case n := <-c.ch:
|
|
||||||
switch c.updateNode(n) {
|
|
||||||
case nodeSkipIncompat:
|
|
||||||
atomic.AddUint64(&skipped, 1)
|
|
||||||
case nodeSkipRecent:
|
|
||||||
atomic.AddUint64(&recent, 1)
|
|
||||||
case nodeRemoved:
|
|
||||||
atomic.AddUint64(&removed, 1)
|
|
||||||
case nodeAdded:
|
|
||||||
atomic.AddUint64(&added, 1)
|
|
||||||
default:
|
|
||||||
atomic.AddUint64(&updated, 1)
|
|
||||||
}
|
|
||||||
case <-c.closed:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
loop:
|
loop:
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
|
case n := <-c.ch:
|
||||||
|
c.updateNode(n)
|
||||||
case it := <-doneCh:
|
case it := <-doneCh:
|
||||||
if it == c.inputIter {
|
if it == c.inputIter {
|
||||||
// Enable timeout when we're done revalidating the input nodes.
|
// Enable timeout when we're done revalidating the input nodes.
|
||||||
@ -136,13 +89,6 @@ loop:
|
|||||||
}
|
}
|
||||||
case <-timeoutCh:
|
case <-timeoutCh:
|
||||||
break loop
|
break loop
|
||||||
case <-statusTicker.C:
|
|
||||||
log.Info("Crawling in progress",
|
|
||||||
"added", atomic.LoadUint64(&added),
|
|
||||||
"updated", atomic.LoadUint64(&updated),
|
|
||||||
"removed", atomic.LoadUint64(&removed),
|
|
||||||
"ignored(recent)", atomic.LoadUint64(&recent),
|
|
||||||
"ignored(incompatible)", atomic.LoadUint64(&skipped))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -153,7 +99,6 @@ loop:
|
|||||||
for ; liveIters > 0; liveIters-- {
|
for ; liveIters > 0; liveIters-- {
|
||||||
<-doneCh
|
<-doneCh
|
||||||
}
|
}
|
||||||
wg.Wait()
|
|
||||||
return c.output
|
return c.output
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -168,26 +113,22 @@ func (c *crawler) runIterator(done chan<- enode.Iterator, it enode.Iterator) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// updateNode updates the info about the given node, and returns a status
|
func (c *crawler) updateNode(n *enode.Node) {
|
||||||
// about what changed
|
|
||||||
func (c *crawler) updateNode(n *enode.Node) int {
|
|
||||||
c.mu.RLock()
|
|
||||||
node, ok := c.output[n.ID()]
|
node, ok := c.output[n.ID()]
|
||||||
c.mu.RUnlock()
|
|
||||||
|
|
||||||
// Skip validation of recently-seen nodes.
|
// Skip validation of recently-seen nodes.
|
||||||
if ok && time.Since(node.LastCheck) < c.revalidateInterval {
|
if ok && time.Since(node.LastCheck) < c.revalidateInterval {
|
||||||
return nodeSkipRecent
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Request the node record.
|
// Request the node record.
|
||||||
status := nodeUpdated
|
nn, err := c.disc.RequestENR(n)
|
||||||
node.LastCheck = truncNow()
|
node.LastCheck = truncNow()
|
||||||
if nn, err := c.disc.RequestENR(n); err != nil {
|
if err != nil {
|
||||||
if node.Score == 0 {
|
if node.Score == 0 {
|
||||||
// Node doesn't implement EIP-868.
|
// Node doesn't implement EIP-868.
|
||||||
log.Debug("Skipping node", "id", n.ID())
|
log.Debug("Skipping node", "id", n.ID())
|
||||||
return nodeSkipIncompat
|
return
|
||||||
}
|
}
|
||||||
node.Score /= 2
|
node.Score /= 2
|
||||||
} else {
|
} else {
|
||||||
@ -196,21 +137,18 @@ func (c *crawler) updateNode(n *enode.Node) int {
|
|||||||
node.Score++
|
node.Score++
|
||||||
if node.FirstResponse.IsZero() {
|
if node.FirstResponse.IsZero() {
|
||||||
node.FirstResponse = node.LastCheck
|
node.FirstResponse = node.LastCheck
|
||||||
status = nodeAdded
|
|
||||||
}
|
}
|
||||||
node.LastResponse = node.LastCheck
|
node.LastResponse = node.LastCheck
|
||||||
}
|
}
|
||||||
|
|
||||||
// Store/update node in output set.
|
// Store/update node in output set.
|
||||||
c.mu.Lock()
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
if node.Score <= 0 {
|
if node.Score <= 0 {
|
||||||
log.Debug("Removing node", "id", n.ID())
|
log.Info("Removing node", "id", n.ID())
|
||||||
delete(c.output, n.ID())
|
delete(c.output, n.ID())
|
||||||
return nodeRemoved
|
} else {
|
||||||
}
|
log.Info("Updating node", "id", n.ID(), "seq", n.Seq(), "score", node.Score)
|
||||||
log.Debug("Updating node", "id", n.ID(), "seq", n.Seq(), "score", node.Score)
|
|
||||||
c.output[n.ID()] = node
|
c.output[n.ID()] = node
|
||||||
return status
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func truncNow() time.Time {
|
func truncNow() time.Time {
|
||||||
|
@ -19,14 +19,12 @@ package main
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test"
|
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/internal/flags"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
@ -51,34 +49,32 @@ var (
|
|||||||
Usage: "Sends ping to a node",
|
Usage: "Sends ping to a node",
|
||||||
Action: discv4Ping,
|
Action: discv4Ping,
|
||||||
ArgsUsage: "<node>",
|
ArgsUsage: "<node>",
|
||||||
Flags: discoveryNodeFlags,
|
|
||||||
}
|
}
|
||||||
discv4RequestRecordCommand = &cli.Command{
|
discv4RequestRecordCommand = &cli.Command{
|
||||||
Name: "requestenr",
|
Name: "requestenr",
|
||||||
Usage: "Requests a node record using EIP-868 enrRequest",
|
Usage: "Requests a node record using EIP-868 enrRequest",
|
||||||
Action: discv4RequestRecord,
|
Action: discv4RequestRecord,
|
||||||
ArgsUsage: "<node>",
|
ArgsUsage: "<node>",
|
||||||
Flags: discoveryNodeFlags,
|
|
||||||
}
|
}
|
||||||
discv4ResolveCommand = &cli.Command{
|
discv4ResolveCommand = &cli.Command{
|
||||||
Name: "resolve",
|
Name: "resolve",
|
||||||
Usage: "Finds a node in the DHT",
|
Usage: "Finds a node in the DHT",
|
||||||
Action: discv4Resolve,
|
Action: discv4Resolve,
|
||||||
ArgsUsage: "<node>",
|
ArgsUsage: "<node>",
|
||||||
Flags: discoveryNodeFlags,
|
Flags: []cli.Flag{bootnodesFlag},
|
||||||
}
|
}
|
||||||
discv4ResolveJSONCommand = &cli.Command{
|
discv4ResolveJSONCommand = &cli.Command{
|
||||||
Name: "resolve-json",
|
Name: "resolve-json",
|
||||||
Usage: "Re-resolves nodes in a nodes.json file",
|
Usage: "Re-resolves nodes in a nodes.json file",
|
||||||
Action: discv4ResolveJSON,
|
Action: discv4ResolveJSON,
|
||||||
Flags: discoveryNodeFlags,
|
Flags: []cli.Flag{bootnodesFlag},
|
||||||
ArgsUsage: "<nodes.json file>",
|
ArgsUsage: "<nodes.json file>",
|
||||||
}
|
}
|
||||||
discv4CrawlCommand = &cli.Command{
|
discv4CrawlCommand = &cli.Command{
|
||||||
Name: "crawl",
|
Name: "crawl",
|
||||||
Usage: "Updates a nodes.json file with random nodes found in the DHT",
|
Usage: "Updates a nodes.json file with random nodes found in the DHT",
|
||||||
Action: discv4Crawl,
|
Action: discv4Crawl,
|
||||||
Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{crawlTimeoutFlag, crawlParallelismFlag}),
|
Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag},
|
||||||
}
|
}
|
||||||
discv4TestCommand = &cli.Command{
|
discv4TestCommand = &cli.Command{
|
||||||
Name: "test",
|
Name: "test",
|
||||||
@ -111,20 +107,11 @@ var (
|
|||||||
Name: "addr",
|
Name: "addr",
|
||||||
Usage: "Listening address",
|
Usage: "Listening address",
|
||||||
}
|
}
|
||||||
extAddrFlag = &cli.StringFlag{
|
|
||||||
Name: "extaddr",
|
|
||||||
Usage: "UDP endpoint announced in ENR. You can provide a bare IP address or IP:port as the value of this flag.",
|
|
||||||
}
|
|
||||||
crawlTimeoutFlag = &cli.DurationFlag{
|
crawlTimeoutFlag = &cli.DurationFlag{
|
||||||
Name: "timeout",
|
Name: "timeout",
|
||||||
Usage: "Time limit for the crawl.",
|
Usage: "Time limit for the crawl.",
|
||||||
Value: 30 * time.Minute,
|
Value: 30 * time.Minute,
|
||||||
}
|
}
|
||||||
crawlParallelismFlag = &cli.IntFlag{
|
|
||||||
Name: "parallel",
|
|
||||||
Usage: "How many parallel discoveries to attempt.",
|
|
||||||
Value: 16,
|
|
||||||
}
|
|
||||||
remoteEnodeFlag = &cli.StringFlag{
|
remoteEnodeFlag = &cli.StringFlag{
|
||||||
Name: "remote",
|
Name: "remote",
|
||||||
Usage: "Enode of the remote node under test",
|
Usage: "Enode of the remote node under test",
|
||||||
@ -132,14 +119,6 @@ var (
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
var discoveryNodeFlags = []cli.Flag{
|
|
||||||
bootnodesFlag,
|
|
||||||
nodekeyFlag,
|
|
||||||
nodedbFlag,
|
|
||||||
listenAddrFlag,
|
|
||||||
extAddrFlag,
|
|
||||||
}
|
|
||||||
|
|
||||||
func discv4Ping(ctx *cli.Context) error {
|
func discv4Ping(ctx *cli.Context) error {
|
||||||
n := getNodeArg(ctx)
|
n := getNodeArg(ctx)
|
||||||
disc := startV4(ctx)
|
disc := startV4(ctx)
|
||||||
@ -200,7 +179,7 @@ func discv4ResolveJSON(ctx *cli.Context) error {
|
|||||||
defer disc.Close()
|
defer disc.Close()
|
||||||
c := newCrawler(inputSet, disc, enode.IterNodes(nodeargs))
|
c := newCrawler(inputSet, disc, enode.IterNodes(nodeargs))
|
||||||
c.revalidateInterval = 0
|
c.revalidateInterval = 0
|
||||||
output := c.run(0, 1)
|
output := c.run(0)
|
||||||
writeNodesJSON(nodesFile, output)
|
writeNodesJSON(nodesFile, output)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -219,7 +198,7 @@ func discv4Crawl(ctx *cli.Context) error {
|
|||||||
defer disc.Close()
|
defer disc.Close()
|
||||||
c := newCrawler(inputSet, disc, disc.RandomNodes())
|
c := newCrawler(inputSet, disc, disc.RandomNodes())
|
||||||
c.revalidateInterval = 10 * time.Minute
|
c.revalidateInterval = 10 * time.Minute
|
||||||
output := c.run(ctx.Duration(crawlTimeoutFlag.Name), ctx.Int(crawlParallelismFlag.Name))
|
output := c.run(ctx.Duration(crawlTimeoutFlag.Name))
|
||||||
writeNodesJSON(nodesFile, output)
|
writeNodesJSON(nodesFile, output)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -239,7 +218,7 @@ func discv4Test(ctx *cli.Context) error {
|
|||||||
// startV4 starts an ephemeral discovery V4 node.
|
// startV4 starts an ephemeral discovery V4 node.
|
||||||
func startV4(ctx *cli.Context) *discover.UDPv4 {
|
func startV4(ctx *cli.Context) *discover.UDPv4 {
|
||||||
ln, config := makeDiscoveryConfig(ctx)
|
ln, config := makeDiscoveryConfig(ctx)
|
||||||
socket := listen(ctx, ln)
|
socket := listen(ln, ctx.String(listenAddrFlag.Name))
|
||||||
disc, err := discover.ListenV4(socket, ln, config)
|
disc, err := discover.ListenV4(socket, ln, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
exit(err)
|
exit(err)
|
||||||
@ -277,28 +256,7 @@ func makeDiscoveryConfig(ctx *cli.Context) (*enode.LocalNode, discover.Config) {
|
|||||||
return ln, cfg
|
return ln, cfg
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseExtAddr(spec string) (ip net.IP, port int, ok bool) {
|
func listen(ln *enode.LocalNode, addr string) *net.UDPConn {
|
||||||
ip = net.ParseIP(spec)
|
|
||||||
if ip != nil {
|
|
||||||
return ip, 0, true
|
|
||||||
}
|
|
||||||
host, portstr, err := net.SplitHostPort(spec)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, false
|
|
||||||
}
|
|
||||||
ip = net.ParseIP(host)
|
|
||||||
if ip == nil {
|
|
||||||
return nil, 0, false
|
|
||||||
}
|
|
||||||
port, err = strconv.Atoi(portstr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, false
|
|
||||||
}
|
|
||||||
return ip, port, true
|
|
||||||
}
|
|
||||||
|
|
||||||
func listen(ctx *cli.Context, ln *enode.LocalNode) *net.UDPConn {
|
|
||||||
addr := ctx.String(listenAddrFlag.Name)
|
|
||||||
if addr == "" {
|
if addr == "" {
|
||||||
addr = "0.0.0.0:0"
|
addr = "0.0.0.0:0"
|
||||||
}
|
}
|
||||||
@ -306,8 +264,6 @@ func listen(ctx *cli.Context, ln *enode.LocalNode) *net.UDPConn {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
exit(err)
|
exit(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Configure UDP endpoint in ENR from listener address.
|
|
||||||
usocket := socket.(*net.UDPConn)
|
usocket := socket.(*net.UDPConn)
|
||||||
uaddr := socket.LocalAddr().(*net.UDPAddr)
|
uaddr := socket.LocalAddr().(*net.UDPAddr)
|
||||||
if uaddr.IP.IsUnspecified() {
|
if uaddr.IP.IsUnspecified() {
|
||||||
@ -316,22 +272,6 @@ func listen(ctx *cli.Context, ln *enode.LocalNode) *net.UDPConn {
|
|||||||
ln.SetFallbackIP(uaddr.IP)
|
ln.SetFallbackIP(uaddr.IP)
|
||||||
}
|
}
|
||||||
ln.SetFallbackUDP(uaddr.Port)
|
ln.SetFallbackUDP(uaddr.Port)
|
||||||
|
|
||||||
// If an ENR endpoint is set explicitly on the command-line, override
|
|
||||||
// the information from the listening address. Note this is careful not
|
|
||||||
// to set the UDP port if the external address doesn't have it.
|
|
||||||
extAddr := ctx.String(extAddrFlag.Name)
|
|
||||||
if extAddr != "" {
|
|
||||||
ip, port, ok := parseExtAddr(extAddr)
|
|
||||||
if !ok {
|
|
||||||
exit(fmt.Errorf("-%s: invalid external address %q", extAddrFlag.Name, extAddr))
|
|
||||||
}
|
|
||||||
ln.SetStaticIP(ip)
|
|
||||||
if port != 0 {
|
|
||||||
ln.SetFallbackUDP(port)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return usocket
|
return usocket
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -22,7 +22,6 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v5test"
|
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v5test"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/internal/flags"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
)
|
)
|
||||||
@ -43,21 +42,18 @@ var (
|
|||||||
Name: "ping",
|
Name: "ping",
|
||||||
Usage: "Sends ping to a node",
|
Usage: "Sends ping to a node",
|
||||||
Action: discv5Ping,
|
Action: discv5Ping,
|
||||||
Flags: discoveryNodeFlags,
|
|
||||||
}
|
}
|
||||||
discv5ResolveCommand = &cli.Command{
|
discv5ResolveCommand = &cli.Command{
|
||||||
Name: "resolve",
|
Name: "resolve",
|
||||||
Usage: "Finds a node in the DHT",
|
Usage: "Finds a node in the DHT",
|
||||||
Action: discv5Resolve,
|
Action: discv5Resolve,
|
||||||
Flags: discoveryNodeFlags,
|
Flags: []cli.Flag{bootnodesFlag},
|
||||||
}
|
}
|
||||||
discv5CrawlCommand = &cli.Command{
|
discv5CrawlCommand = &cli.Command{
|
||||||
Name: "crawl",
|
Name: "crawl",
|
||||||
Usage: "Updates a nodes.json file with random nodes found in the DHT",
|
Usage: "Updates a nodes.json file with random nodes found in the DHT",
|
||||||
Action: discv5Crawl,
|
Action: discv5Crawl,
|
||||||
Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{
|
Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag},
|
||||||
crawlTimeoutFlag,
|
|
||||||
}),
|
|
||||||
}
|
}
|
||||||
discv5TestCommand = &cli.Command{
|
discv5TestCommand = &cli.Command{
|
||||||
Name: "test",
|
Name: "test",
|
||||||
@ -74,7 +70,12 @@ var (
|
|||||||
Name: "listen",
|
Name: "listen",
|
||||||
Usage: "Runs a node",
|
Usage: "Runs a node",
|
||||||
Action: discv5Listen,
|
Action: discv5Listen,
|
||||||
Flags: discoveryNodeFlags,
|
Flags: []cli.Flag{
|
||||||
|
bootnodesFlag,
|
||||||
|
nodekeyFlag,
|
||||||
|
nodedbFlag,
|
||||||
|
listenAddrFlag,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -110,7 +111,7 @@ func discv5Crawl(ctx *cli.Context) error {
|
|||||||
defer disc.Close()
|
defer disc.Close()
|
||||||
c := newCrawler(inputSet, disc, disc.RandomNodes())
|
c := newCrawler(inputSet, disc, disc.RandomNodes())
|
||||||
c.revalidateInterval = 10 * time.Minute
|
c.revalidateInterval = 10 * time.Minute
|
||||||
output := c.run(ctx.Duration(crawlTimeoutFlag.Name), ctx.Int(crawlParallelismFlag.Name))
|
output := c.run(ctx.Duration(crawlTimeoutFlag.Name))
|
||||||
writeNodesJSON(nodesFile, output)
|
writeNodesJSON(nodesFile, output)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -136,7 +137,7 @@ func discv5Listen(ctx *cli.Context) error {
|
|||||||
// startV5 starts an ephemeral discovery v5 node.
|
// startV5 starts an ephemeral discovery v5 node.
|
||||||
func startV5(ctx *cli.Context) *discover.UDPv5 {
|
func startV5(ctx *cli.Context) *discover.UDPv5 {
|
||||||
ln, config := makeDiscoveryConfig(ctx)
|
ln, config := makeDiscoveryConfig(ctx)
|
||||||
socket := listen(ctx, ln)
|
socket := listen(ln, ctx.String(listenAddrFlag.Name))
|
||||||
disc, err := discover.ListenV5(socket, ln, config)
|
disc, err := discover.ListenV5(socket, ln, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
exit(err)
|
exit(err)
|
||||||
|
@ -126,51 +126,41 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Iterate over the new records and inject anything missing.
|
// Iterate over the new records and inject anything missing.
|
||||||
log.Info("Updating DNS entries")
|
|
||||||
created := 0
|
|
||||||
updated := 0
|
|
||||||
skipped := 0
|
|
||||||
for path, val := range records {
|
for path, val := range records {
|
||||||
old, exists := existing[path]
|
old, exists := existing[path]
|
||||||
if !exists {
|
if !exists {
|
||||||
// Entry is unknown, push a new one to Cloudflare.
|
// Entry is unknown, push a new one to Cloudflare.
|
||||||
log.Debug(fmt.Sprintf("Creating %s = %q", path, val))
|
log.Info(fmt.Sprintf("Creating %s = %q", path, val))
|
||||||
created++
|
|
||||||
ttl := rootTTL
|
ttl := rootTTL
|
||||||
if path != name {
|
if path != name {
|
||||||
ttl = treeNodeTTLCloudflare // Max TTL permitted by Cloudflare
|
ttl = treeNodeTTLCloudflare // Max TTL permitted by Cloudflare
|
||||||
|
|
||||||
}
|
}
|
||||||
record := cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl}
|
record := cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl}
|
||||||
_, err = c.CreateDNSRecord(context.Background(), c.zoneID, record)
|
_, err = c.CreateDNSRecord(context.Background(), c.zoneID, record)
|
||||||
} else if old.Content != val {
|
} else if old.Content != val {
|
||||||
// Entry already exists, only change its content.
|
// Entry already exists, only change its content.
|
||||||
log.Info(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val))
|
log.Info(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val))
|
||||||
updated++
|
|
||||||
old.Content = val
|
old.Content = val
|
||||||
err = c.UpdateDNSRecord(context.Background(), c.zoneID, old.ID, old)
|
err = c.UpdateDNSRecord(context.Background(), c.zoneID, old.ID, old)
|
||||||
} else {
|
} else {
|
||||||
skipped++
|
|
||||||
log.Debug(fmt.Sprintf("Skipping %s = %q", path, val))
|
log.Debug(fmt.Sprintf("Skipping %s = %q", path, val))
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to publish %s: %v", path, err)
|
return fmt.Errorf("failed to publish %s: %v", path, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
log.Info("Updated DNS entries", "new", created, "updated", updated, "untouched", skipped)
|
|
||||||
// Iterate over the old records and delete anything stale.
|
// Iterate over the old records and delete anything stale.
|
||||||
deleted := 0
|
|
||||||
log.Info("Deleting stale DNS entries")
|
|
||||||
for path, entry := range existing {
|
for path, entry := range existing {
|
||||||
if _, ok := records[path]; ok {
|
if _, ok := records[path]; ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// Stale entry, nuke it.
|
// Stale entry, nuke it.
|
||||||
log.Debug(fmt.Sprintf("Deleting %s = %q", path, entry.Content))
|
log.Info(fmt.Sprintf("Deleting %s = %q", path, entry.Content))
|
||||||
deleted++
|
|
||||||
if err := c.DeleteDNSRecord(context.Background(), c.zoneID, entry.ID); err != nil {
|
if err := c.DeleteDNSRecord(context.Background(), c.zoneID, entry.ID); err != nil {
|
||||||
return fmt.Errorf("failed to delete %s: %v", path, err)
|
return fmt.Errorf("failed to delete %s: %v", path, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
log.Info("Deleted stale DNS entries", "count", deleted)
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -221,13 +221,7 @@ func (c *route53Client) computeChanges(name string, records map[string]string, e
|
|||||||
}
|
}
|
||||||
records = lrecords
|
records = lrecords
|
||||||
|
|
||||||
var (
|
var changes []types.Change
|
||||||
changes []types.Change
|
|
||||||
inserts int
|
|
||||||
upserts int
|
|
||||||
skips int
|
|
||||||
)
|
|
||||||
|
|
||||||
for path, newValue := range records {
|
for path, newValue := range records {
|
||||||
prevRecords, exists := existing[path]
|
prevRecords, exists := existing[path]
|
||||||
prevValue := strings.Join(prevRecords.values, "")
|
prevValue := strings.Join(prevRecords.values, "")
|
||||||
@ -243,30 +237,20 @@ func (c *route53Client) computeChanges(name string, records map[string]string, e
|
|||||||
|
|
||||||
if !exists {
|
if !exists {
|
||||||
// Entry is unknown, push a new one
|
// Entry is unknown, push a new one
|
||||||
log.Debug(fmt.Sprintf("Creating %s = %s", path, newValue))
|
log.Info(fmt.Sprintf("Creating %s = %s", path, newValue))
|
||||||
changes = append(changes, newTXTChange("CREATE", path, ttl, newValue))
|
changes = append(changes, newTXTChange("CREATE", path, ttl, newValue))
|
||||||
inserts++
|
|
||||||
} else if prevValue != newValue || prevRecords.ttl != ttl {
|
} else if prevValue != newValue || prevRecords.ttl != ttl {
|
||||||
// Entry already exists, only change its content.
|
// Entry already exists, only change its content.
|
||||||
log.Info(fmt.Sprintf("Updating %s from %s to %s", path, prevValue, newValue))
|
log.Info(fmt.Sprintf("Updating %s from %s to %s", path, prevValue, newValue))
|
||||||
changes = append(changes, newTXTChange("UPSERT", path, ttl, newValue))
|
changes = append(changes, newTXTChange("UPSERT", path, ttl, newValue))
|
||||||
upserts++
|
|
||||||
} else {
|
} else {
|
||||||
log.Debug(fmt.Sprintf("Skipping %s = %s", path, newValue))
|
log.Debug(fmt.Sprintf("Skipping %s = %s", path, newValue))
|
||||||
skips++
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Iterate over the old records and delete anything stale.
|
// Iterate over the old records and delete anything stale.
|
||||||
deletions := makeDeletionChanges(existing, records)
|
changes = append(changes, makeDeletionChanges(existing, records)...)
|
||||||
changes = append(changes, deletions...)
|
|
||||||
|
|
||||||
log.Info("Computed DNS changes",
|
|
||||||
"changes", len(changes),
|
|
||||||
"inserts", inserts,
|
|
||||||
"skips", skips,
|
|
||||||
"deleted", len(deletions),
|
|
||||||
"upserts", upserts)
|
|
||||||
// Ensure changes are in the correct order.
|
// Ensure changes are in the correct order.
|
||||||
sortChanges(changes)
|
sortChanges(changes)
|
||||||
return changes
|
return changes
|
||||||
@ -279,7 +263,7 @@ func makeDeletionChanges(records map[string]recordSet, keep map[string]string) [
|
|||||||
if _, ok := keep[path]; ok {
|
if _, ok := keep[path]; ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
log.Debug(fmt.Sprintf("Deleting %s = %s", path, strings.Join(set.values, "")))
|
log.Info(fmt.Sprintf("Deleting %s = %s", path, strings.Join(set.values, "")))
|
||||||
changes = append(changes, newTXTChange("DELETE", path, set.ttl, set.values...))
|
changes = append(changes, newTXTChange("DELETE", path, set.ttl, set.values...))
|
||||||
}
|
}
|
||||||
return changes
|
return changes
|
||||||
@ -345,9 +329,8 @@ func (c *route53Client) collectRecords(name string) (map[string]recordSet, error
|
|||||||
var req route53.ListResourceRecordSetsInput
|
var req route53.ListResourceRecordSetsInput
|
||||||
req.HostedZoneId = &c.zoneID
|
req.HostedZoneId = &c.zoneID
|
||||||
existing := make(map[string]recordSet)
|
existing := make(map[string]recordSet)
|
||||||
log.Info("Loading existing TXT records", "name", name, "zone", c.zoneID)
|
|
||||||
for page := 0; ; page++ {
|
for page := 0; ; page++ {
|
||||||
log.Debug("Loading existing TXT records", "name", name, "zone", c.zoneID, "page", page)
|
log.Info("Loading existing TXT records", "name", name, "zone", c.zoneID, "page", page)
|
||||||
resp, err := c.api.ListResourceRecordSets(context.TODO(), &req)
|
resp, err := c.api.ListResourceRecordSets(context.TODO(), &req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return existing, err
|
return existing, err
|
||||||
@ -377,7 +360,7 @@ func (c *route53Client) collectRecords(name string) (map[string]recordSet, error
|
|||||||
req.StartRecordName = resp.NextRecordName
|
req.StartRecordName = resp.NextRecordName
|
||||||
req.StartRecordType = resp.NextRecordType
|
req.StartRecordType = resp.NextRecordType
|
||||||
}
|
}
|
||||||
log.Info("Loaded existing TXT records", "name", name, "zone", c.zoneID, "records", len(existing))
|
|
||||||
return existing, nil
|
return existing, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -76,7 +76,7 @@ func (c *Chain) RootAt(height int) common.Hash {
|
|||||||
|
|
||||||
// ForkID gets the fork id of the chain.
|
// ForkID gets the fork id of the chain.
|
||||||
func (c *Chain) ForkID() forkid.ID {
|
func (c *Chain) ForkID() forkid.ID {
|
||||||
return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()), c.blocks[0].Time())
|
return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shorten returns a copy chain of a desired height from the imported
|
// Shorten returns a copy chain of a desired height from the imported
|
||||||
@ -96,12 +96,12 @@ func (c *Chain) Head() *types.Block {
|
|||||||
return c.blocks[c.Len()-1]
|
return c.blocks[c.Len()-1]
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Chain) GetHeaders(req *GetBlockHeaders) ([]*types.Header, error) {
|
func (c *Chain) GetHeaders(req GetBlockHeaders) (BlockHeaders, error) {
|
||||||
if req.Amount < 1 {
|
if req.Amount < 1 {
|
||||||
return nil, fmt.Errorf("no block headers requested")
|
return nil, fmt.Errorf("no block headers requested")
|
||||||
}
|
}
|
||||||
|
|
||||||
headers := make([]*types.Header, req.Amount)
|
headers := make(BlockHeaders, req.Amount)
|
||||||
var blockNumber uint64
|
var blockNumber uint64
|
||||||
|
|
||||||
// range over blocks to check if our chain has the requested header
|
// range over blocks to check if our chain has the requested header
|
||||||
@ -119,6 +119,7 @@ func (c *Chain) GetHeaders(req *GetBlockHeaders) ([]*types.Header, error) {
|
|||||||
for i := 1; i < int(req.Amount); i++ {
|
for i := 1; i < int(req.Amount); i++ {
|
||||||
blockNumber -= (1 - req.Skip)
|
blockNumber -= (1 - req.Skip)
|
||||||
headers[i] = c.blocks[blockNumber].Header()

}

return headers, nil
@@ -139,7 +140,7 @@ func loadChain(chainfile string, genesis string) (*Chain, error) {
if err != nil {
return nil, err
}
-gblock := gen.ToBlock()
+gblock := gen.ToBlock(nil)

blocks, err := blocksFromFile(chainfile, gblock)
if err != nil {

@@ -21,7 +21,6 @@ import (
"strconv"
"testing"

-"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/p2p"
"github.com/stretchr/testify/assert"
@@ -141,18 +140,18 @@ func TestChain_GetHeaders(t *testing.T) {

var tests = []struct {
req GetBlockHeaders
-expected []*types.Header
+expected BlockHeaders
}{
{
req: GetBlockHeaders{
-GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+Origin: eth.HashOrNumber{
-Origin: eth.HashOrNumber{Number: uint64(2)},
+Number: uint64(2),
+},
Amount: uint64(5),
Skip: 1,
Reverse: false,
},
-},
+expected: BlockHeaders{
-expected: []*types.Header{
chain.blocks[2].Header(),
chain.blocks[4].Header(),
chain.blocks[6].Header(),
@@ -162,14 +161,14 @@ func TestChain_GetHeaders(t *testing.T) {
},
{
req: GetBlockHeaders{
-GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+Origin: eth.HashOrNumber{
-Origin: eth.HashOrNumber{Number: uint64(chain.Len() - 1)},
+Number: uint64(chain.Len() - 1),
+},
Amount: uint64(3),
Skip: 0,
Reverse: true,
},
-},
+expected: BlockHeaders{
-expected: []*types.Header{
chain.blocks[chain.Len()-1].Header(),
chain.blocks[chain.Len()-2].Header(),
chain.blocks[chain.Len()-3].Header(),
@@ -177,14 +176,14 @@ func TestChain_GetHeaders(t *testing.T) {
},
{
req: GetBlockHeaders{
-GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+Origin: eth.HashOrNumber{
-Origin: eth.HashOrNumber{Hash: chain.Head().Hash()},
+Hash: chain.Head().Hash(),
+},
Amount: uint64(1),
Skip: 0,
Reverse: false,
},
-},
+expected: BlockHeaders{
-expected: []*types.Header{
chain.Head().Header(),
},
},
@@ -192,7 +191,7 @@ func TestChain_GetHeaders(t *testing.T) {

for i, tt := range tests {
t.Run(strconv.Itoa(i), func(t *testing.T) {
-headers, err := chain.GetHeaders(&tt.req)
+headers, err := chain.GetHeaders(tt.req)
if err != nil {
t.Fatal(err)
}
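The table test above expects headers for blocks 2, 4 and 6 when asking from origin 2 with `Skip: 1`, i.e. every (skip+1)-th block up to `Amount` headers. For context only, a self-contained sketch of how origin, amount, skip and reverse map to block numbers; the helper and its name are made up for illustration and are not the `chain.GetHeaders` implementation being diffed.

```go
package main

import "fmt"

// requestedNumbers lists the block numbers a GetBlockHeaders-style query
// covers: starting at origin, stepping by skip+1, up to amount entries,
// optionally walking backwards.
func requestedNumbers(origin, amount, skip uint64, reverse bool) []uint64 {
	step := skip + 1
	nums := make([]uint64, 0, amount)
	cur := origin
	for i := uint64(0); i < amount; i++ {
		nums = append(nums, cur)
		if reverse {
			if cur < step {
				break // would underflow below block 0
			}
			cur -= step
		} else {
			cur += step
		}
	}
	return nums
}

func main() {
	fmt.Println(requestedNumbers(2, 5, 1, false)) // [2 4 6 8 10]
	fmt.Println(requestedNumbers(9, 3, 0, true))  // [9 8 7]
}
```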
@@ -43,6 +43,21 @@ var (
timeout = 20 * time.Second
)

+// Is_66 checks if the node supports the eth66 protocol version,
+// and if not, exists the test suite
+func (s *Suite) Is_66(t *utesting.T) {
+conn, err := s.dial66()
+if err != nil {
+t.Fatalf("dial failed: %v", err)
+}
+if err := conn.handshake(); err != nil {
+t.Fatalf("handshake failed: %v", err)
+}
+if conn.negotiatedProtoVersion < 66 {
+t.Fail()
+}
+}

// dial attempts to dial the given node and perform a handshake,
// returning the created Conn if successful.
func (s *Suite) dial() (*Conn, error) {
@@ -61,20 +76,34 @@ func (s *Suite) dial() (*Conn, error) {
}
// set default p2p capabilities
conn.caps = []p2p.Cap{
-{Name: "eth", Version: 66},
+{Name: "eth", Version: 64},
-{Name: "eth", Version: 67},
+{Name: "eth", Version: 65},
-{Name: "eth", Version: 68},
}
-conn.ourHighestProtoVersion = 68
+conn.ourHighestProtoVersion = 65
return &conn, nil
}

-// dialSnap creates a connection with snap/1 capability.
+// dial66 attempts to dial the given node and perform a handshake,
-func (s *Suite) dialSnap() (*Conn, error) {
+// returning the created Conn with additional eth66 capabilities if
+// successful
+func (s *Suite) dial66() (*Conn, error) {
conn, err := s.dial()
if err != nil {
return nil, fmt.Errorf("dial failed: %v", err)
}
+conn.caps = append(conn.caps, p2p.Cap{Name: "eth", Version: 66})
+conn.ourHighestProtoVersion = 66
+return conn, nil
+}

+// dial66 attempts to dial the given node and perform a handshake,
+// returning the created Conn with additional snap/1 capabilities if
+// successful.
+func (s *Suite) dialSnap() (*Conn, error) {
+conn, err := s.dial66()
+if err != nil {
+return nil, fmt.Errorf("dial failed: %v", err)
+}
conn.caps = append(conn.caps, p2p.Cap{Name: "snap", Version: 1})
conn.ourHighestSnapProtoVersion = 1
return conn, nil
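The two sides of this hunk advertise different capability sets: the newer side announces eth/66-68 from `dial()` directly, while the older side announces eth/64-65 and layers eth/66 on via `dial66()`. For context, a minimal standalone sketch of the highest-shared-version idea behind such capability lists; the `Cap` type and helper names below are illustrative assumptions, not the devp2p API used in the suite.

```go
package main

import "fmt"

// Cap mirrors the shape of a capability announcement: protocol name plus version.
type Cap struct {
	Name    string
	Version uint
}

// highestEth returns the highest "eth" version present in a capability list.
func highestEth(caps []Cap) uint {
	var best uint
	for _, c := range caps {
		if c.Name == "eth" && c.Version > best {
			best = c.Version
		}
	}
	return best
}

// negotiate picks the highest eth version both sides advertise (0 if none shared).
func negotiate(ours, theirs []Cap) uint {
	mine, remote := highestEth(ours), highestEth(theirs)
	if mine < remote {
		return mine
	}
	return remote
}

func main() {
	base := []Cap{{Name: "eth", Version: 64}, {Name: "eth", Version: 65}} // like dial() on the older side
	with66 := append(base, Cap{Name: "eth", Version: 66})                 // like dial66() layering eth/66 on top
	remote := []Cap{{Name: "eth", Version: 65}, {Name: "eth", Version: 66}} // a hypothetical peer
	fmt.Println(negotiate(base, remote))   // 65
	fmt.Println(negotiate(with66, remote)) // 66
}
```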
@@ -206,40 +235,60 @@ loop:

// createSendAndRecvConns creates two connections, one for sending messages to the
// node, and one for receiving messages from the node.
-func (s *Suite) createSendAndRecvConns() (*Conn, *Conn, error) {
+func (s *Suite) createSendAndRecvConns(isEth66 bool) (*Conn, *Conn, error) {
-sendConn, err := s.dial()
+var (
+sendConn *Conn
+recvConn *Conn
+err error
+)
+if isEth66 {
+sendConn, err = s.dial66()
if err != nil {
return nil, nil, fmt.Errorf("dial failed: %v", err)
}
-recvConn, err := s.dial()
+recvConn, err = s.dial66()
if err != nil {
sendConn.Close()
return nil, nil, fmt.Errorf("dial failed: %v", err)
}
+} else {
+sendConn, err = s.dial()
+if err != nil {
+return nil, nil, fmt.Errorf("dial failed: %v", err)
+}
+recvConn, err = s.dial()
+if err != nil {
+sendConn.Close()
+return nil, nil, fmt.Errorf("dial failed: %v", err)
+}
+}
return sendConn, recvConn, nil
}

+func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
+if c.negotiatedProtoVersion == 66 {
+_, msg := c.readAndServe66(chain, timeout)
+return msg
+}
+return c.readAndServe65(chain, timeout)
+}

// readAndServe serves GetBlockHeaders requests while waiting
// on another message from the node.
-func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
+func (c *Conn) readAndServe65(chain *Chain, timeout time.Duration) Message {
start := time.Now()
for time.Since(start) < timeout {
-c.SetReadDeadline(time.Now().Add(10 * time.Second))
+c.SetReadDeadline(time.Now().Add(5 * time.Second))
+switch msg := c.Read().(type) {
-msg := c.Read()
-switch msg := msg.(type) {
case *Ping:
c.Write(&Pong{})
case *GetBlockHeaders:
-headers, err := chain.GetHeaders(msg)
+req := *msg
+headers, err := chain.GetHeaders(req)
if err != nil {
return errorf("could not get headers for inbound header request: %v", err)
}
-resp := &BlockHeaders{
+if err := c.Write(headers); err != nil {
-RequestId: msg.ReqID(),
-BlockHeadersPacket: eth.BlockHeadersPacket(headers),
-}
-if err := c.Write(resp); err != nil {
return errorf("could not write to connection: %v", err)
}
default:
@@ -249,25 +298,54 @@ func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
return errorf("no message received within %v", timeout)
}

+// readAndServe66 serves eth66 GetBlockHeaders requests while waiting
+// on another message from the node.
+func (c *Conn) readAndServe66(chain *Chain, timeout time.Duration) (uint64, Message) {
+start := time.Now()
+for time.Since(start) < timeout {
+c.SetReadDeadline(time.Now().Add(10 * time.Second))

+reqID, msg := c.Read66()

+switch msg := msg.(type) {
+case *Ping:
+c.Write(&Pong{})
+case GetBlockHeaders:
+headers, err := chain.GetHeaders(msg)
+if err != nil {
+return 0, errorf("could not get headers for inbound header request: %v", err)
+}
+resp := &eth.BlockHeadersPacket66{
+RequestId: reqID,
+BlockHeadersPacket: eth.BlockHeadersPacket(headers),
+}
+if err := c.Write66(resp, BlockHeaders{}.Code()); err != nil {
+return 0, errorf("could not write to connection: %v", err)
+}
+default:
+return reqID, msg
+}
+}
+return 0, errorf("no message received within %v", timeout)
+}

// headersRequest executes the given `GetBlockHeaders` request.
-func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, reqID uint64) ([]*types.Header, error) {
+func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, isEth66 bool, reqID uint64) (BlockHeaders, error) {
defer c.SetReadDeadline(time.Time{})
c.SetReadDeadline(time.Now().Add(20 * time.Second))
+// if on eth66 connection, perform eth66 GetBlockHeaders request
-// write request
+if isEth66 {
-request.RequestId = reqID
+return getBlockHeaders66(chain, c, request, reqID)
+}
if err := c.Write(request); err != nil {
-return nil, fmt.Errorf("could not write to connection: %v", err)
+return nil, err
}
+switch msg := c.readAndServe(chain, timeout).(type) {
-// wait for response
+case *BlockHeaders:
-msg := c.waitForResponse(chain, timeout, request.RequestId)
+return *msg, nil
-resp, ok := msg.(*BlockHeaders)
+default:
-if !ok {
+return nil, fmt.Errorf("invalid message: %s", pretty.Sdump(msg))
-return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg))
}
-headers := []*types.Header(resp.BlockHeadersPacket)
-return headers, nil
}

func (c *Conn) snapRequest(msg Message, id uint64, chain *Chain) (Message, error) {
@@ -279,8 +357,28 @@ func (c *Conn) snapRequest(msg Message, id uint64, chain *Chain) (Message, error
return c.ReadSnap(id)
}

+// getBlockHeaders66 executes the given `GetBlockHeaders` request over the eth66 protocol.
+func getBlockHeaders66(chain *Chain, conn *Conn, request *GetBlockHeaders, id uint64) (BlockHeaders, error) {
+// write request
+packet := eth.GetBlockHeadersPacket(*request)
+req := &eth.GetBlockHeadersPacket66{
+RequestId: id,
+GetBlockHeadersPacket: &packet,
+}
+if err := conn.Write66(req, GetBlockHeaders{}.Code()); err != nil {
+return nil, fmt.Errorf("could not write to connection: %v", err)
+}
+// wait for response
+msg := conn.waitForResponse(chain, timeout, req.RequestId)
+headers, ok := msg.(BlockHeaders)
+if !ok {
+return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg))
+}
+return headers, nil
+}

// headersMatch returns whether the received headers match the given request
-func headersMatch(expected []*types.Header, headers []*types.Header) bool {
+func headersMatch(expected BlockHeaders, headers BlockHeaders) bool {
return reflect.DeepEqual(expected, headers)
}

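The hunks above revolve around the eth66 pattern of tagging every request with a `RequestId` and matching the response on that id (`getBlockHeaders66`, `readAndServe66`, `waitForResponse`). As a standalone illustration of that pairing idea only, here is a toy sketch; the types and the in-memory channel standing in for the wire are assumptions for the example, not the eth protocol types used in the diff.

```go
package main

import "fmt"

type request struct {
	RequestId uint64
	Origin    uint64 // block number asked for
	Amount    uint64
}

type response struct {
	RequestId uint64
	Headers   []uint64 // stand-in for a list of block headers
}

// serve answers a request the way readAndServe66 answers GetBlockHeaders:
// it echoes the RequestId so the caller can match the reply to its request.
func serve(req request) response {
	headers := make([]uint64, 0, req.Amount)
	for i := uint64(0); i < req.Amount; i++ {
		headers = append(headers, req.Origin+i)
	}
	return response{RequestId: req.RequestId, Headers: headers}
}

// waitFor keeps reading until a response with the wanted id arrives,
// mirroring the waitForResponse loop above.
func waitFor(in <-chan response, id uint64) response {
	for resp := range in {
		if resp.RequestId == id {
			return resp
		}
	}
	return response{}
}

func main() {
	out := make(chan response, 2)
	out <- serve(request{RequestId: 222, Origin: 33, Amount: 2})
	out <- serve(request{RequestId: 111, Origin: 1, Amount: 3})
	close(out)
	fmt.Println(waitFor(out, 111)) // {111 [1 2 3]}
}
```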
@@ -288,8 +386,8 @@ func headersMatch(expected []*types.Header, headers []*types.Header) bool {
// request ID is received.
func (c *Conn) waitForResponse(chain *Chain, timeout time.Duration, requestID uint64) Message {
for {
-msg := c.readAndServe(chain, timeout)
+id, msg := c.readAndServe66(chain, timeout)
-if msg.ReqID() == requestID {
+if id == requestID {
return msg
}
}
@@ -297,9 +395,9 @@ func (c *Conn) waitForResponse(chain *Chain, timeout time.Duration, requestID ui

// sendNextBlock broadcasts the next block in the chain and waits
// for the node to propagate the block and import it into its chain.
-func (s *Suite) sendNextBlock() error {
+func (s *Suite) sendNextBlock(isEth66 bool) error {
// set up sending and receiving connections
-sendConn, recvConn, err := s.createSendAndRecvConns()
+sendConn, recvConn, err := s.createSendAndRecvConns(isEth66)
if err != nil {
return err
}
@@ -322,7 +420,7 @@ func (s *Suite) sendNextBlock() error {
return fmt.Errorf("failed to announce block: %v", err)
}
// wait for client to update its chain
-if err = s.waitForBlockImport(recvConn, nextBlock); err != nil {
+if err = s.waitForBlockImport(recvConn, nextBlock, isEth66); err != nil {
return fmt.Errorf("failed to receive confirmation of block import: %v", err)
}
// update test suite chain
@@ -358,37 +456,38 @@ func (s *Suite) waitAnnounce(conn *Conn, blockAnnouncement *NewBlock) error {
return fmt.Errorf("wrong block hash in announcement: expected %v, got %v", blockAnnouncement.Block.Hash(), hashes[0].Hash)
}
return nil

-// ignore tx announcements from previous tests
-case *NewPooledTransactionHashes66:
-continue
case *NewPooledTransactionHashes:
+// ignore tx announcements from previous tests
continue
-case *Transactions:
-continue

default:
return fmt.Errorf("unexpected: %s", pretty.Sdump(msg))
}
}
}

-func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block) error {
+func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block, isEth66 bool) error {
defer conn.SetReadDeadline(time.Time{})
conn.SetReadDeadline(time.Now().Add(20 * time.Second))
// create request
req := &GetBlockHeaders{
-GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+Origin: eth.HashOrNumber{
-Origin: eth.HashOrNumber{Hash: block.Hash()},
+Hash: block.Hash(),
-Amount: 1,
},
+Amount: 1,
}

// loop until BlockHeaders response contains desired block, confirming the
// node imported the block
for {
+var (
+headers BlockHeaders
+err error
+)
+if isEth66 {
requestID := uint64(54)
-headers, err := conn.headersRequest(req, s.chain, requestID)
+headers, err = conn.headersRequest(req, s.chain, eth66, requestID)
+} else {
+headers, err = conn.headersRequest(req, s.chain, eth65, 0)
+}
if err != nil {
return fmt.Errorf("GetBlockHeader request failed: %v", err)
}
@@ -404,8 +503,8 @@ func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block) error {
}
}

-func (s *Suite) oldAnnounce() error {
+func (s *Suite) oldAnnounce(isEth66 bool) error {
-sendConn, receiveConn, err := s.createSendAndRecvConns()
+sendConn, receiveConn, err := s.createSendAndRecvConns(isEth66)
if err != nil {
return err
}
@@ -451,13 +550,23 @@ func (s *Suite) oldAnnounce() error {
return nil
}

-func (s *Suite) maliciousHandshakes(t *utesting.T) error {
+func (s *Suite) maliciousHandshakes(t *utesting.T, isEth66 bool) error {
-conn, err := s.dial()
+var (
+conn *Conn
+err error
+)
+if isEth66 {
+conn, err = s.dial66()
if err != nil {
return fmt.Errorf("dial failed: %v", err)
}
+} else {
+conn, err = s.dial()
+if err != nil {
+return fmt.Errorf("dial failed: %v", err)
+}
+}
defer conn.Close()

// write hello to client
pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:]
handshakes := []*Hello{
@@ -518,11 +627,18 @@ func (s *Suite) maliciousHandshakes(t *utesting.T) error {
}
}
// dial for the next round
+if isEth66 {
+conn, err = s.dial66()
+if err != nil {
+return fmt.Errorf("dial failed: %v", err)
+}
+} else {
conn, err = s.dial()
if err != nil {
return fmt.Errorf("dial failed: %v", err)
}
}
+}
return nil
}

@@ -538,7 +654,6 @@ func (s *Suite) maliciousStatus(conn *Conn) error {
Genesis: s.chain.blocks[0].Hash(),
ForkID: s.chain.ForkID(),
}

// get status
msg, err := conn.statusExchange(s.chain, status)
if err != nil {
@@ -549,7 +664,6 @@ func (s *Suite) maliciousStatus(conn *Conn) error {
default:
return fmt.Errorf("expected status, got: %#v ", msg)
}

// wait for disconnect
switch msg := conn.readAndServe(s.chain, timeout).(type) {
case *Disconnect:
@@ -561,9 +675,9 @@ func (s *Suite) maliciousStatus(conn *Conn) error {
}
}

-func (s *Suite) hashAnnounce() error {
+func (s *Suite) hashAnnounce(isEth66 bool) error {
// create connections
-sendConn, recvConn, err := s.createSendAndRecvConns()
+sendConn, recvConn, err := s.createSendAndRecvConns(isEth66)
if err != nil {
return fmt.Errorf("failed to create connections: %v", err)
}
@@ -575,7 +689,6 @@ func (s *Suite) hashAnnounce() error {
if err := recvConn.peer(s.chain, nil); err != nil {
return fmt.Errorf("peering failed: %v", err)
}

// create NewBlockHashes announcement
type anno struct {
Hash common.Hash // Hash of one particular block being announced
@@ -587,11 +700,18 @@ func (s *Suite) hashAnnounce() error {
if err := sendConn.Write(newBlockHash); err != nil {
return fmt.Errorf("failed to write to connection: %v", err)
}

// Announcement sent, now wait for a header request
-msg := sendConn.Read()
+var (
-blockHeaderReq, ok := msg.(*GetBlockHeaders)
+id uint64
-if !ok {
+msg Message
+blockHeaderReq GetBlockHeaders
+)
+if isEth66 {
+id, msg = sendConn.Read66()
+switch msg := msg.(type) {
+case GetBlockHeaders:
+blockHeaderReq = msg
+default:
return fmt.Errorf("unexpected %s", pretty.Sdump(msg))
}
if blockHeaderReq.Amount != 1 {
@@ -602,14 +722,34 @@ func (s *Suite) hashAnnounce() error {
pretty.Sdump(announcement),
pretty.Sdump(blockHeaderReq))
}
-err = sendConn.Write(&BlockHeaders{
+if err := sendConn.Write66(&eth.BlockHeadersPacket66{
-RequestId: blockHeaderReq.ReqID(),
+RequestId: id,
-BlockHeadersPacket: eth.BlockHeadersPacket{nextBlock.Header()},
+BlockHeadersPacket: eth.BlockHeadersPacket{
-})
+nextBlock.Header(),
-if err != nil {
+},
+}, BlockHeaders{}.Code()); err != nil {
return fmt.Errorf("failed to write to connection: %v", err)
}
+} else {
+msg = sendConn.Read()
+switch msg := msg.(type) {
+case *GetBlockHeaders:
+blockHeaderReq = *msg
+default:
+return fmt.Errorf("unexpected %s", pretty.Sdump(msg))
+}
+if blockHeaderReq.Amount != 1 {
+return fmt.Errorf("unexpected number of block headers requested: %v", blockHeaderReq.Amount)
+}
+if blockHeaderReq.Origin.Hash != announcement.Hash {
+return fmt.Errorf("unexpected block header requested. Announced:\n %v\n Remote request:\n%v",
+pretty.Sdump(announcement),
+pretty.Sdump(blockHeaderReq))
+}
+if err := sendConn.Write(&BlockHeaders{nextBlock.Header()}); err != nil {
+return fmt.Errorf("failed to write to connection: %v", err)
+}
+}
// wait for block announcement
msg = recvConn.readAndServe(s.chain, timeout)
switch msg := msg.(type) {
@@ -622,7 +762,6 @@ func (s *Suite) hashAnnounce() error {
return fmt.Errorf("unexpected block hash announcement, wanted %v, got %v", nextBlock.Hash(),
hashes[0].Hash)
}

case *NewBlock:
// node should only propagate NewBlock without having requested the body if the body is empty
nextBlockBody := nextBlock.Body()
@@ -641,7 +780,7 @@ func (s *Suite) hashAnnounce() error {
return fmt.Errorf("unexpected: %s", pretty.Sdump(msg))
}
// confirm node imported block
-if err := s.waitForBlockImport(recvConn, nextBlock); err != nil {
+if err := s.waitForBlockImport(recvConn, nextBlock, isEth66); err != nil {
return fmt.Errorf("error waiting for node to import new block: %v", err)
}
// update the chain

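In `hashAnnounce` above, both branches check that after a `NewBlockHashes` announcement the node asks back for exactly the announced hash with `Amount == 1`. A toy standalone version of just that validation step, with made-up types for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

type announcement struct{ Hash string }

type headerRequest struct {
	Hash   string
	Amount uint64
}

// validate mirrors the checks in hashAnnounce: one header, for the announced hash.
func validate(a announcement, req headerRequest) error {
	if req.Amount != 1 {
		return fmt.Errorf("unexpected number of block headers requested: %v", req.Amount)
	}
	if req.Hash != a.Hash {
		return errors.New("node requested a different hash than was announced")
	}
	return nil
}

func main() {
	a := announcement{Hash: "0xabc"}
	fmt.Println(validate(a, headerRequest{Hash: "0xabc", Amount: 1})) // <nil>
	fmt.Println(validate(a, headerRequest{Hash: "0xdef", Amount: 1})) // error
}
```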
@@ -23,7 +23,6 @@ import (
"math/rand"

"github.com/ethereum/go-ethereum/common"
-"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/protocols/snap"
"github.com/ethereum/go-ethereum/internal/utesting"
@@ -91,7 +90,7 @@ func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
{4000, s.chain.RootAt(0), zero, ffHash, 0, zero, zero},
// A 127 block old stateroot, expected to be served
{4000, s.chain.RootAt(999 - 127), zero, ffHash, 77, firstKey, common.HexToHash("0xe4c6fdef5dd4e789a2612390806ee840b8ec0fe52548f8b4efe41abb20c37aac")},
-// A root which is not actually an account root, but a storage root
+// A root which is not actually an account root, but a storage orot
{4000, storageRoot, zero, ffHash, 0, zero, zero},

// And some non-sensical requests
@@ -122,7 +121,7 @@ type stRangesTest struct {
expSlots int
}

-// TestSnapGetStorageRanges various forms of GetStorageRanges requests.
+// TestSnapGetStorageRange various forms of GetStorageRanges requests.
func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
var (
ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
@@ -211,6 +210,13 @@ type byteCodesTest struct {
expHashes int
}

+var (
+// emptyRoot is the known root hash of an empty trie.
+emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+// emptyCode is the known hash of the empty EVM bytecode.
+emptyCode = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
+)

// TestSnapGetByteCodes various forms of GetByteCodes requests.
func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
// The halfchain import should yield these bytecodes
@@ -257,15 +263,15 @@ func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
},
// Empties
{
-nBytes: 10000, hashes: []common.Hash{types.EmptyRootHash},
+nBytes: 10000, hashes: []common.Hash{emptyRoot},
expHashes: 0,
},
{
-nBytes: 10000, hashes: []common.Hash{types.EmptyCodeHash},
+nBytes: 10000, hashes: []common.Hash{emptyCode},
expHashes: 1,
},
{
-nBytes: 10000, hashes: []common.Hash{types.EmptyCodeHash, types.EmptyCodeHash, types.EmptyCodeHash},
+nBytes: 10000, hashes: []common.Hash{emptyCode, emptyCode, emptyCode},
expHashes: 3,
},
// The existing bytecodes
@@ -344,6 +350,7 @@ func hexToCompact(hex []byte) []byte {

// TestSnapTrieNodes various forms of GetTrieNodes requests.
func (s *Suite) TestSnapTrieNodes(t *utesting.T) {

key := common.FromHex("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
// helper function to iterate the key, and generate the compact-encoded
// trie paths along the way.
@@ -357,7 +364,7 @@ func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
for i := 1; i <= 65; i++ {
accPaths = append(accPaths, pathTo(i))
}
-empty := types.EmptyCodeHash
+empty := emptyCode
for i, tc := range []trieNodesTest{
{
root: s.chain.RootAt(999),
@@ -401,9 +408,7 @@ func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
{[]byte{1}, []byte{0}},
},
nBytes: 5000,
-expHashes: []common.Hash{
+expHashes: []common.Hash{},
-common.HexToHash("0x1ee1bb2fbac4d46eab331f3e8551e18a0805d084ed54647883aa552809ca968d"),
-},
},
{
// The leaf is only a couple of levels down, so the continued trie traversal causes lookup failures.
@@ -433,35 +438,7 @@ func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"),
},
},
-{
+} {
-/*
-A test against this account, requesting trie nodes for the storage trie
-{
-"balance": "0",
-"nonce": 1,
-"root": "0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790",
-"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
-"storage": {
-"0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace": "02",
-"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6": "01",
-"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b": "03"
-},
-"key": "0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844"
-}
-*/
-root: s.chain.RootAt(999),
-paths: []snap.TrieNodePathSet{
-{
-common.FromHex("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844"),
-[]byte{0},
-},
-},
-nBytes: 5000,
-expHashes: []common.Hash{
-common.HexToHash("0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790"),
-},
-},
-}[7:] {
tc := tc
if err := s.snapGetTrieNodes(t, &tc); err != nil {
t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err)
@@ -519,10 +496,10 @@ func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error {
}
if len(hashes) > 0 {
if exp, got := tc.expFirst, res.Accounts[0].Hash; exp != got {
-return fmt.Errorf("expected first account %#x, got %#x", exp, got)
+return fmt.Errorf("expected first account 0x%x, got 0x%x", exp, got)
}
if exp, got := tc.expLast, res.Accounts[len(res.Accounts)-1].Hash; exp != got {
-return fmt.Errorf("expected last account %#x, got %#x", exp, got)
+return fmt.Errorf("expected last account 0x%x, got 0x%x", exp, got)
}
}
// Reconstruct a partial trie from the response and verify it

@@ -21,40 +21,32 @@ import "github.com/ethereum/go-ethereum/eth/protocols/snap"
// GetAccountRange represents an account range query.
type GetAccountRange snap.GetAccountRangePacket

-func (msg GetAccountRange) Code() int { return 33 }
+func (g GetAccountRange) Code() int { return 33 }
-func (msg GetAccountRange) ReqID() uint64 { return msg.ID }

type AccountRange snap.AccountRangePacket

-func (msg AccountRange) Code() int { return 34 }
+func (g AccountRange) Code() int { return 34 }
-func (msg AccountRange) ReqID() uint64 { return msg.ID }

type GetStorageRanges snap.GetStorageRangesPacket

-func (msg GetStorageRanges) Code() int { return 35 }
+func (g GetStorageRanges) Code() int { return 35 }
-func (msg GetStorageRanges) ReqID() uint64 { return msg.ID }

type StorageRanges snap.StorageRangesPacket

-func (msg StorageRanges) Code() int { return 36 }
+func (g StorageRanges) Code() int { return 36 }
-func (msg StorageRanges) ReqID() uint64 { return msg.ID }

type GetByteCodes snap.GetByteCodesPacket

-func (msg GetByteCodes) Code() int { return 37 }
+func (g GetByteCodes) Code() int { return 37 }
-func (msg GetByteCodes) ReqID() uint64 { return msg.ID }

type ByteCodes snap.ByteCodesPacket

-func (msg ByteCodes) Code() int { return 38 }
+func (g ByteCodes) Code() int { return 38 }
-func (msg ByteCodes) ReqID() uint64 { return msg.ID }

type GetTrieNodes snap.GetTrieNodesPacket

-func (msg GetTrieNodes) Code() int { return 39 }
+func (g GetTrieNodes) Code() int { return 39 }
-func (msg GetTrieNodes) ReqID() uint64 { return msg.ID }

type TrieNodes snap.TrieNodesPacket

-func (msg TrieNodes) Code() int { return 40 }
+func (g TrieNodes) Code() int { return 40 }
-func (msg TrieNodes) ReqID() uint64 { return msg.ID }

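The hunk above keeps a `Code()` method on every snap message wrapper but drops the `ReqID()` accessors on the older side. For context only, a minimal standalone sketch of those two interface shapes; the interface and type names below are hypothetical and are not the exact interfaces in the test suite.

```go
package main

import "fmt"

// Message is the shape both versions keep: every message reports its wire code.
type Message interface {
	Code() int
}

// RequestIDMessage is the extra shape the ReqID() accessors provide, letting a
// dispatcher match a response to the request that carried the same id.
type RequestIDMessage interface {
	Message
	ReqID() uint64
}

type getAccountRange struct{ ID uint64 }

func (g getAccountRange) Code() int     { return 33 }
func (g getAccountRange) ReqID() uint64 { return g.ID }

func main() {
	var m Message = getAccountRange{ID: 7}
	if r, ok := m.(RequestIDMessage); ok {
		fmt.Println("code", m.Code(), "reqid", r.ReqID()) // code 33 reqid 7
	}
}
```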
@@ -49,30 +49,79 @@ func NewSuite(dest *enode.Node, chainfile string, genesisfile string) (*Suite, e
}, nil
}

-func (s *Suite) EthTests() []utesting.Test {
+func (s *Suite) AllEthTests() []utesting.Test {
return []utesting.Test{
// status
-{Name: "TestStatus", Fn: s.TestStatus},
+{Name: "TestStatus65", Fn: s.TestStatus65},
+{Name: "TestStatus66", Fn: s.TestStatus66},
// get block headers
-{Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders},
+{Name: "TestGetBlockHeaders65", Fn: s.TestGetBlockHeaders65},
-{Name: "TestSimultaneousRequests", Fn: s.TestSimultaneousRequests},
+{Name: "TestGetBlockHeaders66", Fn: s.TestGetBlockHeaders66},
-{Name: "TestSameRequestID", Fn: s.TestSameRequestID},
+{Name: "TestSimultaneousRequests66", Fn: s.TestSimultaneousRequests66},
-{Name: "TestZeroRequestID", Fn: s.TestZeroRequestID},
+{Name: "TestSameRequestID66", Fn: s.TestSameRequestID66},
+{Name: "TestZeroRequestID66", Fn: s.TestZeroRequestID66},
// get block bodies
-{Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies},
+{Name: "TestGetBlockBodies65", Fn: s.TestGetBlockBodies65},
+{Name: "TestGetBlockBodies66", Fn: s.TestGetBlockBodies66},
// broadcast
-{Name: "TestBroadcast", Fn: s.TestBroadcast},
+{Name: "TestBroadcast65", Fn: s.TestBroadcast65},
-{Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce},
+{Name: "TestBroadcast66", Fn: s.TestBroadcast66},
-{Name: "TestOldAnnounce", Fn: s.TestOldAnnounce},
+{Name: "TestLargeAnnounce65", Fn: s.TestLargeAnnounce65},
-{Name: "TestBlockHashAnnounce", Fn: s.TestBlockHashAnnounce},
+{Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66},
+{Name: "TestOldAnnounce65", Fn: s.TestOldAnnounce65},
+{Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66},
+{Name: "TestBlockHashAnnounce65", Fn: s.TestBlockHashAnnounce65},
+{Name: "TestBlockHashAnnounce66", Fn: s.TestBlockHashAnnounce66},
// malicious handshakes + status
-{Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake},
+{Name: "TestMaliciousHandshake65", Fn: s.TestMaliciousHandshake65},
-{Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus},
+{Name: "TestMaliciousStatus65", Fn: s.TestMaliciousStatus65},
+{Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66},
+{Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66},
// test transactions
-{Name: "TestTransaction", Fn: s.TestTransaction},
+{Name: "TestTransaction65", Fn: s.TestTransaction65},
-{Name: "TestMaliciousTx", Fn: s.TestMaliciousTx},
+{Name: "TestTransaction66", Fn: s.TestTransaction66},
-{Name: "TestLargeTxRequest", Fn: s.TestLargeTxRequest},
+{Name: "TestMaliciousTx65", Fn: s.TestMaliciousTx65},
-{Name: "TestNewPooledTxs", Fn: s.TestNewPooledTxs},
+{Name: "TestMaliciousTx66", Fn: s.TestMaliciousTx66},
+{Name: "TestLargeTxRequest66", Fn: s.TestLargeTxRequest66},
+{Name: "TestNewPooledTxs66", Fn: s.TestNewPooledTxs66},
+}
+}

+func (s *Suite) EthTests() []utesting.Test {
+return []utesting.Test{
+{Name: "TestStatus65", Fn: s.TestStatus65},
+{Name: "TestGetBlockHeaders65", Fn: s.TestGetBlockHeaders65},
+{Name: "TestGetBlockBodies65", Fn: s.TestGetBlockBodies65},
+{Name: "TestBroadcast65", Fn: s.TestBroadcast65},
+{Name: "TestLargeAnnounce65", Fn: s.TestLargeAnnounce65},
+{Name: "TestOldAnnounce65", Fn: s.TestOldAnnounce65},
+{Name: "TestBlockHashAnnounce65", Fn: s.TestBlockHashAnnounce65},
+{Name: "TestMaliciousHandshake65", Fn: s.TestMaliciousHandshake65},
+{Name: "TestMaliciousStatus65", Fn: s.TestMaliciousStatus65},
+{Name: "TestTransaction65", Fn: s.TestTransaction65},
+{Name: "TestMaliciousTx65", Fn: s.TestMaliciousTx65},
+}
+}

+func (s *Suite) Eth66Tests() []utesting.Test {
+return []utesting.Test{
+// only proceed with eth66 test suite if node supports eth 66 protocol
+{Name: "TestStatus66", Fn: s.TestStatus66},
+{Name: "TestGetBlockHeaders66", Fn: s.TestGetBlockHeaders66},
+{Name: "TestSimultaneousRequests66", Fn: s.TestSimultaneousRequests66},
+{Name: "TestSameRequestID66", Fn: s.TestSameRequestID66},
+{Name: "TestZeroRequestID66", Fn: s.TestZeroRequestID66},
+{Name: "TestGetBlockBodies66", Fn: s.TestGetBlockBodies66},
+{Name: "TestBroadcast66", Fn: s.TestBroadcast66},
+{Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66},
+{Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66},
+{Name: "TestBlockHashAnnounce66", Fn: s.TestBlockHashAnnounce66},
+{Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66},
+{Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66},
+{Name: "TestTransaction66", Fn: s.TestTransaction66},
+{Name: "TestMaliciousTx66", Fn: s.TestMaliciousTx66},
+{Name: "TestLargeTxRequest66", Fn: s.TestLargeTxRequest66},
+{Name: "TestNewPooledTxs66", Fn: s.TestNewPooledTxs66},
}
}

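The hunk above splits one flat test list into `AllEthTests`, `EthTests` (eth65) and `Eth66Tests`. As a standalone sketch of the underlying pattern only, named test cases collected into slices and filtered by suffix, using a toy `Test` type rather than `internal/utesting`:

```go
package main

import (
	"fmt"
	"strings"
)

// Test pairs a human-readable name with a test function, like the suite lists above.
type Test struct {
	Name string
	Fn   func() error
}

// filterBySuffix mimics keeping only the "66" (or "65") variants of a combined list.
func filterBySuffix(all []Test, suffix string) []Test {
	var out []Test
	for _, t := range all {
		if strings.HasSuffix(t.Name, suffix) {
			out = append(out, t)
		}
	}
	return out
}

func main() {
	all := []Test{
		{Name: "TestStatus65", Fn: func() error { return nil }},
		{Name: "TestStatus66", Fn: func() error { return nil }},
		{Name: "TestGetBlockHeaders66", Fn: func() error { return nil }},
	}
	for _, t := range filterBySuffix(all, "66") {
		fmt.Println("would run", t.Name)
	}
}
```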
@ -86,9 +135,14 @@ func (s *Suite) SnapTests() []utesting.Test {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestStatus attempts to connect to the given node and exchange
|
var (
|
||||||
// a status message with it on the eth protocol.
|
eth66 = true // indicates whether suite should negotiate eth66 connection
|
||||||
func (s *Suite) TestStatus(t *utesting.T) {
|
eth65 = false // indicates whether suite should negotiate eth65 connection or below.
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestStatus65 attempts to connect to the given node and exchange
|
||||||
|
// a status message with it.
|
||||||
|
func (s *Suite) TestStatus65(t *utesting.T) {
|
||||||
conn, err := s.dial()
|
conn, err := s.dial()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("dial failed: %v", err)
|
t.Fatalf("dial failed: %v", err)
|
||||||
@ -99,32 +153,79 @@ func (s *Suite) TestStatus(t *utesting.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestGetBlockHeaders tests whether the given node can respond to
|
// TestStatus66 attempts to connect to the given node and exchange
|
||||||
// an eth `GetBlockHeaders` request and that the response is accurate.
|
// a status message with it on the eth66 protocol.
|
||||||
func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
|
func (s *Suite) TestStatus66(t *utesting.T) {
|
||||||
|
conn, err := s.dial66()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("dial failed: %v", err)
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
if err := conn.peer(s.chain, nil); err != nil {
|
||||||
|
t.Fatalf("peering failed: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestGetBlockHeaders65 tests whether the given node can respond to
|
||||||
|
// a `GetBlockHeaders` request accurately.
|
||||||
|
func (s *Suite) TestGetBlockHeaders65(t *utesting.T) {
|
||||||
conn, err := s.dial()
|
conn, err := s.dial()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("dial failed: %v", err)
|
t.Fatalf("dial failed: %v", err)
|
||||||
}
|
}
|
||||||
defer conn.Close()
|
defer conn.Close()
|
||||||
|
if err := conn.peer(s.chain, nil); err != nil {
|
||||||
|
t.Fatalf("handshake(s) failed: %v", err)
|
||||||
|
}
|
||||||
|
// write request
|
||||||
|
req := &GetBlockHeaders{
|
||||||
|
Origin: eth.HashOrNumber{
|
||||||
|
Hash: s.chain.blocks[1].Hash(),
|
||||||
|
},
|
||||||
|
Amount: 2,
|
||||||
|
Skip: 1,
|
||||||
|
Reverse: false,
|
||||||
|
}
|
||||||
|
headers, err := conn.headersRequest(req, s.chain, eth65, 0)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("GetBlockHeaders request failed: %v", err)
|
||||||
|
}
|
||||||
|
// check for correct headers
|
||||||
|
expected, err := s.chain.GetHeaders(*req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to get headers for given request: %v", err)
|
||||||
|
}
|
||||||
|
if !headersMatch(expected, headers) {
|
||||||
|
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestGetBlockHeaders66 tests whether the given node can respond to
|
||||||
|
// an eth66 `GetBlockHeaders` request and that the response is accurate.
|
||||||
|
func (s *Suite) TestGetBlockHeaders66(t *utesting.T) {
|
||||||
|
conn, err := s.dial66()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("dial failed: %v", err)
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
if err = conn.peer(s.chain, nil); err != nil {
|
if err = conn.peer(s.chain, nil); err != nil {
|
||||||
t.Fatalf("peering failed: %v", err)
|
t.Fatalf("peering failed: %v", err)
|
||||||
}
|
}
|
||||||
// write request
|
// write request
|
||||||
req := &GetBlockHeaders{
|
req := &GetBlockHeaders{
|
||||||
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
Origin: eth.HashOrNumber{
|
||||||
Origin: eth.HashOrNumber{Hash: s.chain.blocks[1].Hash()},
|
Hash: s.chain.blocks[1].Hash(),
|
||||||
|
},
|
||||||
Amount: 2,
|
Amount: 2,
|
||||||
Skip: 1,
|
Skip: 1,
|
||||||
Reverse: false,
|
Reverse: false,
|
||||||
},
|
|
||||||
}
|
}
|
||||||
headers, err := conn.headersRequest(req, s.chain, 33)
|
headers, err := conn.headersRequest(req, s.chain, eth66, 33)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("could not get block headers: %v", err)
|
t.Fatalf("could not get block headers: %v", err)
|
||||||
}
|
}
|
||||||
// check for correct headers
|
// check for correct headers
|
||||||
expected, err := s.chain.GetHeaders(req)
|
expected, err := s.chain.GetHeaders(*req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to get headers for given request: %v", err)
|
t.Fatalf("failed to get headers for given request: %v", err)
|
||||||
}
|
}
|
||||||
@ -133,12 +234,12 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestSimultaneousRequests sends two simultaneous `GetBlockHeader` requests from
|
// TestSimultaneousRequests66 sends two simultaneous `GetBlockHeader` requests from
|
||||||
// the same connection with different request IDs and checks to make sure the node
|
// the same connection with different request IDs and checks to make sure the node
|
||||||
// responds with the correct headers per request.
|
// responds with the correct headers per request.
|
||||||
func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
|
func (s *Suite) TestSimultaneousRequests66(t *utesting.T) {
|
||||||
// create a connection
|
// create a connection
|
||||||
conn, err := s.dial()
|
conn, err := s.dial66()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("dial failed: %v", err)
|
t.Fatalf("dial failed: %v", err)
|
||||||
}
|
}
|
||||||
@ -146,9 +247,8 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
|
|||||||
if err := conn.peer(s.chain, nil); err != nil {
|
if err := conn.peer(s.chain, nil); err != nil {
|
||||||
t.Fatalf("peering failed: %v", err)
|
t.Fatalf("peering failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// create two requests
|
// create two requests
|
||||||
req1 := &GetBlockHeaders{
|
req1 := ð.GetBlockHeadersPacket66{
|
||||||
RequestId: uint64(111),
|
RequestId: uint64(111),
|
||||||
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
||||||
Origin: eth.HashOrNumber{
|
Origin: eth.HashOrNumber{
|
||||||
@ -159,7 +259,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
|
|||||||
Reverse: false,
|
Reverse: false,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
req2 := &GetBlockHeaders{
|
req2 := ð.GetBlockHeadersPacket66{
|
||||||
RequestId: uint64(222),
|
RequestId: uint64(222),
|
||||||
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
||||||
Origin: eth.HashOrNumber{
|
Origin: eth.HashOrNumber{
|
||||||
@ -170,49 +270,46 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
|
|||||||
Reverse: false,
|
Reverse: false,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// write the first request
|
// write the first request
|
||||||
if err := conn.Write(req1); err != nil {
|
if err := conn.Write66(req1, GetBlockHeaders{}.Code()); err != nil {
|
||||||
t.Fatalf("failed to write to connection: %v", err)
|
t.Fatalf("failed to write to connection: %v", err)
|
||||||
}
|
}
|
||||||
// write the second request
|
// write the second request
|
||||||
if err := conn.Write(req2); err != nil {
|
if err := conn.Write66(req2, GetBlockHeaders{}.Code()); err != nil {
|
||||||
t.Fatalf("failed to write to connection: %v", err)
|
t.Fatalf("failed to write to connection: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// wait for responses
|
// wait for responses
|
||||||
msg := conn.waitForResponse(s.chain, timeout, req1.RequestId)
|
msg := conn.waitForResponse(s.chain, timeout, req1.RequestId)
|
||||||
headers1, ok := msg.(*BlockHeaders)
|
headers1, ok := msg.(BlockHeaders)
|
||||||
if !ok {
|
if !ok {
|
||||||
t.Fatalf("unexpected %s", pretty.Sdump(msg))
|
t.Fatalf("unexpected %s", pretty.Sdump(msg))
|
||||||
}
|
}
|
||||||
msg = conn.waitForResponse(s.chain, timeout, req2.RequestId)
|
msg = conn.waitForResponse(s.chain, timeout, req2.RequestId)
|
||||||
headers2, ok := msg.(*BlockHeaders)
|
headers2, ok := msg.(BlockHeaders)
|
||||||
if !ok {
|
if !ok {
|
||||||
t.Fatalf("unexpected %s", pretty.Sdump(msg))
|
t.Fatalf("unexpected %s", pretty.Sdump(msg))
|
||||||
}
|
}
|
||||||
|
|
||||||
// check received headers for accuracy
|
// check received headers for accuracy
|
||||||
expected1, err := s.chain.GetHeaders(req1)
|
expected1, err := s.chain.GetHeaders(GetBlockHeaders(*req1.GetBlockHeadersPacket))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to get expected headers for request 1: %v", err)
|
t.Fatalf("failed to get expected headers for request 1: %v", err)
|
||||||
}
|
}
|
||||||
expected2, err := s.chain.GetHeaders(req2)
|
expected2, err := s.chain.GetHeaders(GetBlockHeaders(*req2.GetBlockHeadersPacket))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to get expected headers for request 2: %v", err)
|
t.Fatalf("failed to get expected headers for request 2: %v", err)
|
||||||
}
|
}
|
||||||
if !headersMatch(expected1, headers1.BlockHeadersPacket) {
|
if !headersMatch(expected1, headers1) {
|
||||||
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
|
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
|
||||||
}
|
}
|
||||||
if !headersMatch(expected2, headers2.BlockHeadersPacket) {
|
if !headersMatch(expected2, headers2) {
|
||||||
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
|
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestSameRequestID sends two requests with the same request ID to a
|
// TestSameRequestID66 sends two requests with the same request ID to a
|
||||||
// single node.
|
// single node.
|
||||||
func (s *Suite) TestSameRequestID(t *utesting.T) {
|
func (s *Suite) TestSameRequestID66(t *utesting.T) {
|
||||||
conn, err := s.dial()
|
conn, err := s.dial66()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("dial failed: %v", err)
|
t.Fatalf("dial failed: %v", err)
|
||||||
}
|
}
|
||||||
@ -222,7 +319,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
|
|||||||
}
|
}
|
||||||
// create requests
|
// create requests
|
||||||
reqID := uint64(1234)
|
reqID := uint64(1234)
|
||||||
request1 := &GetBlockHeaders{
|
request1 := ð.GetBlockHeadersPacket66{
|
||||||
RequestId: reqID,
|
RequestId: reqID,
|
||||||
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
||||||
Origin: eth.HashOrNumber{
|
Origin: eth.HashOrNumber{
|
||||||
@ -231,7 +328,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
|
|||||||
Amount: 2,
|
Amount: 2,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
request2 := &GetBlockHeaders{
|
request2 := ð.GetBlockHeadersPacket66{
|
||||||
RequestId: reqID,
|
RequestId: reqID,
|
||||||
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
GetBlockHeadersPacket: ð.GetBlockHeadersPacket{
|
||||||
Origin: eth.HashOrNumber{
|
Origin: eth.HashOrNumber{
|
||||||
@ -240,48 +337,45 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
|
|||||||
Amount: 2,
|
Amount: 2,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// write the requests
|
// write the requests
|
||||||
if err = conn.Write(request1); err != nil {
|
if err = conn.Write66(request1, GetBlockHeaders{}.Code()); err != nil {
|
||||||
t.Fatalf("failed to write to connection: %v", err)
|
t.Fatalf("failed to write to connection: %v", err)
|
||||||
}
|
}
|
||||||
if err = conn.Write(request2); err != nil {
|
if err = conn.Write66(request2, GetBlockHeaders{}.Code()); err != nil {
|
||||||
t.Fatalf("failed to write to connection: %v", err)
|
t.Fatalf("failed to write to connection: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// wait for responses
|
// wait for responses
|
||||||
msg := conn.waitForResponse(s.chain, timeout, reqID)
|
msg := conn.waitForResponse(s.chain, timeout, reqID)
|
||||||
headers1, ok := msg.(*BlockHeaders)
|
headers1, ok := msg.(BlockHeaders)
|
||||||
if !ok {
|
if !ok {
|
||||||
t.Fatalf("unexpected %s", pretty.Sdump(msg))
|
t.Fatalf("unexpected %s", pretty.Sdump(msg))
|
||||||
}
|
}
|
||||||
msg = conn.waitForResponse(s.chain, timeout, reqID)
|
msg = conn.waitForResponse(s.chain, timeout, reqID)
|
||||||
headers2, ok := msg.(*BlockHeaders)
|
headers2, ok := msg.(BlockHeaders)
|
||||||
if !ok {
|
if !ok {
|
||||||
t.Fatalf("unexpected %s", pretty.Sdump(msg))
|
t.Fatalf("unexpected %s", pretty.Sdump(msg))
|
||||||
}
|
}
|
||||||
|
|
||||||
// check if headers match
|
// check if headers match
|
||||||
expected1, err := s.chain.GetHeaders(request1)
|
expected1, err := s.chain.GetHeaders(GetBlockHeaders(*request1.GetBlockHeadersPacket))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to get expected block headers: %v", err)
|
t.Fatalf("failed to get expected block headers: %v", err)
|
||||||
}
|
}
|
||||||
expected2, err := s.chain.GetHeaders(request2)
|
expected2, err := s.chain.GetHeaders(GetBlockHeaders(*request2.GetBlockHeadersPacket))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to get expected block headers: %v", err)
|
t.Fatalf("failed to get expected block headers: %v", err)
|
||||||
}
|
}
|
||||||
if !headersMatch(expected1, headers1.BlockHeadersPacket) {
|
if !headersMatch(expected1, headers1) {
|
||||||
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
|
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
|
||||||
}
|
}
|
||||||
if !headersMatch(expected2, headers2.BlockHeadersPacket) {
|
if !headersMatch(expected2, headers2) {
|
||||||
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
|
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
-// TestZeroRequestID checks that a message with a request ID of zero is still handled
+// TestZeroRequestID_66 checks that a message with a request ID of zero is still handled
 // by the node.
-func (s *Suite) TestZeroRequestID(t *utesting.T) {
-	conn, err := s.dial()
+func (s *Suite) TestZeroRequestID66(t *utesting.T) {
+	conn, err := s.dial66()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
 	}
@@ -290,16 +384,16 @@ func (s *Suite) TestZeroRequestID(t *utesting.T) {
 		t.Fatalf("peering failed: %v", err)
 	}
 	req := &GetBlockHeaders{
-		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
-			Origin: eth.HashOrNumber{Number: 0},
-			Amount: 2,
+		Origin: eth.HashOrNumber{
+			Number: 0,
 		},
+		Amount: 2,
 	}
-	headers, err := conn.headersRequest(req, s.chain, 0)
+	headers, err := conn.headersRequest(req, s.chain, eth66, 0)
 	if err != nil {
 		t.Fatalf("failed to get block headers: %v", err)
 	}
-	expected, err := s.chain.GetHeaders(req)
+	expected, err := s.chain.GetHeaders(*req)
 	if err != nil {
 		t.Fatalf("failed to get expected block headers: %v", err)
 	}
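The change above keeps the inner header query identical and only moves where the request id lives. As a minimal sketch (not part of the diff, assuming the eth.GetBlockHeadersPacket66 wrapper from go-ethereum's eth protocol package), an eth66 header request is just the eth65 query wrapped with an id that the peer must echo back:

	// eth66 envelope: RequestId plus the unchanged eth65 query.
	req := &eth.GetBlockHeadersPacket66{
		RequestId: 33, // arbitrary value; the BlockHeaders response must carry the same id
		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
			Origin: eth.HashOrNumber{Number: 0},
			Amount: 2,
		},
	}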
@@ -308,9 +402,9 @@ func (s *Suite) TestZeroRequestID(t *utesting.T) {
 	}
 }
 
-// TestGetBlockBodies tests whether the given node can respond to
+// TestGetBlockBodies65 tests whether the given node can respond to
 // a `GetBlockBodies` request and that the response is accurate.
-func (s *Suite) TestGetBlockBodies(t *utesting.T) {
+func (s *Suite) TestGetBlockBodies65(t *utesting.T) {
 	conn, err := s.dial()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
@@ -321,39 +415,126 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
 	}
 	// create block bodies request
 	req := &GetBlockBodies{
+		s.chain.blocks[54].Hash(),
+		s.chain.blocks[75].Hash(),
+	}
+	if err := conn.Write(req); err != nil {
+		t.Fatalf("could not write to connection: %v", err)
+	}
+	// wait for response
+	switch msg := conn.readAndServe(s.chain, timeout).(type) {
+	case *BlockBodies:
+		t.Logf("received %d block bodies", len(*msg))
+		if len(*msg) != len(*req) {
+			t.Fatalf("wrong bodies in response: expected %d bodies, "+
+				"got %d", len(*req), len(*msg))
+		}
+	default:
+		t.Fatalf("unexpected: %s", pretty.Sdump(msg))
+	}
+}
+
+// TestGetBlockBodies66 tests whether the given node can respond to
+// a `GetBlockBodies` request and that the response is accurate over
+// the eth66 protocol.
+func (s *Suite) TestGetBlockBodies66(t *utesting.T) {
+	conn, err := s.dial66()
+	if err != nil {
+		t.Fatalf("dial failed: %v", err)
+	}
+	defer conn.Close()
+	if err := conn.peer(s.chain, nil); err != nil {
+		t.Fatalf("peering failed: %v", err)
+	}
+	// create block bodies request
+	req := &eth.GetBlockBodiesPacket66{
 		RequestId: uint64(55),
 		GetBlockBodiesPacket: eth.GetBlockBodiesPacket{
 			s.chain.blocks[54].Hash(),
 			s.chain.blocks[75].Hash(),
 		},
 	}
-	if err := conn.Write(req); err != nil {
+	if err := conn.Write66(req, GetBlockBodies{}.Code()); err != nil {
 		t.Fatalf("could not write to connection: %v", err)
 	}
 	// wait for block bodies response
 	msg := conn.waitForResponse(s.chain, timeout, req.RequestId)
-	resp, ok := msg.(*BlockBodies)
+	blockBodies, ok := msg.(BlockBodies)
 	if !ok {
 		t.Fatalf("unexpected: %s", pretty.Sdump(msg))
 	}
-	bodies := resp.BlockBodiesPacket
-	t.Logf("received %d block bodies", len(bodies))
-	if len(bodies) != len(req.GetBlockBodiesPacket) {
+	t.Logf("received %d block bodies", len(blockBodies))
+	if len(blockBodies) != len(req.GetBlockBodiesPacket) {
 		t.Fatalf("wrong bodies in response: expected %d bodies, "+
-			"got %d", len(req.GetBlockBodiesPacket), len(bodies))
+			"got %d", len(req.GetBlockBodiesPacket), len(blockBodies))
 	}
 }
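A hedged usage sketch of the eth66 round trip exercised by TestGetBlockBodies66 above: the request is written with Write66 under the GetBlockBodies message code, and the reply is matched on the same RequestId (conn, s, t and timeout are the test's own variables):

	req := &eth.GetBlockBodiesPacket66{
		RequestId:            55,
		GetBlockBodiesPacket: eth.GetBlockBodiesPacket{s.chain.blocks[54].Hash()},
	}
	if err := conn.Write66(req, GetBlockBodies{}.Code()); err != nil {
		t.Fatalf("could not write to connection: %v", err)
	}
	// waitForResponse keeps reading until a message carrying req.RequestId arrives.
	msg := conn.waitForResponse(s.chain, timeout, req.RequestId)
	if _, ok := msg.(BlockBodies); !ok {
		t.Fatalf("unexpected: %s", pretty.Sdump(msg))
	}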
 
-// TestBroadcast tests whether a block announcement is correctly
-// propagated to the node's peers.
-func (s *Suite) TestBroadcast(t *utesting.T) {
-	if err := s.sendNextBlock(); err != nil {
+// TestBroadcast65 tests whether a block announcement is correctly
+// propagated to the given node's peer(s).
+func (s *Suite) TestBroadcast65(t *utesting.T) {
+	if err := s.sendNextBlock(eth65); err != nil {
 		t.Fatalf("block broadcast failed: %v", err)
 	}
 }
 
-// TestLargeAnnounce tests the announcement mechanism with a large block.
-func (s *Suite) TestLargeAnnounce(t *utesting.T) {
+// TestBroadcast66 tests whether a block announcement is correctly
+// propagated to the given node's peer(s) on the eth66 protocol.
+func (s *Suite) TestBroadcast66(t *utesting.T) {
+	if err := s.sendNextBlock(eth66); err != nil {
+		t.Fatalf("block broadcast failed: %v", err)
+	}
+}
+
+// TestLargeAnnounce65 tests the announcement mechanism with a large block.
+func (s *Suite) TestLargeAnnounce65(t *utesting.T) {
+	nextBlock := len(s.chain.blocks)
+	blocks := []*NewBlock{
+		{
+			Block: largeBlock(),
+			TD:    s.fullChain.TotalDifficultyAt(nextBlock),
+		},
+		{
+			Block: s.fullChain.blocks[nextBlock],
+			TD:    largeNumber(2),
+		},
+		{
+			Block: largeBlock(),
+			TD:    largeNumber(2),
+		},
+	}
+
+	for i, blockAnnouncement := range blocks {
+		t.Logf("Testing malicious announcement: %v\n", i)
+		conn, err := s.dial()
+		if err != nil {
+			t.Fatalf("dial failed: %v", err)
+		}
+		if err = conn.peer(s.chain, nil); err != nil {
+			t.Fatalf("peering failed: %v", err)
+		}
+		if err = conn.Write(blockAnnouncement); err != nil {
+			t.Fatalf("could not write to connection: %v", err)
+		}
+		// Invalid announcement, check that peer disconnected
+		switch msg := conn.readAndServe(s.chain, time.Second*8).(type) {
+		case *Disconnect:
+		case *Error:
+			break
+		default:
+			t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg))
+		}
+		conn.Close()
+	}
+	// Test the last block as a valid block
+	if err := s.sendNextBlock(eth65); err != nil {
+		t.Fatalf("failed to broadcast next block: %v", err)
+	}
+}
+
+// TestLargeAnnounce66 tests the announcement mechanism with a large
+// block over the eth66 protocol.
+func (s *Suite) TestLargeAnnounce66(t *utesting.T) {
 	nextBlock := len(s.chain.blocks)
 	blocks := []*NewBlock{
 		{
@@ -372,7 +553,7 @@ func (s *Suite) TestLargeAnnounce(t *utesting.T) {
 
 	for i, blockAnnouncement := range blocks[0:3] {
 		t.Logf("Testing malicious announcement: %v\n", i)
-		conn, err := s.dial()
+		conn, err := s.dial66()
 		if err != nil {
 			t.Fatalf("dial failed: %v", err)
 		}
@@ -383,7 +564,7 @@ func (s *Suite) TestLargeAnnounce(t *utesting.T) {
 			t.Fatalf("could not write to connection: %v", err)
 		}
 		// Invalid announcement, check that peer disconnected
-		switch msg := conn.readAndServe(s.chain, 8*time.Second).(type) {
+		switch msg := conn.readAndServe(s.chain, time.Second*8).(type) {
 		case *Disconnect:
 		case *Error:
 			break
@@ -393,35 +574,58 @@ func (s *Suite) TestLargeAnnounce(t *utesting.T) {
 		conn.Close()
 	}
 	// Test the last block as a valid block
-	if err := s.sendNextBlock(); err != nil {
+	if err := s.sendNextBlock(eth66); err != nil {
 		t.Fatalf("failed to broadcast next block: %v", err)
 	}
 }
 
-// TestOldAnnounce tests the announcement mechanism with an old block.
-func (s *Suite) TestOldAnnounce(t *utesting.T) {
-	if err := s.oldAnnounce(); err != nil {
+// TestOldAnnounce65 tests the announcement mechanism with an old block.
+func (s *Suite) TestOldAnnounce65(t *utesting.T) {
+	if err := s.oldAnnounce(eth65); err != nil {
 		t.Fatal(err)
 	}
 }
 
-// TestBlockHashAnnounce sends a new block hash announcement and expects
+// TestOldAnnounce66 tests the announcement mechanism with an old block,
+// over the eth66 protocol.
+func (s *Suite) TestOldAnnounce66(t *utesting.T) {
+	if err := s.oldAnnounce(eth66); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestBlockHashAnnounce65 sends a new block hash announcement and expects
 // the node to perform a `GetBlockHeaders` request.
-func (s *Suite) TestBlockHashAnnounce(t *utesting.T) {
-	if err := s.hashAnnounce(); err != nil {
+func (s *Suite) TestBlockHashAnnounce65(t *utesting.T) {
+	if err := s.hashAnnounce(eth65); err != nil {
 		t.Fatalf("block hash announcement failed: %v", err)
 	}
 }
 
-// TestMaliciousHandshake tries to send malicious data during the handshake.
-func (s *Suite) TestMaliciousHandshake(t *utesting.T) {
-	if err := s.maliciousHandshakes(t); err != nil {
+// TestBlockHashAnnounce66 sends a new block hash announcement and expects
+// the node to perform a `GetBlockHeaders` request.
+func (s *Suite) TestBlockHashAnnounce66(t *utesting.T) {
+	if err := s.hashAnnounce(eth66); err != nil {
+		t.Fatalf("block hash announcement failed: %v", err)
+	}
+}
+
+// TestMaliciousHandshake65 tries to send malicious data during the handshake.
+func (s *Suite) TestMaliciousHandshake65(t *utesting.T) {
+	if err := s.maliciousHandshakes(t, eth65); err != nil {
 		t.Fatal(err)
 	}
 }
 
-// TestMaliciousStatus sends a status package with a large total difficulty.
-func (s *Suite) TestMaliciousStatus(t *utesting.T) {
+// TestMaliciousHandshake66 tries to send malicious data during the handshake.
+func (s *Suite) TestMaliciousHandshake66(t *utesting.T) {
+	if err := s.maliciousHandshakes(t, eth66); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestMaliciousStatus65 sends a status package with a large total difficulty.
+func (s *Suite) TestMaliciousStatus65(t *utesting.T) {
 	conn, err := s.dial()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
@@ -433,28 +637,58 @@ func (s *Suite) TestMaliciousStatus(t *utesting.T) {
 	}
 }
 
-// TestTransaction sends a valid transaction to the node and
+// TestMaliciousStatus66 sends a status package with a large total
+// difficulty over the eth66 protocol.
+func (s *Suite) TestMaliciousStatus66(t *utesting.T) {
+	conn, err := s.dial66()
+	if err != nil {
+		t.Fatalf("dial failed: %v", err)
+	}
+	defer conn.Close()
+
+	if err := s.maliciousStatus(conn); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestTransaction65 sends a valid transaction to the node and
 // checks if the transaction gets propagated.
-func (s *Suite) TestTransaction(t *utesting.T) {
-	if err := s.sendSuccessfulTxs(t); err != nil {
+func (s *Suite) TestTransaction65(t *utesting.T) {
+	if err := s.sendSuccessfulTxs(t, eth65); err != nil {
 		t.Fatal(err)
 	}
 }
 
-// TestMaliciousTx sends several invalid transactions and tests whether
+// TestTransaction66 sends a valid transaction to the node and
+// checks if the transaction gets propagated.
+func (s *Suite) TestTransaction66(t *utesting.T) {
+	if err := s.sendSuccessfulTxs(t, eth66); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestMaliciousTx65 sends several invalid transactions and tests whether
 // the node will propagate them.
-func (s *Suite) TestMaliciousTx(t *utesting.T) {
-	if err := s.sendMaliciousTxs(t); err != nil {
+func (s *Suite) TestMaliciousTx65(t *utesting.T) {
+	if err := s.sendMaliciousTxs(t, eth65); err != nil {
 		t.Fatal(err)
 	}
 }
 
-// TestLargeTxRequest tests whether a node can fulfill a large GetPooledTransactions
+// TestMaliciousTx66 sends several invalid transactions and tests whether
+// the node will propagate them.
+func (s *Suite) TestMaliciousTx66(t *utesting.T) {
+	if err := s.sendMaliciousTxs(t, eth66); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestLargeTxRequest66 tests whether a node can fulfill a large GetPooledTransactions
 // request.
-func (s *Suite) TestLargeTxRequest(t *utesting.T) {
+func (s *Suite) TestLargeTxRequest66(t *utesting.T) {
 	// send the next block to ensure the node is no longer syncing and
 	// is able to accept txs
-	if err := s.sendNextBlock(); err != nil {
+	if err := s.sendNextBlock(eth66); err != nil {
 		t.Fatalf("failed to send next block: %v", err)
 	}
 	// send 2000 transactions to the node
@@ -467,7 +701,7 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
 	}
 	// set up connection to receive to ensure node is peered with the receiving connection
 	// before tx request is sent
-	conn, err := s.dial()
+	conn, err := s.dial66()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
 	}
@@ -480,17 +714,17 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
 	for _, hash := range hashMap {
 		hashes = append(hashes, hash)
 	}
-	getTxReq := &GetPooledTransactions{
+	getTxReq := &eth.GetPooledTransactionsPacket66{
 		RequestId:                   1234,
 		GetPooledTransactionsPacket: hashes,
 	}
-	if err = conn.Write(getTxReq); err != nil {
+	if err = conn.Write66(getTxReq, GetPooledTransactions{}.Code()); err != nil {
 		t.Fatalf("could not write to conn: %v", err)
 	}
 	// check that all received transactions match those that were sent to node
 	switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) {
-	case *PooledTransactions:
-		for _, gotTx := range msg.PooledTransactionsPacket {
+	case PooledTransactions:
+		for _, gotTx := range msg {
 			if _, exists := hashMap[gotTx.Hash()]; !exists {
 				t.Fatalf("unexpected tx received: %v", gotTx.Hash())
 			}
@@ -500,31 +734,30 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
 		}
 	}
 }
 
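TestLargeTxRequest66 above validates the reply purely by set membership: every transaction returned by the node must be one of the 2000 that were sent. A small sketch of that check, assuming hashMap maps each sent transaction's hash to itself as in the suite helpers:

	// resp is the PooledTransactions payload decoded from the node's answer.
	for _, gotTx := range resp {
		if _, exists := hashMap[gotTx.Hash()]; !exists {
			t.Fatalf("unexpected tx received: %v", gotTx.Hash())
		}
	}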
-// TestNewPooledTxs tests whether a node will do a GetPooledTransactions
+// TestNewPooledTxs_66 tests whether a node will do a GetPooledTransactions
 // request upon receiving a NewPooledTransactionHashes announcement.
-func (s *Suite) TestNewPooledTxs(t *utesting.T) {
+func (s *Suite) TestNewPooledTxs66(t *utesting.T) {
 	// send the next block to ensure the node is no longer syncing and
 	// is able to accept txs
-	if err := s.sendNextBlock(); err != nil {
+	if err := s.sendNextBlock(eth66); err != nil {
 		t.Fatalf("failed to send next block: %v", err)
 	}
 
 	// generate 50 txs
-	_, txs, err := generateTxs(s, 50)
+	hashMap, _, err := generateTxs(s, 50)
 	if err != nil {
 		t.Fatalf("failed to generate transactions: %v", err)
 	}
-	hashes := make([]common.Hash, len(txs))
-	types := make([]byte, len(txs))
-	sizes := make([]uint32, len(txs))
-	for i, tx := range txs {
-		hashes[i] = tx.Hash()
-		types[i] = tx.Type()
-		sizes[i] = uint32(tx.Size())
+	// create new pooled tx hashes announcement
+	hashes := make([]common.Hash, 0)
+	for _, hash := range hashMap {
+		hashes = append(hashes, hash)
 	}
+	announce := NewPooledTransactionHashes(hashes)
 
 	// send announcement
-	conn, err := s.dial()
+	conn, err := s.dial66()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
 	}
@@ -532,34 +765,22 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
 	if err = conn.peer(s.chain, nil); err != nil {
 		t.Fatalf("peering failed: %v", err)
 	}
-	var ann Message = NewPooledTransactionHashes{Types: types, Sizes: sizes, Hashes: hashes}
-	if conn.negotiatedProtoVersion < eth.ETH68 {
-		ann = NewPooledTransactionHashes66(hashes)
-	}
-	err = conn.Write(ann)
-	if err != nil {
+	if err = conn.Write(announce); err != nil {
 		t.Fatalf("failed to write to connection: %v", err)
 	}
 
 	// wait for GetPooledTxs request
 	for {
-		msg := conn.readAndServe(s.chain, timeout)
+		_, msg := conn.readAndServe66(s.chain, timeout)
 		switch msg := msg.(type) {
-		case *GetPooledTransactions:
-			if len(msg.GetPooledTransactionsPacket) != len(hashes) {
-				t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket))
+		case GetPooledTransactions:
+			if len(msg) != len(hashes) {
+				t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg))
 			}
 			return
 
 		// ignore propagated txs from previous tests
-		case *NewPooledTransactionHashes66:
-			continue
 		case *NewPooledTransactionHashes:
 			continue
-		case *Transactions:
-			continue
 
 		// ignore block announcements from previous tests
 		case *NewBlockHashes:
 			continue
@@ -45,7 +45,7 @@ func TestEthSuite(t *testing.T) {
 	if err != nil {
 		t.Fatalf("could not create new test suite: %v", err)
 	}
-	for _, test := range suite.EthTests() {
+	for _, test := range suite.Eth66Tests() {
 		t.Run(test.Name, func(t *testing.T) {
 			result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
 			if result[0].Failed {
@@ -29,10 +29,10 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 )
 
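The '-' side of TestNewPooledTxs above also documents the newer announcement format: under eth/68 a NewPooledTransactionHashes message carries parallel Types, Sizes and Hashes slices, while older peers only accept a bare hash list. A sketch of that version switch, using the names from the removed code:

	// Newer branch only: pick the announcement shape by negotiated version.
	var ann Message = NewPooledTransactionHashes{Types: types, Sizes: sizes, Hashes: hashes}
	if conn.negotiatedProtoVersion < eth.ETH68 {
		ann = NewPooledTransactionHashes66(hashes) // plain hash list for eth/65-67
	}
	if err := conn.Write(ann); err != nil {
		t.Fatalf("failed to write to connection: %v", err)
	}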
-// var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
+//var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
 var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 
-func (s *Suite) sendSuccessfulTxs(t *utesting.T) error {
+func (s *Suite) sendSuccessfulTxs(t *utesting.T, isEth66 bool) error {
 	tests := []*types.Transaction{
 		getNextTxFromChain(s),
 		unknownTx(s),
@@ -48,15 +48,15 @@ func (s *Suite) sendSuccessfulTxs(t *utesting.T) error {
 			prevTx = tests[i-1]
 		}
 		// write tx to connection
-		if err := sendSuccessfulTx(s, tx, prevTx); err != nil {
+		if err := sendSuccessfulTx(s, tx, prevTx, isEth66); err != nil {
 			return fmt.Errorf("send successful tx test failed: %v", err)
 		}
 	}
 	return nil
 }
 
-func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction) error {
-	sendConn, recvConn, err := s.createSendAndRecvConns()
+func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction, isEth66 bool) error {
+	sendConn, recvConn, err := s.createSendAndRecvConns(isEth66)
 	if err != nil {
 		return err
 	}
@@ -73,10 +73,8 @@ func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction
 	if err = recvConn.peer(s.chain, nil); err != nil {
 		return fmt.Errorf("peering failed: %v", err)
 	}
-
 	// update last nonce seen
 	nonce = tx.Nonce()
-
 	// Wait for the transaction announcement
 	for {
 		switch msg := recvConn.readAndServe(s.chain, timeout).(type) {
@@ -95,7 +93,7 @@ func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction
 			}
 			}
 			return fmt.Errorf("missing transaction: got %v missing %v", recTxs, tx.Hash())
-		case *NewPooledTransactionHashes66:
+		case *NewPooledTransactionHashes:
 			txHashes := *msg
 			// if you receive an old tx propagation, read from connection again
 			if len(txHashes) == 1 && prevTx != nil {
@@ -110,41 +108,13 @@ func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction
 				}
 			}
 			return fmt.Errorf("missing transaction announcement: got %v missing %v", txHashes, tx.Hash())
-		case *NewPooledTransactionHashes:
-			txHashes := msg.Hashes
-			if len(txHashes) != len(msg.Sizes) {
-				return fmt.Errorf("invalid msg size lengths: hashes: %v sizes: %v", len(txHashes), len(msg.Sizes))
-			}
-			if len(txHashes) != len(msg.Types) {
-				return fmt.Errorf("invalid msg type lengths: hashes: %v types: %v", len(txHashes), len(msg.Types))
-			}
-			// if you receive an old tx propagation, read from connection again
-			if len(txHashes) == 1 && prevTx != nil {
-				if txHashes[0] == prevTx.Hash() {
-					continue
-				}
-			}
-			for index, gotHash := range txHashes {
-				if gotHash == tx.Hash() {
-					if msg.Sizes[index] != uint32(tx.Size()) {
-						return fmt.Errorf("invalid tx size: got %v want %v", msg.Sizes[index], tx.Size())
-					}
-					if msg.Types[index] != tx.Type() {
-						return fmt.Errorf("invalid tx type: got %v want %v", msg.Types[index], tx.Type())
-					}
-					// Ok
-					return nil
-				}
-			}
-			return fmt.Errorf("missing transaction announcement: got %v missing %v", txHashes, tx.Hash())
-
 		default:
 			return fmt.Errorf("unexpected message in sendSuccessfulTx: %s", pretty.Sdump(msg))
 		}
 	}
 }
 
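The block removed from sendSuccessfulTx above enforces the eth/68 invariant that the three announcement slices stay aligned: entry i of Types and Sizes must describe the transaction whose hash is Hashes[i]. A condensed sketch of that check (msg and tx as in the removed code):

	for i, h := range msg.Hashes {
		if h != tx.Hash() {
			continue
		}
		if msg.Sizes[i] != uint32(tx.Size()) {
			return fmt.Errorf("invalid tx size: got %v want %v", msg.Sizes[i], tx.Size())
		}
		if msg.Types[i] != tx.Type() {
			return fmt.Errorf("invalid tx type: got %v want %v", msg.Types[i], tx.Type())
		}
		return nil // announced metadata matches the sent transaction
	}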
-func (s *Suite) sendMaliciousTxs(t *utesting.T) error {
+func (s *Suite) sendMaliciousTxs(t *utesting.T, isEth66 bool) error {
 	badTxs := []*types.Transaction{
 		getOldTxFromChain(s),
 		invalidNonceTx(s),
@@ -152,9 +122,16 @@ func (s *Suite) sendMaliciousTxs(t *utesting.T) error {
 		hugeGasPrice(s),
 		hugeData(s),
 	}
 
 	// setup receiving connection before sending malicious txs
-	recvConn, err := s.dial()
+	var (
+		recvConn *Conn
+		err      error
+	)
+	if isEth66 {
+		recvConn, err = s.dial66()
+	} else {
+		recvConn, err = s.dial()
+	}
 	if err != nil {
 		return fmt.Errorf("dial failed: %v", err)
 	}
@@ -162,10 +139,9 @@ func (s *Suite) sendMaliciousTxs(t *utesting.T) error {
 	if err = recvConn.peer(s.chain, nil); err != nil {
 		return fmt.Errorf("peering failed: %v", err)
 	}
-
 	for i, tx := range badTxs {
 		t.Logf("Testing malicious tx propagation: %v\n", i)
-		if err = sendMaliciousTx(s, tx); err != nil {
+		if err = sendMaliciousTx(s, tx, isEth66); err != nil {
 			return fmt.Errorf("malicious tx test failed:\ntx: %v\nerror: %v", tx, err)
 		}
 	}
@@ -173,8 +149,17 @@ func (s *Suite) sendMaliciousTxs(t *utesting.T) error {
 	return checkMaliciousTxPropagation(s, badTxs, recvConn)
 }
 
-func sendMaliciousTx(s *Suite, tx *types.Transaction) error {
-	conn, err := s.dial()
+func sendMaliciousTx(s *Suite, tx *types.Transaction, isEth66 bool) error {
+	// setup connection
+	var (
+		conn *Conn
+		err  error
+	)
+	if isEth66 {
+		conn, err = s.dial66()
+	} else {
+		conn, err = s.dial()
+	}
 	if err != nil {
 		return fmt.Errorf("dial failed: %v", err)
 	}
@@ -182,7 +167,6 @@ func sendMaliciousTx(s *Suite, tx *types.Transaction) error {
 	if err = conn.peer(s.chain, nil); err != nil {
 		return fmt.Errorf("peering failed: %v", err)
 	}
-
 	// write malicious tx
 	if err = conn.Write(&Transactions{tx}); err != nil {
 		return fmt.Errorf("failed to write to connection: %v", err)
||||||
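sendMaliciousTxs and sendMaliciousTx above both repeat the same eth66/eth65 dial selection inline. A hypothetical helper (dialAs is an illustrative name, not part of the diff) could centralize that choice:

	// dialAs opens a connection on the requested protocol flavour.
	func (s *Suite) dialAs(isEth66 bool) (*Conn, error) {
		if isEth66 {
			return s.dial66()
		}
		return s.dial()
	}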
@@ -198,7 +182,7 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, txs []*types.Transaction
 	txMsg := Transactions(txs)
 	t.Logf("sending %d txs\n", len(txs))
 
-	sendConn, recvConn, err := s.createSendAndRecvConns()
+	sendConn, recvConn, err := s.createSendAndRecvConns(true)
 	if err != nil {
 		return err
 	}
@@ -210,29 +194,23 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, txs []*types.Transaction
 	if err = recvConn.peer(s.chain, nil); err != nil {
 		return fmt.Errorf("peering failed: %v", err)
 	}
 
 	// Send the transactions
 	if err = sendConn.Write(&txMsg); err != nil {
 		return fmt.Errorf("failed to write message to connection: %v", err)
 	}
 
 	// update nonce
 	nonce = txs[len(txs)-1].Nonce()
-	// Wait for the transaction announcement(s) and make sure all sent txs are being propagated.
-	// all txs should be announced within a couple announcements.
+	// Wait for the transaction announcement(s) and make sure all sent txs are being propagated
 	recvHashes := make([]common.Hash, 0)
-	for i := 0; i < 20; i++ {
+	// all txs should be announced within 3 announcements
+	for i := 0; i < 3; i++ {
 		switch msg := recvConn.readAndServe(s.chain, timeout).(type) {
 		case *Transactions:
 			for _, tx := range *msg {
 				recvHashes = append(recvHashes, tx.Hash())
 			}
-		case *NewPooledTransactionHashes66:
-			recvHashes = append(recvHashes, *msg...)
 		case *NewPooledTransactionHashes:
-			recvHashes = append(recvHashes, msg.Hashes...)
+			recvHashes = append(recvHashes, *msg...)
 		default:
 			if !strings.Contains(pretty.Sdump(msg), "i/o timeout") {
 				return fmt.Errorf("unexpected message while waiting to receive txs: %s", pretty.Sdump(msg))
@@ -276,13 +254,8 @@ func checkMaliciousTxPropagation(s *Suite, txs []*types.Transaction, conn *Conn)
 		if len(badTxs) > 0 {
 			return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs)
 		}
-	case *NewPooledTransactionHashes66:
-		badTxs, _ := compareReceivedTxs(*msg, txs)
-		if len(badTxs) > 0 {
-			return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs)
-		}
 	case *NewPooledTransactionHashes:
-		badTxs, _ := compareReceivedTxs(msg.Hashes, txs)
+		badTxs, _ := compareReceivedTxs(*msg, txs)
 		if len(badTxs) > 0 {
 			return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs)
 		}
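checkMaliciousTxPropagation above relies on compareReceivedTxs, whose body is outside this excerpt; conceptually it splits the hashes read from the wire by whether they belong to a reference set of transactions. An illustrative sketch of that idea (names are hypothetical, not the suite's actual implementation):

	// splitByMembership partitions received hashes into those that match the
	// reference transactions and those that do not.
	func splitByMembership(recv []common.Hash, ref []*types.Transaction) (present, absent []common.Hash) {
		want := make(map[common.Hash]bool, len(ref))
		for _, tx := range ref {
			want[tx.Hash()] = true
		}
		for _, h := range recv {
			if want[h] {
				present = append(present, h)
			} else {
				absent = append(absent, h)
			}
		}
		return present, absent
	}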
@@ -29,7 +29,6 @@ import (
 
 type Message interface {
 	Code() int
-	ReqID() uint64
 }
 
 type Error struct {
@@ -38,10 +37,8 @@ type Error struct {
 
 func (e *Error) Unwrap() error  { return e.err }
 func (e *Error) Error() string  { return e.err.Error() }
-func (e *Error) String() string { return e.Error() }
 
 func (e *Error) Code() int      { return -1 }
-func (e *Error) ReqID() uint64  { return 0 }
+func (e *Error) String() string { return e.Error() }
 
 func errorf(format string, args ...interface{}) *Error {
 	return &Error{fmt.Errorf(format, args...)}
@@ -59,94 +56,73 @@ type Hello struct {
 	Rest []rlp.RawValue `rlp:"tail"`
 }
 
-func (msg Hello) Code() int     { return 0x00 }
-func (msg Hello) ReqID() uint64 { return 0 }
+func (h Hello) Code() int { return 0x00 }
 
 // Disconnect is the RLP structure for a disconnect message.
 type Disconnect struct {
 	Reason p2p.DiscReason
 }
 
-func (msg Disconnect) Code() int     { return 0x01 }
-func (msg Disconnect) ReqID() uint64 { return 0 }
+func (d Disconnect) Code() int { return 0x01 }
 
 type Ping struct{}
 
-func (msg Ping) Code() int     { return 0x02 }
-func (msg Ping) ReqID() uint64 { return 0 }
+func (p Ping) Code() int { return 0x02 }
 
 type Pong struct{}
 
-func (msg Pong) Code() int     { return 0x03 }
-func (msg Pong) ReqID() uint64 { return 0 }
+func (p Pong) Code() int { return 0x03 }
 
 // Status is the network packet for the status message for eth/64 and later.
 type Status eth.StatusPacket
 
-func (msg Status) Code() int     { return 16 }
-func (msg Status) ReqID() uint64 { return 0 }
+func (s Status) Code() int { return 16 }
 
 // NewBlockHashes is the network packet for the block announcements.
 type NewBlockHashes eth.NewBlockHashesPacket
 
-func (msg NewBlockHashes) Code() int     { return 17 }
-func (msg NewBlockHashes) ReqID() uint64 { return 0 }
+func (nbh NewBlockHashes) Code() int { return 17 }
 
 type Transactions eth.TransactionsPacket
 
-func (msg Transactions) Code() int     { return 18 }
-func (msg Transactions) ReqID() uint64 { return 18 }
+func (t Transactions) Code() int { return 18 }
 
 // GetBlockHeaders represents a block header query.
-type GetBlockHeaders eth.GetBlockHeadersPacket66
+type GetBlockHeaders eth.GetBlockHeadersPacket
 
-func (msg GetBlockHeaders) Code() int     { return 19 }
-func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId }
+func (g GetBlockHeaders) Code() int { return 19 }
 
-type BlockHeaders eth.BlockHeadersPacket66
+type BlockHeaders eth.BlockHeadersPacket
 
-func (msg BlockHeaders) Code() int     { return 20 }
-func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId }
+func (bh BlockHeaders) Code() int { return 20 }
 
 // GetBlockBodies represents a GetBlockBodies request
-type GetBlockBodies eth.GetBlockBodiesPacket66
+type GetBlockBodies eth.GetBlockBodiesPacket
 
-func (msg GetBlockBodies) Code() int     { return 21 }
-func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId }
+func (gbb GetBlockBodies) Code() int { return 21 }
 
 // BlockBodies is the network packet for block content distribution.
-type BlockBodies eth.BlockBodiesPacket66
+type BlockBodies eth.BlockBodiesPacket
 
-func (msg BlockBodies) Code() int     { return 22 }
-func (msg BlockBodies) ReqID() uint64 { return msg.RequestId }
+func (bb BlockBodies) Code() int { return 22 }
 
 // NewBlock is the network packet for the block propagation message.
 type NewBlock eth.NewBlockPacket
 
-func (msg NewBlock) Code() int     { return 23 }
-func (msg NewBlock) ReqID() uint64 { return 0 }
-
-// NewPooledTransactionHashes66 is the network packet for the tx hash propagation message.
-type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket66
-
-func (msg NewPooledTransactionHashes66) Code() int     { return 24 }
-func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 }
+func (nb NewBlock) Code() int { return 23 }
 
 // NewPooledTransactionHashes is the network packet for the tx hash propagation message.
-type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket68
+type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket
 
-func (msg NewPooledTransactionHashes) Code() int     { return 24 }
-func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 }
+func (nb NewPooledTransactionHashes) Code() int { return 24 }
 
-type GetPooledTransactions eth.GetPooledTransactionsPacket66
+type GetPooledTransactions eth.GetPooledTransactionsPacket
 
-func (msg GetPooledTransactions) Code() int     { return 25 }
-func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId }
+func (gpt GetPooledTransactions) Code() int { return 25 }
 
-type PooledTransactions eth.PooledTransactionsPacket66
+type PooledTransactions eth.PooledTransactionsPacket
 
-func (msg PooledTransactions) Code() int     { return 26 }
-func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId }
+func (pt PooledTransactions) Code() int { return 26 }
 
 // Conn represents an individual connection with a peer
 type Conn struct {
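The type list above also explains why the Message interface shrinks: on the '-' side every message aliases an eth66 wrapper and can report its own request id, while on the '+' side the id never reaches the Message values because Read66/Write66 handle it at the connection layer. A sketch of the shape, with the newer branch's extra requirement noted in a comment:

	type Message interface {
		Code() int
		// The '-' side additionally requires:
		// ReqID() uint64
	}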
@@ -159,7 +135,7 @@ type Conn struct {
 	caps []p2p.Cap
 }
 
-// Read reads an eth66 packet from the connection.
+// Read reads an eth packet from the connection.
 func (c *Conn) Read() Message {
 	code, rawData, _, err := c.Conn.Read()
 	if err != nil {
@@ -179,65 +155,109 @@ func (c *Conn) Read() Message {
 	case (Status{}).Code():
 		msg = new(Status)
 	case (GetBlockHeaders{}).Code():
-		ethMsg := new(eth.GetBlockHeadersPacket66)
-		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
-			return errorf("could not rlp decode message: %v", err)
-		}
-		return (*GetBlockHeaders)(ethMsg)
+		msg = new(GetBlockHeaders)
 	case (BlockHeaders{}).Code():
-		ethMsg := new(eth.BlockHeadersPacket66)
-		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
-			return errorf("could not rlp decode message: %v", err)
-		}
-		return (*BlockHeaders)(ethMsg)
+		msg = new(BlockHeaders)
 	case (GetBlockBodies{}).Code():
-		ethMsg := new(eth.GetBlockBodiesPacket66)
-		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
-			return errorf("could not rlp decode message: %v", err)
-		}
-		return (*GetBlockBodies)(ethMsg)
+		msg = new(GetBlockBodies)
 	case (BlockBodies{}).Code():
-		ethMsg := new(eth.BlockBodiesPacket66)
-		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
-			return errorf("could not rlp decode message: %v", err)
-		}
-		return (*BlockBodies)(ethMsg)
+		msg = new(BlockBodies)
 	case (NewBlock{}).Code():
 		msg = new(NewBlock)
 	case (NewBlockHashes{}).Code():
 		msg = new(NewBlockHashes)
 	case (Transactions{}).Code():
 		msg = new(Transactions)
-	case (NewPooledTransactionHashes66{}).Code():
-		// Try decoding to eth68
-		ethMsg := new(NewPooledTransactionHashes)
-		if err := rlp.DecodeBytes(rawData, ethMsg); err == nil {
-			return ethMsg
+	case (NewPooledTransactionHashes{}).Code():
+		msg = new(NewPooledTransactionHashes)
+	case (GetPooledTransactions{}.Code()):
+		msg = new(GetPooledTransactions)
+	case (PooledTransactions{}.Code()):
+		msg = new(PooledTransactions)
+	default:
+		return errorf("invalid message code: %d", code)
 	}
-		msg = new(NewPooledTransactionHashes66)
+	// if message is devp2p, decode here
+	if err := rlp.DecodeBytes(rawData, msg); err != nil {
+		return errorf("could not rlp decode message: %v", err)
+	}
+	return msg
+}
+
+// Read66 reads an eth66 packet from the connection.
+func (c *Conn) Read66() (uint64, Message) {
+	code, rawData, _, err := c.Conn.Read()
+	if err != nil {
+		return 0, errorf("could not read from connection: %v", err)
+	}
+
+	var msg Message
+	switch int(code) {
+	case (Hello{}).Code():
+		msg = new(Hello)
+	case (Ping{}).Code():
+		msg = new(Ping)
+	case (Pong{}).Code():
+		msg = new(Pong)
+	case (Disconnect{}).Code():
+		msg = new(Disconnect)
+	case (Status{}).Code():
+		msg = new(Status)
+	case (GetBlockHeaders{}).Code():
+		ethMsg := new(eth.GetBlockHeadersPacket66)
+		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
+			return 0, errorf("could not rlp decode message: %v", err)
+		}
+		return ethMsg.RequestId, GetBlockHeaders(*ethMsg.GetBlockHeadersPacket)
+	case (BlockHeaders{}).Code():
+		ethMsg := new(eth.BlockHeadersPacket66)
+		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
+			return 0, errorf("could not rlp decode message: %v", err)
+		}
+		return ethMsg.RequestId, BlockHeaders(ethMsg.BlockHeadersPacket)
+	case (GetBlockBodies{}).Code():
+		ethMsg := new(eth.GetBlockBodiesPacket66)
+		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
+			return 0, errorf("could not rlp decode message: %v", err)
+		}
+		return ethMsg.RequestId, GetBlockBodies(ethMsg.GetBlockBodiesPacket)
+	case (BlockBodies{}).Code():
+		ethMsg := new(eth.BlockBodiesPacket66)
+		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
+			return 0, errorf("could not rlp decode message: %v", err)
+		}
+		return ethMsg.RequestId, BlockBodies(ethMsg.BlockBodiesPacket)
+	case (NewBlock{}).Code():
+		msg = new(NewBlock)
+	case (NewBlockHashes{}).Code():
+		msg = new(NewBlockHashes)
+	case (Transactions{}).Code():
+		msg = new(Transactions)
+	case (NewPooledTransactionHashes{}).Code():
+		msg = new(NewPooledTransactionHashes)
 	case (GetPooledTransactions{}.Code()):
 		ethMsg := new(eth.GetPooledTransactionsPacket66)
 		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
-			return errorf("could not rlp decode message: %v", err)
+			return 0, errorf("could not rlp decode message: %v", err)
 		}
-		return (*GetPooledTransactions)(ethMsg)
+		return ethMsg.RequestId, GetPooledTransactions(ethMsg.GetPooledTransactionsPacket)
 	case (PooledTransactions{}.Code()):
 		ethMsg := new(eth.PooledTransactionsPacket66)
 		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
-			return errorf("could not rlp decode message: %v", err)
+			return 0, errorf("could not rlp decode message: %v", err)
 		}
-		return (*PooledTransactions)(ethMsg)
+		return ethMsg.RequestId, PooledTransactions(ethMsg.PooledTransactionsPacket)
 	default:
 		msg = errorf("invalid message code: %d", code)
 	}
 
 	if msg != nil {
 		if err := rlp.DecodeBytes(rawData, msg); err != nil {
-			return errorf("could not rlp decode message: %v", err)
+			return 0, errorf("could not rlp decode message: %v", err)
 		}
-		return msg
+		return 0, msg
 	}
-	return errorf("invalid message: %s", string(rawData))
+	return 0, errorf("invalid message: %s", string(rawData))
 }
 
 // Write writes a eth packet to the connection.
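Read66 above returns the request id next to the decoded payload, so callers can tell which outstanding request a response answers. A hedged usage sketch (sentID stands for whatever id the caller used when writing the request):

	reqID, msg := conn.Read66()
	switch m := msg.(type) {
	case BlockHeaders:
		if reqID != sentID {
			// response belongs to a different outstanding request; keep reading
		}
		_ = m
	case *Error:
		t.Fatalf("read failed: %v", m)
	}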
@@ -250,6 +270,16 @@ func (c *Conn) Write(msg Message) error {
 	return err
 }
 
+// Write66 writes an eth66 packet to the connection.
+func (c *Conn) Write66(req eth.Packet, code int) error {
+	payload, err := rlp.EncodeToBytes(req)
+	if err != nil {
+		return err
+	}
+	_, err = c.Conn.Write(uint64(code), payload)
+	return err
+}
+
 // ReadSnap reads a snap/1 response with the given id from the connection.
 func (c *Conn) ReadSnap(id uint64) (Message, error) {
 	respId := id + 1
@@ -285,6 +315,7 @@ func (c *Conn) ReadSnap(id uint64) (Message, error) {
 				return nil, fmt.Errorf("could not rlp decode message: %v", err)
 			}
 			return snpMsg.(Message), nil
+
 		}
 	}
 	return nil, fmt.Errorf("request timed out")
 }
@@ -37,9 +37,9 @@ const (
 var (
 	// Remote node under test
 	Remote string
-	// Listen1 is the IP where the first tester is listening, port will be assigned
+	// IP where the first tester is listening, port will be assigned
 	Listen1 string = "127.0.0.1"
-	// Listen2 is the IP where the second tester is listening, port will be assigned
+	// IP where the second tester is listening, port will be assigned
 	// Before running the test, you may have to `sudo ifconfig lo0 add 127.0.0.2` (on MacOS at least)
 	Listen2 string = "127.0.0.2"
 )
@@ -68,7 +68,7 @@ func futureExpiration() uint64 {
 	return uint64(time.Now().Add(expiration).Unix())
 }
 
-// BasicPing just sends a PING packet and expects a response.
+// This test just sends a PING packet and expects a response.
 func BasicPing(t *utesting.T) {
 	te := newTestEnv(Remote, Listen1, Listen2)
 	defer te.close()
@@ -137,7 +137,7 @@ func (te *testenv) checkPong(reply v4wire.Packet, pingHash []byte) error {
 	return nil
 }
 
-// PingWrongTo sends a PING packet with wrong 'to' field and expects a PONG response.
+// This test sends a PING packet with wrong 'to' field and expects a PONG response.
 func PingWrongTo(t *utesting.T) {
 	te := newTestEnv(Remote, Listen1, Listen2)
 	defer te.close()
@@ -154,7 +154,7 @@ func PingWrongTo(t *utesting.T) {
 	}
 }
 
-// PingWrongFrom sends a PING packet with wrong 'from' field and expects a PONG response.
+// This test sends a PING packet with wrong 'from' field and expects a PONG response.
 func PingWrongFrom(t *utesting.T) {
 	te := newTestEnv(Remote, Listen1, Listen2)
 	defer te.close()
@@ -172,7 +172,7 @@ func PingWrongFrom(t *utesting.T) {
 	}
 }
 
-// PingExtraData This test sends a PING packet with additional data at the end and expects a PONG
+// This test sends a PING packet with additional data at the end and expects a PONG
 // response. The remote node should respond because EIP-8 mandates ignoring additional
 // trailing data.
 func PingExtraData(t *utesting.T) {
@@ -256,7 +256,6 @@ func WrongPacketType(t *utesting.T) {
 func BondThenPingWithWrongFrom(t *utesting.T) {
 	te := newTestEnv(Remote, Listen1, Listen2)
 	defer te.close()
-
 	bond(t, te)
 
 	wrongEndpoint := v4wire.Endpoint{IP: net.ParseIP("192.0.2.0")}
@@ -266,26 +265,11 @@ func BondThenPingWithWrongFrom(t *utesting.T) {
 		To:         te.remoteEndpoint(),
 		Expiration: futureExpiration(),
 	})
+	if reply, _, err := te.read(te.l1); err != nil {
-waitForPong:
-	for {
-		reply, _, err := te.read(te.l1)
-		if err != nil {
 		t.Fatal(err)
-		}
-		switch reply.Kind() {
-		case v4wire.PongPacket:
-			if err := te.checkPong(reply, pingHash); err != nil {
+	} else if err := te.checkPong(reply, pingHash); err != nil {
 		t.Fatal(err)
 	}
-			break waitForPong
-		case v4wire.FindnodePacket:
-			// FINDNODE from the node is acceptable here since the endpoint
-			// verification was performed earlier.
-		default:
-			t.Fatalf("Expected PONG, got %v %v", reply.Name(), reply)
-		}
-	}
 }
 
 // This test just sends FINDNODE. The remote node should not reply
@@ -395,7 +379,7 @@ func FindnodePastExpiration(t *utesting.T) {
 
 // bond performs the endpoint proof with the remote node.
 func bond(t *utesting.T, te *testenv) {
-	pingHash := te.send(te.l1, &v4wire.Ping{
+	te.send(te.l1, &v4wire.Ping{
 		Version: 4,
 		From:    te.localEndpoint(te.l1),
 		To:      te.remoteEndpoint(),
@@ -417,9 +401,7 @@ func bond(t *utesting.T, te *testenv) {
 		})
 		gotPing = true
 	case *v4wire.Pong:
-		if err := te.checkPong(req, pingHash); err != nil {
-			t.Fatal(err)
-		}
+		// TODO: maybe verify pong data here
 		gotPong = true
 	}
 }
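On the '-' side, bond keeps the hash returned by te.send so the eventual PONG can be verified with checkPong (the helper named in the hunk header above); the '+' side drops that verification. A condensed sketch of the stricter variant:

	pingHash := te.send(te.l1, &v4wire.Ping{
		Version:    4,
		From:       te.localEndpoint(te.l1),
		To:         te.remoteEndpoint(),
		Expiration: futureExpiration(),
	})
	// ...later, when a *v4wire.Pong arrives as the reply:
	// if err := te.checkPong(reply, pingHash); err != nil {
	//	t.Fatal(err)
	// }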
||||||
@@ -58,7 +58,7 @@ func (s *Suite) AllTests() []utesting.Test {
     }
 }
 
-// TestPing sends PING and expects a PONG response.
+// This test sends PING and expects a PONG response.
 func (s *Suite) TestPing(t *utesting.T) {
     conn, l1 := s.listen1(t)
     defer conn.close()
@@ -84,7 +84,7 @@ func checkPong(t *utesting.T, pong *v5wire.Pong, ping *v5wire.Ping, c net.Packet
     }
 }
 
-// TestPingLargeRequestID sends PING with a 9-byte request ID, which isn't allowed by the spec.
+// This test sends PING with a 9-byte request ID, which isn't allowed by the spec.
 // The remote node should not respond.
 func (s *Suite) TestPingLargeRequestID(t *utesting.T) {
     conn, l1 := s.listen1(t)
@@ -103,7 +103,7 @@ func (s *Suite) TestPingLargeRequestID(t *utesting.T) {
     }
 }
 
-// TestPingMultiIP establishes a session from one IP as usual. The session is then reused
+// In this test, a session is established from one IP as usual. The session is then reused
 // on another IP, which shouldn't work. The remote node should respond with WHOAREYOU for
 // the attempt from a different IP.
 func (s *Suite) TestPingMultiIP(t *utesting.T) {
@@ -153,7 +153,7 @@ func (s *Suite) TestPingMultiIP(t *utesting.T) {
     }
 }
 
-// TestPingHandshakeInterrupted starts a handshake, but doesn't finish it and sends a second ordinary message
+// This test starts a handshake, but doesn't finish it and sends a second ordinary message
 // packet instead of a handshake message packet. The remote node should respond with
 // another WHOAREYOU challenge for the second packet.
 func (s *Suite) TestPingHandshakeInterrupted(t *utesting.T) {
@@ -180,7 +180,7 @@ func (s *Suite) TestPingHandshakeInterrupted(t *utesting.T) {
     }
 }
 
-// TestTalkRequest sends TALKREQ and expects an empty TALKRESP response.
+// This test sends TALKREQ and expects an empty TALKRESP response.
 func (s *Suite) TestTalkRequest(t *utesting.T) {
     conn, l1 := s.listen1(t)
     defer conn.close()
@@ -215,7 +215,7 @@ func (s *Suite) TestTalkRequest(t *utesting.T) {
     }
 }
 
-// TestFindnodeZeroDistance checks that the remote node returns itself for FINDNODE with distance zero.
+// This test checks that the remote node returns itself for FINDNODE with distance zero.
 func (s *Suite) TestFindnodeZeroDistance(t *utesting.T) {
     conn, l1 := s.listen1(t)
     defer conn.close()
@@ -232,7 +232,7 @@ func (s *Suite) TestFindnodeZeroDistance(t *utesting.T) {
     }
 }
 
-// TestFindnodeResults pings the node under test from multiple nodes. After waiting for them to be
+// In this test, multiple nodes ping the node under test. After waiting for them to be
 // accepted into the remote table, the test checks that they are returned by FINDNODE.
 func (s *Suite) TestFindnodeResults(t *utesting.T) {
     // Create bystanders.
@@ -355,7 +355,7 @@ func (bn *bystander) loop() {
             wasAdded = true
             bn.notifyAdded()
         case *v5wire.Findnode:
-            bn.conn.write(bn.l, &v5wire.Nodes{ReqID: p.ReqID, RespCount: 1}, nil)
+            bn.conn.write(bn.l, &v5wire.Nodes{ReqID: p.ReqID, Total: 1}, nil)
             wasAdded = true
             bn.notifyAdded()
         case *v5wire.TalkRequest:
@@ -44,8 +44,6 @@ func (p *readError) Unwrap() error { return p.err }
 func (p *readError) RequestID() []byte { return nil }
 func (p *readError) SetRequestID([]byte) {}
 
-func (p *readError) AppendLogInfo(ctx []interface{}) []interface{} { return ctx }
-
 // readErrorf creates a readError with the given text.
 func readErrorf(format string, args ...interface{}) *readError {
     return &readError{fmt.Errorf(format, args...)}
@@ -88,7 +86,7 @@ func newConn(dest *enode.Node, log logger) *conn {
         localNode:  ln,
         remote:     dest,
         remoteAddr: &net.UDPAddr{IP: dest.IP(), Port: dest.UDP()},
-        codec:      v5wire.NewCodec(ln, key, mclock.System{}, nil),
+        codec:      v5wire.NewCodec(ln, key, mclock.System{}),
         log:        log,
     }
 }
@@ -173,16 +171,16 @@ func (tc *conn) findnode(c net.PacketConn, dists []uint) ([]*enode.Node, error)
     // Check total count. It should be greater than one
     // and needs to be the same across all responses.
     if first {
-        if resp.RespCount == 0 || resp.RespCount > 6 {
-            return nil, fmt.Errorf("invalid NODES response count %d (not in (0,7))", resp.RespCount)
+        if resp.Total == 0 || resp.Total > 6 {
+            return nil, fmt.Errorf("invalid NODES response 'total' %d (not in (0,7))", resp.Total)
         }
-        total = resp.RespCount
+        total = resp.Total
         n = int(total) - 1
         first = false
     } else {
         n--
-        if resp.RespCount != total {
-            return nil, fmt.Errorf("invalid NODES response count %d (!= %d)", resp.RespCount, total)
+        if resp.Total != total {
+            return nil, fmt.Errorf("invalid NODES response 'total' %d (!= %d)", resp.Total, total)
         }
     }
     // Check nodes.
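The findnode hunk above is mostly a field rename (RespCount in the newer wire types, Total in the older ones), but the validation it performs is easy to state on its own: every NODES packet in a multi-packet response must advertise the same total, and that total must lie in 1..6. A small standalone sketch of the same check follows; the function name and the sample slices are illustrative, not part of the diff.

package main

import "fmt"

// checkNodesTotals mirrors the count checks in conn.findnode above: the
// advertised total must be in the range 1..6 and identical in every packet.
func checkNodesTotals(totals []uint8) error {
    if len(totals) == 0 {
        return fmt.Errorf("no NODES responses received")
    }
    first := totals[0]
    if first == 0 || first > 6 {
        return fmt.Errorf("invalid NODES response count %d (not in (0,7))", first)
    }
    for _, total := range totals[1:] {
        if total != first {
            return fmt.Errorf("invalid NODES response count %d (!= %d)", total, first)
        }
    }
    return nil
}

func main() {
    fmt.Println(checkNodesTotals([]uint8{3, 3, 3})) // <nil>
    fmt.Println(checkNodesTotals([]uint8{3, 2}))    // mismatch error
}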
@@ -22,7 +22,6 @@ import (
 
     "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/p2p/enode"
-    "github.com/ethereum/go-ethereum/p2p/enr"
     "github.com/urfave/cli/v2"
 )
 
@@ -32,9 +31,7 @@ var (
         Usage: "Operations on node keys",
         Subcommands: []*cli.Command{
             keyGenerateCommand,
-            keyToIDCommand,
             keyToNodeCommand,
-            keyToRecordCommand,
         },
     }
     keyGenerateCommand = &cli.Command{
@@ -43,13 +40,6 @@ var (
         ArgsUsage: "keyfile",
         Action:    genkey,
     }
-    keyToIDCommand = &cli.Command{
-        Name:      "to-id",
-        Usage:     "Creates a node ID from a node key file",
-        ArgsUsage: "keyfile",
-        Action:    keyToID,
-        Flags:     []cli.Flag{},
-    }
     keyToNodeCommand = &cli.Command{
         Name:      "to-enode",
         Usage:     "Creates an enode URL from a node key file",
@@ -57,13 +47,6 @@ var (
         Action:    keyToURL,
         Flags:     []cli.Flag{hostFlag, tcpPortFlag, udpPortFlag},
     }
-    keyToRecordCommand = &cli.Command{
-        Name:      "to-enr",
-        Usage:     "Creates an ENR from a node key file",
-        ArgsUsage: "keyfile",
-        Action:    keyToRecord,
-        Flags:     []cli.Flag{hostFlag, tcpPortFlag, udpPortFlag},
-    }
 )
 
 var (
@@ -97,36 +80,9 @@ func genkey(ctx *cli.Context) error {
     return crypto.SaveECDSA(file, key)
 }
 
-func keyToID(ctx *cli.Context) error {
-    n, err := makeRecord(ctx)
-    if err != nil {
-        return err
-    }
-    fmt.Println(n.ID())
-    return nil
-}
-
 func keyToURL(ctx *cli.Context) error {
-    n, err := makeRecord(ctx)
-    if err != nil {
-        return err
-    }
-    fmt.Println(n.URLv4())
-    return nil
-}
-
-func keyToRecord(ctx *cli.Context) error {
-    n, err := makeRecord(ctx)
-    if err != nil {
-        return err
-    }
-    fmt.Println(n.String())
-    return nil
-}
-
-func makeRecord(ctx *cli.Context) (*enode.Node, error) {
     if ctx.NArg() != 1 {
-        return nil, fmt.Errorf("need key file as argument")
+        return fmt.Errorf("need key file as argument")
     }
 
     var (
@@ -137,26 +93,13 @@ func makeRecord(ctx *cli.Context) (*enode.Node, error) {
     )
     key, err := crypto.LoadECDSA(file)
     if err != nil {
-        return nil, err
+        return err
     }
 
-    var r enr.Record
-    if host != "" {
     ip := net.ParseIP(host)
     if ip == nil {
-        return nil, fmt.Errorf("invalid IP address %q", host)
+        return fmt.Errorf("invalid IP address %q", host)
     }
-        r.Set(enr.IP(ip))
-    }
-    if udp != 0 {
-        r.Set(enr.UDP(udp))
-    }
-    if tcp != 0 {
-        r.Set(enr.TCP(tcp))
-    }
-
-    if err := enode.SignV4(&r, key); err != nil {
-        return nil, err
-    }
-    return enode.New(enode.ValidSchemes, &r)
+    node := enode.NewV4(&key.PublicKey, ip, tcp, udp)
+    fmt.Println(node.URLv4())
+    return nil
 }
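The keycmd hunks remove the `key to-id` and `key to-enr` subcommands and, with them, the makeRecord helper that built a signed ENR from the key file; the older side only prints an enode URL via enode.NewV4. For reference, here is a minimal sketch of what the removed path does, using the same p2p/enode and p2p/enr calls that appear in the deleted lines; the freshly generated key and the endpoint values are placeholders, since the real command loads the key with crypto.LoadECDSA and takes the endpoint from flags.

package main

import (
    "fmt"
    "net"

    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
)

func main() {
    // keycmd loads the key from a file; a fresh key is used here instead.
    key, err := crypto.GenerateKey()
    if err != nil {
        panic(err)
    }

    // Build the record the way the removed makeRecord does: set the endpoint
    // fields, then sign with the v4 identity scheme.
    var r enr.Record
    r.Set(enr.IP(net.ParseIP("127.0.0.1")))
    r.Set(enr.UDP(30303))
    r.Set(enr.TCP(30303))
    if err := enode.SignV4(&r, key); err != nil {
        panic(err)
    }

    n, err := enode.New(enode.ValidSchemes, &r)
    if err != nil {
        panic(err)
    }
    fmt.Println(n.ID())     // what `key to-id` printed
    fmt.Println(n.URLv4())  // what `key to-enode` prints
    fmt.Println(n.String()) // the record form that `key to-enr` printed
}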
@@ -19,16 +19,30 @@ package main
 import (
     "fmt"
     "os"
+    "path/filepath"
 
     "github.com/ethereum/go-ethereum/internal/debug"
     "github.com/ethereum/go-ethereum/internal/flags"
     "github.com/ethereum/go-ethereum/p2p/enode"
+    "github.com/ethereum/go-ethereum/params"
     "github.com/urfave/cli/v2"
 )
 
-var app = flags.NewApp("go-ethereum devp2p tool")
+var (
+    // Git information set by linker when building with ci.go.
+    gitCommit string
+    gitDate   string
+    app       = &cli.App{
+        Name:        filepath.Base(os.Args[0]),
+        Usage:       "go-ethereum devp2p tool",
+        Version:     params.VersionWithCommit(gitCommit, gitDate),
+        Writer:      os.Stdout,
+        HideVersion: true,
+    }
+)
 
 func init() {
+    // Set up the CLI app.
     app.Flags = append(app.Flags, debug.Flags...)
     app.Before = func(ctx *cli.Context) error {
         flags.MigrateGlobalFlags(ctx)
@@ -42,7 +56,6 @@ func init() {
         fmt.Fprintf(os.Stderr, "No such command: %s\n", cmd)
         os.Exit(1)
     }
 
     // Add subcommands.
     app.Commands = []*cli.Command{
         enrdumpCommand,
@@ -181,7 +181,7 @@ func parseFilterLimit(args []string) (int, error) {
     return limit, nil
 }
 
-// andFilter parses node filters in args and returns a single filter that requires all
+// andFilter parses node filters in args and and returns a single filter that requires all
 // of them to match.
 func andFilter(args []string) (nodeFilter, error) {
     checks, err := parseFilters(args)
@@ -233,6 +233,8 @@ func ethFilter(args []string) (nodeFilter, error) {
         filter = forkid.NewStaticFilter(params.RinkebyChainConfig, params.RinkebyGenesisHash)
     case "goerli":
         filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash)
+    case "ropsten":
+        filter = forkid.NewStaticFilter(params.RopstenChainConfig, params.RopstenGenesisHash)
     case "sepolia":
         filter = forkid.NewStaticFilter(params.SepoliaChainConfig, params.SepoliaGenesisHash)
     default:
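The ethFilter hunk restores the "ropsten" network choice by building a static fork-ID filter for it. A short sketch of how such a filter behaves once constructed is shown below; it only compiles against the older side of this compare, since RopstenChainConfig and RopstenGenesisHash were dropped from params in later releases, and the fork ID value is a made-up example (in the real command it is decoded from a node's "eth" ENR entry).

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/forkid"
    "github.com/ethereum/go-ethereum/params"
)

func main() {
    // Same construction as the restored "ropsten" case above.
    filter := forkid.NewStaticFilter(params.RopstenChainConfig, params.RopstenGenesisHash)

    // An example fork ID standing in for one decoded from a node's "eth" ENR entry.
    id := forkid.ID{Hash: [4]byte{0xde, 0xad, 0xbe, 0xef}, Next: 0}
    if err := filter(id); err != nil {
        fmt.Println("node rejected:", err)
    } else {
        fmt.Println("node accepted")
    }
}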
@@ -22,6 +22,7 @@ import (
 
     "github.com/ethereum/go-ethereum/cmd/devp2p/internal/ethtest"
     "github.com/ethereum/go-ethereum/crypto"
+    "github.com/ethereum/go-ethereum/internal/utesting"
     "github.com/ethereum/go-ethereum/p2p"
     "github.com/ethereum/go-ethereum/p2p/rlpx"
     "github.com/ethereum/go-ethereum/rlp"
@@ -109,7 +110,12 @@ func rlpxEthTest(ctx *cli.Context) error {
     if err != nil {
         exit(err)
     }
+    // check if given node supports eth66, and if so, run eth66 protocol tests as well
+    is66Failed, _ := utesting.Run(utesting.Test{Name: "Is_66", Fn: suite.Is_66})
+    if is66Failed {
     return runTests(ctx, suite.EthTests())
+    }
+    return runTests(ctx, suite.AllEthTests())
 }
 
 // rlpxSnapTest runs the snap protocol test suite.
Some files were not shown because too many files have changed in this diff.