forked from cerc-io/plugeth
Merge pull request #106 from openrelayxyz/merge/geth-v1.13.9
Merge/geth v1.13.9
commit 432bfa1e0e
@@ -6,7 +6,7 @@ version: 2.1
 jobs:
   test:
     docker:
-      - image: cimg/go:1.20
+      - image: cimg/go:1.21.0
     steps:
       - checkout
       - run:
@@ -38,7 +38,7 @@ jobs:
           command: go test ./core/rawdb/
   build_geth_push:
     docker: # run the steps with Docker
-      - image: cimg/go:1.20 # ...with this image as the primary container
+      - image: cimg/go:1.21.0 # ...with this image as the primary container
       # this is where all `steps` will run
     steps:
       - checkout
.github/workflows/go.yml (vendored, new file, +23)
@@ -0,0 +1,23 @@
+name: i386 linux tests
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    branches: [ master ]
+  workflow_dispatch:
+
+jobs:
+  build:
+    runs-on: self-hosted
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: 1.21.4
+    - name: Run tests
+      run: go test ./...
+      env:
+        GOOS: linux
+        GOARCH: 386
@@ -12,7 +12,6 @@ run:
 linters:
   disable-all: true
   enable:
-    - goconst
     - goimports
     - gosimple
     - govet
@@ -39,9 +38,6 @@ linters:
 linters-settings:
   gofmt:
     simplify: true
-  goconst:
-    min-len: 3 # minimum length of string constant
-    min-occurrences: 6 # minimum number of occurrences
 
 issues:
   exclude-rules:
.travis.yml (12 lines changed)
@@ -9,18 +9,6 @@ jobs:
     - azure-osx
 
   include:
-    # This builder only tests code linters on latest version of Go
-    - stage: lint
-      os: linux
-      dist: bionic
-      go: 1.21.x
-      env:
-        - lint
-      git:
-        submodules: false # avoid cloning ethereum/tests
-      script:
-        - go run build/ci.go lint
-
     # These builders create the Docker sub-images for multi-arch push and each
     # will attempt to push the multi-arch image if they are the last builder
     - stage: build
@@ -4,7 +4,7 @@ ARG VERSION=""
 ARG BUILDNUM=""
 
 # Build Geth in a stock Go builder container
-FROM golang:1.20-alpine as builder
+FROM golang:1.21-alpine as builder
 
 RUN apk add --no-cache gcc musl-dev linux-headers git
 
@@ -251,7 +251,7 @@ var revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4]
 var panicSelector = crypto.Keccak256([]byte("Panic(uint256)"))[:4]
 
 // panicReasons map is for readable panic codes
-// see this linkage for the deails
+// see this linkage for the details
 // https://docs.soliditylang.org/en/v0.8.21/control-structures.html#panic-via-assert-and-error-via-require
 // the reason string list is copied from ether.js
 // https://github.com/ethers-io/ethers.js/blob/fa3a883ff7c88611ce766f58bdd4b8ac90814470/src.ts/abi/interface.ts#L207-L218
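Note: the selectors and panicReasons map touched above are consumed through abi.UnpackRevert, which TestUnpackRevert further down exercises. A minimal, hedged usage sketch, reusing the panic-code input from that test; the surrounding main wrapper is illustrative only:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Return data for Panic(uint256) with an unrecognised code, taken from the
	// TestUnpackRevert case below; known codes are mapped through panicReasons.
	data := common.Hex2Bytes("4e487b7100000000000000000000000000000000000000000000000000000000000000ff")

	reason, err := abi.UnpackRevert(data)
	if err != nil {
		fmt.Println("unpack failed:", err)
		return
	}
	fmt.Println("revert reason:", reason) // per the test case, "unknown panic code: 0xff"
}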
@@ -120,6 +120,7 @@ var methods = map[string]Method{
 }
 
 func TestReader(t *testing.T) {
+	t.Parallel()
 	abi := ABI{
 		Methods: methods,
 	}
@@ -151,6 +152,7 @@ func TestReader(t *testing.T) {
 }
 
 func TestInvalidABI(t *testing.T) {
+	t.Parallel()
 	json := `[{ "type" : "function", "name" : "", "constant" : fals }]`
 	_, err := JSON(strings.NewReader(json))
 	if err == nil {
@@ -170,6 +172,7 @@ func TestInvalidABI(t *testing.T) {
 // constructor(uint256 a, uint256 b) public{}
 // }
 func TestConstructor(t *testing.T) {
+	t.Parallel()
 	json := `[{ "inputs": [{"internalType": "uint256","name": "a","type": "uint256" },{ "internalType": "uint256","name": "b","type": "uint256"}],"stateMutability": "nonpayable","type": "constructor"}]`
 	method := NewMethod("", "", Constructor, "nonpayable", false, false, []Argument{{"a", Uint256, false}, {"b", Uint256, false}}, nil)
 	// Test from JSON
@@ -199,6 +202,7 @@ func TestConstructor(t *testing.T) {
 }
 
 func TestTestNumbers(t *testing.T) {
+	t.Parallel()
 	abi, err := JSON(strings.NewReader(jsondata))
 	if err != nil {
 		t.Fatal(err)
@@ -236,6 +240,7 @@ func TestTestNumbers(t *testing.T) {
 }
 
 func TestMethodSignature(t *testing.T) {
+	t.Parallel()
 	m := NewMethod("foo", "foo", Function, "", false, false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil)
 	exp := "foo(string,string)"
 	if m.Sig != exp {
@@ -274,6 +279,7 @@ func TestMethodSignature(t *testing.T) {
 }
 
 func TestOverloadedMethodSignature(t *testing.T) {
+	t.Parallel()
 	json := `[{"constant":true,"inputs":[{"name":"i","type":"uint256"},{"name":"j","type":"uint256"}],"name":"foo","outputs":[],"payable":false,"stateMutability":"pure","type":"function"},{"constant":true,"inputs":[{"name":"i","type":"uint256"}],"name":"foo","outputs":[],"payable":false,"stateMutability":"pure","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"i","type":"uint256"}],"name":"bar","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"i","type":"uint256"},{"indexed":false,"name":"j","type":"uint256"}],"name":"bar","type":"event"}]`
 	abi, err := JSON(strings.NewReader(json))
 	if err != nil {
@@ -297,6 +303,7 @@ func TestOverloadedMethodSignature(t *testing.T) {
 }
 
 func TestCustomErrors(t *testing.T) {
+	t.Parallel()
 	json := `[{ "inputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ],"name": "MyError", "type": "error"} ]`
 	abi, err := JSON(strings.NewReader(json))
 	if err != nil {
@@ -311,6 +318,7 @@ func TestCustomErrors(t *testing.T) {
 }
 
 func TestMultiPack(t *testing.T) {
+	t.Parallel()
 	abi, err := JSON(strings.NewReader(jsondata))
 	if err != nil {
 		t.Fatal(err)
@@ -348,6 +356,7 @@ func ExampleJSON() {
 }
 
 func TestInputVariableInputLength(t *testing.T) {
+	t.Parallel()
 	const definition = `[
 	{ "type" : "function", "name" : "strOne", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" } ] },
 	{ "type" : "function", "name" : "bytesOne", "constant" : true, "inputs" : [ { "name" : "str", "type" : "bytes" } ] },
@@ -476,6 +485,7 @@ func TestInputVariableInputLength(t *testing.T) {
 }
 
 func TestInputFixedArrayAndVariableInputLength(t *testing.T) {
+	t.Parallel()
 	abi, err := JSON(strings.NewReader(jsondata))
 	if err != nil {
 		t.Error(err)
@@ -650,6 +660,7 @@ func TestInputFixedArrayAndVariableInputLength(t *testing.T) {
 }
 
 func TestDefaultFunctionParsing(t *testing.T) {
+	t.Parallel()
 	const definition = `[{ "name" : "balance", "type" : "function" }]`
 
 	abi, err := JSON(strings.NewReader(definition))
@@ -663,6 +674,7 @@ func TestDefaultFunctionParsing(t *testing.T) {
 }
 
 func TestBareEvents(t *testing.T) {
+	t.Parallel()
 	const definition = `[
 	{ "type" : "event", "name" : "balance" },
 	{ "type" : "event", "name" : "anon", "anonymous" : true},
@@ -739,6 +751,7 @@ func TestBareEvents(t *testing.T) {
 //
 // receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
 func TestUnpackEvent(t *testing.T) {
+	t.Parallel()
 	const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]`
 	abi, err := JSON(strings.NewReader(abiJSON))
 	if err != nil {
@@ -777,6 +790,7 @@ func TestUnpackEvent(t *testing.T) {
 }
 
 func TestUnpackEventIntoMap(t *testing.T) {
+	t.Parallel()
 	const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]`
 	abi, err := JSON(strings.NewReader(abiJSON))
 	if err != nil {
@@ -827,6 +841,7 @@ func TestUnpackEventIntoMap(t *testing.T) {
 }
 
 func TestUnpackMethodIntoMap(t *testing.T) {
+	t.Parallel()
 	const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[],"name":"send","outputs":[{"name":"amount","type":"uint256"}],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[{"name":"addr","type":"address"}],"name":"get","outputs":[{"name":"hash","type":"bytes"}],"payable":true,"stateMutability":"payable","type":"function"}]`
 	abi, err := JSON(strings.NewReader(abiJSON))
 	if err != nil {
@@ -877,6 +892,7 @@ func TestUnpackMethodIntoMap(t *testing.T) {
 }
 
 func TestUnpackIntoMapNamingConflict(t *testing.T) {
+	t.Parallel()
 	// Two methods have the same name
 	var abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"get","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[],"name":"send","outputs":[{"name":"amount","type":"uint256"}],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[{"name":"addr","type":"address"}],"name":"get","outputs":[{"name":"hash","type":"bytes"}],"payable":true,"stateMutability":"payable","type":"function"}]`
 	abi, err := JSON(strings.NewReader(abiJSON))
@@ -960,6 +976,7 @@ func TestUnpackIntoMapNamingConflict(t *testing.T) {
 }
 
 func TestABI_MethodById(t *testing.T) {
+	t.Parallel()
 	abi, err := JSON(strings.NewReader(jsondata))
 	if err != nil {
 		t.Fatal(err)
@@ -992,6 +1009,7 @@ func TestABI_MethodById(t *testing.T) {
 }
 
 func TestABI_EventById(t *testing.T) {
+	t.Parallel()
 	tests := []struct {
 		name string
 		json string
@@ -1058,6 +1076,7 @@ func TestABI_EventById(t *testing.T) {
 }
 
 func TestABI_ErrorByID(t *testing.T) {
+	t.Parallel()
 	abi, err := JSON(strings.NewReader(`[
 		{"inputs":[{"internalType":"uint256","name":"x","type":"uint256"}],"name":"MyError1","type":"error"},
 		{"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"string","name":"b","type":"string"},{"internalType":"address","name":"c","type":"address"}],"internalType":"struct MyError.MyStruct","name":"x","type":"tuple"},{"internalType":"address","name":"y","type":"address"},{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"string","name":"b","type":"string"},{"internalType":"address","name":"c","type":"address"}],"internalType":"struct MyError.MyStruct","name":"z","type":"tuple"}],"name":"MyError2","type":"error"},
@@ -1088,6 +1107,7 @@ func TestABI_ErrorByID(t *testing.T) {
 // TestDoubleDuplicateMethodNames checks that if transfer0 already exists, there won't be a name
 // conflict and that the second transfer method will be renamed transfer1.
 func TestDoubleDuplicateMethodNames(t *testing.T) {
+	t.Parallel()
 	abiJSON := `[{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"}],"name":"transfer0","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"},{"name":"customFallback","type":"string"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]`
 	contractAbi, err := JSON(strings.NewReader(abiJSON))
 	if err != nil {
@@ -1117,6 +1137,7 @@ func TestDoubleDuplicateMethodNames(t *testing.T) {
 // event send();
 // }
 func TestDoubleDuplicateEventNames(t *testing.T) {
+	t.Parallel()
 	abiJSON := `[{"anonymous": false,"inputs": [{"indexed": false,"internalType": "uint256","name": "a","type": "uint256"}],"name": "send","type": "event"},{"anonymous": false,"inputs": [],"name": "send0","type": "event"},{ "anonymous": false, "inputs": [],"name": "send","type": "event"}]`
 	contractAbi, err := JSON(strings.NewReader(abiJSON))
 	if err != nil {
@@ -1144,6 +1165,7 @@ func TestDoubleDuplicateEventNames(t *testing.T) {
 // event send(uint256, uint256);
 // }
 func TestUnnamedEventParam(t *testing.T) {
+	t.Parallel()
 	abiJSON := `[{ "anonymous": false, "inputs": [{ "indexed": false,"internalType": "uint256", "name": "","type": "uint256"},{"indexed": false,"internalType": "uint256","name": "","type": "uint256"}],"name": "send","type": "event"}]`
 	contractAbi, err := JSON(strings.NewReader(abiJSON))
 	if err != nil {
@@ -1177,7 +1199,9 @@ func TestUnpackRevert(t *testing.T) {
 		{"4e487b7100000000000000000000000000000000000000000000000000000000000000ff", "unknown panic code: 0xff", nil},
 	}
 	for index, c := range cases {
+		index, c := index, c
 		t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {
+			t.Parallel()
 			got, err := UnpackRevert(common.Hex2Bytes(c.input))
 			if c.expectErr != nil {
 				if err == nil {
@@ -22,33 +22,32 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/ethereum/go-ethereum/accounts/abi"
 	fuzz "github.com/google/gofuzz"
 )
 
 // TestReplicate can be used to replicate crashers from the fuzzing tests.
 // Just replace testString with the data in .quoted
 func TestReplicate(t *testing.T) {
-	testString := "\x20\x20\x20\x20\x20\x20\x20\x20\x80\x00\x00\x00\x20\x20\x20\x20\x00"
-	data := []byte(testString)
-	fuzzAbi(data)
+	t.Parallel()
+	//t.Skip("Test only useful for reproducing issues")
+	fuzzAbi([]byte("\x20\x20\x20\x20\x20\x20\x20\x20\x80\x00\x00\x00\x20\x20\x20\x20\x00"))
+	//fuzzAbi([]byte("asdfasdfkadsf;lasdf;lasd;lfk"))
 }
 
-func Fuzz(f *testing.F) {
+// FuzzABI is the main entrypoint for fuzzing
+func FuzzABI(f *testing.F) {
 	f.Fuzz(func(t *testing.T, data []byte) {
 		fuzzAbi(data)
 	})
 }
 
 var (
 	names    = []string{"_name", "name", "NAME", "name_", "__", "_name_", "n"}
-	stateMut         = []string{"", "pure", "view", "payable"}
-	stateMutabilites = []*string{&stateMut[0], &stateMut[1], &stateMut[2], &stateMut[3]}
-	pays             = []string{"", "true", "false"}
-	payables         = []*string{&pays[0], &pays[1]}
-	vNames           = []string{"a", "b", "c", "d", "e", "f", "g"}
-	varNames         = append(vNames, names...)
-	varTypes         = []string{"bool", "address", "bytes", "string",
+	stateMut = []string{"pure", "view", "payable"}
+	pays     = []string{"true", "false"}
+	vNames   = []string{"a", "b", "c", "d", "e", "f", "g"}
+	varNames = append(vNames, names...)
+	varTypes = []string{"bool", "address", "bytes", "string",
 		"uint8", "int8", "uint8", "int8", "uint16", "int16",
 		"uint24", "int24", "uint32", "int32", "uint40", "int40", "uint48", "int48", "uint56", "int56",
 		"uint64", "int64", "uint72", "int72", "uint80", "int80", "uint88", "int88", "uint96", "int96",
@@ -62,7 +61,7 @@ var (
 		"bytes32", "bytes"}
 )
 
-func unpackPack(abi abi.ABI, method string, input []byte) ([]interface{}, bool) {
+func unpackPack(abi ABI, method string, input []byte) ([]interface{}, bool) {
 	if out, err := abi.Unpack(method, input); err == nil {
 		_, err := abi.Pack(method, out...)
 		if err != nil {
@@ -78,7 +77,7 @@ func unpackPack(abi abi.ABI, method string, input []byte) ([]interface{}, bool)
 	return nil, false
 }
 
-func packUnpack(abi abi.ABI, method string, input *[]interface{}) bool {
+func packUnpack(abi ABI, method string, input *[]interface{}) bool {
 	if packed, err := abi.Pack(method, input); err == nil {
 		outptr := reflect.New(reflect.TypeOf(input))
 		err := abi.UnpackIntoInterface(outptr.Interface(), method, packed)
@@ -94,12 +93,12 @@ func packUnpack(abi abi.ABI, method string, input *[]interface{}) bool {
 	return false
 }
 
-type args struct {
+type arg struct {
 	name string
 	typ  string
 }
 
-func createABI(name string, stateMutability, payable *string, inputs []args) (abi.ABI, error) {
+func createABI(name string, stateMutability, payable *string, inputs []arg) (ABI, error) {
 	sig := fmt.Sprintf(`[{ "type" : "function", "name" : "%v" `, name)
 	if stateMutability != nil {
 		sig += fmt.Sprintf(`, "stateMutability": "%v" `, *stateMutability)
@@ -126,56 +125,55 @@ func createABI(name string, stateMutability, payable *string, inputs []args) (ab
 			sig += "} ]"
 		}
 	sig += `}]`
-	return abi.JSON(strings.NewReader(sig))
+	//fmt.Printf("sig: %s\n", sig)
+	return JSON(strings.NewReader(sig))
 }
 
-func fuzzAbi(input []byte) int {
-	good := false
-	fuzzer := fuzz.NewFromGoFuzz(input)
-
-	name := names[getUInt(fuzzer)%len(names)]
-	stateM := stateMutabilites[getUInt(fuzzer)%len(stateMutabilites)]
-	payable := payables[getUInt(fuzzer)%len(payables)]
-	maxLen := 5
-	for k := 1; k < maxLen; k++ {
-		var arg []args
-		for i := k; i > 0; i-- {
-			argName := varNames[i]
-			argTyp := varTypes[getUInt(fuzzer)%len(varTypes)]
-			if getUInt(fuzzer)%10 == 0 {
-				argTyp += "[]"
-			} else if getUInt(fuzzer)%10 == 0 {
-				arrayArgs := getUInt(fuzzer)%30 + 1
-				argTyp += fmt.Sprintf("[%d]", arrayArgs)
-			}
-			arg = append(arg, args{
-				name: argName,
-				typ:  argTyp,
-			})
-		}
-		abi, err := createABI(name, stateM, payable, arg)
-		if err != nil {
-			continue
-		}
-		structs, b := unpackPack(abi, name, input)
-		c := packUnpack(abi, name, &structs)
-		good = good || b || c
-	}
-	if good {
-		return 1
-	}
-	return 0
+func fuzzAbi(input []byte) {
+	var (
+		fuzzer    = fuzz.NewFromGoFuzz(input)
+		name      = oneOf(fuzzer, names)
+		stateM    = oneOfOrNil(fuzzer, stateMut)
+		payable   = oneOfOrNil(fuzzer, pays)
+		arguments []arg
+	)
+	for i := 0; i < upTo(fuzzer, 10); i++ {
+		argName := oneOf(fuzzer, varNames)
+		argTyp := oneOf(fuzzer, varTypes)
+		switch upTo(fuzzer, 10) {
+		case 0: // 10% chance to make it a slice
+			argTyp += "[]"
+		case 1: // 10% chance to make it an array
+			argTyp += fmt.Sprintf("[%d]", 1+upTo(fuzzer, 30))
+		default:
+		}
+		arguments = append(arguments, arg{name: argName, typ: argTyp})
+	}
+	abi, err := createABI(name, stateM, payable, arguments)
+	if err != nil {
+		//fmt.Printf("err: %v\n", err)
+		panic(err)
+	}
+	structs, _ := unpackPack(abi, name, input)
+	_ = packUnpack(abi, name, &structs)
 }
 
-func getUInt(fuzzer *fuzz.Fuzzer) int {
+func upTo(fuzzer *fuzz.Fuzzer, max int) int {
 	var i int
 	fuzzer.Fuzz(&i)
 	if i < 0 {
-		i = -i
-		if i < 0 {
-			return 0
-		}
+		return (-1 - i) % max
 	}
-	return i
+	return i % max
+}
+
+func oneOf(fuzzer *fuzz.Fuzzer, options []string) string {
+	return options[upTo(fuzzer, len(options))]
+}
+
+func oneOfOrNil(fuzzer *fuzz.Fuzzer, options []string) *string {
+	if i := upTo(fuzzer, len(options)+1); i < len(options) {
+		return &options[i]
+	}
+	return nil
 }
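The refactored fuzzer replaces getUInt with bounded helpers. A standalone sketch of that bounded-choice pattern, mirroring upTo/oneOf from the diff above; the main wrapper, sample seed bytes and type list are illustrative only:

package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

// upTo maps an arbitrary fuzzed int into [0, max), mirroring the helper in the diff.
func upTo(fuzzer *fuzz.Fuzzer, max int) int {
	var i int
	fuzzer.Fuzz(&i)
	if i < 0 {
		return (-1 - i) % max
	}
	return i % max
}

// oneOf picks one of the options, using the fuzz input as the source of randomness.
func oneOf(fuzzer *fuzz.Fuzzer, options []string) string {
	return options[upTo(fuzzer, len(options))]
}

func main() {
	// Arbitrary seed bytes; fuzzAbi derives argument names and types from its input the same way.
	fuzzer := fuzz.NewFromGoFuzz([]byte("\x20\x20\x20\x20\x80\x00"))
	types := []string{"bool", "address", "uint256", "string"}
	for i := 0; i < 3; i++ {
		fmt.Println(oneOf(fuzzer, types))
	}
}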
@@ -80,7 +80,7 @@ func (arguments Arguments) isTuple() bool {
 func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
 	if len(data) == 0 {
 		if len(arguments.NonIndexed()) != 0 {
-			return nil, errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
+			return nil, errors.New("abi: attempting to unmarshal an empty string while arguments are expected")
 		}
 		return make([]interface{}, 0), nil
 	}
@@ -95,7 +95,7 @@ func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte)
 	}
 	if len(data) == 0 {
 		if len(arguments.NonIndexed()) != 0 {
-			return errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
+			return errors.New("abi: attempting to unmarshal an empty string while arguments are expected")
 		}
 		return nil // Nothing to unmarshal, return
 	}
@@ -56,7 +56,7 @@ func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) {
 }
 
 // NewKeyStoreTransactor is a utility method to easily create a transaction signer from
-// an decrypted key from a keystore.
+// a decrypted key from a keystore.
 //
 // Deprecated: Use NewKeyStoreTransactorWithChainID instead.
 func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account) (*TransactOpts, error) {
@@ -117,7 +117,7 @@ func NewTransactorWithChainID(keyin io.Reader, passphrase string, chainID *big.I
 }
 
 // NewKeyStoreTransactorWithChainID is a utility method to easily create a transaction signer from
-// an decrypted key from a keystore.
+// a decrypted key from a keystore.
 func NewKeyStoreTransactorWithChainID(keystore *keystore.KeyStore, account accounts.Account, chainID *big.Int) (*TransactOpts, error) {
 	if chainID == nil {
 		return nil, ErrNoChainID
@@ -75,7 +75,7 @@ type BlockHashContractCaller interface {
 	// CodeAtHash returns the code of the given account in the state at the specified block hash.
 	CodeAtHash(ctx context.Context, contract common.Address, blockHash common.Hash) ([]byte, error)
 
-	// CallContractAtHash executes an Ethereum contract all against the state at the specified block hash.
+	// CallContractAtHash executes an Ethereum contract call against the state at the specified block hash.
 	CallContractAtHash(ctx context.Context, call ethereum.CallMsg, blockHash common.Hash) ([]byte, error)
 }
 
@@ -84,6 +84,11 @@ type BlockHashContractCaller interface {
 // used when the user does not provide some needed values, but rather leaves it up
 // to the transactor to decide.
 type ContractTransactor interface {
+	ethereum.GasEstimator
+	ethereum.GasPricer
+	ethereum.GasPricer1559
+	ethereum.TransactionSender
+
 	// HeaderByNumber returns a block header from the current canonical chain. If
 	// number is nil, the latest known header is returned.
 	HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
@@ -93,38 +98,6 @@ type ContractTransactor interface {
 
 	// PendingNonceAt retrieves the current pending nonce associated with an account.
 	PendingNonceAt(ctx context.Context, account common.Address) (uint64, error)
-
-	// SuggestGasPrice retrieves the currently suggested gas price to allow a timely
-	// execution of a transaction.
-	SuggestGasPrice(ctx context.Context) (*big.Int, error)
-
-	// SuggestGasTipCap retrieves the currently suggested 1559 priority fee to allow
-	// a timely execution of a transaction.
-	SuggestGasTipCap(ctx context.Context) (*big.Int, error)
-
-	// EstimateGas tries to estimate the gas needed to execute a specific
-	// transaction based on the current pending state of the backend blockchain.
-	// There is no guarantee that this is the true gas limit requirement as other
-	// transactions may be added or removed by miners, but it should provide a basis
-	// for setting a reasonable default.
-	EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error)
-
-	// SendTransaction injects the transaction into the pending pool for execution.
-	SendTransaction(ctx context.Context, tx *types.Transaction) error
-}
-
-// ContractFilterer defines the methods needed to access log events using one-off
-// queries or continuous event subscriptions.
-type ContractFilterer interface {
-	// FilterLogs executes a log filter operation, blocking during execution and
-	// returning all the results in one batch.
-	//
-	// TODO(karalabe): Deprecate when the subscription one can return past data too.
-	FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error)
-
-	// SubscribeFilterLogs creates a background log filtering operation, returning
-	// a subscription immediately, which can be used to stream the found events.
-	SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error)
 }
 
 // DeployBackend wraps the operations needed by WaitMined and WaitDeployed.
@@ -133,6 +106,12 @@ type DeployBackend interface {
 	CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error)
 }
 
+// ContractFilterer defines the methods needed to access log events using one-off
+// queries or continuous event subscriptions.
+type ContractFilterer interface {
+	ethereum.LogFilterer
+}
+
 // ContractBackend defines the methods needed to work with contracts on a read-write basis.
 type ContractBackend interface {
 	ContractCaller
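With ContractTransactor and ContractFilterer now composed from the root ethereum interfaces, a compile-time assertion is the usual way to confirm that a concrete client still satisfies the composed bind.ContractBackend (the same pattern the old simulated backend used). A hedged sketch, assuming *ethclient.Client as the implementation under check:

package main

import (
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/ethclient"
)

// Compile-time check: this file fails to build if *ethclient.Client ever stops
// satisfying bind.ContractBackend (ContractCaller + ContractTransactor + ContractFilterer).
var _ bind.ContractBackend = (*ethclient.Client)(nil)

func main() {}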
@ -18,958 +18,35 @@ package backends
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"math/big"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum"
|
|
||||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
|
||||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
|
||||||
"github.com/ethereum/go-ethereum/common/math"
|
|
||||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/bloombits"
|
"github.com/ethereum/go-ethereum/ethclient/simulated"
|
||||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
|
||||||
"github.com/ethereum/go-ethereum/core/state"
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
|
||||||
"github.com/ethereum/go-ethereum/eth/filters"
|
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
|
||||||
"github.com/ethereum/go-ethereum/event"
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// This nil assignment ensures at compile time that SimulatedBackend implements bind.ContractBackend.
|
// SimulatedBackend is a simulated blockchain.
|
||||||
var _ bind.ContractBackend = (*SimulatedBackend)(nil)
|
// Deprecated: use package github.com/ethereum/go-ethereum/ethclient/simulated instead.
|
||||||
|
|
||||||
var (
|
|
||||||
errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block")
|
|
||||||
errBlockHashUnsupported = errors.New("simulatedBackend cannot access blocks by hash other than the latest block")
|
|
||||||
errBlockDoesNotExist = errors.New("block does not exist in blockchain")
|
|
||||||
errTransactionDoesNotExist = errors.New("transaction does not exist")
|
|
||||||
)
|
|
||||||
|
|
||||||
// SimulatedBackend implements bind.ContractBackend, simulating a blockchain in
|
|
||||||
// the background. Its main purpose is to allow for easy testing of contract bindings.
|
|
||||||
// Simulated backend implements the following interfaces:
|
|
||||||
// ChainReader, ChainStateReader, ContractBackend, ContractCaller, ContractFilterer, ContractTransactor,
|
|
||||||
// DeployBackend, GasEstimator, GasPricer, LogFilterer, PendingContractCaller, TransactionReader, and TransactionSender
|
|
||||||
type SimulatedBackend struct {
|
type SimulatedBackend struct {
|
||||||
database ethdb.Database // In memory database to store our testing data
|
*simulated.Backend
|
||||||
blockchain *core.BlockChain // Ethereum blockchain to handle the consensus
|
simulated.Client
|
||||||
|
|
||||||
mu sync.Mutex
|
|
||||||
pendingBlock *types.Block // Currently pending block that will be imported on request
|
|
||||||
pendingState *state.StateDB // Currently pending state that will be the active on request
|
|
||||||
pendingReceipts types.Receipts // Currently receipts for the pending block
|
|
||||||
|
|
||||||
events *filters.EventSystem // for filtering log events live
|
|
||||||
filterSystem *filters.FilterSystem // for filtering database logs
|
|
||||||
|
|
||||||
config *params.ChainConfig
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSimulatedBackendWithDatabase creates a new binding backend based on the given database
|
// Fork sets the head to a new block, which is based on the provided parentHash.
|
||||||
// and uses a simulated blockchain for testing purposes.
|
func (b *SimulatedBackend) Fork(ctx context.Context, parentHash common.Hash) error {
|
||||||
// A simulated backend always uses chainID 1337.
|
return b.Backend.Fork(parentHash)
|
||||||
func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
|
|
||||||
genesis := core.Genesis{
|
|
||||||
Config: params.AllEthashProtocolChanges,
|
|
||||||
GasLimit: gasLimit,
|
|
||||||
Alloc: alloc,
|
|
||||||
}
|
|
||||||
blockchain, _ := core.NewBlockChain(database, nil, &genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
|
||||||
|
|
||||||
backend := &SimulatedBackend{
|
|
||||||
database: database,
|
|
||||||
blockchain: blockchain,
|
|
||||||
config: genesis.Config,
|
|
||||||
}
|
|
||||||
|
|
||||||
filterBackend := &filterBackend{database, blockchain, backend}
|
|
||||||
backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{})
|
|
||||||
backend.events = filters.NewEventSystem(backend.filterSystem, false)
|
|
||||||
|
|
||||||
header := backend.blockchain.CurrentBlock()
|
|
||||||
block := backend.blockchain.GetBlock(header.Hash(), header.Number.Uint64())
|
|
||||||
|
|
||||||
backend.rollback(block)
|
|
||||||
return backend
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSimulatedBackend creates a new binding backend using a simulated blockchain
|
// NewSimulatedBackend creates a new binding backend using a simulated blockchain
|
||||||
// for testing purposes.
|
// for testing purposes.
|
||||||
|
//
|
||||||
// A simulated backend always uses chainID 1337.
|
// A simulated backend always uses chainID 1337.
|
||||||
|
//
|
||||||
|
// Deprecated: please use simulated.Backend from package
|
||||||
|
// github.com/ethereum/go-ethereum/ethclient/simulated instead.
|
||||||
func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
|
func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
|
||||||
return NewSimulatedBackendWithDatabase(rawdb.NewMemoryDatabase(), alloc, gasLimit)
|
b := simulated.New(alloc, gasLimit)
|
||||||
}
|
return &SimulatedBackend{
|
||||||
|
Backend: b,
|
||||||
// Close terminates the underlying blockchain's update loop.
|
Client: b.Client(),
|
||||||
func (b *SimulatedBackend) Close() error {
|
|
||||||
b.blockchain.Stop()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Commit imports all the pending transactions as a single block and starts a
|
|
||||||
// fresh new state.
|
|
||||||
func (b *SimulatedBackend) Commit() common.Hash {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
|
|
||||||
if _, err := b.blockchain.InsertChain([]*types.Block{b.pendingBlock}); err != nil {
|
|
||||||
panic(err) // This cannot happen unless the simulator is wrong, fail in that case
|
|
||||||
}
|
|
||||||
blockHash := b.pendingBlock.Hash()
|
|
||||||
|
|
||||||
// Using the last inserted block here makes it possible to build on a side
|
|
||||||
// chain after a fork.
|
|
||||||
b.rollback(b.pendingBlock)
|
|
||||||
|
|
||||||
return blockHash
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rollback aborts all pending transactions, reverting to the last committed state.
|
|
||||||
func (b *SimulatedBackend) Rollback() {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
|
|
||||||
header := b.blockchain.CurrentBlock()
|
|
||||||
block := b.blockchain.GetBlock(header.Hash(), header.Number.Uint64())
|
|
||||||
|
|
||||||
b.rollback(block)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *SimulatedBackend) rollback(parent *types.Block) {
|
|
||||||
blocks, _ := core.GenerateChain(b.config, parent, ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {})
|
|
||||||
|
|
||||||
b.pendingBlock = blocks[0]
|
|
||||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), b.blockchain.StateCache(), nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fork creates a side-chain that can be used to simulate reorgs.
|
|
||||||
//
|
|
||||||
// This function should be called with the ancestor block where the new side
|
|
||||||
// chain should be started. Transactions (old and new) can then be applied on
|
|
||||||
// top and Commit-ed.
|
|
||||||
//
|
|
||||||
// Note, the side-chain will only become canonical (and trigger the events) when
|
|
||||||
// it becomes longer. Until then CallContract will still operate on the current
|
|
||||||
// canonical chain.
|
|
||||||
//
|
|
||||||
// There is a % chance that the side chain becomes canonical at the same length
|
|
||||||
// to simulate live network behavior.
|
|
||||||
func (b *SimulatedBackend) Fork(ctx context.Context, parent common.Hash) error {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
|
|
||||||
if len(b.pendingBlock.Transactions()) != 0 {
|
|
||||||
return errors.New("pending block dirty")
|
|
||||||
}
|
|
||||||
block, err := b.blockByHash(ctx, parent)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
b.rollback(block)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateByBlockNumber retrieves a state by a given blocknumber.
|
|
||||||
func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber *big.Int) (*state.StateDB, error) {
|
|
||||||
if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number) == 0 {
|
|
||||||
return b.blockchain.State()
|
|
||||||
}
|
|
||||||
block, err := b.blockByNumber(ctx, blockNumber)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return b.blockchain.StateAt(block.Root())
|
|
||||||
}
|
|
||||||
|
|
||||||
// CodeAt returns the code associated with a certain account in the blockchain.
|
|
||||||
func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
|
|
||||||
stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return stateDB.GetCode(contract), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CodeAtHash returns the code associated with a certain account in the blockchain.
|
|
||||||
func (b *SimulatedBackend) CodeAtHash(ctx context.Context, contract common.Address, blockHash common.Hash) ([]byte, error) {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
|
|
||||||
header, err := b.headerByHash(blockHash)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
stateDB, err := b.blockchain.StateAt(header.Root)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return stateDB.GetCode(contract), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// BalanceAt returns the wei balance of a certain account in the blockchain.
|
|
||||||
func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Address, blockNumber *big.Int) (*big.Int, error) {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
|
|
||||||
stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return stateDB.GetBalance(contract), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NonceAt returns the nonce of a certain account in the blockchain.
|
|
||||||
func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address, blockNumber *big.Int) (uint64, error) {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
|
|
||||||
stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return stateDB.GetNonce(contract), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// StorageAt returns the value of key in the storage of an account in the blockchain.
|
|
||||||
func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Address, key common.Hash, blockNumber *big.Int) ([]byte, error) {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
|
|
||||||
stateDB, err := b.stateByBlockNumber(ctx, blockNumber)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
val := stateDB.GetState(contract, key)
|
|
||||||
return val[:], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TransactionReceipt returns the receipt of a transaction.
|
|
||||||
func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
|
|
||||||
receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config)
|
|
||||||
if receipt == nil {
|
|
||||||
return nil, ethereum.NotFound
|
|
||||||
}
|
|
||||||
return receipt, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TransactionByHash checks the pool of pending transactions in addition to the
|
|
||||||
// blockchain. The isPending return value indicates whether the transaction has been
|
|
||||||
// mined yet. Note that the transaction may not be part of the canonical chain even if
|
|
||||||
// it's not pending.
|
|
||||||
func (b *SimulatedBackend) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, bool, error) {
|
|
||||||
b.mu.Lock()
|
|
||||||
defer b.mu.Unlock()
|
|
||||||
|
|
||||||
tx := b.pendingBlock.Transaction(txHash)
|
|
||||||
if tx != nil {
|
|
||||||
return tx, true, nil
|
|
||||||
}
|
|
||||||
tx, _, _, _ = rawdb.ReadTransaction(b.database, txHash)
|
|
||||||
if tx != nil {
|
|
||||||
return tx, false, nil
|
|
||||||
}
|
|
||||||
return nil, false, ethereum.NotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
// BlockByHash retrieves a block based on the block hash.
func (b *SimulatedBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
    b.mu.Lock()
    defer b.mu.Unlock()

    return b.blockByHash(ctx, hash)
}

// blockByHash retrieves a block based on the block hash without taking the lock.
func (b *SimulatedBackend) blockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
    if hash == b.pendingBlock.Hash() {
        return b.pendingBlock, nil
    }

    block := b.blockchain.GetBlockByHash(hash)
    if block != nil {
        return block, nil
    }

    return nil, errBlockDoesNotExist
}

// BlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (b *SimulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
    b.mu.Lock()
    defer b.mu.Unlock()

    return b.blockByNumber(ctx, number)
}

// blockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found, without taking the lock.
func (b *SimulatedBackend) blockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
    if number == nil || number.Cmp(b.pendingBlock.Number()) == 0 {
        return b.blockByHash(ctx, b.blockchain.CurrentBlock().Hash())
    }

    block := b.blockchain.GetBlockByNumber(uint64(number.Int64()))
    if block == nil {
        return nil, errBlockDoesNotExist
    }

    return block, nil
}

// HeaderByHash returns a block header from the current canonical chain.
func (b *SimulatedBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
    b.mu.Lock()
    defer b.mu.Unlock()
    return b.headerByHash(hash)
}

// headerByHash retrieves a header from the database by hash without taking the lock.
func (b *SimulatedBackend) headerByHash(hash common.Hash) (*types.Header, error) {
    if hash == b.pendingBlock.Hash() {
        return b.pendingBlock.Header(), nil
    }

    header := b.blockchain.GetHeaderByHash(hash)
    if header == nil {
        return nil, errBlockDoesNotExist
    }

    return header, nil
}

// HeaderByNumber returns a block header from the current canonical chain. If number is
// nil, the latest known header is returned.
func (b *SimulatedBackend) HeaderByNumber(ctx context.Context, block *big.Int) (*types.Header, error) {
    b.mu.Lock()
    defer b.mu.Unlock()

    if block == nil || block.Cmp(b.pendingBlock.Number()) == 0 {
        return b.blockchain.CurrentHeader(), nil
    }

    return b.blockchain.GetHeaderByNumber(uint64(block.Int64())), nil
}

// TransactionCount returns the number of transactions in a given block.
func (b *SimulatedBackend) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) {
    b.mu.Lock()
    defer b.mu.Unlock()

    if blockHash == b.pendingBlock.Hash() {
        return uint(b.pendingBlock.Transactions().Len()), nil
    }

    block := b.blockchain.GetBlockByHash(blockHash)
    if block == nil {
        return uint(0), errBlockDoesNotExist
    }

    return uint(block.Transactions().Len()), nil
}

// TransactionInBlock returns the transaction for a specific block at a specific index.
func (b *SimulatedBackend) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) {
    b.mu.Lock()
    defer b.mu.Unlock()

    if blockHash == b.pendingBlock.Hash() {
        transactions := b.pendingBlock.Transactions()
        if uint(len(transactions)) < index+1 {
            return nil, errTransactionDoesNotExist
        }

        return transactions[index], nil
    }

    block := b.blockchain.GetBlockByHash(blockHash)
    if block == nil {
        return nil, errBlockDoesNotExist
    }

    transactions := block.Transactions()
    if uint(len(transactions)) < index+1 {
        return nil, errTransactionDoesNotExist
    }

    return transactions[index], nil
}
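A small sketch showing how TransactionCount and TransactionInBlock pair up when walking a mined block; the helper name exampleDumpBlock is illustrative.

// Illustrative sketch, not part of this file.
func exampleDumpBlock(ctx context.Context, b *SimulatedBackend, blockHash common.Hash) error {
    count, err := b.TransactionCount(ctx, blockHash)
    if err != nil {
        return err
    }
    for i := uint(0); i < count; i++ {
        tx, err := b.TransactionInBlock(ctx, blockHash, i)
        if err != nil {
            return err
        }
        fmt.Printf("tx %d in block %s: %s\n", i, blockHash.Hex(), tx.Hash().Hex())
    }
    return nil
}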
// PendingCodeAt returns the code associated with an account in the pending state.
func (b *SimulatedBackend) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) {
    b.mu.Lock()
    defer b.mu.Unlock()

    return b.pendingState.GetCode(contract), nil
}

func newRevertError(result *core.ExecutionResult) *revertError {
    reason, errUnpack := abi.UnpackRevert(result.Revert())
    err := errors.New("execution reverted")
    if errUnpack == nil {
        err = fmt.Errorf("execution reverted: %v", reason)
    }
    return &revertError{
        error:  err,
        reason: hexutil.Encode(result.Revert()),
    }
}

// revertError is an API error that encompasses an EVM revert with JSON error
// code and a binary data blob.
type revertError struct {
    error
    reason string // revert reason hex encoded
}

// ErrorCode returns the JSON error code for a revert.
// See: https://github.com/ethereum/wiki/wiki/JSON-RPC-Error-Codes-Improvement-Proposal
func (e *revertError) ErrorCode() int {
    return 3
}

// ErrorData returns the hex encoded revert reason.
func (e *revertError) ErrorData() interface{} {
    return e.reason
}

// CallContract executes a contract call.
func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
    b.mu.Lock()
    defer b.mu.Unlock()

    if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number) != 0 {
        return nil, errBlockNumberUnsupported
    }
    return b.callContractAtHead(ctx, call)
}

// CallContractAtHash executes a contract call on a specific block hash.
func (b *SimulatedBackend) CallContractAtHash(ctx context.Context, call ethereum.CallMsg, blockHash common.Hash) ([]byte, error) {
    b.mu.Lock()
    defer b.mu.Unlock()

    if blockHash != b.blockchain.CurrentBlock().Hash() {
        return nil, errBlockHashUnsupported
    }
    return b.callContractAtHead(ctx, call)
}

// callContractAtHead executes a contract call against the latest block state.
func (b *SimulatedBackend) callContractAtHead(ctx context.Context, call ethereum.CallMsg) ([]byte, error) {
    stateDB, err := b.blockchain.State()
    if err != nil {
        return nil, err
    }
    res, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), stateDB)
    if err != nil {
        return nil, err
    }
    // If the result contains a revert reason, try to unpack and return it.
    if len(res.Revert()) > 0 {
        return nil, newRevertError(res)
    }
    return res.Return(), res.Err
}
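A sketch of issuing a read-only call and surfacing a decoded revert through the revertError type above; the contract address and calldata arguments are placeholders.

// Illustrative sketch, not part of this file.
func exampleStaticCall(ctx context.Context, b *SimulatedBackend, contract common.Address, calldata []byte) ([]byte, error) {
    out, err := b.CallContract(ctx, ethereum.CallMsg{To: &contract, Data: calldata}, nil)
    if err != nil {
        var reverted *revertError
        if errors.As(err, &reverted) {
            // ErrorCode/ErrorData carry the JSON-RPC error code and the hex encoded revert blob.
            return nil, fmt.Errorf("call reverted (code %d, data %v): %w", reverted.ErrorCode(), reverted.ErrorData(), reverted)
        }
        return nil, err
    }
    return out, nil
}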
// PendingCallContract executes a contract call on the pending state.
func (b *SimulatedBackend) PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) {
    b.mu.Lock()
    defer b.mu.Unlock()
    defer b.pendingState.RevertToSnapshot(b.pendingState.Snapshot())

    res, err := b.callContract(ctx, call, b.pendingBlock.Header(), b.pendingState)
    if err != nil {
        return nil, err
    }
    // If the result contains a revert reason, try to unpack and return it.
    if len(res.Revert()) > 0 {
        return nil, newRevertError(res)
    }
    return res.Return(), res.Err
}

// PendingNonceAt implements PendingStateReader.PendingNonceAt, retrieving
// the nonce currently pending for the account.
func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) {
    b.mu.Lock()
    defer b.mu.Unlock()

    return b.pendingState.GetOrNewStateObject(account).Nonce(), nil
}

// SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated
// chain doesn't have miners, we just return a gas price of 1 for any call.
func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
    b.mu.Lock()
    defer b.mu.Unlock()

    if b.pendingBlock.Header().BaseFee != nil {
        return b.pendingBlock.Header().BaseFee, nil
    }
    return big.NewInt(1), nil
}

// SuggestGasTipCap implements ContractTransactor.SuggestGasTipCap. Since the simulated
// chain doesn't have miners, we just return a gas tip of 1 for any call.
func (b *SimulatedBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
    return big.NewInt(1), nil
}

// EstimateGas executes the requested code against the currently pending block/state and
// returns the used amount of gas.
func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) {
    b.mu.Lock()
    defer b.mu.Unlock()

    // Determine the lowest and highest possible gas limits to binary search in between
    var (
        lo  uint64 = params.TxGas - 1
        hi  uint64
        cap uint64
    )
    if call.Gas >= params.TxGas {
        hi = call.Gas
    } else {
        hi = b.pendingBlock.GasLimit()
    }
    // Normalize the max fee per gas the call is willing to spend.
    var feeCap *big.Int
    if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) {
        return 0, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
    } else if call.GasPrice != nil {
        feeCap = call.GasPrice
    } else if call.GasFeeCap != nil {
        feeCap = call.GasFeeCap
    } else {
        feeCap = common.Big0
    }
    // Recap the highest gas allowance with account's balance.
    if feeCap.BitLen() != 0 {
        balance := b.pendingState.GetBalance(call.From) // from can't be nil
        available := new(big.Int).Set(balance)
        if call.Value != nil {
            if call.Value.Cmp(available) >= 0 {
                return 0, core.ErrInsufficientFundsForTransfer
            }
            available.Sub(available, call.Value)
        }
        allowance := new(big.Int).Div(available, feeCap)
        if allowance.IsUint64() && hi > allowance.Uint64() {
            transfer := call.Value
            if transfer == nil {
                transfer = new(big.Int)
            }
            log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance,
                "sent", transfer, "feecap", feeCap, "fundable", allowance)
            hi = allowance.Uint64()
        }
    }
    cap = hi

    // Create a helper to check if a gas allowance results in an executable transaction
    executable := func(gas uint64) (bool, *core.ExecutionResult, error) {
        call.Gas = gas

        snapshot := b.pendingState.Snapshot()
        res, err := b.callContract(ctx, call, b.pendingBlock.Header(), b.pendingState)
        b.pendingState.RevertToSnapshot(snapshot)

        if err != nil {
            if errors.Is(err, core.ErrIntrinsicGas) {
                return true, nil, nil // Special case, raise gas limit
            }
            return true, nil, err // Bail out
        }
        return res.Failed(), res, nil
    }
    // Execute the binary search and hone in on an executable gas limit
    for lo+1 < hi {
        mid := (hi + lo) / 2
        failed, _, err := executable(mid)

        // If the error is not nil (consensus error), it means the provided message
        // call or transaction will never be accepted no matter how much gas it is
        // assigned. Return the error directly, don't struggle any more.
        if err != nil {
            return 0, err
        }
        if failed {
            lo = mid
        } else {
            hi = mid
        }
    }
    // Reject the transaction as invalid if it still fails at the highest allowance
    if hi == cap {
        failed, result, err := executable(hi)
        if err != nil {
            return 0, err
        }
        if failed {
            if result != nil && !errors.Is(result.Err, vm.ErrOutOfGas) {
                if len(result.Revert()) > 0 {
                    return 0, newRevertError(result)
                }
                return 0, result.Err
            }
            // Otherwise, the specified gas cap is too low
            return 0, fmt.Errorf("gas required exceeds allowance (%d)", cap)
        }
    }
    return hi, nil
}
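A brief sketch of driving the estimator above; the 20% head-room added on top of the binary-search result is an arbitrary example choice, not something this file prescribes.

// Illustrative sketch, not part of this file.
func exampleEstimate(ctx context.Context, b *SimulatedBackend, from, to common.Address, calldata []byte) (uint64, error) {
    gas, err := b.EstimateGas(ctx, ethereum.CallMsg{From: from, To: &to, Data: calldata})
    if err != nil {
        return 0, err // may be a *revertError if the highest-allowance probe reverted
    }
    return gas + gas/5, nil // pad the estimate before using it as a transaction gas limit
}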
// callContract implements common code between normal and pending contract calls.
// state is modified during execution, make sure to copy it if necessary.
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, header *types.Header, stateDB *state.StateDB) (*core.ExecutionResult, error) {
    // Gas prices post 1559 need to be initialized
    if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) {
        return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
    }
    if !b.blockchain.Config().IsLondon(header.Number) {
        // If there's no basefee, then it must be a non-1559 execution
        if call.GasPrice == nil {
            call.GasPrice = new(big.Int)
        }
        call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice
    } else {
        // A basefee is provided, necessitating 1559-type execution
        if call.GasPrice != nil {
            // User specified the legacy gas field, convert to 1559 gas typing
            call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice
        } else {
            // User specified 1559 gas fields (or none), use those
            if call.GasFeeCap == nil {
                call.GasFeeCap = new(big.Int)
            }
            if call.GasTipCap == nil {
                call.GasTipCap = new(big.Int)
            }
            // Backfill the legacy gasPrice for EVM execution, unless we're all zeroes
            call.GasPrice = new(big.Int)
            if call.GasFeeCap.BitLen() > 0 || call.GasTipCap.BitLen() > 0 {
                call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, header.BaseFee), call.GasFeeCap)
            }
        }
    }
    // Ensure message is initialized properly.
    if call.Gas == 0 {
        call.Gas = 10 * header.GasLimit
    }
    if call.Value == nil {
        call.Value = new(big.Int)
    }

    // Set infinite balance to the fake caller account.
    from := stateDB.GetOrNewStateObject(call.From)
    from.SetBalance(math.MaxBig256)

    // Execute the call.
    msg := &core.Message{
        From:              call.From,
        To:                call.To,
        Value:             call.Value,
        GasLimit:          call.Gas,
        GasPrice:          call.GasPrice,
        GasFeeCap:         call.GasFeeCap,
        GasTipCap:         call.GasTipCap,
        Data:              call.Data,
        AccessList:        call.AccessList,
        SkipAccountChecks: true,
    }

    // Create a new environment which holds all relevant information
    // about the transaction and calling mechanisms.
    txContext := core.NewEVMTxContext(msg)
    evmContext := core.NewEVMBlockContext(header, b.blockchain, nil)
    vmEnv := vm.NewEVM(evmContext, txContext, stateDB, b.config, vm.Config{NoBaseFee: true})
    gasPool := new(core.GasPool).AddGas(math.MaxUint64)

    return core.ApplyMessage(vmEnv, msg, gasPool)
}

// SendTransaction updates the pending block to include the given transaction.
func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
    b.mu.Lock()
    defer b.mu.Unlock()

    // Get the last block
    block, err := b.blockByHash(ctx, b.pendingBlock.ParentHash())
    if err != nil {
        return errors.New("could not fetch parent")
    }
    // Check transaction validity
    signer := types.MakeSigner(b.blockchain.Config(), block.Number(), block.Time())
    sender, err := types.Sender(signer, tx)
    if err != nil {
        return fmt.Errorf("invalid transaction: %v", err)
    }
    nonce := b.pendingState.GetNonce(sender)
    if tx.Nonce() != nonce {
        return fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce)
    }
    // Include tx in chain
    blocks, receipts := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
        for _, tx := range b.pendingBlock.Transactions() {
            block.AddTxWithChain(b.blockchain, tx)
        }
        block.AddTxWithChain(b.blockchain, tx)
    })
    stateDB, err := b.blockchain.State()
    if err != nil {
        return err
    }
    b.pendingBlock = blocks[0]
    b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
    b.pendingReceipts = receipts[0]
    return nil
}
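A sketch of signing and submitting a plain value transfer through SendTransaction. It assumes the crypto/ecdsa standard-library package and the go-ethereum crypto package in addition to this file's imports; the key, recipient and chain ID (commonly 1337 for the simulated genesis) are placeholders.

// Illustrative sketch, not part of this file; assumes "crypto/ecdsa" and
// "github.com/ethereum/go-ethereum/crypto" imports in addition to the ones above.
func exampleSendTransfer(ctx context.Context, b *SimulatedBackend, key *ecdsa.PrivateKey, to common.Address, chainID *big.Int) error {
    nonce, err := b.PendingNonceAt(ctx, crypto.PubkeyToAddress(key.PublicKey))
    if err != nil {
        return err
    }
    gasPrice, err := b.SuggestGasPrice(ctx)
    if err != nil {
        return err
    }
    gasPrice = new(big.Int).Add(gasPrice, big.NewInt(1)) // a little head-room over the pending base fee
    tx := types.NewTransaction(nonce, to, big.NewInt(1), params.TxGas, gasPrice, nil)
    signed, err := types.SignTx(tx, types.LatestSignerForChainID(chainID), key)
    if err != nil {
        return err
    }
    if err := b.SendTransaction(ctx, signed); err != nil {
        return err
    }
    b.Commit() // seal the pending block so the transfer gets a receipt
    return nil
}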
// FilterLogs executes a log filter operation, blocking during execution and
// returning all the results in one batch.
//
// TODO(karalabe): Deprecate when the subscription one can return past data too.
func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) {
    var filter *filters.Filter
    if query.BlockHash != nil {
        // Block filter requested, construct a single-shot filter
        filter = b.filterSystem.NewBlockFilter(*query.BlockHash, query.Addresses, query.Topics)
    } else {
        // Initialize unset filter boundaries to run from genesis to chain head
        from := int64(0)
        if query.FromBlock != nil {
            from = query.FromBlock.Int64()
        }
        to := int64(-1)
        if query.ToBlock != nil {
            to = query.ToBlock.Int64()
        }
        // Construct the range filter
        filter = b.filterSystem.NewRangeFilter(from, to, query.Addresses, query.Topics)
    }
    // Run the filter and return all the logs
    logs, err := filter.Logs(ctx)
    if err != nil {
        return nil, err
    }
    res := make([]types.Log, len(logs))
    for i, nLog := range logs {
        res[i] = *nLog
    }
    return res, nil
}
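A one-call sketch of the range path above: fetching every log a single contract has emitted so far; contractAddr and the helper name are placeholders.

// Illustrative sketch, not part of this file.
func exampleContractLogs(ctx context.Context, b *SimulatedBackend, contractAddr common.Address) ([]types.Log, error) {
    return b.FilterLogs(ctx, ethereum.FilterQuery{
        // Leaving FromBlock/ToBlock nil defaults to the genesis-to-head range handled above.
        Addresses: []common.Address{contractAddr},
    })
}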
// SubscribeFilterLogs creates a background log filtering operation, returning a
// subscription immediately, which can be used to stream the found events.
func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) {
    // Subscribe to contract events
    sink := make(chan []*types.Log)

    sub, err := b.events.SubscribeLogs(query, sink)
    if err != nil {
        return nil, err
    }
    // Since we're getting logs in batches, we need to flatten them into a plain stream
    return event.NewSubscription(func(quit <-chan struct{}) error {
        defer sub.Unsubscribe()
        for {
            select {
            case logs := <-sink:
                for _, nlog := range logs {
                    select {
                    case ch <- *nlog:
                    case err := <-sub.Err():
                        return err
                    case <-quit:
                        return nil
                    }
                }
            case err := <-sub.Err():
                return err
            case <-quit:
                return nil
            }
        }
    }), nil
}

// SubscribeNewHead returns an event subscription for a new header.
func (b *SimulatedBackend) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) {
    // subscribe to a new head
    sink := make(chan *types.Header)
    sub := b.events.SubscribeNewHeads(sink)

    return event.NewSubscription(func(quit <-chan struct{}) error {
        defer sub.Unsubscribe()
        for {
            select {
            case head := <-sink:
                select {
                case ch <- head:
                case err := <-sub.Err():
                    return err
                case <-quit:
                    return nil
                }
            case err := <-sub.Err():
                return err
            case <-quit:
                return nil
            }
        }
    }), nil
}
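A consumer-side sketch for SubscribeNewHead, mirroring the select loop used above; the channel buffer size and logging are illustrative choices.

// Illustrative sketch, not part of this file.
func exampleWatchHeads(ctx context.Context, b *SimulatedBackend) error {
    heads := make(chan *types.Header, 16)
    sub, err := b.SubscribeNewHead(ctx, heads)
    if err != nil {
        return err
    }
    defer sub.Unsubscribe()
    for {
        select {
        case head := <-heads:
            fmt.Println("new head", head.Number, head.Hash().Hex())
        case err := <-sub.Err():
            return err
        case <-ctx.Done():
            return ctx.Err()
        }
    }
}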
// AdjustTime adds a time shift to the simulated clock.
// It can only be called on empty blocks.
func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
    b.mu.Lock()
    defer b.mu.Unlock()

    if len(b.pendingBlock.Transactions()) != 0 {
        return errors.New("could not adjust time on non-empty block")
    }
    // Get the last block
    block := b.blockchain.GetBlockByHash(b.pendingBlock.ParentHash())
    if block == nil {
        return errors.New("could not find parent")
    }

    blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
        block.OffsetTime(int64(adjustment.Seconds()))
    })
    stateDB, err := b.blockchain.State()
    if err != nil {
        return err
    }
    b.pendingBlock = blocks[0]
    b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
    return nil
}

// Blockchain returns the underlying blockchain.
func (b *SimulatedBackend) Blockchain() *core.BlockChain {
    return b.blockchain
}
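A tiny sketch of fast-forwarding the simulated clock before mining, e.g. to exercise time-locked contract logic in a test; Commit is the sealing helper defined earlier in this file.

// Illustrative sketch, not part of this file.
func exampleSkipAhead(b *SimulatedBackend) error {
    if err := b.AdjustTime(24 * time.Hour); err != nil {
        return err // fails if the pending block already contains transactions
    }
    b.Commit() // seal a block that carries the shifted timestamp
    return nil
}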
// filterBackend implements filters.Backend to support filtering for logs without
// taking bloom-bits acceleration structures into account.
type filterBackend struct {
    db      ethdb.Database
    bc      *core.BlockChain
    backend *SimulatedBackend
}

func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db }

func (fb *filterBackend) EventMux() *event.TypeMux { panic("not supported") }

func (fb *filterBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) {
    switch number {
    case rpc.PendingBlockNumber:
        if block := fb.backend.pendingBlock; block != nil {
            return block.Header(), nil
        }
        return nil, nil
    case rpc.LatestBlockNumber:
        return fb.bc.CurrentHeader(), nil
    case rpc.FinalizedBlockNumber:
        return fb.bc.CurrentFinalBlock(), nil
    case rpc.SafeBlockNumber:
        return fb.bc.CurrentSafeBlock(), nil
    default:
        return fb.bc.GetHeaderByNumber(uint64(number.Int64())), nil
    }
}

func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
    return fb.bc.GetHeaderByHash(hash), nil
}

func (fb *filterBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
    if body := fb.bc.GetBody(hash); body != nil {
        return body, nil
    }
    return nil, errors.New("block body not found")
}

func (fb *filterBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
    return fb.backend.pendingBlock, fb.backend.pendingReceipts
}

func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
    number := rawdb.ReadHeaderNumber(fb.db, hash)
    if number == nil {
        return nil, nil
    }
    header := rawdb.ReadHeader(fb.db, hash, *number)
    if header == nil {
        return nil, nil
    }
    return rawdb.ReadReceipts(fb.db, hash, *number, header.Time, fb.bc.Config()), nil
}

func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
    logs := rawdb.ReadLogs(fb.db, hash, number)
    return logs, nil
}

func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
    return nullSubscription()
}

func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
    return fb.bc.SubscribeChainEvent(ch)
}

func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
    return fb.bc.SubscribeRemovedLogsEvent(ch)
}

func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
    return fb.bc.SubscribeLogsEvent(ch)
}

func (fb *filterBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
    return nullSubscription()
}

func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 }

func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) {
    panic("not supported")
}

func (fb *filterBackend) ChainConfig() *params.ChainConfig {
    panic("not supported")
}

func (fb *filterBackend) CurrentHeader() *types.Header {
    panic("not supported")
}

func nullSubscription() event.Subscription {
    return event.NewSubscription(func(quit <-chan struct{}) error {
        <-quit
        return nil
    })
}
File diff suppressed because it is too large
@ -238,7 +238,7 @@ func (c *BoundContract) Transact(opts *TransactOpts, method string, params ...in
if err != nil {
return nil, err
}
// todo(rjl493456442) check the method is payable or not,
// todo(rjl493456442) check whether the method is payable or not,
// reject invalid transaction at the first place
return c.transact(opts, &c.address, input)
}
@ -246,7 +246,7 @@ func (c *BoundContract) Transact(opts *TransactOpts, method string, params ...in
// RawTransact initiates a transaction with the given raw calldata as the input.
// It's usually used to initiate transactions for invoking **Fallback** function.
func (c *BoundContract) RawTransact(opts *TransactOpts, calldata []byte) (*types.Transaction, error) {
// todo(rjl493456442) check the method is payable or not,
// todo(rjl493456442) check whether the method is payable or not,
// reject invalid transaction at the first place
return c.transact(opts, &c.address, calldata)
}
@ -135,6 +135,7 @@ func (mc *mockBlockHashCaller) CallContractAtHash(ctx context.Context, call ethe
}

func TestPassingBlockNumber(t *testing.T) {
t.Parallel()
mc := &mockPendingCaller{
mockCaller: &mockCaller{
codeAtBytes: []byte{1, 2, 3},
@ -186,6 +187,7 @@ func TestPassingBlockNumber(t *testing.T) {
const hexData = "0x000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158"

func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) {
t.Parallel()
hash := crypto.Keccak256Hash([]byte("testName"))
topics := []common.Hash{
crypto.Keccak256Hash([]byte("received(string,address,uint256,bytes)")),
@ -207,6 +209,7 @@ func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) {
}

func TestUnpackAnonymousLogIntoMap(t *testing.T) {
t.Parallel()
mockLog := newMockLog(nil, common.HexToHash("0x0"))

abiString := `[{"anonymous":false,"inputs":[{"indexed":false,"name":"amount","type":"uint256"}],"name":"received","type":"event"}]`
@ -224,6 +227,7 @@ func TestUnpackAnonymousLogIntoMap(t *testing.T) {
}

func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
t.Parallel()
sliceBytes, err := rlp.EncodeToBytes([]string{"name1", "name2", "name3", "name4"})
if err != nil {
t.Fatal(err)
@ -249,6 +253,7 @@ func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
}

func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) {
t.Parallel()
arrBytes, err := rlp.EncodeToBytes([2]common.Address{common.HexToAddress("0x0"), common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2")})
if err != nil {
t.Fatal(err)
@ -274,6 +279,7 @@ func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) {
}

func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) {
t.Parallel()
mockAddress := common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2")
addrBytes := mockAddress.Bytes()
hash := crypto.Keccak256Hash([]byte("mockFunction(address,uint)"))
@ -300,6 +306,7 @@ func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) {
}

func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) {
t.Parallel()
bytes := []byte{1, 2, 3, 4, 5}
hash := crypto.Keccak256Hash(bytes)
topics := []common.Hash{
@ -322,6 +329,7 @@ func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) {
}

func TestTransactGasFee(t *testing.T) {
t.Parallel()
assert := assert.New(t)

// GasTipCap and GasFeeCap
@ -397,6 +405,7 @@ func newMockLog(topics []common.Hash, txHash common.Hash) types.Log {
}

func TestCall(t *testing.T) {
t.Parallel()
var method, methodWithArg = "something", "somethingArrrrg"
tests := []struct {
name, method string
@ -572,6 +581,7 @@ func TestCall(t *testing.T) {

// TestCrashers contains some strings which previously caused the abi codec to crash.
func TestCrashers(t *testing.T) {
t.Parallel()
abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"_1"}]}]}]`))
abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"&"}]}]}]`))
abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"----"}]}]}]`))
@ -79,7 +79,7 @@ func isKeyWord(arg string) bool {

// Bind generates a Go wrapper around a contract ABI. This wrapper isn't meant
// to be used as is in client code, but rather as an intermediate struct which
// enforces compile time type safety and naming convention opposed to having to
// enforces compile time type safety and naming convention as opposed to having to
// manually maintain hard coded strings that break on runtime.
func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]string, pkg string, lang Lang, libs map[string]string, aliases map[string]string) (string, error) {
var (
@ -363,7 +363,7 @@ func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
// parameters that are not value types i.e. arrays and structs are not
// stored directly but instead a keccak256-hash of an encoding is stored.
//
// We only convert stringS and bytes to hash, still need to deal with
// We only convert strings and bytes to hash, still need to deal with
// array(both fixed-size and dynamic-size) and struct.
if bound == "string" || bound == "[]byte" {
bound = "common.Hash"
@ -305,6 +305,7 @@ var bindTests = []struct {
if err != nil {
t.Fatalf("Failed to deploy interactor contract: %v", err)
}
sim.Commit()
if _, err := interactor.Transact(auth, "Transact string"); err != nil {
t.Fatalf("Failed to transact with interactor contract: %v", err)
}
@ -512,6 +513,7 @@ var bindTests = []struct {
if err != nil {
t.Fatalf("Failed to deploy defaulter contract: %v", err)
}
sim.Commit()
if _, err := (&DefaulterRaw{defaulter}).Transfer(auth); err != nil {
t.Fatalf("Failed to invoke default method: %v", err)
}
@ -1677,7 +1679,7 @@ var bindTests = []struct {
}
sim.Commit()

// This test the existence of the free retreiver call for view and pure functions
// This test the existence of the free retriever call for view and pure functions
if num, err := pav.PureFunc(nil); err != nil {
t.Fatalf("Failed to call anonymous field retriever: %v", err)
} else if num.Cmp(big.NewInt(42)) != 0 {
@ -1874,6 +1876,7 @@ var bindTests = []struct {
[]string{"0x6080604052348015600f57600080fd5b5060998061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063726c638214602d575b600080fd5b60336035565b005b60405163024876cd60e61b815260016004820152600260248201526003604482015260640160405180910390fdfea264697066735822122093f786a1bc60216540cd999fbb4a6109e0fef20abcff6e9107fb2817ca968f3c64736f6c63430008070033"},
[]string{`[{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError1","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError2","type":"error"},{"inputs":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"},{"internalType":"uint256","name":"c","type":"uint256"}],"name":"MyError3","type":"error"},{"inputs":[],"name":"Error","outputs":[],"stateMutability":"pure","type":"function"}]`},
`
"context"
"math/big"

"github.com/ethereum/go-ethereum/accounts/abi/bind"
@ -1895,7 +1898,7 @@ var bindTests = []struct {
t.Fatal(err)
}
sim.Commit()
_, err = bind.WaitDeployed(nil, sim, tx)
_, err = bind.WaitDeployed(context.Background(), sim, tx)
if err != nil {
t.Error(err)
}
@ -1926,6 +1929,7 @@ var bindTests = []struct {
bytecode: []string{`0x608060405234801561001057600080fd5b506040516101c43803806101c48339818101604052810190610032919061014a565b50610177565b6000604051905090565b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6100958261004c565b810181811067ffffffffffffffff821117156100b4576100b361005d565b5b80604052505050565b60006100c7610038565b90506100d3828261008c565b919050565b6000819050919050565b6100eb816100d8565b81146100f657600080fd5b50565b600081519050610108816100e2565b92915050565b60006020828403121561012457610123610047565b5b61012e60206100bd565b9050600061013e848285016100f9565b60008301525092915050565b6000602082840312156101605761015f610042565b5b600061016e8482850161010e565b91505092915050565b603f806101856000396000f3fe6080604052600080fdfea2646970667358221220cdffa667affecefac5561f65f4a4ba914204a8d4eb859d8cd426fb306e5c12a364736f6c634300080a0033`},
abi: []string{`[{"inputs":[{"components":[{"internalType":"uint256","name":"field","type":"uint256"}],"internalType":"struct ConstructorWithStructParam.StructType","name":"st","type":"tuple"}],"stateMutability":"nonpayable","type":"constructor"}]`},
imports: `
"context"
"math/big"

"github.com/ethereum/go-ethereum/accounts/abi/bind"
@ -1948,7 +1952,7 @@ var bindTests = []struct {
}
sim.Commit()

if _, err = bind.WaitDeployed(nil, sim, tx); err != nil {
if _, err = bind.WaitDeployed(context.Background(), sim, tx); err != nil {
t.Logf("Deployment tx: %+v", tx)
t.Errorf("bind.WaitDeployed(nil, %T, <deployment tx>) got err %v; want nil err", sim, err)
}
@ -1974,6 +1978,7 @@ var bindTests = []struct {
bytecode: []string{"0x608060405234801561001057600080fd5b5061042b806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063c2bb515f1461003b578063cce7b04814610059575b600080fd5b610043610075565b60405161005091906101af565b60405180910390f35b610073600480360381019061006e91906103ac565b6100b5565b005b61007d6100b8565b604051806040016040528060405180602001604052806000815250815260200160405180602001604052806000815250815250905090565b50565b604051806040016040528060608152602001606081525090565b600081519050919050565b600082825260208201905092915050565b60005b8381101561010c5780820151818401526020810190506100f1565b8381111561011b576000848401525b50505050565b6000601f19601f8301169050919050565b600061013d826100d2565b61014781856100dd565b93506101578185602086016100ee565b61016081610121565b840191505092915050565b600060408301600083015184820360008601526101888282610132565b915050602083015184820360208601526101a28282610132565b9150508091505092915050565b600060208201905081810360008301526101c9818461016b565b905092915050565b6000604051905090565b600080fd5b600080fd5b600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b61022282610121565b810181811067ffffffffffffffff82111715610241576102406101ea565b5b80604052505050565b60006102546101d1565b90506102608282610219565b919050565b600080fd5b600080fd5b600080fd5b600067ffffffffffffffff82111561028f5761028e6101ea565b5b61029882610121565b9050602081019050919050565b82818337600083830152505050565b60006102c76102c284610274565b61024a565b9050828152602081018484840111156102e3576102e261026f565b5b6102ee8482856102a5565b509392505050565b600082601f83011261030b5761030a61026a565b5b813561031b8482602086016102b4565b91505092915050565b60006040828403121561033a576103396101e5565b5b610344604061024a565b9050600082013567ffffffffffffffff81111561036457610363610265565b5b610370848285016102f6565b600083015250602082013567ffffffffffffffff81111561039457610393610265565b5b6103a0848285016102f6565b60208301525092915050565b6000602082840312156103c2576103c16101db565b5b600082013567ffffffffffffffff8111156103e0576103df6101e0565b5b6103ec84828501610324565b9150509291505056fea264697066735822122033bca1606af9b6aeba1673f98c52003cec19338539fb44b86690ce82c51483b564736f6c634300080e0033"},
abi: []string{`[ { "anonymous": false, "inputs": [ { "indexed": false, "internalType": "int256", "name": "msg", "type": "int256" }, { "indexed": false, "internalType": "int256", "name": "_msg", "type": "int256" } ], "name": "log", "type": "event" }, { "inputs": [ { "components": [ { "internalType": "bytes", "name": "data", "type": "bytes" }, { "internalType": "bytes", "name": "_data", "type": "bytes" } ], "internalType": "struct oracle.request", "name": "req", "type": "tuple" } ], "name": "addRequest", "outputs": [], "stateMutability": "pure", "type": "function" }, { "inputs": [], "name": "getRequest", "outputs": [ { "components": [ { "internalType": "bytes", "name": "data", "type": "bytes" }, { "internalType": "bytes", "name": "_data", "type": "bytes" } ], "internalType": "struct oracle.request", "name": "", "type": "tuple" } ], "stateMutability": "pure", "type": "function" } ]`},
imports: `
"context"
"math/big"

"github.com/ethereum/go-ethereum/accounts/abi/bind"
@ -1996,7 +2001,7 @@ var bindTests = []struct {
}
sim.Commit()

if _, err = bind.WaitDeployed(nil, sim, tx); err != nil {
if _, err = bind.WaitDeployed(context.Background(), sim, tx); err != nil {
t.Logf("Deployment tx: %+v", tx)
t.Errorf("bind.WaitDeployed(nil, %T, <deployment tx>) got err %v; want nil err", sim, err)
}
@ -2014,6 +2019,7 @@ var bindTests = []struct {
bytecode: []string{"0x608060405234801561001057600080fd5b5060dc8061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063527a119f14602d575b600080fd5b60436004803603810190603f9190605b565b6045565b005b50565b6000813590506055816092565b92915050565b600060208284031215606e57606d608d565b5b6000607a848285016048565b91505092915050565b6000819050919050565b600080fd5b6099816083565b811460a357600080fd5b5056fea2646970667358221220d4f4525e2615516394055d369fb17df41c359e5e962734f27fd683ea81fd9db164736f6c63430008070033"},
abi: []string{`[{"inputs":[{"internalType":"uint256","name":"range","type":"uint256"}],"name":"functionWithKeywordParameter","outputs":[],"stateMutability":"pure","type":"function"}]`},
imports: `
"context"
"math/big"

"github.com/ethereum/go-ethereum/accounts/abi/bind"
@ -2034,7 +2040,7 @@ var bindTests = []struct {
}
sim.Commit()

if _, err = bind.WaitDeployed(nil, sim, tx); err != nil {
if _, err = bind.WaitDeployed(context.Background(), sim, tx); err != nil {
t.Errorf("error deploying the contract: %v", err)
}
`,
@ -2067,6 +2073,7 @@ var bindTests = []struct {
// Tests that packages generated by the binder can be successfully compiled and
// the requested tester run against it.
func TestGolangBindings(t *testing.T) {
t.Parallel()
// Skip the test if no Go command can be found
gocmd := runtime.GOROOT() + "/bin/go"
if !common.FileExist(gocmd) {
@ -24,11 +24,12 @@ import (
"time"

"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient/simulated"
"github.com/ethereum/go-ethereum/params"
)

var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
@ -53,8 +54,9 @@ var waitDeployedTests = map[string]struct {
}

func TestWaitDeployed(t *testing.T) {
t.Parallel()
for name, test := range waitDeployedTests {
backend := backends.NewSimulatedBackend(
backend := simulated.New(
core.GenesisAlloc{
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
},
@ -63,11 +65,11 @@ func TestWaitDeployed(t *testing.T) {
defer backend.Close()

// Create the transaction
head, _ := backend.HeaderByNumber(context.Background(), nil) // Should be child's, good enough
head, _ := backend.Client().HeaderByNumber(context.Background(), nil) // Should be child's, good enough
gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))

tx := types.NewContractCreation(0, big.NewInt(0), test.gas, gasPrice, common.FromHex(test.code))
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
tx, _ = types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(1337)), testKey)

// Wait for it to get mined in the background.
var (
@ -77,12 +79,12 @@ func TestWaitDeployed(t *testing.T) {
ctx = context.Background()
)
go func() {
address, err = bind.WaitDeployed(ctx, backend, tx)
address, err = bind.WaitDeployed(ctx, backend.Client(), tx)
close(mined)
}()

// Send and mine the transaction.
backend.SendTransaction(ctx, tx)
backend.Client().SendTransaction(ctx, tx)
backend.Commit()

select {
@ -100,7 +102,7 @@ func TestWaitDeployed(t *testing.T) {
}

func TestWaitDeployedCornerCases(t *testing.T) {
backend := backends.NewSimulatedBackend(
backend := simulated.New(
core.GenesisAlloc{
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
},
@ -108,33 +110,33 @@ func TestWaitDeployedCornerCases(t *testing.T) {
)
defer backend.Close()

head, _ := backend.HeaderByNumber(context.Background(), nil) // Should be child's, good enough
head, _ := backend.Client().HeaderByNumber(context.Background(), nil) // Should be child's, good enough
gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))

// Create a transaction to an account.
code := "6060604052600a8060106000396000f360606040526008565b00"
tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, gasPrice, common.FromHex(code))
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
tx, _ = types.SignTx(tx, types.LatestSigner(params.AllDevChainProtocolChanges), testKey)
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
backend.SendTransaction(ctx, tx)
|
backend.Client().SendTransaction(ctx, tx)
|
||||||
backend.Commit()
|
backend.Commit()
|
||||||
notContentCreation := errors.New("tx is not contract creation")
|
notContractCreation := errors.New("tx is not contract creation")
|
||||||
if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != notContentCreation.Error() {
|
if _, err := bind.WaitDeployed(ctx, backend.Client(), tx); err.Error() != notContractCreation.Error() {
|
||||||
t.Errorf("error missmatch: want %q, got %q, ", notContentCreation, err)
|
t.Errorf("error mismatch: want %q, got %q, ", notContractCreation, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a transaction that is not mined.
|
// Create a transaction that is not mined.
|
||||||
tx = types.NewContractCreation(1, big.NewInt(0), 3000000, gasPrice, common.FromHex(code))
|
tx = types.NewContractCreation(1, big.NewInt(0), 3000000, gasPrice, common.FromHex(code))
|
||||||
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
|
tx, _ = types.SignTx(tx, types.LatestSigner(params.AllDevChainProtocolChanges), testKey)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
contextCanceled := errors.New("context canceled")
|
contextCanceled := errors.New("context canceled")
|
||||||
if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != contextCanceled.Error() {
|
if _, err := bind.WaitDeployed(ctx, backend.Client(), tx); err.Error() != contextCanceled.Error() {
|
||||||
t.Errorf("error missmatch: want %q, got %q, ", contextCanceled, err)
|
t.Errorf("error mismatch: want %q, got %q, ", contextCanceled, err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
backend.SendTransaction(ctx, tx)
|
backend.Client().SendTransaction(ctx, tx)
|
||||||
cancel()
|
cancel()
|
||||||
}
|
}
|
||||||
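Note on the hunks above: the test migrates from the legacy backends.NewSimulatedBackend to the ethclient/simulated package, and every RPC-style call now goes through backend.Client(). A minimal sketch of the new pattern follows, using only calls that appear in this diff; it assumes the constructor still takes the genesis allocation plus a gas limit (the trailing arguments are unchanged in the hunk), and the key and balance are illustrative.

package main

import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient/simulated"
)

func main() {
	// Illustrative funded key; any freshly generated key works for a simulation.
	key, _ := crypto.GenerateKey()
	addr := crypto.PubkeyToAddress(key.PublicKey)

	// simulated.New replaces backends.NewSimulatedBackend; the allocation and
	// gas limit arguments are assumed unchanged from the old constructor.
	backend := simulated.New(core.GenesisAlloc{
		addr: {Balance: big.NewInt(10000000000000000)},
	}, 10000000)
	defer backend.Close()

	// The Ethereum client is now reached through Client() rather than the
	// backend itself.
	head, _ := backend.Client().HeaderByNumber(context.Background(), nil)
	_ = head

	// Mining still happens through Commit on the backend.
	backend.Commit()
}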
|
@ -18,7 +18,6 @@ package abi
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
@ -84,10 +83,10 @@ func (e Error) String() string {
|
|||||||
|
|
||||||
func (e *Error) Unpack(data []byte) (interface{}, error) {
|
func (e *Error) Unpack(data []byte) (interface{}, error) {
|
||||||
if len(data) < 4 {
|
if len(data) < 4 {
|
||||||
return "", errors.New("invalid data for unpacking")
|
return "", fmt.Errorf("insufficient data for unpacking: have %d, want at least 4", len(data))
|
||||||
}
|
}
|
||||||
if !bytes.Equal(data[:4], e.ID[:4]) {
|
if !bytes.Equal(data[:4], e.ID[:4]) {
|
||||||
return "", errors.New("invalid data for unpacking")
|
return "", fmt.Errorf("invalid identifier, have %#x want %#x", data[:4], e.ID[:4])
|
||||||
}
|
}
|
||||||
return e.Inputs.Unpack(data[4:])
|
return e.Inputs.Unpack(data[4:])
|
||||||
}
|
}
|
||||||
|
@ -81,6 +81,7 @@ var pledgeData1 = "00000000000000000000000000ce0d46d924cc8437c806721496599fc3ffa
|
|||||||
var mixedCaseData1 = "00000000000000000000000000000000000000000000000000000000000f42400000000000000000000000000000000000000000000000000000020489e8000000000000000000000000000000000000000000000000000000000000000f4241"
|
var mixedCaseData1 = "00000000000000000000000000000000000000000000000000000000000f42400000000000000000000000000000000000000000000000000000020489e8000000000000000000000000000000000000000000000000000000000000000f4241"
|
||||||
|
|
||||||
func TestEventId(t *testing.T) {
|
func TestEventId(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var table = []struct {
|
var table = []struct {
|
||||||
definition string
|
definition string
|
||||||
expectations map[string]common.Hash
|
expectations map[string]common.Hash
|
||||||
@ -112,6 +113,7 @@ func TestEventId(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestEventString(t *testing.T) {
|
func TestEventString(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var table = []struct {
|
var table = []struct {
|
||||||
definition string
|
definition string
|
||||||
expectations map[string]string
|
expectations map[string]string
|
||||||
@ -146,6 +148,7 @@ func TestEventString(t *testing.T) {
|
|||||||
|
|
||||||
// TestEventMultiValueWithArrayUnpack verifies that array fields will be counted after parsing array.
|
// TestEventMultiValueWithArrayUnpack verifies that array fields will be counted after parsing array.
|
||||||
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
|
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
|
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
|
||||||
abi, err := JSON(strings.NewReader(definition))
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@ -161,6 +164,7 @@ func TestEventMultiValueWithArrayUnpack(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestEventTupleUnpack(t *testing.T) {
|
func TestEventTupleUnpack(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
type EventTransfer struct {
|
type EventTransfer struct {
|
||||||
Value *big.Int
|
Value *big.Int
|
||||||
}
|
}
|
||||||
@ -351,6 +355,7 @@ func unpackTestEventData(dest interface{}, hexData string, jsonEvent []byte, ass
|
|||||||
|
|
||||||
// TestEventUnpackIndexed verifies that indexed field will be skipped by event decoder.
|
// TestEventUnpackIndexed verifies that indexed field will be skipped by event decoder.
|
||||||
func TestEventUnpackIndexed(t *testing.T) {
|
func TestEventUnpackIndexed(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
|
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
|
||||||
type testStruct struct {
|
type testStruct struct {
|
||||||
Value1 uint8 // indexed
|
Value1 uint8 // indexed
|
||||||
@ -368,6 +373,7 @@ func TestEventUnpackIndexed(t *testing.T) {
|
|||||||
|
|
||||||
// TestEventIndexedWithArrayUnpack verifies that decoder will not overflow when static array is indexed input.
|
// TestEventIndexedWithArrayUnpack verifies that decoder will not overflow when static array is indexed input.
|
||||||
func TestEventIndexedWithArrayUnpack(t *testing.T) {
|
func TestEventIndexedWithArrayUnpack(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"string"}]}]`
|
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"string"}]}]`
|
||||||
type testStruct struct {
|
type testStruct struct {
|
||||||
Value1 [2]uint8 // indexed
|
Value1 [2]uint8 // indexed
|
||||||
|
@ -117,15 +117,6 @@ func NewMethod(name string, rawName string, funType FunctionType, mutability str
|
|||||||
sig = fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ","))
|
sig = fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ","))
|
||||||
id = crypto.Keccak256([]byte(sig))[:4]
|
id = crypto.Keccak256([]byte(sig))[:4]
|
||||||
}
|
}
|
||||||
// Extract meaningful state mutability of solidity method.
|
|
||||||
// If it's default value, never print it.
|
|
||||||
state := mutability
|
|
||||||
if state == "nonpayable" {
|
|
||||||
state = ""
|
|
||||||
}
|
|
||||||
if state != "" {
|
|
||||||
state = state + " "
|
|
||||||
}
|
|
||||||
identity := fmt.Sprintf("function %v", rawName)
|
identity := fmt.Sprintf("function %v", rawName)
|
||||||
switch funType {
|
switch funType {
|
||||||
case Fallback:
|
case Fallback:
|
||||||
@ -135,7 +126,14 @@ func NewMethod(name string, rawName string, funType FunctionType, mutability str
|
|||||||
case Constructor:
|
case Constructor:
|
||||||
identity = "constructor"
|
identity = "constructor"
|
||||||
}
|
}
|
||||||
str := fmt.Sprintf("%v(%v) %sreturns(%v)", identity, strings.Join(inputNames, ", "), state, strings.Join(outputNames, ", "))
|
var str string
|
||||||
|
// Extract meaningful state mutability of solidity method.
|
||||||
|
// If it's empty string or default value "nonpayable", never print it.
|
||||||
|
if mutability == "" || mutability == "nonpayable" {
|
||||||
|
str = fmt.Sprintf("%v(%v) returns(%v)", identity, strings.Join(inputNames, ", "), strings.Join(outputNames, ", "))
|
||||||
|
} else {
|
||||||
|
str = fmt.Sprintf("%v(%v) %s returns(%v)", identity, strings.Join(inputNames, ", "), mutability, strings.Join(outputNames, ", "))
|
||||||
|
}
|
||||||
|
|
||||||
return Method{
|
return Method{
|
||||||
Name: name,
|
Name: name,
|
||||||
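The refactor above only changes how the method's string form is assembled: the mutability qualifier is printed only when it is neither empty nor the default "nonpayable". A small stand-alone sketch of that rule (not the actual abi.Method API; the helper and example signatures are illustrative):

package main

import (
	"fmt"
	"strings"
)

// methodString mirrors the formatting logic introduced above: omit the
// mutability when it is "" or "nonpayable", otherwise print it between the
// parameter list and the returns clause.
func methodString(identity string, inputs, outputs []string, mutability string) string {
	if mutability == "" || mutability == "nonpayable" {
		return fmt.Sprintf("%v(%v) returns(%v)", identity, strings.Join(inputs, ", "), strings.Join(outputs, ", "))
	}
	return fmt.Sprintf("%v(%v) %s returns(%v)", identity, strings.Join(inputs, ", "), mutability, strings.Join(outputs, ", "))
}

func main() {
	fmt.Println(methodString("function transfer", []string{"address to", "uint256 amount"}, []string{"bool"}, "nonpayable"))
	// -> function transfer(address to, uint256 amount) returns(bool)
	fmt.Println(methodString("function deposit", nil, nil, "payable"))
	// -> function deposit() payable returns()
}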
|
@ -35,6 +35,7 @@ const methoddata = `
|
|||||||
]`
|
]`
|
||||||
|
|
||||||
func TestMethodString(t *testing.T) {
|
func TestMethodString(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var table = []struct {
|
var table = []struct {
|
||||||
method string
|
method string
|
||||||
expectation string
|
expectation string
|
||||||
@ -99,6 +100,7 @@ func TestMethodString(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMethodSig(t *testing.T) {
|
func TestMethodSig(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var cases = []struct {
|
var cases = []struct {
|
||||||
method string
|
method string
|
||||||
expect string
|
expect string
|
||||||
|
@ -57,7 +57,7 @@ func packElement(t Type, reflectValue reflect.Value) ([]byte, error) {
|
|||||||
reflectValue = mustArrayToByteSlice(reflectValue)
|
reflectValue = mustArrayToByteSlice(reflectValue)
|
||||||
}
|
}
|
||||||
if reflectValue.Type() != reflect.TypeOf([]byte{}) {
|
if reflectValue.Type() != reflect.TypeOf([]byte{}) {
|
||||||
return []byte{}, errors.New("Bytes type is neither slice nor array")
|
return []byte{}, errors.New("bytes type is neither slice nor array")
|
||||||
}
|
}
|
||||||
return packBytesSlice(reflectValue.Bytes(), reflectValue.Len()), nil
|
return packBytesSlice(reflectValue.Bytes(), reflectValue.Len()), nil
|
||||||
case FixedBytesTy, FunctionTy:
|
case FixedBytesTy, FunctionTy:
|
||||||
@ -66,7 +66,7 @@ func packElement(t Type, reflectValue reflect.Value) ([]byte, error) {
|
|||||||
}
|
}
|
||||||
return common.RightPadBytes(reflectValue.Bytes(), 32), nil
|
return common.RightPadBytes(reflectValue.Bytes(), 32), nil
|
||||||
default:
|
default:
|
||||||
return []byte{}, fmt.Errorf("Could not pack element, unknown type: %v", t.T)
|
return []byte{}, fmt.Errorf("could not pack element, unknown type: %v", t.T)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -32,8 +32,11 @@ import (
|
|||||||
|
|
||||||
// TestPack tests the general pack/unpack tests in packing_test.go
|
// TestPack tests the general pack/unpack tests in packing_test.go
|
||||||
func TestPack(t *testing.T) {
|
func TestPack(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
for i, test := range packUnpackTests {
|
for i, test := range packUnpackTests {
|
||||||
|
i, test := i, test
|
||||||
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
encb, err := hex.DecodeString(test.packed)
|
encb, err := hex.DecodeString(test.packed)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("invalid hex %s: %v", test.packed, err)
|
t.Fatalf("invalid hex %s: %v", test.packed, err)
|
||||||
@ -57,6 +60,7 @@ func TestPack(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMethodPack(t *testing.T) {
|
func TestMethodPack(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
abi, err := JSON(strings.NewReader(jsondata))
|
abi, err := JSON(strings.NewReader(jsondata))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -177,6 +181,7 @@ func TestMethodPack(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestPackNumber(t *testing.T) {
|
func TestPackNumber(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
value reflect.Value
|
value reflect.Value
|
||||||
packed []byte
|
packed []byte
|
||||||
|
@ -134,7 +134,7 @@ func setSlice(dst, src reflect.Value) error {
|
|||||||
dst.Set(slice)
|
dst.Set(slice)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return errors.New("Cannot set slice, destination not settable")
|
return errors.New("cannot set slice, destination not settable")
|
||||||
}
|
}
|
||||||
|
|
||||||
func setArray(dst, src reflect.Value) error {
|
func setArray(dst, src reflect.Value) error {
|
||||||
@ -155,7 +155,7 @@ func setArray(dst, src reflect.Value) error {
|
|||||||
dst.Set(array)
|
dst.Set(array)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return errors.New("Cannot set array, destination not settable")
|
return errors.New("cannot set array, destination not settable")
|
||||||
}
|
}
|
||||||
|
|
||||||
func setStruct(dst, src reflect.Value) error {
|
func setStruct(dst, src reflect.Value) error {
|
||||||
@ -163,7 +163,7 @@ func setStruct(dst, src reflect.Value) error {
|
|||||||
srcField := src.Field(i)
|
srcField := src.Field(i)
|
||||||
dstField := dst.Field(i)
|
dstField := dst.Field(i)
|
||||||
if !dstField.IsValid() || !srcField.IsValid() {
|
if !dstField.IsValid() || !srcField.IsValid() {
|
||||||
return fmt.Errorf("Could not find src field: %v value: %v in destination", srcField.Type().Name(), srcField)
|
return fmt.Errorf("could not find src field: %v value: %v in destination", srcField.Type().Name(), srcField)
|
||||||
}
|
}
|
||||||
if err := set(dstField, srcField); err != nil {
|
if err := set(dstField, srcField); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -170,8 +170,11 @@ var reflectTests = []reflectTest{
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestReflectNameToStruct(t *testing.T) {
|
func TestReflectNameToStruct(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
for _, test := range reflectTests {
|
for _, test := range reflectTests {
|
||||||
|
test := test
|
||||||
t.Run(test.name, func(t *testing.T) {
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))
|
m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))
|
||||||
if len(test.err) > 0 {
|
if len(test.err) > 0 {
|
||||||
if err == nil || err.Error() != test.err {
|
if err == nil || err.Error() != test.err {
|
||||||
@ -192,6 +195,7 @@ func TestReflectNameToStruct(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestConvertType(t *testing.T) {
|
func TestConvertType(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// Test Basic Struct
|
// Test Basic Struct
|
||||||
type T struct {
|
type T struct {
|
||||||
X *big.Int
|
X *big.Int
|
||||||
|
@ -24,6 +24,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestParseSelector(t *testing.T) {
|
func TestParseSelector(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
mkType := func(types ...interface{}) []ArgumentMarshaling {
|
mkType := func(types ...interface{}) []ArgumentMarshaling {
|
||||||
var result []ArgumentMarshaling
|
var result []ArgumentMarshaling
|
||||||
for i, typeOrComponents := range types {
|
for i, typeOrComponents := range types {
|
||||||
|
@ -24,6 +24,7 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/common/math"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -41,8 +42,7 @@ func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) {
|
|||||||
case common.Address:
|
case common.Address:
|
||||||
copy(topic[common.HashLength-common.AddressLength:], rule[:])
|
copy(topic[common.HashLength-common.AddressLength:], rule[:])
|
||||||
case *big.Int:
|
case *big.Int:
|
||||||
blob := rule.Bytes()
|
copy(topic[:], math.U256Bytes(rule))
|
||||||
copy(topic[common.HashLength-len(blob):], blob)
|
|
||||||
case bool:
|
case bool:
|
||||||
if rule {
|
if rule {
|
||||||
topic[common.HashLength-1] = 1
|
topic[common.HashLength-1] = 1
|
||||||
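The MakeTopics change above swaps the magnitude-only rule.Bytes() copy for math.U256Bytes, so negative *big.Int values are encoded as 256-bit two's complement, which is what the new test cases further down expect. A stdlib-only sketch of that encoding (toU256 is an illustrative helper, not the geth function):

package main

import (
	"fmt"
	"math"
	"math/big"
)

// toU256 reduces a signed big.Int modulo 2^256 and left-pads it to 32 bytes,
// the two's-complement view used for event topics: -1 maps to 0xff...ff
// instead of the 1-byte magnitude the old code produced.
func toU256(n *big.Int) []byte {
	mod := new(big.Int).Lsh(big.NewInt(1), 256)
	v := new(big.Int).Mod(n, mod) // Mod is Euclidean, so the result is never negative
	return v.FillBytes(make([]byte, 32))
}

func main() {
	fmt.Printf("%x\n", toU256(big.NewInt(-1)))
	// ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
	fmt.Printf("%x\n", toU256(big.NewInt(math.MinInt64)))
	// ffffffffffffffffffffffffffffffffffffffffffffffff8000000000000000
}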
@ -75,7 +75,7 @@ func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) {
|
|||||||
copy(topic[:], hash[:])
|
copy(topic[:], hash[:])
|
||||||
|
|
||||||
default:
|
default:
|
||||||
// todo(rjl493456442) according solidity documentation, indexed event
|
// todo(rjl493456442) according to solidity documentation, indexed event
|
||||||
// parameters that are not value types i.e. arrays and structs are not
|
// parameters that are not value types i.e. arrays and structs are not
|
||||||
// stored directly but instead a keccak256-hash of an encoding is stored.
|
// stored directly but instead a keccak256-hash of an encoding is stored.
|
||||||
//
|
//
|
||||||
|
@ -17,6 +17,7 @@
|
|||||||
package abi
|
package abi
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"math"
|
||||||
"math/big"
|
"math/big"
|
||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
@ -26,6 +27,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestMakeTopics(t *testing.T) {
|
func TestMakeTopics(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
type args struct {
|
type args struct {
|
||||||
query [][]interface{}
|
query [][]interface{}
|
||||||
}
|
}
|
||||||
@ -54,9 +56,27 @@ func TestMakeTopics(t *testing.T) {
|
|||||||
false,
|
false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"support *big.Int types in topics",
|
"support positive *big.Int types in topics",
|
||||||
args{[][]interface{}{{big.NewInt(1).Lsh(big.NewInt(2), 254)}}},
|
args{[][]interface{}{
|
||||||
[][]common.Hash{{common.Hash{128}}},
|
{big.NewInt(1)},
|
||||||
|
{big.NewInt(1).Lsh(big.NewInt(2), 254)},
|
||||||
|
}},
|
||||||
|
[][]common.Hash{
|
||||||
|
{common.HexToHash("0000000000000000000000000000000000000000000000000000000000000001")},
|
||||||
|
{common.Hash{128}},
|
||||||
|
},
|
||||||
|
false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"support negative *big.Int types in topics",
|
||||||
|
args{[][]interface{}{
|
||||||
|
{big.NewInt(-1)},
|
||||||
|
{big.NewInt(math.MinInt64)},
|
||||||
|
}},
|
||||||
|
[][]common.Hash{
|
||||||
|
{common.MaxHash},
|
||||||
|
{common.HexToHash("ffffffffffffffffffffffffffffffffffffffffffffffff8000000000000000")},
|
||||||
|
},
|
||||||
false,
|
false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -117,7 +137,9 @@ func TestMakeTopics(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
|
tt := tt
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
got, err := MakeTopics(tt.args.query...)
|
got, err := MakeTopics(tt.args.query...)
|
||||||
if (err != nil) != tt.wantErr {
|
if (err != nil) != tt.wantErr {
|
||||||
t.Errorf("makeTopics() error = %v, wantErr %v", err, tt.wantErr)
|
t.Errorf("makeTopics() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
@ -347,10 +369,13 @@ func setupTopicsTests() []topicTest {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestParseTopics(t *testing.T) {
|
func TestParseTopics(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tests := setupTopicsTests()
|
tests := setupTopicsTests()
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
|
tt := tt
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
createObj := tt.args.createObj()
|
createObj := tt.args.createObj()
|
||||||
if err := ParseTopics(createObj, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
|
if err := ParseTopics(createObj, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
|
||||||
t.Errorf("parseTopics() error = %v, wantErr %v", err, tt.wantErr)
|
t.Errorf("parseTopics() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
@ -364,10 +389,13 @@ func TestParseTopics(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestParseTopicsIntoMap(t *testing.T) {
|
func TestParseTopicsIntoMap(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tests := setupTopicsTests()
|
tests := setupTopicsTests()
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
|
tt := tt
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
outMap := make(map[string]interface{})
|
outMap := make(map[string]interface{})
|
||||||
if err := ParseTopicsIntoMap(outMap, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
|
if err := ParseTopicsIntoMap(outMap, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
|
||||||
t.Errorf("parseTopicsIntoMap() error = %v, wantErr %v", err, tt.wantErr)
|
t.Errorf("parseTopicsIntoMap() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
|
@ -31,6 +31,7 @@ type typeWithoutStringer Type
|
|||||||
|
|
||||||
// Tests that all allowed types get recognized by the type parser.
|
// Tests that all allowed types get recognized by the type parser.
|
||||||
func TestTypeRegexp(t *testing.T) {
|
func TestTypeRegexp(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
blob string
|
blob string
|
||||||
components []ArgumentMarshaling
|
components []ArgumentMarshaling
|
||||||
@ -117,6 +118,7 @@ func TestTypeRegexp(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestTypeCheck(t *testing.T) {
|
func TestTypeCheck(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
for i, test := range []struct {
|
for i, test := range []struct {
|
||||||
typ string
|
typ string
|
||||||
components []ArgumentMarshaling
|
components []ArgumentMarshaling
|
||||||
@ -308,6 +310,7 @@ func TestTypeCheck(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestInternalType(t *testing.T) {
|
func TestInternalType(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
components := []ArgumentMarshaling{{Name: "a", Type: "int64"}}
|
components := []ArgumentMarshaling{{Name: "a", Type: "int64"}}
|
||||||
internalType := "struct a.b[]"
|
internalType := "struct a.b[]"
|
||||||
kind := Type{
|
kind := Type{
|
||||||
@ -332,6 +335,7 @@ func TestInternalType(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestGetTypeSize(t *testing.T) {
|
func TestGetTypeSize(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var testCases = []struct {
|
var testCases = []struct {
|
||||||
typ string
|
typ string
|
||||||
components []ArgumentMarshaling
|
components []ArgumentMarshaling
|
||||||
@ -368,6 +372,7 @@ func TestGetTypeSize(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewFixedBytesOver32(t *testing.T) {
|
func TestNewFixedBytesOver32(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, err := NewType("bytes4096", "", nil)
|
_, err := NewType("bytes4096", "", nil)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Errorf("fixed bytes with size over 32 is not spec'd")
|
t.Errorf("fixed bytes with size over 32 is not spec'd")
|
||||||
|
@ -33,6 +33,7 @@ import (
|
|||||||
|
|
||||||
// TestUnpack tests the general pack/unpack tests in packing_test.go
|
// TestUnpack tests the general pack/unpack tests in packing_test.go
|
||||||
func TestUnpack(t *testing.T) {
|
func TestUnpack(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
for i, test := range packUnpackTests {
|
for i, test := range packUnpackTests {
|
||||||
t.Run(strconv.Itoa(i)+" "+test.def, func(t *testing.T) {
|
t.Run(strconv.Itoa(i)+" "+test.def, func(t *testing.T) {
|
||||||
//Unpack
|
//Unpack
|
||||||
@ -206,13 +207,13 @@ var unpackTests = []unpackTest{
|
|||||||
def: `[{"type":"bool"}]`,
|
def: `[{"type":"bool"}]`,
|
||||||
enc: "",
|
enc: "",
|
||||||
want: false,
|
want: false,
|
||||||
err: "abi: attempting to unmarshall an empty string while arguments are expected",
|
err: "abi: attempting to unmarshal an empty string while arguments are expected",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
def: `[{"type":"bytes32","indexed":true},{"type":"uint256","indexed":false}]`,
|
def: `[{"type":"bytes32","indexed":true},{"type":"uint256","indexed":false}]`,
|
||||||
enc: "",
|
enc: "",
|
||||||
want: false,
|
want: false,
|
||||||
err: "abi: attempting to unmarshall an empty string while arguments are expected",
|
err: "abi: attempting to unmarshal an empty string while arguments are expected",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
def: `[{"type":"bool","indexed":true},{"type":"uint64","indexed":true}]`,
|
def: `[{"type":"bool","indexed":true},{"type":"uint64","indexed":true}]`,
|
||||||
@ -224,6 +225,7 @@ var unpackTests = []unpackTest{
|
|||||||
// TestLocalUnpackTests runs test specially designed only for unpacking.
|
// TestLocalUnpackTests runs test specially designed only for unpacking.
|
||||||
// All test cases that can be used to test packing and unpacking should move to packing_test.go
|
// All test cases that can be used to test packing and unpacking should move to packing_test.go
|
||||||
func TestLocalUnpackTests(t *testing.T) {
|
func TestLocalUnpackTests(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
for i, test := range unpackTests {
|
for i, test := range unpackTests {
|
||||||
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
||||||
//Unpack
|
//Unpack
|
||||||
@ -251,6 +253,7 @@ func TestLocalUnpackTests(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnpackIntoInterfaceSetDynamicArrayOutput(t *testing.T) {
|
func TestUnpackIntoInterfaceSetDynamicArrayOutput(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
abi, err := JSON(strings.NewReader(`[{"constant":true,"inputs":[],"name":"testDynamicFixedBytes15","outputs":[{"name":"","type":"bytes15[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"testDynamicFixedBytes32","outputs":[{"name":"","type":"bytes32[]"}],"payable":false,"stateMutability":"view","type":"function"}]`))
|
abi, err := JSON(strings.NewReader(`[{"constant":true,"inputs":[],"name":"testDynamicFixedBytes15","outputs":[{"name":"","type":"bytes15[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"testDynamicFixedBytes32","outputs":[{"name":"","type":"bytes32[]"}],"payable":false,"stateMutability":"view","type":"function"}]`))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -321,6 +324,7 @@ func methodMultiReturn(require *require.Assertions) (ABI, []byte, methodMultiOut
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMethodMultiReturn(t *testing.T) {
|
func TestMethodMultiReturn(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
type reversed struct {
|
type reversed struct {
|
||||||
String string
|
String string
|
||||||
Int *big.Int
|
Int *big.Int
|
||||||
@ -400,6 +404,7 @@ func TestMethodMultiReturn(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiReturnWithArray(t *testing.T) {
|
func TestMultiReturnWithArray(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"type": "uint64[3]"}, {"type": "uint64"}]}]`
|
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"type": "uint64[3]"}, {"type": "uint64"}]}]`
|
||||||
abi, err := JSON(strings.NewReader(definition))
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -423,6 +428,7 @@ func TestMultiReturnWithArray(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiReturnWithStringArray(t *testing.T) {
|
func TestMultiReturnWithStringArray(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]`
|
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]`
|
||||||
abi, err := JSON(strings.NewReader(definition))
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -453,6 +459,7 @@ func TestMultiReturnWithStringArray(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiReturnWithStringSlice(t *testing.T) {
|
func TestMultiReturnWithStringSlice(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]`
|
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]`
|
||||||
abi, err := JSON(strings.NewReader(definition))
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -485,6 +492,7 @@ func TestMultiReturnWithStringSlice(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
|
func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// Similar to TestMultiReturnWithArray, but with a special case in mind:
|
// Similar to TestMultiReturnWithArray, but with a special case in mind:
|
||||||
// values of nested static arrays count towards the size as well, and any element following
|
// values of nested static arrays count towards the size as well, and any element following
|
||||||
// after such nested array argument should be read with the correct offset,
|
// after such nested array argument should be read with the correct offset,
|
||||||
@ -525,6 +533,7 @@ func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnmarshal(t *testing.T) {
|
func TestUnmarshal(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const definition = `[
|
const definition = `[
|
||||||
{ "name" : "int", "type": "function", "outputs": [ { "type": "uint256" } ] },
|
{ "name" : "int", "type": "function", "outputs": [ { "type": "uint256" } ] },
|
||||||
{ "name" : "bool", "type": "function", "outputs": [ { "type": "bool" } ] },
|
{ "name" : "bool", "type": "function", "outputs": [ { "type": "bool" } ] },
|
||||||
@ -774,6 +783,7 @@ func TestUnmarshal(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnpackTuple(t *testing.T) {
|
func TestUnpackTuple(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const simpleTuple = `[{"name":"tuple","type":"function","outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]`
|
const simpleTuple = `[{"name":"tuple","type":"function","outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]`
|
||||||
abi, err := JSON(strings.NewReader(simpleTuple))
|
abi, err := JSON(strings.NewReader(simpleTuple))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -876,6 +886,7 @@ func TestUnpackTuple(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestOOMMaliciousInput(t *testing.T) {
|
func TestOOMMaliciousInput(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
oomTests := []unpackTest{
|
oomTests := []unpackTest{
|
||||||
{
|
{
|
||||||
def: `[{"type": "uint8[]"}]`,
|
def: `[{"type": "uint8[]"}]`,
|
||||||
@ -946,6 +957,7 @@ func TestOOMMaliciousInput(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestPackAndUnpackIncompatibleNumber(t *testing.T) {
|
func TestPackAndUnpackIncompatibleNumber(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var encodeABI Arguments
|
var encodeABI Arguments
|
||||||
uint256Ty, err := NewType("uint256", "", nil)
|
uint256Ty, err := NewType("uint256", "", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -24,6 +24,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestTextHash(t *testing.T) {
|
func TestTextHash(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
hash := TextHash([]byte("Hello Joe"))
|
hash := TextHash([]byte("Hello Joe"))
|
||||||
want := hexutil.MustDecode("0xa080337ae51c4e064c189e113edd0ba391df9206e2f49db658bb32cf2911730b")
|
want := hexutil.MustDecode("0xa080337ae51c4e064c189e113edd0ba391df9206e2f49db658bb32cf2911730b")
|
||||||
if !bytes.Equal(hash, want) {
|
if !bytes.Equal(hash, want) {
|
||||||
|
@ -25,6 +25,7 @@ import (
|
|||||||
// Tests that HD derivation paths can be correctly parsed into our internal binary
|
// Tests that HD derivation paths can be correctly parsed into our internal binary
|
||||||
// representation.
|
// representation.
|
||||||
func TestHDPathParsing(t *testing.T) {
|
func TestHDPathParsing(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
input string
|
input string
|
||||||
output DerivationPath
|
output DerivationPath
|
||||||
@ -89,6 +90,7 @@ func testDerive(t *testing.T, next func() DerivationPath, expected []string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestHdPathIteration(t *testing.T) {
|
func TestHdPathIteration(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testDerive(t, DefaultIterator(DefaultBaseDerivationPath),
|
testDerive(t, DefaultIterator(DefaultBaseDerivationPath),
|
||||||
[]string{
|
[]string{
|
||||||
"m/44'/60'/0'/0/0", "m/44'/60'/0'/0/1",
|
"m/44'/60'/0'/0/0", "m/44'/60'/0'/0/1",
|
||||||
|
@ -68,7 +68,7 @@ func waitWatcherStart(ks *KeyStore) bool {
|
|||||||
|
|
||||||
func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
|
func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
|
||||||
var list []accounts.Account
|
var list []accounts.Account
|
||||||
for t0 := time.Now(); time.Since(t0) < 5*time.Second; time.Sleep(200 * time.Millisecond) {
|
for t0 := time.Now(); time.Since(t0) < 5*time.Second; time.Sleep(100 * time.Millisecond) {
|
||||||
list = ks.Accounts()
|
list = ks.Accounts()
|
||||||
if reflect.DeepEqual(list, wantAccounts) {
|
if reflect.DeepEqual(list, wantAccounts) {
|
||||||
// ks should have also received change notifications
|
// ks should have also received change notifications
|
||||||
@ -152,6 +152,7 @@ func TestWatchNoDir(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCacheInitialReload(t *testing.T) {
|
func TestCacheInitialReload(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
cache, _ := newAccountCache(cachetestDir)
|
cache, _ := newAccountCache(cachetestDir)
|
||||||
accounts := cache.accounts()
|
accounts := cache.accounts()
|
||||||
if !reflect.DeepEqual(accounts, cachetestAccounts) {
|
if !reflect.DeepEqual(accounts, cachetestAccounts) {
|
||||||
@ -160,6 +161,7 @@ func TestCacheInitialReload(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCacheAddDeleteOrder(t *testing.T) {
|
func TestCacheAddDeleteOrder(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
cache, _ := newAccountCache("testdata/no-such-dir")
|
cache, _ := newAccountCache("testdata/no-such-dir")
|
||||||
cache.watcher.running = true // prevent unexpected reloads
|
cache.watcher.running = true // prevent unexpected reloads
|
||||||
|
|
||||||
@ -244,6 +246,7 @@ func TestCacheAddDeleteOrder(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCacheFind(t *testing.T) {
|
func TestCacheFind(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
dir := filepath.Join("testdata", "dir")
|
dir := filepath.Join("testdata", "dir")
|
||||||
cache, _ := newAccountCache(dir)
|
cache, _ := newAccountCache(dir)
|
||||||
cache.watcher.running = true // prevent unexpected reloads
|
cache.watcher.running = true // prevent unexpected reloads
|
||||||
@ -350,7 +353,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
// needed so that modTime of `file` is different to its current value after forceCopyFile
|
// needed so that modTime of `file` is different to its current value after forceCopyFile
|
||||||
time.Sleep(time.Second)
|
os.Chtimes(file, time.Now().Add(-time.Second), time.Now().Add(-time.Second))
|
||||||
|
|
||||||
// Now replace file contents
|
// Now replace file contents
|
||||||
if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
|
if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
|
||||||
@ -366,7 +369,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// needed so that modTime of `file` is different to its current value after forceCopyFile
|
// needed so that modTime of `file` is different to its current value after forceCopyFile
|
||||||
time.Sleep(time.Second)
|
os.Chtimes(file, time.Now().Add(-time.Second), time.Now().Add(-time.Second))
|
||||||
|
|
||||||
// Now replace file contents again
|
// Now replace file contents again
|
||||||
if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
|
if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
|
||||||
@ -382,7 +385,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// needed so that modTime of `file` is different to its current value after os.WriteFile
|
// needed so that modTime of `file` is different to its current value after os.WriteFile
|
||||||
time.Sleep(time.Second)
|
os.Chtimes(file, time.Now().Add(-time.Second), time.Now().Add(-time.Second))
|
||||||
|
|
||||||
// Now replace file contents with crap
|
// Now replace file contents with crap
|
||||||
if err := os.WriteFile(file, []byte("foo"), 0600); err != nil {
|
if err := os.WriteFile(file, []byte("foo"), 0600); err != nil {
|
||||||
|
@ -16,10 +16,19 @@
|
|||||||
|
|
||||||
package keystore
|
package keystore
|
||||||
|
|
||||||
import "testing"
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
func Fuzz(f *testing.F) {
|
func FuzzPassword(f *testing.F) {
|
||||||
f.Fuzz(func(t *testing.T, data []byte) {
|
f.Fuzz(func(t *testing.T, password string) {
|
||||||
fuzz(data)
|
ks := NewKeyStore(t.TempDir(), LightScryptN, LightScryptP)
|
||||||
|
a, err := ks.NewAccount(password)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := ks.Unlock(a, password); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
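The keystore fuzzer above now exercises the real NewAccount/Unlock round trip with the fuzzed password instead of calling the old fuzz(data) shim. Under the standard Go fuzzing workflow it would be invoked with something like the following (the package path is assumed to be accounts/keystore in this tree):

go test -run=NONE -fuzz=FuzzPassword ./accounts/keystore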
@ -36,6 +36,7 @@ import (
|
|||||||
var testSigData = make([]byte, 32)
|
var testSigData = make([]byte, 32)
|
||||||
|
|
||||||
func TestKeyStore(t *testing.T) {
|
func TestKeyStore(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
dir, ks := tmpKeyStore(t, true)
|
dir, ks := tmpKeyStore(t, true)
|
||||||
|
|
||||||
a, err := ks.NewAccount("foo")
|
a, err := ks.NewAccount("foo")
|
||||||
@ -70,6 +71,7 @@ func TestKeyStore(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestSign(t *testing.T) {
|
func TestSign(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStore(t, true)
|
_, ks := tmpKeyStore(t, true)
|
||||||
|
|
||||||
pass := "" // not used but required by API
|
pass := "" // not used but required by API
|
||||||
@ -86,6 +88,7 @@ func TestSign(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestSignWithPassphrase(t *testing.T) {
|
func TestSignWithPassphrase(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStore(t, true)
|
_, ks := tmpKeyStore(t, true)
|
||||||
|
|
||||||
pass := "passwd"
|
pass := "passwd"
|
||||||
@ -280,6 +283,7 @@ type walletEvent struct {
|
|||||||
// Tests that wallet notifications and correctly fired when accounts are added
|
// Tests that wallet notifications and correctly fired when accounts are added
|
||||||
// or deleted from the keystore.
|
// or deleted from the keystore.
|
||||||
func TestWalletNotifications(t *testing.T) {
|
func TestWalletNotifications(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStore(t, false)
|
_, ks := tmpKeyStore(t, false)
|
||||||
|
|
||||||
// Subscribe to the wallet feed and collect events.
|
// Subscribe to the wallet feed and collect events.
|
||||||
@ -341,6 +345,7 @@ func TestWalletNotifications(t *testing.T) {
|
|||||||
|
|
||||||
// TestImportExport tests the import functionality of a keystore.
|
// TestImportExport tests the import functionality of a keystore.
|
||||||
func TestImportECDSA(t *testing.T) {
|
func TestImportECDSA(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStore(t, true)
|
_, ks := tmpKeyStore(t, true)
|
||||||
key, err := crypto.GenerateKey()
|
key, err := crypto.GenerateKey()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -359,6 +364,7 @@ func TestImportECDSA(t *testing.T) {
|
|||||||
|
|
||||||
// TestImportECDSA tests the import and export functionality of a keystore.
|
// TestImportECDSA tests the import and export functionality of a keystore.
|
||||||
func TestImportExport(t *testing.T) {
|
func TestImportExport(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStore(t, true)
|
_, ks := tmpKeyStore(t, true)
|
||||||
acc, err := ks.NewAccount("old")
|
acc, err := ks.NewAccount("old")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -387,6 +393,7 @@ func TestImportExport(t *testing.T) {
|
|||||||
// TestImportRace tests the keystore on races.
|
// TestImportRace tests the keystore on races.
|
||||||
// This test should fail under -race if importing races.
|
// This test should fail under -race if importing races.
|
||||||
func TestImportRace(t *testing.T) {
|
func TestImportRace(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStore(t, true)
|
_, ks := tmpKeyStore(t, true)
|
||||||
acc, err := ks.NewAccount("old")
|
acc, err := ks.NewAccount("old")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -136,7 +136,7 @@ func (ks keyStorePassphrase) JoinPath(filename string) string {
|
|||||||
return filepath.Join(ks.keysDirPath, filename)
|
return filepath.Join(ks.keysDirPath, filename)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Encryptdata encrypts the data given as 'data' with the password 'auth'.
|
// EncryptDataV3 encrypts the data given as 'data' with the password 'auth'.
|
||||||
func EncryptDataV3(data, auth []byte, scryptN, scryptP int) (CryptoJSON, error) {
|
func EncryptDataV3(data, auth []byte, scryptN, scryptP int) (CryptoJSON, error) {
|
||||||
salt := make([]byte, 32)
|
salt := make([]byte, 32)
|
||||||
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
|
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
|
||||||
|
@ -30,6 +30,7 @@ const (
|
|||||||
|
|
||||||
// Tests that a json key file can be decrypted and encrypted in multiple rounds.
|
// Tests that a json key file can be decrypted and encrypted in multiple rounds.
|
||||||
func TestKeyEncryptDecrypt(t *testing.T) {
|
func TestKeyEncryptDecrypt(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
keyjson, err := os.ReadFile("testdata/very-light-scrypt.json")
|
keyjson, err := os.ReadFile("testdata/very-light-scrypt.json")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -54,7 +55,7 @@ func TestKeyEncryptDecrypt(t *testing.T) {
|
|||||||
// Recrypt with a new password and start over
|
// Recrypt with a new password and start over
|
||||||
password += "new data appended" // nolint: gosec
|
password += "new data appended" // nolint: gosec
|
||||||
if keyjson, err = EncryptKey(key, password, veryLightScryptN, veryLightScryptP); err != nil {
|
if keyjson, err = EncryptKey(key, password, veryLightScryptN, veryLightScryptP); err != nil {
|
||||||
t.Errorf("test %d: failed to recrypt key %v", i, err)
|
t.Errorf("test %d: failed to re-encrypt key %v", i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -40,6 +40,7 @@ func tmpKeyStoreIface(t *testing.T, encrypted bool) (dir string, ks keyStore) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestKeyStorePlain(t *testing.T) {
|
func TestKeyStorePlain(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStoreIface(t, false)
|
_, ks := tmpKeyStoreIface(t, false)
|
||||||
|
|
||||||
pass := "" // not used but required by API
|
pass := "" // not used but required by API
|
||||||
@ -60,6 +61,7 @@ func TestKeyStorePlain(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestKeyStorePassphrase(t *testing.T) {
|
func TestKeyStorePassphrase(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStoreIface(t, true)
|
_, ks := tmpKeyStoreIface(t, true)
|
||||||
|
|
||||||
pass := "foo"
|
pass := "foo"
|
||||||
@ -80,6 +82,7 @@ func TestKeyStorePassphrase(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestKeyStorePassphraseDecryptionFail(t *testing.T) {
|
func TestKeyStorePassphraseDecryptionFail(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStoreIface(t, true)
|
_, ks := tmpKeyStoreIface(t, true)
|
||||||
|
|
||||||
pass := "foo"
|
pass := "foo"
|
||||||
@ -93,6 +96,7 @@ func TestKeyStorePassphraseDecryptionFail(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestImportPreSaleKey(t *testing.T) {
|
func TestImportPreSaleKey(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
dir, ks := tmpKeyStoreIface(t, true)
|
dir, ks := tmpKeyStoreIface(t, true)
|
||||||
|
|
||||||
// file content of a presale key file generated with:
|
// file content of a presale key file generated with:
|
||||||
|
@ -125,7 +125,7 @@ func (w *watcher) loop() {
|
|||||||
if !ok {
|
if !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
log.Info("Filsystem watcher error", "err", err)
|
log.Info("Filesystem watcher error", "err", err)
|
||||||
case <-debounce.C:
|
case <-debounce.C:
|
||||||
w.ac.scanAccounts()
|
w.ac.scanAccounts()
|
||||||
rescanTriggered = false
|
rescanTriggered = false
|
||||||
|
@ -98,6 +98,9 @@ func NewManager(config *Config, backends ...Backend) *Manager {
|
|||||||
|
|
||||||
// Close terminates the account manager's internal notification processes.
|
// Close terminates the account manager's internal notification processes.
|
||||||
func (am *Manager) Close() error {
|
func (am *Manager) Close() error {
|
||||||
|
for _, w := range am.wallets {
|
||||||
|
w.Close()
|
||||||
|
}
|
||||||
errc := make(chan error)
|
errc := make(chan error)
|
||||||
am.quit <- errc
|
am.quit <- errc
|
||||||
return <-errc
|
return <-errc
|
||||||
|
@ -776,16 +776,16 @@ func (w *Wallet) findAccountPath(account accounts.Account) (accounts.DerivationP
|
|||||||
return nil, fmt.Errorf("scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
|
return nil, fmt.Errorf("scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
|
||||||
}
|
}
|
||||||
|
|
||||||
parts := strings.SplitN(account.URL.Path, "/", 2)
|
url, path, found := strings.Cut(account.URL.Path, "/")
|
||||||
if len(parts) != 2 {
|
if !found {
|
||||||
return nil, fmt.Errorf("invalid URL format: %s", account.URL)
|
return nil, fmt.Errorf("invalid URL format: %s", account.URL)
|
||||||
}
|
}
|
||||||
|
|
||||||
if parts[0] != fmt.Sprintf("%x", w.PublicKey[1:3]) {
|
if url != fmt.Sprintf("%x", w.PublicKey[1:3]) {
|
||||||
return nil, fmt.Errorf("URL %s is not for this wallet", account.URL)
|
return nil, fmt.Errorf("URL %s is not for this wallet", account.URL)
|
||||||
}
|
}
|
||||||
|
|
||||||
return accounts.ParseDerivationPath(parts[1])
|
return accounts.ParseDerivationPath(path)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Session represents a secured communication session with the wallet.
|
// Session represents a secured communication session with the wallet.
|
||||||
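findAccountPath above now uses strings.Cut instead of strings.SplitN(..., 2); Cut splits around the first separator only and reports whether it was found, which removes the length check on the slice. A quick illustration (the URL path below is made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// before = wallet identifier, after = derivation path, found = separator present.
	before, after, found := strings.Cut("1d2f/44'/60'/0'/0/0", "/")
	fmt.Println(before, after, found)
	// 1d2f 44'/60'/0'/0/0 true
}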
|
@ -21,6 +21,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestURLParsing(t *testing.T) {
|
func TestURLParsing(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
url, err := parseURL("https://ethereum.org")
|
url, err := parseURL("https://ethereum.org")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("unexpected error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
@ -40,6 +41,7 @@ func TestURLParsing(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestURLString(t *testing.T) {
|
func TestURLString(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
url := URL{Scheme: "https", Path: "ethereum.org"}
|
url := URL{Scheme: "https", Path: "ethereum.org"}
|
||||||
if url.String() != "https://ethereum.org" {
|
if url.String() != "https://ethereum.org" {
|
||||||
t.Errorf("expected: %v, got: %v", "https://ethereum.org", url.String())
|
t.Errorf("expected: %v, got: %v", "https://ethereum.org", url.String())
|
||||||
@ -52,10 +54,11 @@ func TestURLString(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestURLMarshalJSON(t *testing.T) {
|
func TestURLMarshalJSON(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
url := URL{Scheme: "https", Path: "ethereum.org"}
|
url := URL{Scheme: "https", Path: "ethereum.org"}
|
||||||
json, err := url.MarshalJSON()
|
json, err := url.MarshalJSON()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("unexpcted error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
}
|
}
|
||||||
if string(json) != "\"https://ethereum.org\"" {
|
if string(json) != "\"https://ethereum.org\"" {
|
||||||
t.Errorf("expected: %v, got: %v", "\"https://ethereum.org\"", string(json))
|
t.Errorf("expected: %v, got: %v", "\"https://ethereum.org\"", string(json))
|
||||||
@ -63,10 +66,11 @@ func TestURLMarshalJSON(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestURLUnmarshalJSON(t *testing.T) {
|
func TestURLUnmarshalJSON(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
url := &URL{}
|
url := &URL{}
|
||||||
err := url.UnmarshalJSON([]byte("\"https://ethereum.org\""))
|
err := url.UnmarshalJSON([]byte("\"https://ethereum.org\""))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("unexpcted error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
}
|
}
|
||||||
if url.Scheme != "https" {
|
if url.Scheme != "https" {
|
||||||
t.Errorf("expected: %v, got: %v", "https", url.Scheme)
|
t.Errorf("expected: %v, got: %v", "https", url.Scheme)
|
||||||
@ -77,6 +81,7 @@ func TestURLUnmarshalJSON(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestURLComparison(t *testing.T) {
|
func TestURLComparison(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
urlA URL
|
urlA URL
|
||||||
urlB URL
|
urlB URL
|
||||||
|
@ -483,6 +483,10 @@ func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
|
|||||||
w.stateLock.Lock()
|
w.stateLock.Lock()
|
||||||
defer w.stateLock.Unlock()
|
defer w.stateLock.Unlock()
|
||||||
|
|
||||||
|
if w.device == nil {
|
||||||
|
return accounts.Account{}, accounts.ErrWalletClosed
|
||||||
|
}
|
||||||
|
|
||||||
if _, ok := w.paths[address]; !ok {
|
if _, ok := w.paths[address]; !ok {
|
||||||
w.accounts = append(w.accounts, account)
|
w.accounts = append(w.accounts, account)
|
||||||
w.paths[address] = make(accounts.DerivationPath, len(path))
|
w.paths[address] = make(accounts.DerivationPath, len(path))
|
||||||
|

125
beacon/light/canonical.go
Normal file
@ -0,0 +1,125 @@
|
|||||||
|
// Copyright 2023 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package light
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common/lru"
|
||||||
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// canonicalStore stores instances of the given type in a database and caches
|
||||||
|
// them in memory, associated with a continuous range of period numbers.
|
||||||
|
// Note: canonicalStore is not thread safe and it is the caller's responsibility
|
||||||
|
// to avoid concurrent access.
|
||||||
|
type canonicalStore[T any] struct {
|
||||||
|
keyPrefix []byte
|
||||||
|
periods periodRange
|
||||||
|
cache *lru.Cache[uint64, T]
|
||||||
|
}
|
||||||
|
|
||||||
|
// newCanonicalStore creates a new canonicalStore and loads all keys associated
|
||||||
|
// with the keyPrefix in order to determine the ranges available in the database.
|
||||||
|
func newCanonicalStore[T any](db ethdb.Iteratee, keyPrefix []byte) (*canonicalStore[T], error) {
|
||||||
|
cs := &canonicalStore[T]{
|
||||||
|
keyPrefix: keyPrefix,
|
||||||
|
cache: lru.NewCache[uint64, T](100),
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
iter = db.NewIterator(keyPrefix, nil)
|
||||||
|
kl = len(keyPrefix)
|
||||||
|
first = true
|
||||||
|
)
|
||||||
|
defer iter.Release()
|
||||||
|
|
||||||
|
for iter.Next() {
|
||||||
|
if len(iter.Key()) != kl+8 {
|
||||||
|
log.Warn("Invalid key length in the canonical chain database", "key", fmt.Sprintf("%#x", iter.Key()))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
period := binary.BigEndian.Uint64(iter.Key()[kl : kl+8])
|
||||||
|
if first {
|
||||||
|
cs.periods.Start = period
|
||||||
|
} else if cs.periods.End != period {
|
||||||
|
return nil, fmt.Errorf("gap in the canonical chain database between periods %d and %d", cs.periods.End, period-1)
|
||||||
|
}
|
||||||
|
first = false
|
||||||
|
cs.periods.End = period + 1
|
||||||
|
}
|
||||||
|
return cs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// databaseKey returns the database key belonging to the given period.
|
||||||
|
func (cs *canonicalStore[T]) databaseKey(period uint64) []byte {
|
||||||
|
return binary.BigEndian.AppendUint64(append([]byte{}, cs.keyPrefix...), period)
|
||||||
|
}
|
||||||
|
|
||||||
|
// add adds the given item to the database. It also ensures that the range remains
|
||||||
|
// continuous. Can be used either with a batch or database backend.
|
||||||
|
func (cs *canonicalStore[T]) add(backend ethdb.KeyValueWriter, period uint64, value T) error {
|
||||||
|
if !cs.periods.canExpand(period) {
|
||||||
|
return fmt.Errorf("period expansion is not allowed, first: %d, next: %d, period: %d", cs.periods.Start, cs.periods.End, period)
|
||||||
|
}
|
||||||
|
enc, err := rlp.EncodeToBytes(value)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := backend.Put(cs.databaseKey(period), enc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cs.cache.Add(period, value)
|
||||||
|
cs.periods.expand(period)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// deleteFrom removes items starting from the given period.
|
||||||
|
func (cs *canonicalStore[T]) deleteFrom(db ethdb.KeyValueWriter, fromPeriod uint64) (deleted periodRange) {
|
||||||
|
keepRange, deleteRange := cs.periods.split(fromPeriod)
|
||||||
|
deleteRange.each(func(period uint64) {
|
||||||
|
db.Delete(cs.databaseKey(period))
|
||||||
|
cs.cache.Remove(period)
|
||||||
|
})
|
||||||
|
cs.periods = keepRange
|
||||||
|
return deleteRange
|
||||||
|
}
|
||||||
|
|
||||||
|
// get returns the item at the given period or the null value of the given type
|
||||||
|
// if no item is present.
|
||||||
|
func (cs *canonicalStore[T]) get(backend ethdb.KeyValueReader, period uint64) (T, bool) {
|
||||||
|
var null, value T
|
||||||
|
if !cs.periods.contains(period) {
|
||||||
|
return null, false
|
||||||
|
}
|
||||||
|
if value, ok := cs.cache.Get(period); ok {
|
||||||
|
return value, true
|
||||||
|
}
|
||||||
|
enc, err := backend.Get(cs.databaseKey(period))
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Canonical store value not found", "period", period, "start", cs.periods.Start, "end", cs.periods.End)
|
||||||
|
return null, false
|
||||||
|
}
|
||||||
|
if err := rlp.DecodeBytes(enc, &value); err != nil {
|
||||||
|
log.Error("Error decoding canonical store value", "error", err)
|
||||||
|
return null, false
|
||||||
|
}
|
||||||
|
cs.cache.Add(period, value)
|
||||||
|
return value, true
|
||||||
|
}
|
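canonicalStore keeps one RLP-encoded value per sync period under keyPrefix+period and refuses any write that would leave a hole in the period range. A minimal in-package sketch of that behaviour, assuming an in-memory backend; the key prefix and the stored uint64 values are made up for the example and are not part of the change set:

package light

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// exampleCanonicalStore is illustrative only and not part of the change set.
func exampleCanonicalStore() error {
	db := memorydb.New()
	prefix := []byte("example-") // hypothetical key prefix, one per stored type
	cs, err := newCanonicalStore[uint64](db, prefix)
	if err != nil {
		return err
	}
	// The range has to stay continuous: 5, 6 and then 4 are accepted,
	// while 8 would fail canExpand and be rejected by add.
	for _, p := range []uint64{5, 6, 4} {
		if err := cs.add(db, p, p*100); err != nil {
			return err
		}
	}
	if v, ok := cs.get(db, 6); ok {
		fmt.Println("period 6 ->", v) // 600
	}
	// Deleting from period 6 shrinks the stored range back to [4, 6).
	cs.deleteFrom(db, 6)
	return nil
}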
514  beacon/light/committee_chain.go  Normal file
@@ -0,0 +1,514 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package light

import (
	"errors"
	"fmt"
	"math"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/beacon/params"
	"github.com/ethereum/go-ethereum/beacon/types"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/lru"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
)

var (
	ErrNeedCommittee      = errors.New("sync committee required")
	ErrInvalidUpdate      = errors.New("invalid committee update")
	ErrInvalidPeriod      = errors.New("invalid update period")
	ErrWrongCommitteeRoot = errors.New("wrong committee root")
	ErrCannotReorg        = errors.New("can not reorg committee chain")
)

// CommitteeChain is a passive data structure that can validate, hold and update
// a chain of beacon light sync committees and updates. It requires at least one
// externally set fixed committee root at the beginning of the chain which can
// be set either based on a BootstrapData or a trusted source (a local beacon
// full node). This makes the structure useful for both light client and light
// server setups.
//
// It always maintains the following consistency constraints:
//   - a committee can only be present if its root hash matches an existing fixed
//     root or if it is proven by an update at the previous period
//   - an update can only be present if a committee is present at the same period
//     and the update signature is valid and has enough participants.
//     The committee at the next period (proven by the update) should also be
//     present (note that this means they can only be added together if neither
//     is present yet). If a fixed root is present at the next period then the
//     update can only be present if it proves the same committee root.
//
// Once synced to the current sync period, CommitteeChain can also validate
// signed beacon headers.
type CommitteeChain struct {
	// chainmu guards against concurrent access to the canonicalStore structures
	// (updates, committees, fixedCommitteeRoots) and ensures that they stay consistent
	// with each other and with committeeCache.
	chainmu             sync.RWMutex
	db                  ethdb.KeyValueStore
	updates             *canonicalStore[*types.LightClientUpdate]
	committees          *canonicalStore[*types.SerializedSyncCommittee]
	fixedCommitteeRoots *canonicalStore[common.Hash]
	committeeCache      *lru.Cache[uint64, syncCommittee] // cache deserialized committees

	clock       mclock.Clock         // monotonic clock (simulated clock in tests)
	unixNano    func() int64         // system clock (simulated clock in tests)
	sigVerifier committeeSigVerifier // BLS sig verifier (dummy verifier in tests)

	config             *types.ChainConfig
	signerThreshold    int
	minimumUpdateScore types.UpdateScore
	enforceTime        bool // enforceTime specifies whether the age of a signed header should be checked
}

// NewCommitteeChain creates a new CommitteeChain.
func NewCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool) *CommitteeChain {
	return newCommitteeChain(db, config, signerThreshold, enforceTime, blsVerifier{}, &mclock.System{}, func() int64 { return time.Now().UnixNano() })
}

// newCommitteeChain creates a new CommitteeChain with the option of replacing the
// clock source and signature verification for testing purposes.
func newCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool, sigVerifier committeeSigVerifier, clock mclock.Clock, unixNano func() int64) *CommitteeChain {
	s := &CommitteeChain{
		committeeCache:  lru.NewCache[uint64, syncCommittee](10),
		db:              db,
		sigVerifier:     sigVerifier,
		clock:           clock,
		unixNano:        unixNano,
		config:          config,
		signerThreshold: signerThreshold,
		enforceTime:     enforceTime,
		minimumUpdateScore: types.UpdateScore{
			SignerCount:    uint32(signerThreshold),
			SubPeriodIndex: params.SyncPeriodLength / 16,
		},
	}

	var err1, err2, err3 error
	if s.fixedCommitteeRoots, err1 = newCanonicalStore[common.Hash](db, rawdb.FixedCommitteeRootKey); err1 != nil {
		log.Error("Error creating fixed committee root store", "error", err1)
	}
	if s.committees, err2 = newCanonicalStore[*types.SerializedSyncCommittee](db, rawdb.SyncCommitteeKey); err2 != nil {
		log.Error("Error creating committee store", "error", err2)
	}
	if s.updates, err3 = newCanonicalStore[*types.LightClientUpdate](db, rawdb.BestUpdateKey); err3 != nil {
		log.Error("Error creating update store", "error", err3)
	}
	if err1 != nil || err2 != nil || err3 != nil || !s.checkConstraints() {
		log.Info("Resetting invalid committee chain")
		s.Reset()
	}
	// roll back invalid updates (might be necessary if forks have been changed since last time)
	for !s.updates.periods.isEmpty() {
		update, ok := s.updates.get(s.db, s.updates.periods.End-1)
		if !ok {
			log.Error("Sync committee update missing", "period", s.updates.periods.End-1)
			s.Reset()
			break
		}
		if valid, err := s.verifyUpdate(update); err != nil {
			log.Error("Error validating update", "period", s.updates.periods.End-1, "error", err)
		} else if valid {
			break
		}
		if err := s.rollback(s.updates.periods.End); err != nil {
			log.Error("Error writing batch into chain database", "error", err)
		}
	}
	if !s.committees.periods.isEmpty() {
		log.Trace("Sync committee chain loaded", "first period", s.committees.periods.Start, "last period", s.committees.periods.End-1)
	}
	return s
}

// checkConstraints checks committee chain validity constraints
func (s *CommitteeChain) checkConstraints() bool {
	isNotInFixedCommitteeRootRange := func(r periodRange) bool {
		return s.fixedCommitteeRoots.periods.isEmpty() ||
			r.Start < s.fixedCommitteeRoots.periods.Start ||
			r.Start >= s.fixedCommitteeRoots.periods.End
	}

	valid := true
	if !s.updates.periods.isEmpty() {
		if isNotInFixedCommitteeRootRange(s.updates.periods) {
			log.Error("Start update is not in the fixed roots range")
			valid = false
		}
		if s.committees.periods.Start > s.updates.periods.Start || s.committees.periods.End <= s.updates.periods.End {
			log.Error("Missing committees in update range")
			valid = false
		}
	}
	if !s.committees.periods.isEmpty() {
		if isNotInFixedCommitteeRootRange(s.committees.periods) {
			log.Error("Start committee is not in the fixed roots range")
			valid = false
		}
		if s.committees.periods.End > s.fixedCommitteeRoots.periods.End && s.committees.periods.End > s.updates.periods.End+1 {
			log.Error("Last committee is neither in the fixed roots range nor proven by updates")
			valid = false
		}
	}
	return valid
}

// Reset resets the committee chain.
func (s *CommitteeChain) Reset() {
	s.chainmu.Lock()
	defer s.chainmu.Unlock()

	if err := s.rollback(0); err != nil {
		log.Error("Error writing batch into chain database", "error", err)
	}
}

// CheckpointInit initializes a CommitteeChain based on the checkpoint.
// Note: if the chain is already initialized and the committees proven by the
// checkpoint do match the existing chain then the chain is retained and the
// new checkpoint becomes fixed.
func (s *CommitteeChain) CheckpointInit(bootstrap *types.BootstrapData) error {
	s.chainmu.Lock()
	defer s.chainmu.Unlock()

	if err := bootstrap.Validate(); err != nil {
		return err
	}

	period := bootstrap.Header.SyncPeriod()
	if err := s.deleteFixedCommitteeRootsFrom(period + 2); err != nil {
		s.Reset()
		return err
	}
	if s.addFixedCommitteeRoot(period, bootstrap.CommitteeRoot) != nil {
		s.Reset()
		if err := s.addFixedCommitteeRoot(period, bootstrap.CommitteeRoot); err != nil {
			s.Reset()
			return err
		}
	}
	if err := s.addFixedCommitteeRoot(period+1, common.Hash(bootstrap.CommitteeBranch[0])); err != nil {
		s.Reset()
		return err
	}
	if err := s.addCommittee(period, bootstrap.Committee); err != nil {
		s.Reset()
		return err
	}
	return nil
}

// addFixedCommitteeRoot sets a fixed committee root at the given period.
// Note that the period where the first committee is added has to have a fixed
// root which can either come from a BootstrapData or a trusted source.
func (s *CommitteeChain) addFixedCommitteeRoot(period uint64, root common.Hash) error {
	if root == (common.Hash{}) {
		return ErrWrongCommitteeRoot
	}

	batch := s.db.NewBatch()
	oldRoot := s.getCommitteeRoot(period)
	if !s.fixedCommitteeRoots.periods.canExpand(period) {
		// Note: the fixed committee root range should always be continuous and
		// therefore the expected syncing method is to forward sync and optionally
		// backward sync periods one by one, starting from a checkpoint. The only
		// case when a root that is not adjacent to the already fixed ones can be
		// fixed is when the same root has already been proven by an update chain.
		// In this case the all roots in between can and should be fixed.
		// This scenario makes sense when a new trusted checkpoint is added to an
		// existing chain, ensuring that it will not be rolled back (might be
		// important in case of low signer participation rate).
		if root != oldRoot {
			return ErrInvalidPeriod
		}
		// if the old root exists and matches the new one then it is guaranteed
		// that the given period is after the existing fixed range and the roots
		// in between can also be fixed.
		for p := s.fixedCommitteeRoots.periods.End; p < period; p++ {
			if err := s.fixedCommitteeRoots.add(batch, p, s.getCommitteeRoot(p)); err != nil {
				return err
			}
		}
	}
	if oldRoot != (common.Hash{}) && (oldRoot != root) {
		// existing old root was different, we have to reorg the chain
		if err := s.rollback(period); err != nil {
			return err
		}
	}
	if err := s.fixedCommitteeRoots.add(batch, period, root); err != nil {
		return err
	}
	if err := batch.Write(); err != nil {
		log.Error("Error writing batch into chain database", "error", err)
		return err
	}
	return nil
}

// deleteFixedCommitteeRootsFrom deletes fixed roots starting from the given period.
// It also maintains chain consistency, meaning that it also deletes updates and
// committees if they are no longer supported by a valid update chain.
func (s *CommitteeChain) deleteFixedCommitteeRootsFrom(period uint64) error {
	if period >= s.fixedCommitteeRoots.periods.End {
		return nil
	}
	batch := s.db.NewBatch()
	s.fixedCommitteeRoots.deleteFrom(batch, period)
	if s.updates.periods.isEmpty() || period <= s.updates.periods.Start {
		// Note: the first period of the update chain should always be fixed so if
		// the fixed root at the first update is removed then the entire update chain
		// and the proven committees have to be removed. Earlier committees in the
		// remaining fixed root range can stay.
		s.updates.deleteFrom(batch, period)
		s.deleteCommitteesFrom(batch, period)
	} else {
		// The update chain stays intact, some previously fixed committee roots might
		// get unfixed but are still proven by the update chain. If there were
		// committees present after the range proven by updates, those should be
		// removed if the belonging fixed roots are also removed.
		fromPeriod := s.updates.periods.End + 1 // not proven by updates
		if period > fromPeriod {
			fromPeriod = period // also not justified by fixed roots
		}
		s.deleteCommitteesFrom(batch, fromPeriod)
	}
	if err := batch.Write(); err != nil {
		log.Error("Error writing batch into chain database", "error", err)
		return err
	}
	return nil
}

// deleteCommitteesFrom deletes committees starting from the given period.
func (s *CommitteeChain) deleteCommitteesFrom(batch ethdb.Batch, period uint64) {
	deleted := s.committees.deleteFrom(batch, period)
	for period := deleted.Start; period < deleted.End; period++ {
		s.committeeCache.Remove(period)
	}
}

// addCommittee adds a committee at the given period if possible.
func (s *CommitteeChain) addCommittee(period uint64, committee *types.SerializedSyncCommittee) error {
	if !s.committees.periods.canExpand(period) {
		return ErrInvalidPeriod
	}
	root := s.getCommitteeRoot(period)
	if root == (common.Hash{}) {
		return ErrInvalidPeriod
	}
	if root != committee.Root() {
		return ErrWrongCommitteeRoot
	}
	if !s.committees.periods.contains(period) {
		if err := s.committees.add(s.db, period, committee); err != nil {
			return err
		}
		s.committeeCache.Remove(period)
	}
	return nil
}

// InsertUpdate adds a new update if possible.
func (s *CommitteeChain) InsertUpdate(update *types.LightClientUpdate, nextCommittee *types.SerializedSyncCommittee) error {
	s.chainmu.Lock()
	defer s.chainmu.Unlock()

	period := update.AttestedHeader.Header.SyncPeriod()
	if !s.updates.periods.canExpand(period) || !s.committees.periods.contains(period) {
		return ErrInvalidPeriod
	}
	if s.minimumUpdateScore.BetterThan(update.Score()) {
		return ErrInvalidUpdate
	}
	oldRoot := s.getCommitteeRoot(period + 1)
	reorg := oldRoot != (common.Hash{}) && oldRoot != update.NextSyncCommitteeRoot
	if oldUpdate, ok := s.updates.get(s.db, period); ok && !update.Score().BetterThan(oldUpdate.Score()) {
		// a better or equal update already exists; no changes, only fail if new one tried to reorg
		if reorg {
			return ErrCannotReorg
		}
		return nil
	}
	if s.fixedCommitteeRoots.periods.contains(period+1) && reorg {
		return ErrCannotReorg
	}
	if ok, err := s.verifyUpdate(update); err != nil {
		return err
	} else if !ok {
		return ErrInvalidUpdate
	}
	addCommittee := !s.committees.periods.contains(period+1) || reorg
	if addCommittee {
		if nextCommittee == nil {
			return ErrNeedCommittee
		}
		if nextCommittee.Root() != update.NextSyncCommitteeRoot {
			return ErrWrongCommitteeRoot
		}
	}
	if reorg {
		if err := s.rollback(period + 1); err != nil {
			return err
		}
	}
	batch := s.db.NewBatch()
	if addCommittee {
		if err := s.committees.add(batch, period+1, nextCommittee); err != nil {
			return err
		}
		s.committeeCache.Remove(period + 1)
	}
	if err := s.updates.add(batch, period, update); err != nil {
		return err
	}
	if err := batch.Write(); err != nil {
		log.Error("Error writing batch into chain database", "error", err)
		return err
	}
	log.Info("Inserted new committee update", "period", period, "next committee root", update.NextSyncCommitteeRoot)
	return nil
}

// NextSyncPeriod returns the next period where an update can be added and also
// whether the chain is initialized at all.
func (s *CommitteeChain) NextSyncPeriod() (uint64, bool) {
	s.chainmu.RLock()
	defer s.chainmu.RUnlock()

	if s.committees.periods.isEmpty() {
		return 0, false
	}
	if !s.updates.periods.isEmpty() {
		return s.updates.periods.End, true
	}
	return s.committees.periods.End - 1, true
}

// rollback removes all committees and fixed roots from the given period and updates
// starting from the previous period.
func (s *CommitteeChain) rollback(period uint64) error {
	max := s.updates.periods.End + 1
	if s.committees.periods.End > max {
		max = s.committees.periods.End
	}
	if s.fixedCommitteeRoots.periods.End > max {
		max = s.fixedCommitteeRoots.periods.End
	}
	for max > period {
		max--
		batch := s.db.NewBatch()
		s.deleteCommitteesFrom(batch, max)
		s.fixedCommitteeRoots.deleteFrom(batch, max)
		if max > 0 {
			s.updates.deleteFrom(batch, max-1)
		}
		if err := batch.Write(); err != nil {
			log.Error("Error writing batch into chain database", "error", err)
			return err
		}
	}
	return nil
}

// getCommitteeRoot returns the committee root at the given period, either fixed,
// proven by a previous update or both. It returns an empty hash if the committee
// root is unknown.
func (s *CommitteeChain) getCommitteeRoot(period uint64) common.Hash {
	if root, ok := s.fixedCommitteeRoots.get(s.db, period); ok || period == 0 {
		return root
	}
	if update, ok := s.updates.get(s.db, period-1); ok {
		return update.NextSyncCommitteeRoot
	}
	return common.Hash{}
}

// getSyncCommittee returns the deserialized sync committee at the given period.
func (s *CommitteeChain) getSyncCommittee(period uint64) (syncCommittee, error) {
	if c, ok := s.committeeCache.Get(period); ok {
		return c, nil
	}
	if sc, ok := s.committees.get(s.db, period); ok {
		c, err := s.sigVerifier.deserializeSyncCommittee(sc)
		if err != nil {
			return nil, fmt.Errorf("Sync committee #%d deserialization error: %v", period, err)
		}
		s.committeeCache.Add(period, c)
		return c, nil
	}
	return nil, fmt.Errorf("Missing serialized sync committee #%d", period)
}

// VerifySignedHeader returns true if the given signed header has a valid signature
// according to the local committee chain. The caller should ensure that the
// committees advertised by the same source where the signed header came from are
// synced before verifying the signature.
// The age of the header is also returned (the time elapsed since the beginning
// of the given slot, according to the local system clock). If enforceTime is
// true then negative age (future) headers are rejected.
func (s *CommitteeChain) VerifySignedHeader(head types.SignedHeader) (bool, time.Duration, error) {
	s.chainmu.RLock()
	defer s.chainmu.RUnlock()

	return s.verifySignedHeader(head)
}

func (s *CommitteeChain) verifySignedHeader(head types.SignedHeader) (bool, time.Duration, error) {
	var age time.Duration
	now := s.unixNano()
	if head.Header.Slot < (uint64(now-math.MinInt64)/uint64(time.Second)-s.config.GenesisTime)/12 {
		age = time.Duration(now - int64(time.Second)*int64(s.config.GenesisTime+head.Header.Slot*12))
	} else {
		age = time.Duration(math.MinInt64)
	}
	if s.enforceTime && age < 0 {
		return false, age, nil
	}
	committee, err := s.getSyncCommittee(types.SyncPeriod(head.SignatureSlot))
	if err != nil {
		return false, 0, err
	}
	if committee == nil {
		return false, age, nil
	}
	if signingRoot, err := s.config.Forks.SigningRoot(head.Header); err == nil {
		return s.sigVerifier.verifySignature(committee, signingRoot, &head.Signature), age, nil
	}
	return false, age, nil
}

// verifyUpdate checks whether the header signature is correct and the update
// fits into the specified constraints (assumes that the update has been
// successfully validated previously)
func (s *CommitteeChain) verifyUpdate(update *types.LightClientUpdate) (bool, error) {
	// Note: SignatureSlot determines the sync period of the committee used for signature
	// verification. Though in reality SignatureSlot is always bigger than update.Header.Slot,
	// setting them as equal here enforces the rule that they have to be in the same sync
	// period in order for the light client update proof to be meaningful.
	ok, age, err := s.verifySignedHeader(update.AttestedHeader)
	if age < 0 {
		log.Warn("Future committee update received", "age", age)
	}
	return ok, err
}
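The exported surface of CommitteeChain is small: seed it with a checkpoint, feed it one update per period, then verify signed heads. A hedged sketch of that call sequence; the bootstrap data, update, next committee and signed header are assumed to come from some beacon light client data source, and the signer threshold of 300 is only an example value:

package light

import (
	"errors"

	"github.com/ethereum/go-ethereum/beacon/types"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// exampleCommitteeChain is illustrative only and not part of the change set.
func exampleCommitteeChain(config *types.ChainConfig, bootstrap *types.BootstrapData,
	update *types.LightClientUpdate, nextCommittee *types.SerializedSyncCommittee,
	head types.SignedHeader) error {
	// 300 is an assumed signer threshold; enforceTime rejects headers from the future.
	chain := NewCommitteeChain(memorydb.New(), config, 300, true)

	// Anchor the chain at a trusted checkpoint; this fixes the committee roots
	// at the checkpoint period and the one after it.
	if err := chain.CheckpointInit(bootstrap); err != nil {
		return err
	}
	// Extend the chain one period at a time. ErrNeedCommittee tells the caller
	// that the next serialized committee has to be supplied with the update.
	if err := chain.InsertUpdate(update, nextCommittee); err != nil {
		return err
	}
	// Once the signing committee is known, signed heads can be checked.
	ok, age, err := chain.VerifySignedHeader(head)
	if err != nil {
		return err
	}
	if !ok {
		return errors.New("signature or header age rejected")
	}
	_ = age // time elapsed since the slot started, per the local clock
	return nil
}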
356  beacon/light/committee_chain_test.go  Normal file
@@ -0,0 +1,356 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package light

import (
	"crypto/rand"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/beacon/params"
	"github.com/ethereum/go-ethereum/beacon/types"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

var (
	testGenesis  = newTestGenesis()
	testGenesis2 = newTestGenesis()

	tfBase = newTestForks(testGenesis, types.Forks{
		&types.Fork{Epoch: 0, Version: []byte{0}},
	})
	tfAlternative = newTestForks(testGenesis, types.Forks{
		&types.Fork{Epoch: 0, Version: []byte{0}},
		&types.Fork{Epoch: 0x700, Version: []byte{1}},
	})
	tfAnotherGenesis = newTestForks(testGenesis2, types.Forks{
		&types.Fork{Epoch: 0, Version: []byte{0}},
	})

	tcBase                      = newTestCommitteeChain(nil, tfBase, true, 0, 10, 400, false)
	tcBaseWithInvalidUpdates    = newTestCommitteeChain(tcBase, tfBase, false, 5, 10, 200, false) // signer count too low
	tcBaseWithBetterUpdates     = newTestCommitteeChain(tcBase, tfBase, false, 5, 10, 440, false)
	tcReorgWithWorseUpdates     = newTestCommitteeChain(tcBase, tfBase, true, 5, 10, 400, false)
	tcReorgWithWorseUpdates2    = newTestCommitteeChain(tcBase, tfBase, true, 5, 10, 380, false)
	tcReorgWithBetterUpdates    = newTestCommitteeChain(tcBase, tfBase, true, 5, 10, 420, false)
	tcReorgWithFinalizedUpdates = newTestCommitteeChain(tcBase, tfBase, true, 5, 10, 400, true)
	tcFork                      = newTestCommitteeChain(tcBase, tfAlternative, true, 7, 10, 400, false)
	tcAnotherGenesis            = newTestCommitteeChain(nil, tfAnotherGenesis, true, 0, 10, 400, false)
)

func TestCommitteeChainFixedCommitteeRoots(t *testing.T) {
	for _, reload := range []bool{false, true} {
		c := newCommitteeChainTest(t, tfBase, 300, true)
		c.setClockPeriod(7)
		c.addFixedCommitteeRoot(tcBase, 4, nil)
		c.addFixedCommitteeRoot(tcBase, 5, nil)
		c.addFixedCommitteeRoot(tcBase, 6, nil)
		c.addFixedCommitteeRoot(tcBase, 8, ErrInvalidPeriod) // range has to be continuous
		c.addFixedCommitteeRoot(tcBase, 3, nil)
		c.addFixedCommitteeRoot(tcBase, 2, nil)
		if reload {
			c.reloadChain()
		}
		c.addCommittee(tcBase, 4, nil)
		c.addCommittee(tcBase, 6, ErrInvalidPeriod) // range has to be continuous
		c.addCommittee(tcBase, 5, nil)
		c.addCommittee(tcBase, 6, nil)
		c.addCommittee(tcAnotherGenesis, 3, ErrWrongCommitteeRoot)
		c.addCommittee(tcBase, 3, nil)
		if reload {
			c.reloadChain()
		}
		c.verifyRange(tcBase, 3, 6)
	}
}

func TestCommitteeChainCheckpointSync(t *testing.T) {
	for _, enforceTime := range []bool{false, true} {
		for _, reload := range []bool{false, true} {
			c := newCommitteeChainTest(t, tfBase, 300, enforceTime)
			if enforceTime {
				c.setClockPeriod(6)
			}
			c.insertUpdate(tcBase, 3, true, ErrInvalidPeriod)
			c.addFixedCommitteeRoot(tcBase, 3, nil)
			c.addFixedCommitteeRoot(tcBase, 4, nil)
			c.insertUpdate(tcBase, 4, true, ErrInvalidPeriod) // still no committee
			c.addCommittee(tcBase, 3, nil)
			c.addCommittee(tcBase, 4, nil)
			if reload {
				c.reloadChain()
			}
			c.verifyRange(tcBase, 3, 4)
			c.insertUpdate(tcBase, 3, false, nil)              // update can be added without committee here
			c.insertUpdate(tcBase, 4, false, ErrNeedCommittee) // but not here as committee 5 is not there yet
			c.insertUpdate(tcBase, 4, true, nil)
			c.verifyRange(tcBase, 3, 5)
			c.insertUpdate(tcBaseWithInvalidUpdates, 5, true, ErrInvalidUpdate) // signer count too low
			c.insertUpdate(tcBase, 5, true, nil)
			if reload {
				c.reloadChain()
			}
			if enforceTime {
				c.insertUpdate(tcBase, 6, true, ErrInvalidUpdate) // future update rejected
				c.setClockPeriod(7)
			}
			c.insertUpdate(tcBase, 6, true, nil) // when the time comes it's accepted
			if reload {
				c.reloadChain()
			}
			if enforceTime {
				c.verifyRange(tcBase, 3, 6) // committee 7 is there but still in the future
				c.setClockPeriod(8)
			}
			c.verifyRange(tcBase, 3, 7) // now period 7 can also be verified
			// try reverse syncing an update
			c.insertUpdate(tcBase, 2, false, ErrInvalidPeriod) // fixed committee is needed first
			c.addFixedCommitteeRoot(tcBase, 2, nil)
			c.addCommittee(tcBase, 2, nil)
			c.insertUpdate(tcBase, 2, false, nil)
			c.verifyRange(tcBase, 2, 7)
		}
	}
}

func TestCommitteeChainReorg(t *testing.T) {
	for _, reload := range []bool{false, true} {
		for _, addBetterUpdates := range []bool{false, true} {
			c := newCommitteeChainTest(t, tfBase, 300, true)
			c.setClockPeriod(11)
			c.addFixedCommitteeRoot(tcBase, 3, nil)
			c.addFixedCommitteeRoot(tcBase, 4, nil)
			c.addCommittee(tcBase, 3, nil)
			for period := uint64(3); period < 10; period++ {
				c.insertUpdate(tcBase, period, true, nil)
			}
			if reload {
				c.reloadChain()
			}
			c.verifyRange(tcBase, 3, 10)
			c.insertUpdate(tcReorgWithWorseUpdates, 5, true, ErrCannotReorg)
			c.insertUpdate(tcReorgWithWorseUpdates2, 5, true, ErrCannotReorg)
			if addBetterUpdates {
				// add better updates for the base chain and expect first reorg to fail
				// (only add updates as committees should be the same)
				for period := uint64(5); period < 10; period++ {
					c.insertUpdate(tcBaseWithBetterUpdates, period, false, nil)
				}
				if reload {
					c.reloadChain()
				}
				c.verifyRange(tcBase, 3, 10) // still on the same chain
				c.insertUpdate(tcReorgWithBetterUpdates, 5, true, ErrCannotReorg)
			} else {
				// reorg with better updates
				c.insertUpdate(tcReorgWithBetterUpdates, 5, false, ErrNeedCommittee)
				c.verifyRange(tcBase, 3, 10) // no success yet, still on the base chain
				c.verifyRange(tcReorgWithBetterUpdates, 3, 5)
				c.insertUpdate(tcReorgWithBetterUpdates, 5, true, nil)
				// successful reorg, base chain should only match before the reorg period
				if reload {
					c.reloadChain()
				}
				c.verifyRange(tcBase, 3, 5)
				c.verifyRange(tcReorgWithBetterUpdates, 3, 6)
				for period := uint64(6); period < 10; period++ {
					c.insertUpdate(tcReorgWithBetterUpdates, period, true, nil)
				}
				c.verifyRange(tcReorgWithBetterUpdates, 3, 10)
			}
			// reorg with finalized updates; should succeed even if base chain updates
			// have been improved because a finalized update beats everything else
			c.insertUpdate(tcReorgWithFinalizedUpdates, 5, false, ErrNeedCommittee)
			c.insertUpdate(tcReorgWithFinalizedUpdates, 5, true, nil)
			if reload {
				c.reloadChain()
			}
			c.verifyRange(tcReorgWithFinalizedUpdates, 3, 6)
			for period := uint64(6); period < 10; period++ {
				c.insertUpdate(tcReorgWithFinalizedUpdates, period, true, nil)
			}
			c.verifyRange(tcReorgWithFinalizedUpdates, 3, 10)
		}
	}
}

func TestCommitteeChainFork(t *testing.T) {
	c := newCommitteeChainTest(t, tfAlternative, 300, true)
	c.setClockPeriod(11)
	// trying to sync a chain on an alternative fork with the base chain data
	c.addFixedCommitteeRoot(tcBase, 0, nil)
	c.addFixedCommitteeRoot(tcBase, 1, nil)
	c.addCommittee(tcBase, 0, nil)
	// shared section should sync without errors
	for period := uint64(0); period < 7; period++ {
		c.insertUpdate(tcBase, period, true, nil)
	}
	c.insertUpdate(tcBase, 7, true, ErrInvalidUpdate) // wrong fork
	// committee root #7 is still the same but signatures are already signed with
	// a different fork id so period 7 should only verify on the alternative fork
	c.verifyRange(tcBase, 0, 6)
	c.verifyRange(tcFork, 0, 7)
	for period := uint64(7); period < 10; period++ {
		c.insertUpdate(tcFork, period, true, nil)
	}
	c.verifyRange(tcFork, 0, 10)
	// reload the chain while switching to the base fork
	c.config = tfBase
	c.reloadChain()
	// updates 7..9 should be rolled back now
	c.verifyRange(tcFork, 0, 6) // again, period 7 only verifies on the right fork
	c.verifyRange(tcBase, 0, 7)
	c.insertUpdate(tcFork, 7, true, ErrInvalidUpdate) // wrong fork
	for period := uint64(7); period < 10; period++ {
		c.insertUpdate(tcBase, period, true, nil)
	}
	c.verifyRange(tcBase, 0, 10)
}

type committeeChainTest struct {
	t               *testing.T
	db              *memorydb.Database
	clock           *mclock.Simulated
	config          types.ChainConfig
	signerThreshold int
	enforceTime     bool
	chain           *CommitteeChain
}

func newCommitteeChainTest(t *testing.T, config types.ChainConfig, signerThreshold int, enforceTime bool) *committeeChainTest {
	c := &committeeChainTest{
		t:               t,
		db:              memorydb.New(),
		clock:           &mclock.Simulated{},
		config:          config,
		signerThreshold: signerThreshold,
		enforceTime:     enforceTime,
	}
	c.chain = newCommitteeChain(c.db, &config, signerThreshold, enforceTime, dummyVerifier{}, c.clock, func() int64 { return int64(c.clock.Now()) })
	return c
}

func (c *committeeChainTest) reloadChain() {
	c.chain = newCommitteeChain(c.db, &c.config, c.signerThreshold, c.enforceTime, dummyVerifier{}, c.clock, func() int64 { return int64(c.clock.Now()) })
}

func (c *committeeChainTest) setClockPeriod(period float64) {
	target := mclock.AbsTime(period * float64(time.Second*12*params.SyncPeriodLength))
	wait := time.Duration(target - c.clock.Now())
	if wait < 0 {
		c.t.Fatalf("Invalid setClockPeriod")
	}
	c.clock.Run(wait)
}

func (c *committeeChainTest) addFixedCommitteeRoot(tc *testCommitteeChain, period uint64, expErr error) {
	if err := c.chain.addFixedCommitteeRoot(period, tc.periods[period].committee.Root()); err != expErr {
		c.t.Errorf("Incorrect error output from addFixedCommitteeRoot at period %d (expected %v, got %v)", period, expErr, err)
	}
}

func (c *committeeChainTest) addCommittee(tc *testCommitteeChain, period uint64, expErr error) {
	if err := c.chain.addCommittee(period, tc.periods[period].committee); err != expErr {
		c.t.Errorf("Incorrect error output from addCommittee at period %d (expected %v, got %v)", period, expErr, err)
	}
}

func (c *committeeChainTest) insertUpdate(tc *testCommitteeChain, period uint64, addCommittee bool, expErr error) {
	var committee *types.SerializedSyncCommittee
	if addCommittee {
		committee = tc.periods[period+1].committee
	}
	if err := c.chain.InsertUpdate(tc.periods[period].update, committee); err != expErr {
		c.t.Errorf("Incorrect error output from InsertUpdate at period %d (expected %v, got %v)", period, expErr, err)
	}
}

func (c *committeeChainTest) verifySignedHeader(tc *testCommitteeChain, period float64, expOk bool) {
	slot := uint64(period * float64(params.SyncPeriodLength))
	signedHead := GenerateTestSignedHeader(types.Header{Slot: slot}, &tc.config, tc.periods[types.SyncPeriod(slot)].committee, slot+1, 400)
	if ok, _, _ := c.chain.VerifySignedHeader(signedHead); ok != expOk {
		c.t.Errorf("Incorrect output from VerifySignedHeader at period %f (expected %v, got %v)", period, expOk, ok)
	}
}

func (c *committeeChainTest) verifyRange(tc *testCommitteeChain, begin, end uint64) {
	if begin > 0 {
		c.verifySignedHeader(tc, float64(begin)-0.5, false)
	}
	for period := begin; period <= end; period++ {
		c.verifySignedHeader(tc, float64(period)+0.5, true)
	}
	c.verifySignedHeader(tc, float64(end)+1.5, false)
}

func newTestGenesis() types.ChainConfig {
	var config types.ChainConfig
	rand.Read(config.GenesisValidatorsRoot[:])
	return config
}

func newTestForks(config types.ChainConfig, forks types.Forks) types.ChainConfig {
	for _, fork := range forks {
		config.AddFork(fork.Name, fork.Epoch, fork.Version)
	}
	return config
}

func newTestCommitteeChain(parent *testCommitteeChain, config types.ChainConfig, newCommittees bool, begin, end int, signerCount int, finalizedHeader bool) *testCommitteeChain {
	tc := &testCommitteeChain{
		config: config,
	}
	if parent != nil {
		tc.periods = make([]testPeriod, len(parent.periods))
		copy(tc.periods, parent.periods)
	}
	if newCommittees {
		if begin == 0 {
			tc.fillCommittees(begin, end+1)
		} else {
			tc.fillCommittees(begin+1, end+1)
		}
	}
	tc.fillUpdates(begin, end, signerCount, finalizedHeader)
	return tc
}

type testPeriod struct {
	committee *types.SerializedSyncCommittee
	update    *types.LightClientUpdate
}

type testCommitteeChain struct {
	periods []testPeriod
	config  types.ChainConfig
}

func (tc *testCommitteeChain) fillCommittees(begin, end int) {
	if len(tc.periods) <= end {
		tc.periods = append(tc.periods, make([]testPeriod, end+1-len(tc.periods))...)
	}
	for i := begin; i <= end; i++ {
		tc.periods[i].committee = GenerateTestCommittee()
	}
}

func (tc *testCommitteeChain) fillUpdates(begin, end int, signerCount int, finalizedHeader bool) {
	for i := begin; i <= end; i++ {
		tc.periods[i].update = GenerateTestUpdate(&tc.config, uint64(i), tc.periods[i].committee, tc.periods[i+1].committee, signerCount, finalizedHeader)
	}
}
78  beacon/light/range.go  Normal file
@@ -0,0 +1,78 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package light

// periodRange represents a (possibly zero-length) range of integers (sync periods).
type periodRange struct {
	Start, End uint64
}

// isEmpty returns true if the length of the range is zero.
func (a periodRange) isEmpty() bool {
	return a.End == a.Start
}

// contains returns true if the range includes the given period.
func (a periodRange) contains(period uint64) bool {
	return period >= a.Start && period < a.End
}

// canExpand returns true if the range includes or can be expanded with the given
// period (either the range is empty or the given period is inside, right before or
// right after the range).
func (a periodRange) canExpand(period uint64) bool {
	return a.isEmpty() || (period+1 >= a.Start && period <= a.End)
}

// expand expands the range with the given period.
// This method assumes that canExpand returned true: otherwise this is a no-op.
func (a *periodRange) expand(period uint64) {
	if a.isEmpty() {
		a.Start, a.End = period, period+1
		return
	}
	if a.Start == period+1 {
		a.Start--
	}
	if a.End == period {
		a.End++
	}
}

// split splits the range into two ranges. The 'fromPeriod' will be the first
// element in the second range (if present).
// The original range is unchanged by this operation
func (a *periodRange) split(fromPeriod uint64) (periodRange, periodRange) {
	if fromPeriod <= a.Start {
		// First range empty, everything in second range,
		return periodRange{}, *a
	}
	if fromPeriod >= a.End {
		// Second range empty, everything in first range,
		return *a, periodRange{}
	}
	x := periodRange{a.Start, fromPeriod}
	y := periodRange{fromPeriod, a.End}
	return x, y
}

// each invokes the supplied function fn once per period in range
func (a *periodRange) each(fn func(uint64)) {
	for p := a.Start; p < a.End; p++ {
		fn(p)
	}
}
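A short worked example of the periodRange helpers above; the concrete period numbers are arbitrary and the snippet is not part of the change set:

package light

import "fmt"

// examplePeriodRange is illustrative only and not part of the change set.
func examplePeriodRange() {
	var r periodRange
	for _, p := range []uint64{10, 11, 9} {
		if r.canExpand(p) {
			r.expand(p)
		}
	}
	fmt.Println(r)                 // {9 12}: covers periods 9, 10 and 11
	fmt.Println(r.canExpand(13))   // false: period 12 would be skipped
	keep, drop := r.split(11)
	fmt.Println(keep, drop)        // {9 11} {11 12}
	fmt.Println(keep.contains(10)) // true
}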
152  beacon/light/test_helpers.go  Normal file
@@ -0,0 +1,152 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package light

import (
	"crypto/rand"
	"crypto/sha256"
	mrand "math/rand"

	"github.com/ethereum/go-ethereum/beacon/merkle"
	"github.com/ethereum/go-ethereum/beacon/params"
	"github.com/ethereum/go-ethereum/beacon/types"
	"github.com/ethereum/go-ethereum/common"
)

func GenerateTestCommittee() *types.SerializedSyncCommittee {
	s := new(types.SerializedSyncCommittee)
	rand.Read(s[:32])
	return s
}

func GenerateTestUpdate(config *types.ChainConfig, period uint64, committee, nextCommittee *types.SerializedSyncCommittee, signerCount int, finalizedHeader bool) *types.LightClientUpdate {
	update := new(types.LightClientUpdate)
	update.NextSyncCommitteeRoot = nextCommittee.Root()
	var attestedHeader types.Header
	if finalizedHeader {
		update.FinalizedHeader = new(types.Header)
		*update.FinalizedHeader, update.NextSyncCommitteeBranch = makeTestHeaderWithMerkleProof(types.SyncPeriodStart(period)+100, params.StateIndexNextSyncCommittee, merkle.Value(update.NextSyncCommitteeRoot))
		attestedHeader, update.FinalityBranch = makeTestHeaderWithMerkleProof(types.SyncPeriodStart(period)+200, params.StateIndexFinalBlock, merkle.Value(update.FinalizedHeader.Hash()))
	} else {
		attestedHeader, update.NextSyncCommitteeBranch = makeTestHeaderWithMerkleProof(types.SyncPeriodStart(period)+2000, params.StateIndexNextSyncCommittee, merkle.Value(update.NextSyncCommitteeRoot))
	}
	update.AttestedHeader = GenerateTestSignedHeader(attestedHeader, config, committee, attestedHeader.Slot+1, signerCount)
	return update
}

func GenerateTestSignedHeader(header types.Header, config *types.ChainConfig, committee *types.SerializedSyncCommittee, signatureSlot uint64, signerCount int) types.SignedHeader {
	bitmask := makeBitmask(signerCount)
	signingRoot, _ := config.Forks.SigningRoot(header)
	c, _ := dummyVerifier{}.deserializeSyncCommittee(committee)
	return types.SignedHeader{
		Header: header,
		Signature: types.SyncAggregate{
			Signers:   bitmask,
			Signature: makeDummySignature(c.(dummySyncCommittee), signingRoot, bitmask),
		},
		SignatureSlot: signatureSlot,
	}
}

func GenerateTestCheckpoint(period uint64, committee *types.SerializedSyncCommittee) *types.BootstrapData {
	header, branch := makeTestHeaderWithMerkleProof(types.SyncPeriodStart(period)+200, params.StateIndexSyncCommittee, merkle.Value(committee.Root()))
	return &types.BootstrapData{
		Header:          header,
		Committee:       committee,
		CommitteeRoot:   committee.Root(),
		CommitteeBranch: branch,
	}
}

func makeBitmask(signerCount int) (bitmask [params.SyncCommitteeBitmaskSize]byte) {
	for i := 0; i < params.SyncCommitteeSize; i++ {
		if mrand.Intn(params.SyncCommitteeSize-i) < signerCount {
			bitmask[i/8] += byte(1) << (i & 7)
			signerCount--
		}
	}
	return
}

func makeTestHeaderWithMerkleProof(slot, index uint64, value merkle.Value) (types.Header, merkle.Values) {
	var branch merkle.Values
	hasher := sha256.New()
	for index > 1 {
		var proofHash merkle.Value
		rand.Read(proofHash[:])
		hasher.Reset()
		if index&1 == 0 {
			hasher.Write(value[:])
			hasher.Write(proofHash[:])
		} else {
			hasher.Write(proofHash[:])
			hasher.Write(value[:])
		}
		hasher.Sum(value[:0])
		index >>= 1
		branch = append(branch, proofHash)
	}
	return types.Header{Slot: slot, StateRoot: common.Hash(value)}, branch
}

// syncCommittee holds either a blsSyncCommittee or a fake dummySyncCommittee used for testing
type syncCommittee interface{}

// committeeSigVerifier verifies sync committee signatures (either proper BLS
// signatures or fake signatures used for testing)
type committeeSigVerifier interface {
	deserializeSyncCommittee(s *types.SerializedSyncCommittee) (syncCommittee, error)
	verifySignature(committee syncCommittee, signedRoot common.Hash, aggregate *types.SyncAggregate) bool
}

// blsVerifier implements committeeSigVerifier
type blsVerifier struct{}

// deserializeSyncCommittee implements committeeSigVerifier
func (blsVerifier) deserializeSyncCommittee(s *types.SerializedSyncCommittee) (syncCommittee, error) {
	return s.Deserialize()
}

// verifySignature implements committeeSigVerifier
func (blsVerifier) verifySignature(committee syncCommittee, signingRoot common.Hash, aggregate *types.SyncAggregate) bool {
	return committee.(*types.SyncCommittee).VerifySignature(signingRoot, aggregate)
}

type dummySyncCommittee [32]byte

// dummyVerifier implements committeeSigVerifier
type dummyVerifier struct{}

// deserializeSyncCommittee implements committeeSigVerifier
func (dummyVerifier) deserializeSyncCommittee(s *types.SerializedSyncCommittee) (syncCommittee, error) {
	var sc dummySyncCommittee
	copy(sc[:], s[:32])
	return sc, nil
}

// verifySignature implements committeeSigVerifier
func (dummyVerifier) verifySignature(committee syncCommittee, signingRoot common.Hash, aggregate *types.SyncAggregate) bool {
	return aggregate.Signature == makeDummySignature(committee.(dummySyncCommittee), signingRoot, aggregate.Signers)
}

func makeDummySignature(committee dummySyncCommittee, signingRoot common.Hash, bitmask [params.SyncCommitteeBitmaskSize]byte) (sig [params.BLSSignatureSize]byte) {
	for i, b := range committee[:] {
		sig[i] = b ^ signingRoot[i]
	}
	copy(sig[32:], bitmask[:])
	return
}
@ -25,6 +25,24 @@ import (
"github.com/ethereum/go-ethereum/common"
)

// BootstrapData contains a sync committee where light sync can be started,
// together with a proof through a beacon header and corresponding state.
// Note: BootstrapData is fetched from a server based on a known checkpoint hash.
type BootstrapData struct {
Header Header
CommitteeRoot common.Hash
Committee *SerializedSyncCommittee `rlp:"-"`
CommitteeBranch merkle.Values
}

// Validate verifies the proof included in BootstrapData.
func (c *BootstrapData) Validate() error {
if c.CommitteeRoot != c.Committee.Root() {
return errors.New("wrong committee root")
}
return merkle.VerifyProof(c.Header.StateRoot, params.StateIndexSyncCommittee, c.CommitteeBranch, merkle.Value(c.CommitteeRoot))
}

// LightClientUpdate is a proof of the next sync committee root based on a header
// signed by the sync committee of the given period. Optionally, the update can
// prove quasi-finality by the signed header referring to a previous, finalized
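As a usage note, the caller side is expected to reject any bootstrap whose proof does not check out before trusting the committee. A hedged sketch follows; the package name and plumbing are assumptions, only the BootstrapData fields and Validate come from the code above:

package lightsync // illustrative package, not part of the diff

import (
    "log"

    "github.com/ethereum/go-ethereum/beacon/types"
)

// useBootstrap sketches the caller side of the new type: after fetching the
// BootstrapData for a known checkpoint, the proof is checked before the
// committee is trusted. Fetching and the checkpoint comparison are assumed to
// happen elsewhere.
func useBootstrap(data *types.BootstrapData) *types.SerializedSyncCommittee {
    if err := data.Validate(); err != nil {
        // Committee root or Merkle branch did not match Header.StateRoot.
        log.Fatalf("invalid bootstrap data: %v", err)
    }
    return data.Committee // safe to deserialize and use as the starting committee
}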
@ -5,52 +5,53 @@
# https://github.com/ethereum/execution-spec-tests/releases/download/v1.0.6/
485af7b66cf41eb3a8c1bd46632913b8eb95995df867cf665617bbc9b4beedd1 fixtures_develop.tar.gz

# version:golang 1.21.4
# version:golang 1.21.5
# https://go.dev/dl/
47b26a83d2b65a3c1c1bcace273b69bee49a7a7b5168a7604ded3d26a37bd787 go1.21.4.src.tar.gz
285cbbdf4b6e6e62ed58f370f3f6d8c30825d6e56c5853c66d3c23bcdb09db19 go1.21.5.src.tar.gz
cd3bdcc802b759b70e8418bc7afbc4a65ca73a3fe576060af9fc8a2a5e71c3b8 go1.21.4.darwin-amd64.tar.gz
a2e1d5743e896e5fe1e7d96479c0a769254aed18cf216cf8f4c3a2300a9b3923 go1.21.5.darwin-amd64.tar.gz
8b7caf2ac60bdff457dba7d4ff2a01def889592b834453431ae3caecf884f6a5 go1.21.4.darwin-arm64.tar.gz
d0f8ac0c4fb3efc223a833010901d02954e3923cfe2c9a2ff0e4254a777cc9cc go1.21.5.darwin-arm64.tar.gz
f1e685d086eb36f4be5b8b953b52baf7752bc6235400d84bb7d87e500b65f03e go1.21.4.freebsd-386.tar.gz
2c05bbe0dc62456b90b7ddd354a54f373b7c377a98f8b22f52ab694b4f6cca58 go1.21.5.freebsd-386.tar.gz
59f9b32187efb98d344a3818a631d3815ebb5c7bbefc367bab6515caaca544e9 go1.21.4.freebsd-amd64.tar.gz
30b6c64e9a77129605bc12f836422bf09eec577a8c899ee46130aeff81567003 go1.21.5.freebsd-amd64.tar.gz
64d3e5d295806e137c9e39d1e1f10b00a30fcd5c2f230d72b3298f579bb3c89a go1.21.4.linux-386.tar.gz
8f4dba9cf5c61757bbd7e9ebdb93b6a30a1b03f4a636a1ba0cc2f27b907ab8e1 go1.21.5.linux-386.tar.gz
73cac0215254d0c7d1241fa40837851f3b9a8a742d0b54714cbdfb3feaf8f0af go1.21.4.linux-amd64.tar.gz
e2bc0b3e4b64111ec117295c088bde5f00eeed1567999ff77bc859d7df70078e go1.21.5.linux-amd64.tar.gz
ce1983a7289856c3a918e1fd26d41e072cc39f928adfb11ba1896440849b95da go1.21.4.linux-arm64.tar.gz
841cced7ecda9b2014f139f5bab5ae31785f35399f236b8b3e75dff2a2978d96 go1.21.5.linux-arm64.tar.gz
6c62e89113750cc77c498194d13a03fadfda22bd2c7d44e8a826fd354db60252 go1.21.4.linux-armv6l.tar.gz
837f4bf4e22fcdf920ffeaa4abf3d02d1314e03725431065f4d44c46a01b42fe go1.21.5.linux-armv6l.tar.gz
2c63b36d2adcfb22013102a2ee730f058ec2f93b9f27479793c80b2e3641783f go1.21.4.linux-ppc64le.tar.gz
907b8c6ec4be9b184952e5d3493be66b1746442394a8bc78556c56834cd7c38b go1.21.5.linux-ppc64le.tar.gz
7a75ba4afc7a96058ca65903d994cd862381825d7dca12b2183f087c757c26c0 go1.21.4.linux-s390x.tar.gz
9c4a81b72ebe44368813cd03684e1080a818bf915d84163abae2ed325a1b2dc0 go1.21.5.linux-s390x.tar.gz
870a0e462b94671dc2d6cac707e9e19f7524fdc3c90711e6cd4450c3713a8ce0 go1.21.4.windows-386.zip
6da2418889dfb37763d0eb149c4a8d728c029e12f0cd54fbca0a31ae547e2d34 go1.21.5.windows-386.zip
79e5428e068c912d9cfa6cd115c13549856ec689c1332eac17f5d6122e19d595 go1.21.4.windows-amd64.zip
bbe603cde7c9dee658f45164b4d06de1eff6e6e6b800100824e7c00d56a9a92f go1.21.5.windows-amd64.zip
58bc7c6f4d4c72da2df4d2650c8222fe03c9978070eb3c66be8bbaa2a4757ac1 go1.21.4.windows-arm64.zip
9b7acca50e674294e43202df4fbc26d5af4d8bc3170a3342a1514f09a2dab5e9 go1.21.5.windows-arm64.zip

# version:golangci 1.51.1
# version:golangci 1.55.2
# https://github.com/golangci/golangci-lint/releases/
# https://github.com/golangci/golangci-lint/releases/download/v1.51.1/
# https://github.com/golangci/golangci-lint/releases/download/v1.55.2/
fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz
632e96e6d5294fbbe7b2c410a49c8fa01c60712a0af85a567de85bcc1623ea21 golangci-lint-1.55.2-darwin-amd64.tar.gz
75b8f0ff3a4e68147156be4161a49d4576f1be37a0b506473f8c482140c1e7f2 golangci-lint-1.51.1-darwin-arm64.tar.gz
234463f059249f82045824afdcdd5db5682d0593052f58f6a3039a0a1c3899f6 golangci-lint-1.55.2-darwin-arm64.tar.gz
e06b3459aaed356e1667580be00b05f41f3b2e29685d12cdee571c23e1edb414 golangci-lint-1.51.1-freebsd-386.tar.gz
2bdd105e2d4e003a9058c33a22bb191a1e0f30fa0790acca0d8fbffac1d6247c golangci-lint-1.55.2-freebsd-386.tar.gz
623ce2d0fa4d35cc2e8d69fa7334227ab592380962a13b4d9cdc77cf41db2008 golangci-lint-1.51.1-freebsd-amd64.tar.gz
e75056e8b082386676ce23eba455cf893931a792c0d87e1e3743c0aec33c7fb5 golangci-lint-1.55.2-freebsd-amd64.tar.gz
131365feb0584cc2736c43192fa673ca50e5b6b765456990cb379ecfb787e568 golangci-lint-1.51.1-freebsd-armv6.tar.gz
5789b933facaf6136bd23f1d50add67b79bbcf8dfdfc9069a37f729395940a66 golangci-lint-1.55.2-freebsd-armv6.tar.gz
98fb627927cbb654f5bf85dcffc5f646666b2ce96ea0fed977c9fb28abd51532 golangci-lint-1.51.1-freebsd-armv7.tar.gz
7f21ab1008d05f32c954f99470fc86a83a059e530fe2add1d0b7d8ed4d8992a7 golangci-lint-1.55.2-freebsd-armv7.tar.gz
b36a99702fa762c15840261bc0fb41b4b1b16b8b19b8c0941bae98c85bb0f8b8 golangci-lint-1.51.1-linux-386.tar.gz
33ab06139b9219a28251f10821da94423db30285cc2af97494cbb2a281927de9 golangci-lint-1.55.2-illumos-amd64.tar.gz
17aeb26c76820c22efa0e1838b0ab93e90cfedef43fbfc9a2f33f27eb9e5e070 golangci-lint-1.51.1-linux-amd64.tar.gz
57ce6f8ce3ad6ee45d7cc3d9a047545a851c2547637834a3fcb086c7b40b1e6b golangci-lint-1.55.2-linux-386.tar.gz
9744bc34e7b8d82ca788b667bfb7155a39b4be9aef43bf9f10318b1372cea338 golangci-lint-1.51.1-linux-arm64.tar.gz
ca21c961a33be3bc15e4292dc40c98c8dcc5463a7b6768a3afc123761630c09c golangci-lint-1.55.2-linux-amd64.tar.gz
0dda8dbeb2ff7455a044ec8e347f2fc6d655d2e99d281b3b95e88167031c673d golangci-lint-1.51.1-linux-armv6.tar.gz
8eb0cee9b1dbf0eaa49871798c7f8a5b35f2960c52d776a5f31eb7d886b92746 golangci-lint-1.55.2-linux-arm64.tar.gz
0512f311b11d43b8b22989d929f0fe8a2e1e5ebe497f1eb0ff73a0fc3d188fd1 golangci-lint-1.51.1-linux-armv7.tar.gz
3195f3e0f37d353fd5bd415cabcd4e263f5c29d3d0ffb176c26ff3d2c75eb3bb golangci-lint-1.55.2-linux-armv6.tar.gz
d767108dcf84a8eaa844df3454cb0f75a492f4e7102ecc2b0a3545cfe073a566 golangci-lint-1.51.1-linux-loong64.tar.gz
c823ee36eb1a719e171de1f2f5ca3068033dce8d9817232fd10ed71fd6650406 golangci-lint-1.55.2-linux-armv7.tar.gz
3bd56c54daec16585b2668e0dfabb27af2c2b38cc0fdb46923e2521e1634846b golangci-lint-1.51.1-linux-mips64.tar.gz
758a5d2a356dc494bd13ed4c0d4bf5a54a4dc91267ea5ecdd87b86c7ca0624e7 golangci-lint-1.55.2-linux-loong64.tar.gz
f72f5adfa2219e15d2414c9a2966f86e74556cf17a85c727a7fb7770a16cf814 golangci-lint-1.51.1-linux-mips64le.tar.gz
2c7b9abdce7cae802a67d583cd7c6dca520bff6d0e17c8535a918e2f2b437aa0 golangci-lint-1.55.2-linux-mips64.tar.gz
e605521dac98096d8737e1997c954f41f1d0d8275b8731f62783d410c23574b9 golangci-lint-1.51.1-linux-ppc64le.tar.gz
024e0a15b85352cc27271285526e16a4ab66d3e67afbbe446c9808c06cb8dbed golangci-lint-1.55.2-linux-mips64le.tar.gz
2f683217b814339e74d61ca700922d8407f15addd6d4c5e8b156fbab79f26a87 golangci-lint-1.51.1-linux-riscv64.tar.gz
6b00f89ba5506c1de1efdd9fa17c54093013a294fefd8b9b31534db626a672ee golangci-lint-1.55.2-linux-ppc64le.tar.gz
d98528292b65971a3594e5880530e7624597dc9806fcfccdfbe39be411713d63 golangci-lint-1.51.1-linux-s390x.tar.gz
0faa0d047d9bf7b703ed3ea65b6117043c93504f9ca1de25ae929d3901c73d4a golangci-lint-1.55.2-linux-riscv64.tar.gz
9bb2d0fe9e692ed0aea4f2537e3e6862b2f6768fe2849a84f4a6ad09da9fd971 golangci-lint-1.51.1-netbsd-386.tar.gz
30dec9b22e7d5bb4e9d5ccea96da20f71cd7db3c8cf30b8ddc7cb9174c4d742a golangci-lint-1.55.2-linux-s390x.tar.gz
34cafdcd11ae73ae88d66c33eb8449f5c976fc3e37b44774dbe9c71caa95e592 golangci-lint-1.51.1-netbsd-amd64.tar.gz
5a0ede48f79ad707902fdb29be8cd2abd8302dc122b65ebae3fdfc86751c7698 golangci-lint-1.55.2-netbsd-386.tar.gz
f8b4e1e47ac17caafe8a5f32f975a2b6a7cb14c27c0f73c1fb15c20ca91c2e03 golangci-lint-1.51.1-netbsd-armv6.tar.gz
95af20a2e617126dd5b08122ece7819101070e1582a961067ce8c41172f901ad golangci-lint-1.55.2-netbsd-amd64.tar.gz
c4f58b7e227b9fd41f0e9310dc83f4a4e7d026598e2f6e95b78761081a6d9bd2 golangci-lint-1.51.1-netbsd-armv7.tar.gz
94fb7dacb7527847cc95d7120904e19a2a0a81a0d50d61766c9e0251da72ab9d golangci-lint-1.55.2-netbsd-armv6.tar.gz
6710e2f5375dc75521c1a17980a6cbbe6ff76c2f8b852964a8af558899a97cf5 golangci-lint-1.51.1-windows-386.zip
ca906bce5fee9619400e4a321c56476fe4a4efb6ac4fc989d340eb5563348873 golangci-lint-1.55.2-netbsd-armv7.tar.gz
722d7b87b9cdda0a3835d5030b3fc5385c2eba4c107f63f6391cfb2ac35f051d golangci-lint-1.51.1-windows-amd64.zip
45b442f69fc8915c4500201c0247b7f3f69544dbc9165403a61f9095f2c57355 golangci-lint-1.55.2-windows-386.zip
eb57f9bcb56646f2e3d6ccaf02ec227815fb05077b2e0b1bf9e755805acdc2b9 golangci-lint-1.51.1-windows-arm64.zip
f57d434d231d43417dfa631587522f8c1991220b43c8ffadb9c7bd279508bf81 golangci-lint-1.55.2-windows-amd64.zip
bce02f7232723cb727755ee11f168a700a00896a25d37f87c4b173bce55596b4 golangci-lint-1.51.1-windows-armv6.zip
fd7dc8f4c6829ee6fafb252a4d81d2155cd35da7833665cbb25d53ce7cecd990 golangci-lint-1.55.2-windows-arm64.zip
cf6403f84707ce8c98664736772271bc8874f2e760c2fd0f00cf3e85963507e9 golangci-lint-1.51.1-windows-armv7.zip
1892c3c24f9e7ef44b02f6750c703864b6dc350129f3ec39510300007b2376f1 golangci-lint-1.55.2-windows-armv6.zip
a5e68ae73d38748b5269fad36ac7575e3c162a5dc63ef58abdea03cc5da4522a golangci-lint-1.55.2-windows-armv7.zip

# This is the builder on PPA that will build Go itself (inception-y), don't modify!
#
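These entries are what the build script checks downloaded archives against: each non-comment line is a SHA-256 digest followed by the artifact name. A self-contained sketch of that kind of check, assuming the "<sha256> <filename>" line format above; this is an illustrative helper, not the actual ci.go code:

package main

import (
    "bufio"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "os"
    "strings"
)

// verifyChecksum looks up `name` in a checksums file (comments start with '#')
// and compares the listed digest against the artifact's actual SHA-256.
func verifyChecksum(checksumFile, artifactPath, name string) error {
    f, err := os.Open(checksumFile)
    if err != nil {
        return err
    }
    defer f.Close()

    want := ""
    scanner := bufio.NewScanner(f)
    for scanner.Scan() {
        line := strings.TrimSpace(scanner.Text())
        if line == "" || strings.HasPrefix(line, "#") {
            continue
        }
        if fields := strings.Fields(line); len(fields) == 2 && fields[1] == name {
            want = fields[0]
            break
        }
    }
    if want == "" {
        return fmt.Errorf("no checksum entry for %s", name)
    }
    data, err := os.ReadFile(artifactPath)
    if err != nil {
        return err
    }
    sum := sha256.Sum256(data)
    if got := hex.EncodeToString(sum[:]); got != want {
        return fmt.Errorf("checksum mismatch for %s: got %s, want %s", name, got, want)
    }
    return nil
}

func main() {
    if err := verifyChecksum("build/checksums.txt", "go1.21.5.linux-amd64.tar.gz", "go1.21.5.linux-amd64.tar.gz"); err != nil {
        fmt.Println(err)
    }
}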
build/ci.go
@ -123,12 +123,13 @@ var (
// wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsuite, impish,
// kinetic
debDistroGoBoots = map[string]string{
"trusty": "golang-1.11", // EOL: 04/2024
"trusty": "golang-1.11", // 14.04, EOL: 04/2024
"xenial": "golang-go", // EOL: 04/2026
"xenial": "golang-go", // 16.04, EOL: 04/2026
"bionic": "golang-go", // EOL: 04/2028
"bionic": "golang-go", // 18.04, EOL: 04/2028
"focal": "golang-go", // EOL: 04/2030
"focal": "golang-go", // 20.04, EOL: 04/2030
"jammy": "golang-go", // EOL: 04/2032
"jammy": "golang-go", // 22.04, EOL: 04/2032
"lunar": "golang-go", // EOL: 01/2024
"lunar": "golang-go", // 23.04, EOL: 01/2024
"mantic": "golang-go", // 23.10, EOL: 07/2024
}

debGoBootPaths = map[string]string{
@ -285,7 +286,7 @@ func doTest(cmdline []string) {
coverage = flag.Bool("coverage", false, "Whether to record code coverage")
verbose = flag.Bool("v", false, "Whether to log verbosely")
race = flag.Bool("race", false, "Execute the race detector")
short = flag.Bool("short", false, "Pass the 'short'-flag to go test")
cachedir = flag.String("cachedir", "./build/cache", "directory for caching downloads")
)
flag.CommandLine.Parse(cmdline)
@ -366,7 +367,7 @@ func doLint(cmdline []string) {

linter := downloadLinter(*cachedir)
lflags := []string{"run", "--config", ".golangci.yml"}
build.MustRunCommand(linter, append(lflags, packages...)...)
build.MustRunCommandWithOutput(linter, append(lflags, packages...)...)
fmt.Println("You have achieved perfection.")
}
@ -232,7 +232,7 @@ func abigen(c *cli.Context) error {
}

func main() {
log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))

if err := app.Run(os.Args); err != nil {
fmt.Fprintln(os.Stderr, err)
@ -8,6 +8,7 @@ import (
)

func TestNameFilter(t *testing.T) {
t.Parallel()
_, err := newNameFilter("Foo")
require.Error(t, err)
_, err = newNameFilter("too/many:colons:Foo")
@ -44,7 +44,7 @@ func main() {
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|pmp:<IP>|extip:<IP>)")
netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-5)")
verbosity = flag.Int("verbosity", 3, "log verbosity (0-5)")
vmodule = flag.String("vmodule", "", "log verbosity pattern")

nodeKey *ecdsa.PrivateKey
@ -52,10 +52,11 @@ func main() {
)
flag.Parse()

glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger := log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false))
glogger.Verbosity(log.Lvl(*verbosity))
slogVerbosity := log.FromLegacyLevel(*verbosity)
glogger.Verbosity(slogVerbosity)
glogger.Vmodule(*vmodule)
log.Root().SetHandler(glogger)
log.SetDefault(log.NewLogger(glogger))

natm, err := nat.Parse(*natdesc)
if err != nil {
@ -2,7 +2,7 @@

Clef can be used to sign transactions and data and is meant as a(n eventual) replacement for Geth's account management. This allows DApps to not depend on Geth's account management. When a DApp wants to sign data (or a transaction), it can send the content to Clef, which will then provide the user with context and asks for permission to sign the content. If the users grants the signing request, Clef will send the signature back to the DApp.

This setup allows a DApp to connect to a remote Ethereum node and send transactions that are locally signed. This can help in situations when a DApp is connected to an untrusted remote Ethereum node, because a local one is not available, not synchronised with the chain, or is a node that has no built-in (or limited) account management.
This setup allows a DApp to connect to a remote Ethereum node and send transactions that are locally signed. This can help in situations when a DApp is connected to an untrusted remote Ethereum node, because a local one is not available, not synchronized with the chain, or is a node that has no built-in (or limited) account management.

Clef can run as a daemon on the same machine, off a usb-stick like [USB armory](https://inversepath.com/usbarmory), or even a separate VM in a [QubesOS](https://www.qubes-os.org/) type setup.
@ -26,12 +26,13 @@ import (

// TestImportRaw tests clef --importraw
func TestImportRaw(t *testing.T) {
t.Parallel()
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
t.Cleanup(func() { os.Remove(keyPath) })

t.Parallel()
t.Run("happy-path", func(t *testing.T) {
t.Parallel()
// Run clef importraw
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword").input("myverylongpassword")
@ -43,6 +44,7 @@ func TestImportRaw(t *testing.T) {
})
// tests clef --importraw with mismatched passwords.
t.Run("pw-mismatch", func(t *testing.T) {
t.Parallel()
// Run clef importraw
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword1").input("myverylongpassword2").WaitExit()
@ -52,6 +54,7 @@ func TestImportRaw(t *testing.T) {
})
// tests clef --importraw with a too short password.
t.Run("short-pw", func(t *testing.T) {
t.Parallel()
// Run clef importraw
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("shorty").input("shorty").WaitExit()
@ -64,12 +67,13 @@ func TestImportRaw(t *testing.T) {

// TestListAccounts tests clef --list-accounts
func TestListAccounts(t *testing.T) {
t.Parallel()
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
t.Cleanup(func() { os.Remove(keyPath) })

t.Parallel()
t.Run("no-accounts", func(t *testing.T) {
t.Parallel()
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-accounts")
if out := string(clef.Output()); !strings.Contains(out, "The keystore is empty.") {
t.Logf("Output\n%v", out)
@ -77,6 +81,7 @@ func TestListAccounts(t *testing.T) {
}
})
t.Run("one-account", func(t *testing.T) {
t.Parallel()
// First, we need to import
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword").input("myverylongpassword").WaitExit()
@ -91,12 +96,13 @@ func TestListAccounts(t *testing.T) {

// TestListWallets tests clef --list-wallets
func TestListWallets(t *testing.T) {
t.Parallel()
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
t.Cleanup(func() { os.Remove(keyPath) })

t.Parallel()
t.Run("no-accounts", func(t *testing.T) {
t.Parallel()
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-wallets")
if out := string(clef.Output()); !strings.Contains(out, "There are no wallets.") {
t.Logf("Output\n%v", out)
@ -104,6 +110,7 @@ func TestListWallets(t *testing.T) {
}
})
t.Run("one-account", func(t *testing.T) {
t.Parallel()
// First, we need to import
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
clef.input("myverylongpassword").input("myverylongpassword").WaitExit()
@ -492,7 +492,8 @@ func initialize(c *cli.Context) error {
if usecolor {
output = colorable.NewColorable(logOutput)
}
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(c.Int(logLevelFlag.Name)), log.StreamHandler(output, log.TerminalFormat(usecolor))))
verbosity := log.FromLegacyLevel(c.Int(logLevelFlag.Name))
log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(output, verbosity, usecolor)))

return nil
}
@ -581,6 +582,7 @@ func accountImport(c *cli.Context) error {
return err
}
if first != second {
//lint:ignore ST1005 This is a message for the user
return errors.New("Passwords do not match")
}
acc, err := internalApi.ImportRawKey(hex.EncodeToString(crypto.FromECDSA(pKey)), first)
@ -702,6 +704,7 @@ func signer(c *cli.Context) error {
log.Info("Starting signer", "chainid", chainId, "keystore", ksLoc,
"light-kdf", lightKdf, "advanced", advanced)
am := core.StartClefAccountManager(ksLoc, nousb, lightKdf, scpath)
defer am.Close()
apiImpl := core.NewSignerAPI(am, chainId, nousb, ui, db, advanced, pwStorage)

// Establish the bidirectional communication, by creating a new UI backend and registering
@ -91,7 +91,7 @@ class StdIOHandler:
{"jsonrpc":"2.0","id":20,"method":"ui_approveTx","params":[{"transaction":{"from":"0xDEADbEeF000000000000000000000000DeaDbeEf","to":"0xDEADbEeF000000000000000000000000DeaDbeEf","gas":"0x3e8","gasPrice":"0x5","maxFeePerGas":null,"maxPriorityFeePerGas":null,"value":"0x6","nonce":"0x1","data":"0x"},"call_info":null,"meta":{"remote":"clef binary","local":"main","scheme":"in-proc","User-Agent":"","Origin":""}}]}

:param transaction: transaction info
:param call_info: info abou the call, e.g. if ABI info could not be
:param call_info: info about the call, e.g. if ABI info could not be
:param meta: metadata about the request, e.g. where the call comes from
:return:
""" # noqa: E501
@ -108,31 +108,32 @@ Start the test by running `devp2p discv5 test -listen1 127.0.0.1 -listen2 127.0.

The Eth Protocol test suite is a conformance test suite for the [eth protocol][eth].

To run the eth protocol test suite against your implementation, the node needs to be initialized as such:

1. initialize the geth node with the `genesis.json` file contained in the `testdata` directory
2. import the `halfchain.rlp` file in the `testdata` directory
3. run geth with the following flags:
```
geth --datadir <datadir> --nodiscover --nat=none --networkid 19763 --verbosity 5
```

Then, run the following command, replacing `<enode>` with the enode of the geth node:
```
devp2p rlpx eth-test <enode> cmd/devp2p/internal/ethtest/testdata/chain.rlp cmd/devp2p/internal/ethtest/testdata/genesis.json
```

To run the eth protocol test suite against your implementation, the node needs to be initialized
with our test chain. The chain files are located in `./cmd/devp2p/internal/ethtest/testdata`.

1. initialize the geth node with the `genesis.json` file
2. import blocks from `chain.rlp`
3. run the client using the resulting database. For geth, use a command like the one below:

geth \
--datadir <datadir> \
--nodiscover \
--nat=none \
--networkid 3503995874084926 \
--verbosity 5 \
--authrpc.jwtsecret 0x7365637265747365637265747365637265747365637265747365637265747365

Note that the tests also require access to the engine API.
The test suite can now be executed using the devp2p tool.

devp2p rlpx eth-test \
--chain internal/ethtest/testdata \
--node enode://.... \
--engineapi http://127.0.0.1:8551 \
--jwtsecret 0x7365637265747365637265747365637265747365637265747365637265747365

Repeat the above process (re-initialising the node) in order to run the Eth Protocol test suite again.

#### Eth66 Test Suite

The Eth66 test suite is also a conformance test suite for the eth 66 protocol version specifically.
To run the eth66 protocol test suite, initialize a geth node as described above and run the following command,
replacing `<enode>` with the enode of the geth node:

```
devp2p rlpx eth66-test <enode> cmd/devp2p/internal/ethtest/testdata/chain.rlp cmd/devp2p/internal/ethtest/testdata/genesis.json
```

[eth]: https://github.com/ethereum/devp2p/blob/master/caps/eth.md
[dns-tutorial]: https://geth.ethereum.org/docs/developers/geth-developer/dns-discovery-setup
@ -236,7 +236,7 @@ func discv4Crawl(ctx *cli.Context) error {
func discv4Test(ctx *cli.Context) error {
// Configure test package globals.
if !ctx.IsSet(remoteEnodeFlag.Name) {
return fmt.Errorf("Missing -%v", remoteEnodeFlag.Name)
return fmt.Errorf("missing -%v", remoteEnodeFlag.Name)
}
v4test.Remote = ctx.String(remoteEnodeFlag.Name)
v4test.Listen1 = ctx.String(testListen1Flag.Name)
@ -26,6 +26,7 @@ import (
// This test checks that computeChanges/splitChanges create DNS changes in
// leaf-added -> root-changed -> leaf-deleted order.
func TestRoute53ChangeSort(t *testing.T) {
t.Parallel()
testTree0 := map[string]recordSet{
"2kfjogvxdqtxxugbh7gs7naaai.n": {ttl: 3333, values: []string{
`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`,
@ -164,6 +165,7 @@ func TestRoute53ChangeSort(t *testing.T) {

// This test checks that computeChanges compares the quoted value of the records correctly.
func TestRoute53NoChange(t *testing.T) {
t.Parallel()
// Existing record set.
testTree0 := map[string]recordSet{
"n": {ttl: rootTTL, values: []string{
@ -17,27 +17,118 @@
package ethtest

import (
"bytes"
"compress/gzip"
"crypto/ecdsa"
"encoding/json"
"errors"
"fmt"
"io"
"math/big"
"os"
"path"
"sort"
"strings"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/exp/slices"
)

// Chain is a lightweight blockchain-like store which can read a hivechain
// created chain.
type Chain struct {
genesis core.Genesis
blocks []*types.Block
chainConfig *params.ChainConfig
state map[common.Address]state.DumpAccount // state of head block
senders map[common.Address]*senderInfo
config *params.ChainConfig
}

// NewChain takes the given chain.rlp file, and decodes and returns
// the blocks from the file.
func NewChain(dir string) (*Chain, error) {
gen, err := loadGenesis(path.Join(dir, "genesis.json"))
if err != nil {
return nil, err
}
gblock := gen.ToBlock()

blocks, err := blocksFromFile(path.Join(dir, "chain.rlp"), gblock)
if err != nil {
return nil, err
}
state, err := readState(path.Join(dir, "headstate.json"))
if err != nil {
return nil, err
}
accounts, err := readAccounts(path.Join(dir, "accounts.json"))
if err != nil {
return nil, err
}
return &Chain{
genesis: gen,
blocks: blocks,
state: state,
senders: accounts,
config: gen.Config,
}, nil
}

// senderInfo is an account record as output in the "accounts.json" file from
// hivechain.
type senderInfo struct {
Key *ecdsa.PrivateKey `json:"key"`
Nonce uint64 `json:"nonce"`
}

// Head returns the chain head.
func (c *Chain) Head() *types.Block {
return c.blocks[c.Len()-1]
}

// AccountsInHashOrder returns all accounts of the head state, ordered by hash of address.
func (c *Chain) AccountsInHashOrder() []state.DumpAccount {
list := make([]state.DumpAccount, len(c.state))
i := 0
for addr, acc := range c.state {
addr := addr
list[i] = acc
list[i].Address = &addr
if len(acc.AddressHash) != 32 {
panic(fmt.Errorf("missing/invalid SecureKey in dump account %v", addr))
}
i++
}
slices.SortFunc(list, func(x, y state.DumpAccount) int {
return bytes.Compare(x.AddressHash, y.AddressHash)
})
return list
}

// CodeHashes returns all bytecode hashes contained in the head state.
func (c *Chain) CodeHashes() []common.Hash {
var hashes []common.Hash
seen := make(map[common.Hash]struct{})
seen[types.EmptyCodeHash] = struct{}{}
for _, acc := range c.state {
h := common.BytesToHash(acc.CodeHash)
if _, ok := seen[h]; ok {
continue
}
hashes = append(hashes, h)
seen[h] = struct{}{}
}
slices.SortFunc(hashes, (common.Hash).Cmp)
return hashes
}

// Len returns the length of the chain.
|
|||||||
return len(c.blocks)
|
return len(c.blocks)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ForkID gets the fork id of the chain.
|
||||||
|
func (c *Chain) ForkID() forkid.ID {
|
||||||
|
return forkid.NewID(c.config, c.blocks[0], uint64(c.Len()), c.blocks[c.Len()-1].Time())
|
||||||
|
}
|
||||||
|
|
||||||
// TD calculates the total difficulty of the chain at the
|
// TD calculates the total difficulty of the chain at the
|
||||||
// chain head.
|
// chain head.
|
||||||
func (c *Chain) TD() *big.Int {
|
func (c *Chain) TD() *big.Int {
|
||||||
@ -55,19 +151,12 @@ func (c *Chain) TD() *big.Int {
|
|||||||
return sum
|
return sum
|
||||||
}
|
}
|
||||||
|
|
||||||
// TotalDifficultyAt calculates the total difficulty of the chain
|
// GetBlock returns the block at the specified number.
|
||||||
// at the given block height.
|
func (c *Chain) GetBlock(number int) *types.Block {
|
||||||
func (c *Chain) TotalDifficultyAt(height int) *big.Int {
|
return c.blocks[number]
|
||||||
sum := new(big.Int)
|
|
||||||
if height >= c.Len() {
|
|
||||||
return sum
|
|
||||||
}
|
|
||||||
for _, block := range c.blocks[:height+1] {
|
|
||||||
sum.Add(sum, block.Difficulty())
|
|
||||||
}
|
|
||||||
return sum
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// RootAt returns the state root for the block at the given height.
|
||||||
func (c *Chain) RootAt(height int) common.Hash {
|
func (c *Chain) RootAt(height int) common.Hash {
|
||||||
if height < c.Len() {
|
if height < c.Len() {
|
||||||
return c.blocks[height].Root()
|
return c.blocks[height].Root()
|
||||||
@ -75,37 +164,56 @@ func (c *Chain) RootAt(height int) common.Hash {
|
|||||||
return common.Hash{}
|
return common.Hash{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ForkID gets the fork id of the chain.
|
// GetSender returns the address associated with account at the index in the
|
||||||
func (c *Chain) ForkID() forkid.ID {
|
// pre-funded accounts list.
|
||||||
return forkid.NewID(c.chainConfig, c.blocks[0], uint64(c.Len()), c.blocks[0].Time())
|
func (c *Chain) GetSender(idx int) (common.Address, uint64) {
|
||||||
}
|
var accounts Addresses
|
||||||
|
for addr := range c.senders {
|
||||||
// Shorten returns a copy chain of a desired height from the imported
|
accounts = append(accounts, addr)
|
||||||
func (c *Chain) Shorten(height int) *Chain {
|
|
||||||
blocks := make([]*types.Block, height)
|
|
||||||
copy(blocks, c.blocks[:height])
|
|
||||||
|
|
||||||
config := *c.chainConfig
|
|
||||||
return &Chain{
|
|
||||||
blocks: blocks,
|
|
||||||
chainConfig: &config,
|
|
||||||
}
|
}
|
||||||
|
sort.Sort(accounts)
|
||||||
|
addr := accounts[idx]
|
||||||
|
return addr, c.senders[addr].Nonce
|
||||||
}
|
}
|
||||||
|
|
||||||
// Head returns the chain head.
|
// IncNonce increases the specified signing account's pending nonce.
|
||||||
func (c *Chain) Head() *types.Block {
|
func (c *Chain) IncNonce(addr common.Address, amt uint64) {
|
||||||
return c.blocks[c.Len()-1]
|
if _, ok := c.senders[addr]; !ok {
|
||||||
|
panic("nonce increment for non-signer")
|
||||||
|
}
|
||||||
|
c.senders[addr].Nonce += amt
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Chain) GetHeaders(req *GetBlockHeaders) ([]*types.Header, error) {
|
// Balance returns the balance of an account at the head of the chain.
|
||||||
|
func (c *Chain) Balance(addr common.Address) *big.Int {
|
||||||
|
bal := new(big.Int)
|
||||||
|
if acc, ok := c.state[addr]; ok {
|
||||||
|
bal, _ = bal.SetString(acc.Balance, 10)
|
||||||
|
}
|
||||||
|
return bal
|
||||||
|
}
|
||||||
|
|
||||||
|
// SignTx signs a transaction for the specified from account, so long as that
|
||||||
|
// account was in the hivechain accounts dump.
|
||||||
|
func (c *Chain) SignTx(from common.Address, tx *types.Transaction) (*types.Transaction, error) {
|
||||||
|
signer := types.LatestSigner(c.config)
|
||||||
|
acc, ok := c.senders[from]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("account not available for signing: %s", from)
|
||||||
|
}
|
||||||
|
return types.SignTx(tx, signer, acc.Key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetHeaders returns the headers base on an ethGetPacketHeadersPacket.
|
||||||
|
func (c *Chain) GetHeaders(req *eth.GetBlockHeadersPacket) ([]*types.Header, error) {
|
||||||
if req.Amount < 1 {
|
if req.Amount < 1 {
|
||||||
return nil, errors.New("no block headers requested")
|
return nil, errors.New("no block headers requested")
|
||||||
}
|
}
|
||||||
|
var (
|
||||||
headers := make([]*types.Header, req.Amount)
|
headers = make([]*types.Header, req.Amount)
|
||||||
var blockNumber uint64
|
blockNumber uint64
|
||||||
|
)
|
||||||
// range over blocks to check if our chain has the requested header
|
// Range over blocks to check if our chain has the requested header.
|
||||||
for _, block := range c.blocks {
|
for _, block := range c.blocks {
|
||||||
if block.Hash() == req.Origin.Hash || block.Number().Uint64() == req.Origin.Number {
|
if block.Hash() == req.Origin.Hash || block.Number().Uint64() == req.Origin.Number {
|
||||||
headers[0] = block.Header()
|
headers[0] = block.Header()
|
||||||
@ -115,40 +223,30 @@ func (c *Chain) GetHeaders(req *GetBlockHeaders) ([]*types.Header, error) {
|
|||||||
if headers[0] == nil {
|
if headers[0] == nil {
|
||||||
return nil, fmt.Errorf("no headers found for given origin number %v, hash %v", req.Origin.Number, req.Origin.Hash)
|
return nil, fmt.Errorf("no headers found for given origin number %v, hash %v", req.Origin.Number, req.Origin.Hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
if req.Reverse {
|
if req.Reverse {
|
||||||
for i := 1; i < int(req.Amount); i++ {
|
for i := 1; i < int(req.Amount); i++ {
|
||||||
blockNumber -= (1 - req.Skip)
|
blockNumber -= (1 - req.Skip)
|
||||||
headers[i] = c.blocks[blockNumber].Header()
|
headers[i] = c.blocks[blockNumber].Header()
|
||||||
}
|
}
|
||||||
|
|
||||||
return headers, nil
|
return headers, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 1; i < int(req.Amount); i++ {
|
for i := 1; i < int(req.Amount); i++ {
|
||||||
blockNumber += (1 + req.Skip)
|
blockNumber += (1 + req.Skip)
|
||||||
headers[i] = c.blocks[blockNumber].Header()
|
headers[i] = c.blocks[blockNumber].Header()
|
||||||
}
|
}
|
||||||
|
|
||||||
return headers, nil
|
return headers, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// loadChain takes the given chain.rlp file, and decodes and returns
|
// Shorten returns a copy chain of a desired height from the imported
|
||||||
// the blocks from the file.
|
func (c *Chain) Shorten(height int) *Chain {
|
||||||
func loadChain(chainfile string, genesis string) (*Chain, error) {
|
blocks := make([]*types.Block, height)
|
||||||
gen, err := loadGenesis(genesis)
|
copy(blocks, c.blocks[:height])
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
gblock := gen.ToBlock()
|
|
||||||
|
|
||||||
blocks, err := blocksFromFile(chainfile, gblock)
|
config := *c.config
|
||||||
if err != nil {
|
return &Chain{
|
||||||
return nil, err
|
blocks: blocks,
|
||||||
|
config: &config,
|
||||||
}
|
}
|
||||||
|
|
||||||
c := &Chain{genesis: gen, blocks: blocks, chainConfig: gen.Config}
|
|
||||||
return c, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func loadGenesis(genesisFile string) (core.Genesis, error) {
|
func loadGenesis(genesisFile string) (core.Genesis, error) {
|
||||||
@ -163,6 +261,22 @@ func loadGenesis(genesisFile string) (core.Genesis, error) {
|
|||||||
return gen, nil
|
return gen, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type Addresses []common.Address
|
||||||
|
|
||||||
|
func (a Addresses) Len() int {
|
||||||
|
return len(a)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a Addresses) Less(i, j int) bool {
|
||||||
|
return bytes.Compare(a[i][:], a[j][:]) < 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a Addresses) Swap(i, j int) {
|
||||||
|
tmp := a[i]
|
||||||
|
a[i] = a[j]
|
||||||
|
a[j] = tmp
|
||||||
|
}
|
||||||
|
|
||||||
func blocksFromFile(chainfile string, gblock *types.Block) ([]*types.Block, error) {
|
func blocksFromFile(chainfile string, gblock *types.Block) ([]*types.Block, error) {
|
||||||
// Load chain.rlp.
|
// Load chain.rlp.
|
||||||
fh, err := os.Open(chainfile)
|
fh, err := os.Open(chainfile)
|
||||||
@ -193,3 +307,47 @@ func blocksFromFile(chainfile string, gblock *types.Block) ([]*types.Block, erro
|
|||||||
}
|
}
|
||||||
return blocks, nil
|
return blocks, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func readState(file string) (map[common.Address]state.DumpAccount, error) {
|
||||||
|
f, err := os.ReadFile(file)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to read state: %v", err)
|
||||||
|
}
|
||||||
|
var dump state.Dump
|
||||||
|
if err := json.Unmarshal(f, &dump); err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to unmarshal state: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
state := make(map[common.Address]state.DumpAccount)
|
||||||
|
for key, acct := range dump.Accounts {
|
||||||
|
var addr common.Address
|
||||||
|
if err := addr.UnmarshalText([]byte(key)); err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid address %q", key)
|
||||||
|
}
|
||||||
|
state[addr] = acct
|
||||||
|
}
|
||||||
|
return state, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func readAccounts(file string) (map[common.Address]*senderInfo, error) {
|
||||||
|
f, err := os.ReadFile(file)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to read state: %v", err)
|
||||||
|
}
|
||||||
|
type account struct {
|
||||||
|
Key hexutil.Bytes `json:"key"`
|
||||||
|
}
|
||||||
|
keys := make(map[common.Address]account)
|
||||||
|
if err := json.Unmarshal(f, &keys); err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to unmarshal accounts: %v", err)
|
||||||
|
}
|
||||||
|
accounts := make(map[common.Address]*senderInfo)
|
||||||
|
for addr, acc := range keys {
|
||||||
|
pk, err := crypto.HexToECDSA(common.Bytes2Hex(acc.Key))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to read private key for %s: %v", err, addr)
|
||||||
|
}
|
||||||
|
accounts[addr] = &senderInfo{Key: pk, Nonce: 0}
|
||||||
|
}
|
||||||
|
return accounts, nil
|
||||||
|
}
|
||||||
|
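As a usage note for the new test-chain helpers above, here is a hedged sketch of how a test can combine GetSender, Balance, SignTx and IncNonce to produce a signed transaction. The gas values, the zero-value transfer and the helper itself are illustrative assumptions, not part of the commit:

package ethtest

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/core/types"
)

// buildSignedTx is an illustrative helper showing how the new Chain methods
// are meant to be combined; it is not part of the diff.
func buildSignedTx(chain *Chain) (*types.Transaction, error) {
    from, nonce := chain.GetSender(0) // first pre-funded account and its pending nonce
    to, _ := chain.GetSender(1)
    if chain.Balance(from).Sign() == 0 {
        return nil, fmt.Errorf("sender %s has no funds at the head state", from)
    }
    tx := types.NewTransaction(nonce, to, big.NewInt(1), 21000, big.NewInt(1_000_000_000), nil)
    signed, err := chain.SignTx(from, tx)
    if err != nil {
        return nil, err
    }
    chain.IncNonce(from, 1) // keep the local view of the sender's nonce in sync
    return signed, nil
}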
@ -30,6 +30,7 @@ import (
// TestEthProtocolNegotiation tests whether the test suite
// can negotiate the highest eth protocol in a status message exchange
func TestEthProtocolNegotiation(t *testing.T) {
t.Parallel()
var tests = []struct {
conn *Conn
caps []p2p.Cap
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestChain_GetHeaders tests whether the test suite can correctly
|
// TestChainGetHeaders tests whether the test suite can correctly
|
||||||
// respond to a GetBlockHeaders request from a node.
|
// respond to a GetBlockHeaders request from a node.
|
||||||
func TestChain_GetHeaders(t *testing.T) {
|
func TestChainGetHeaders(t *testing.T) {
|
||||||
chainFile, err := filepath.Abs("./testdata/chain.rlp")
|
t.Parallel()
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
genesisFile, err := filepath.Abs("./testdata/genesis.json")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
chain, err := loadChain(chainFile, genesisFile)
|
dir, err := filepath.Abs("./testdata")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
chain, err := NewChain(dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var tests = []struct {
|
var tests = []struct {
|
||||||
req GetBlockHeaders
|
req eth.GetBlockHeadersPacket
|
||||||
expected []*types.Header
|
expected []*types.Header
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
req: GetBlockHeaders{
|
req: eth.GetBlockHeadersPacket{
|
||||||
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
||||||
Origin: eth.HashOrNumber{Number: uint64(2)},
|
Origin: eth.HashOrNumber{Number: uint64(2)},
|
||||||
Amount: uint64(5),
|
Amount: uint64(5),
|
||||||
@ -161,7 +159,7 @@ func TestChain_GetHeaders(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
req: GetBlockHeaders{
|
req: eth.GetBlockHeadersPacket{
|
||||||
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
||||||
Origin: eth.HashOrNumber{Number: uint64(chain.Len() - 1)},
|
Origin: eth.HashOrNumber{Number: uint64(chain.Len() - 1)},
|
||||||
Amount: uint64(3),
|
Amount: uint64(3),
|
||||||
@ -176,7 +174,7 @@ func TestChain_GetHeaders(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
req: GetBlockHeaders{
|
req: eth.GetBlockHeadersPacket{
|
||||||
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
GetBlockHeadersRequest: ð.GetBlockHeadersRequest{
|
||||||
Origin: eth.HashOrNumber{Hash: chain.Head().Hash()},
|
Origin: eth.HashOrNumber{Hash: chain.Head().Hash()},
|
||||||
Amount: uint64(1),
|
Amount: uint64(1),
|
||||||
|
cmd/devp2p/internal/ethtest/conn.go
@ -0,0 +1,361 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package ethtest

import (
"crypto/ecdsa"
"errors"
"fmt"
"net"
"reflect"
"time"

"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/eth/protocols/snap"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/rlpx"
"github.com/ethereum/go-ethereum/rlp"
)

var (
pretty = spew.ConfigState{
Indent: " ",
DisableCapacities: true,
DisablePointerAddresses: true,
SortKeys: true,
}
timeout = 2 * time.Second
)

// dial attempts to dial the given node and perform a handshake, returning the
// created Conn if successful.
func (s *Suite) dial() (*Conn, error) {
key, _ := crypto.GenerateKey()
return s.dialAs(key)
}

// dialAs attempts to dial a given node and perform a handshake using the given
// private key.
func (s *Suite) dialAs(key *ecdsa.PrivateKey) (*Conn, error) {
fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", s.Dest.IP(), s.Dest.TCP()))
if err != nil {
return nil, err
}
conn := Conn{Conn: rlpx.NewConn(fd, s.Dest.Pubkey())}
conn.ourKey = key
_, err = conn.Handshake(conn.ourKey)
if err != nil {
conn.Close()
return nil, err
}
conn.caps = []p2p.Cap{
{Name: "eth", Version: 67},
{Name: "eth", Version: 68},
}
conn.ourHighestProtoVersion = 68
return &conn, nil
}

// dialSnap creates a connection with snap/1 capability.
func (s *Suite) dialSnap() (*Conn, error) {
conn, err := s.dial()
if err != nil {
return nil, fmt.Errorf("dial failed: %v", err)
}
conn.caps = append(conn.caps, p2p.Cap{Name: "snap", Version: 1})
conn.ourHighestSnapProtoVersion = 1
return conn, nil
}

// Conn represents an individual connection with a peer
type Conn struct {
*rlpx.Conn
ourKey *ecdsa.PrivateKey
negotiatedProtoVersion uint
negotiatedSnapProtoVersion uint
ourHighestProtoVersion uint
ourHighestSnapProtoVersion uint
caps []p2p.Cap
}

// Read reads a packet from the connection.
func (c *Conn) Read() (uint64, []byte, error) {
c.SetReadDeadline(time.Now().Add(timeout))
code, data, _, err := c.Conn.Read()
if err != nil {
return 0, nil, err
}
return code, data, nil
}

// ReadMsg attempts to read a devp2p message with a specific code.
func (c *Conn) ReadMsg(proto Proto, code uint64, msg any) error {
c.SetReadDeadline(time.Now().Add(timeout))
for {
got, data, err := c.Read()
if err != nil {
return err
}
if protoOffset(proto)+code == got {
return rlp.DecodeBytes(data, msg)
}
}
}

// Write writes a eth packet to the connection.
func (c *Conn) Write(proto Proto, code uint64, msg any) error {
c.SetWriteDeadline(time.Now().Add(timeout))
payload, err := rlp.EncodeToBytes(msg)
if err != nil {
return err
}
_, err = c.Conn.Write(protoOffset(proto)+code, payload)
return err
}

// ReadEth reads an Eth sub-protocol wire message.
func (c *Conn) ReadEth() (any, error) {
c.SetReadDeadline(time.Now().Add(timeout))
for {
code, data, _, err := c.Conn.Read()
if err != nil {
return nil, err
}
if code == pingMsg {
c.Write(baseProto, pongMsg, []byte{})
continue
}
if getProto(code) != ethProto {
// Read until eth message.
continue
}
code -= baseProtoLen

var msg any
switch int(code) {
case eth.StatusMsg:
msg = new(eth.StatusPacket)
case eth.GetBlockHeadersMsg:
msg = new(eth.GetBlockHeadersPacket)
case eth.BlockHeadersMsg:
msg = new(eth.BlockHeadersPacket)
case eth.GetBlockBodiesMsg:
msg = new(eth.GetBlockBodiesPacket)
case eth.BlockBodiesMsg:
msg = new(eth.BlockBodiesPacket)
case eth.NewBlockMsg:
msg = new(eth.NewBlockPacket)
case eth.NewBlockHashesMsg:
msg = new(eth.NewBlockHashesPacket)
case eth.TransactionsMsg:
msg = new(eth.TransactionsPacket)
case eth.NewPooledTransactionHashesMsg:
msg = new(eth.NewPooledTransactionHashesPacket68)
case eth.GetPooledTransactionsMsg:
msg = new(eth.GetPooledTransactionsPacket)
case eth.PooledTransactionsMsg:
msg = new(eth.PooledTransactionsPacket)
default:
panic(fmt.Sprintf("unhandled eth msg code %d", code))
}
if err := rlp.DecodeBytes(data, msg); err != nil {
return nil, fmt.Errorf("unable to decode eth msg: %v", err)
}
return msg, nil
}
}

// ReadSnap reads a snap/1 response with the given id from the connection.
func (c *Conn) ReadSnap() (any, error) {
c.SetReadDeadline(time.Now().Add(timeout))
for {
code, data, _, err := c.Conn.Read()
if err != nil {
return nil, err
}
if getProto(code) != snapProto {
// Read until snap message.
continue
}
code -= baseProtoLen + ethProtoLen

var msg any
switch int(code) {
case snap.GetAccountRangeMsg:
msg = new(snap.GetAccountRangePacket)
case snap.AccountRangeMsg:
msg = new(snap.AccountRangePacket)
case snap.GetStorageRangesMsg:
msg = new(snap.GetStorageRangesPacket)
case snap.StorageRangesMsg:
msg = new(snap.StorageRangesPacket)
case snap.GetByteCodesMsg:
msg = new(snap.GetByteCodesPacket)
case snap.ByteCodesMsg:
msg = new(snap.ByteCodesPacket)
case snap.GetTrieNodesMsg:
msg = new(snap.GetTrieNodesPacket)
case snap.TrieNodesMsg:
msg = new(snap.TrieNodesPacket)
default:
panic(fmt.Errorf("unhandled snap code: %d", code))
}
if err := rlp.DecodeBytes(data, msg); err != nil {
return nil, fmt.Errorf("could not rlp decode message: %v", err)
}
return msg, nil
}
}

// peer performs both the protocol handshake and the status message
// exchange with the node in order to peer with it.
func (c *Conn) peer(chain *Chain, status *eth.StatusPacket) error {
if err := c.handshake(); err != nil {
return fmt.Errorf("handshake failed: %v", err)
}
if err := c.statusExchange(chain, status); err != nil {
return fmt.Errorf("status exchange failed: %v", err)
}
return nil
}

// handshake performs a protocol handshake with the node.
func (c *Conn) handshake() error {
// Write hello to client.
pub0 := crypto.FromECDSAPub(&c.ourKey.PublicKey)[1:]
ourHandshake := &protoHandshake{
Version: 5,
Caps: c.caps,
ID: pub0,
}
if err := c.Write(baseProto, handshakeMsg, ourHandshake); err != nil {
return fmt.Errorf("write to connection failed: %v", err)
}
// Read hello from client.
code, data, err := c.Read()
if err != nil {
return fmt.Errorf("erroring reading handshake: %v", err)
}
switch code {
case handshakeMsg:
msg := new(protoHandshake)
if err := rlp.DecodeBytes(data, &msg); err != nil {
return fmt.Errorf("error decoding handshake msg: %v", err)
}
// Set snappy if version is at least 5.
if msg.Version >= 5 {
c.SetSnappy(true)
|
||||||
|
}
|
||||||
|
c.negotiateEthProtocol(msg.Caps)
|
||||||
|
if c.negotiatedProtoVersion == 0 {
|
||||||
|
return fmt.Errorf("could not negotiate eth protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
|
||||||
|
}
|
||||||
|
// If we require snap, verify that it was negotiated.
|
||||||
|
if c.ourHighestSnapProtoVersion != c.negotiatedSnapProtoVersion {
|
||||||
|
return fmt.Errorf("could not negotiate snap protocol (remote caps: %v, local snap version: %v)", msg.Caps, c.ourHighestSnapProtoVersion)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("bad handshake: got msg code %d", code)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// negotiateEthProtocol sets the Conn's eth protocol version to highest
|
||||||
|
// advertised capability from peer.
|
||||||
|
func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) {
|
||||||
|
var highestEthVersion uint
|
||||||
|
var highestSnapVersion uint
|
||||||
|
for _, capability := range caps {
|
||||||
|
switch capability.Name {
|
||||||
|
case "eth":
|
||||||
|
if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion {
|
||||||
|
highestEthVersion = capability.Version
|
||||||
|
}
|
||||||
|
case "snap":
|
||||||
|
if capability.Version > highestSnapVersion && capability.Version <= c.ourHighestSnapProtoVersion {
|
||||||
|
highestSnapVersion = capability.Version
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.negotiatedProtoVersion = highestEthVersion
|
||||||
|
c.negotiatedSnapProtoVersion = highestSnapVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
// statusExchange performs a `Status` message exchange with the given node.
|
||||||
|
func (c *Conn) statusExchange(chain *Chain, status *eth.StatusPacket) error {
|
||||||
|
loop:
|
||||||
|
for {
|
||||||
|
code, data, err := c.Read()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to read from connection: %w", err)
|
||||||
|
}
|
||||||
|
switch code {
|
||||||
|
case eth.StatusMsg + protoOffset(ethProto):
|
||||||
|
msg := new(eth.StatusPacket)
|
||||||
|
if err := rlp.DecodeBytes(data, &msg); err != nil {
|
||||||
|
return fmt.Errorf("error decoding status packet: %w", err)
|
||||||
|
}
|
||||||
|
if have, want := msg.Head, chain.blocks[chain.Len()-1].Hash(); have != want {
|
||||||
|
return fmt.Errorf("wrong head block in status, want: %#x (block %d) have %#x",
|
||||||
|
want, chain.blocks[chain.Len()-1].NumberU64(), have)
|
||||||
|
}
|
||||||
|
if have, want := msg.TD.Cmp(chain.TD()), 0; have != want {
|
||||||
|
return fmt.Errorf("wrong TD in status: have %v want %v", have, want)
|
||||||
|
}
|
||||||
|
if have, want := msg.ForkID, chain.ForkID(); !reflect.DeepEqual(have, want) {
|
||||||
|
return fmt.Errorf("wrong fork ID in status: have %v, want %v", have, want)
|
||||||
|
}
|
||||||
|
if have, want := msg.ProtocolVersion, c.ourHighestProtoVersion; have != uint32(want) {
|
||||||
|
return fmt.Errorf("wrong protocol version: have %v, want %v", have, want)
|
||||||
|
}
|
||||||
|
break loop
|
||||||
|
case discMsg:
|
||||||
|
var msg []p2p.DiscReason
|
||||||
|
if rlp.DecodeBytes(data, &msg); len(msg) == 0 {
|
||||||
|
return errors.New("invalid disconnect message")
|
||||||
|
}
|
||||||
|
return fmt.Errorf("disconnect received: %v", pretty.Sdump(msg))
|
||||||
|
case pingMsg:
|
||||||
|
// TODO (renaynay): in the future, this should be an error
|
||||||
|
// (PINGs should not be a response upon fresh connection)
|
||||||
|
c.Write(baseProto, pongMsg, nil)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("bad status message: code %d", code)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// make sure eth protocol version is set for negotiation
|
||||||
|
if c.negotiatedProtoVersion == 0 {
|
||||||
|
return errors.New("eth protocol version must be set in Conn")
|
||||||
|
}
|
||||||
|
if status == nil {
|
||||||
|
// default status message
|
||||||
|
status = ð.StatusPacket{
|
||||||
|
ProtocolVersion: uint32(c.negotiatedProtoVersion),
|
||||||
|
NetworkID: chain.config.ChainID.Uint64(),
|
||||||
|
TD: chain.TD(),
|
||||||
|
Head: chain.blocks[chain.Len()-1].Hash(),
|
||||||
|
Genesis: chain.blocks[0].Hash(),
|
||||||
|
ForkID: chain.ForkID(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := c.Write(ethProto, eth.StatusMsg, status); err != nil {
|
||||||
|
return fmt.Errorf("write to connection failed: %v", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
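The Read/Write helpers above rely on devp2p's flat message-code space: base protocol codes occupy the first 16 values and eth codes start right after them, so the absolute wire code is protoOffset(proto)+code. The standalone sketch below (not part of this diff; the constants are assumed to match protocol.go further down, and the 0x03 relative code is eth's GetBlockHeaders message) illustrates the same offset arithmetic that ReadMsg and ReadEth perform before RLP-decoding.

package main

import "fmt"

const (
	baseProtoLen = 16 // base devp2p protocol occupies codes 0x00-0x0f
	ethProtoLen  = 17 // eth protocol occupies the next 17 codes
)

func main() {
	// eth's GetBlockHeaders is 0x03 relative to the eth protocol,
	// so it travels on the wire as 0x10+0x03 = 0x13.
	wireCode := uint64(0x13)
	switch {
	case wireCode < baseProtoLen:
		fmt.Printf("base protocol message, code %#x\n", wireCode)
	case wireCode < baseProtoLen+ethProtoLen:
		fmt.Printf("eth message, relative code %#x\n", wireCode-baseProtoLen)
	default:
		fmt.Printf("snap message, relative code %#x\n", wireCode-baseProtoLen-ethProtoLen)
	}
}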
|
69
cmd/devp2p/internal/ethtest/engine.go
Normal file
69
cmd/devp2p/internal/ethtest/engine.go
Normal file
@ -0,0 +1,69 @@
|
|||||||
|
// Copyright 2023 The go-ethereum Authors
|
||||||
|
// This file is part of go-ethereum.
|
||||||
|
//
|
||||||
|
// go-ethereum is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// go-ethereum is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package ethtest
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/golang-jwt/jwt/v4"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EngineClient is a wrapper around engine-related data.
|
||||||
|
type EngineClient struct {
|
||||||
|
url string
|
||||||
|
jwt [32]byte
|
||||||
|
headfcu []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEngineClient creates a new engine client.
|
||||||
|
func NewEngineClient(dir, url, jwt string) (*EngineClient, error) {
|
||||||
|
headfcu, err := os.ReadFile(path.Join(dir, "headfcu.json"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read headfcu: %w", err)
|
||||||
|
}
|
||||||
|
return &EngineClient{url, common.HexToHash(jwt), headfcu}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// token returns the jwt claim token for authorization.
|
||||||
|
func (ec *EngineClient) token() string {
|
||||||
|
claims := jwt.RegisteredClaims{IssuedAt: jwt.NewNumericDate(time.Now())}
|
||||||
|
token, _ := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(ec.jwt[:])
|
||||||
|
return token
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendForkchoiceUpdated sends an fcu for the head of the generated chain.
|
||||||
|
func (ec *EngineClient) sendForkchoiceUpdated() error {
|
||||||
|
var (
|
||||||
|
req, _ = http.NewRequest(http.MethodPost, ec.url, io.NopCloser(bytes.NewReader(ec.headfcu)))
|
||||||
|
header = make(http.Header)
|
||||||
|
)
|
||||||
|
// Set header
|
||||||
|
header.Set("accept", "application/json")
|
||||||
|
header.Set("content-type", "application/json")
|
||||||
|
header.Set("Authorization", fmt.Sprintf("Bearer %v", ec.token()))
|
||||||
|
req.Header = header
|
||||||
|
|
||||||
|
_, err := new(http.Client).Do(req)
|
||||||
|
return err
|
||||||
|
}
|
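EngineClient.token signs an HS256 JWT over just an IssuedAt claim with the 32-byte shared secret, and the authenticated engine endpoint accepts the request only if the signature verifies. The sketch below is a minimal, self-contained round-trip of that scheme using golang-jwt/v4; the verification side is an assumption about what an engine-API server does in general, not a copy of geth's actual handler.

package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	var secret [32]byte // stand-in for the random JWT secret shared with the node

	// Sign, as EngineClient.token does.
	claims := jwt.RegisteredClaims{IssuedAt: jwt.NewNumericDate(time.Now())}
	tok, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(secret[:])
	if err != nil {
		panic(err)
	}

	// Verify with the same secret, roughly as the authenticated endpoint would.
	parsed, err := jwt.Parse(tok, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method %v", t.Header["alg"])
		}
		return secret[:], nil
	})
	if err != nil || !parsed.Valid {
		panic(err)
	}
	fmt.Println("token accepted")
}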
@@ -1,650 +0,0 @@ (deleted file)
// Copyright 2021 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package ethtest

import (
	"errors"
	"fmt"
	"net"
	"reflect"
	"strings"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/internal/utesting"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/rlpx"
)

var (
	pretty = spew.ConfigState{
		Indent:                  "  ",
		DisableCapacities:       true,
		DisablePointerAddresses: true,
		SortKeys:                true,
	}
	timeout = 20 * time.Second
)

// dial attempts to dial the given node and perform a handshake,
// returning the created Conn if successful.
func (s *Suite) dial() (*Conn, error) {
	// dial
	fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", s.Dest.IP(), s.Dest.TCP()))
	if err != nil {
		return nil, err
	}
	conn := Conn{Conn: rlpx.NewConn(fd, s.Dest.Pubkey())}
	// do encHandshake
	conn.ourKey, _ = crypto.GenerateKey()
	_, err = conn.Handshake(conn.ourKey)
	if err != nil {
		conn.Close()
		return nil, err
	}
	// set default p2p capabilities
	conn.caps = []p2p.Cap{
		{Name: "eth", Version: 67},
		{Name: "eth", Version: 68},
	}
	conn.ourHighestProtoVersion = 68
	return &conn, nil
}

// dialSnap creates a connection with snap/1 capability.
func (s *Suite) dialSnap() (*Conn, error) {
	conn, err := s.dial()
	if err != nil {
		return nil, fmt.Errorf("dial failed: %v", err)
	}
	conn.caps = append(conn.caps, p2p.Cap{Name: "snap", Version: 1})
	conn.ourHighestSnapProtoVersion = 1
	return conn, nil
}

// peer performs both the protocol handshake and the status message
// exchange with the node in order to peer with it.
func (c *Conn) peer(chain *Chain, status *Status) error {
	if err := c.handshake(); err != nil {
		return fmt.Errorf("handshake failed: %v", err)
	}
	if _, err := c.statusExchange(chain, status); err != nil {
		return fmt.Errorf("status exchange failed: %v", err)
	}
	return nil
}

// handshake performs a protocol handshake with the node.
func (c *Conn) handshake() error {
	defer c.SetDeadline(time.Time{})
	c.SetDeadline(time.Now().Add(10 * time.Second))
	// write hello to client
	pub0 := crypto.FromECDSAPub(&c.ourKey.PublicKey)[1:]
	ourHandshake := &Hello{
		Version: 5,
		Caps:    c.caps,
		ID:      pub0,
	}
	if err := c.Write(ourHandshake); err != nil {
		return fmt.Errorf("write to connection failed: %v", err)
	}
	// read hello from client
	switch msg := c.Read().(type) {
	case *Hello:
		// set snappy if version is at least 5
		if msg.Version >= 5 {
			c.SetSnappy(true)
		}
		c.negotiateEthProtocol(msg.Caps)
		if c.negotiatedProtoVersion == 0 {
			return fmt.Errorf("could not negotiate eth protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
		}
		// If we require snap, verify that it was negotiated
		if c.ourHighestSnapProtoVersion != c.negotiatedSnapProtoVersion {
			return fmt.Errorf("could not negotiate snap protocol (remote caps: %v, local snap version: %v)", msg.Caps, c.ourHighestSnapProtoVersion)
		}
		return nil
	default:
		return fmt.Errorf("bad handshake: %#v", msg)
	}
}

// negotiateEthProtocol sets the Conn's eth protocol version to highest
// advertised capability from peer.
func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) {
	var highestEthVersion uint
	var highestSnapVersion uint
	for _, capability := range caps {
		switch capability.Name {
		case "eth":
			if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion {
				highestEthVersion = capability.Version
			}
		case "snap":
			if capability.Version > highestSnapVersion && capability.Version <= c.ourHighestSnapProtoVersion {
				highestSnapVersion = capability.Version
			}
		}
	}
	c.negotiatedProtoVersion = highestEthVersion
	c.negotiatedSnapProtoVersion = highestSnapVersion
}

// statusExchange performs a `Status` message exchange with the given node.
func (c *Conn) statusExchange(chain *Chain, status *Status) (Message, error) {
	defer c.SetDeadline(time.Time{})
	c.SetDeadline(time.Now().Add(20 * time.Second))

	// read status message from client
	var message Message
loop:
	for {
		switch msg := c.Read().(type) {
		case *Status:
			if have, want := msg.Head, chain.blocks[chain.Len()-1].Hash(); have != want {
				return nil, fmt.Errorf("wrong head block in status, want: %#x (block %d) have %#x",
					want, chain.blocks[chain.Len()-1].NumberU64(), have)
			}
			if have, want := msg.TD.Cmp(chain.TD()), 0; have != want {
				return nil, fmt.Errorf("wrong TD in status: have %v want %v", have, want)
			}
			if have, want := msg.ForkID, chain.ForkID(); !reflect.DeepEqual(have, want) {
				return nil, fmt.Errorf("wrong fork ID in status: have %v, want %v", have, want)
			}
			if have, want := msg.ProtocolVersion, c.ourHighestProtoVersion; have != uint32(want) {
				return nil, fmt.Errorf("wrong protocol version: have %v, want %v", have, want)
			}
			message = msg
			break loop
		case *Disconnect:
			return nil, fmt.Errorf("disconnect received: %v", msg.Reason)
		case *Ping:
			c.Write(&Pong{}) // TODO (renaynay): in the future, this should be an error
			// (PINGs should not be a response upon fresh connection)
		default:
			return nil, fmt.Errorf("bad status message: %s", pretty.Sdump(msg))
		}
	}
	// make sure eth protocol version is set for negotiation
	if c.negotiatedProtoVersion == 0 {
		return nil, errors.New("eth protocol version must be set in Conn")
	}
	if status == nil {
		// default status message
		status = &Status{
			ProtocolVersion: uint32(c.negotiatedProtoVersion),
			NetworkID:       chain.chainConfig.ChainID.Uint64(),
			TD:              chain.TD(),
			Head:            chain.blocks[chain.Len()-1].Hash(),
			Genesis:         chain.blocks[0].Hash(),
			ForkID:          chain.ForkID(),
		}
	}
	if err := c.Write(status); err != nil {
		return nil, fmt.Errorf("write to connection failed: %v", err)
	}
	return message, nil
}

// createSendAndRecvConns creates two connections, one for sending messages to the
// node, and one for receiving messages from the node.
func (s *Suite) createSendAndRecvConns() (*Conn, *Conn, error) {
	sendConn, err := s.dial()
	if err != nil {
		return nil, nil, fmt.Errorf("dial failed: %v", err)
	}
	recvConn, err := s.dial()
	if err != nil {
		sendConn.Close()
		return nil, nil, fmt.Errorf("dial failed: %v", err)
	}
	return sendConn, recvConn, nil
}

// readAndServe serves GetBlockHeaders requests while waiting
// on another message from the node.
func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
	start := time.Now()
	for time.Since(start) < timeout {
		c.SetReadDeadline(time.Now().Add(10 * time.Second))

		msg := c.Read()
		switch msg := msg.(type) {
		case *Ping:
			c.Write(&Pong{})
		case *GetBlockHeaders:
			headers, err := chain.GetHeaders(msg)
			if err != nil {
				return errorf("could not get headers for inbound header request: %v", err)
			}
			resp := &BlockHeaders{
				RequestId:           msg.ReqID(),
				BlockHeadersRequest: eth.BlockHeadersRequest(headers),
			}
			if err := c.Write(resp); err != nil {
				return errorf("could not write to connection: %v", err)
			}
		default:
			return msg
		}
	}
	return errorf("no message received within %v", timeout)
}

// headersRequest executes the given `GetBlockHeaders` request.
func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, reqID uint64) ([]*types.Header, error) {
	defer c.SetReadDeadline(time.Time{})
	c.SetReadDeadline(time.Now().Add(20 * time.Second))

	// write request
	request.RequestId = reqID
	if err := c.Write(request); err != nil {
		return nil, fmt.Errorf("could not write to connection: %v", err)
	}

	// wait for response
	msg := c.waitForResponse(chain, timeout, request.RequestId)
	resp, ok := msg.(*BlockHeaders)
	if !ok {
		return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg))
	}
	headers := []*types.Header(resp.BlockHeadersRequest)
	return headers, nil
}

func (c *Conn) snapRequest(msg Message, id uint64, chain *Chain) (Message, error) {
	defer c.SetReadDeadline(time.Time{})
	c.SetReadDeadline(time.Now().Add(5 * time.Second))
	if err := c.Write(msg); err != nil {
		return nil, fmt.Errorf("could not write to connection: %v", err)
	}
	return c.ReadSnap(id)
}

// headersMatch returns whether the received headers match the given request
func headersMatch(expected []*types.Header, headers []*types.Header) bool {
	return reflect.DeepEqual(expected, headers)
}

// waitForResponse reads from the connection until a response with the expected
// request ID is received.
func (c *Conn) waitForResponse(chain *Chain, timeout time.Duration, requestID uint64) Message {
	for {
		msg := c.readAndServe(chain, timeout)
		if msg.ReqID() == requestID {
			return msg
		}
	}
}

// sendNextBlock broadcasts the next block in the chain and waits
// for the node to propagate the block and import it into its chain.
func (s *Suite) sendNextBlock() error {
	// set up sending and receiving connections
	sendConn, recvConn, err := s.createSendAndRecvConns()
	if err != nil {
		return err
	}
	defer sendConn.Close()
	defer recvConn.Close()
	if err = sendConn.peer(s.chain, nil); err != nil {
		return fmt.Errorf("peering failed: %v", err)
	}
	if err = recvConn.peer(s.chain, nil); err != nil {
		return fmt.Errorf("peering failed: %v", err)
	}
	// create new block announcement
	nextBlock := s.fullChain.blocks[s.chain.Len()]
	blockAnnouncement := &NewBlock{
		Block: nextBlock,
		TD:    s.fullChain.TotalDifficultyAt(s.chain.Len()),
	}
	// send announcement and wait for node to request the header
	if err = s.testAnnounce(sendConn, recvConn, blockAnnouncement); err != nil {
		return fmt.Errorf("failed to announce block: %v", err)
	}
	// wait for client to update its chain
	if err = s.waitForBlockImport(recvConn, nextBlock); err != nil {
		return fmt.Errorf("failed to receive confirmation of block import: %v", err)
	}
	// update test suite chain
	s.chain.blocks = append(s.chain.blocks, nextBlock)
	return nil
}

// testAnnounce writes a block announcement to the node and waits for the node
// to propagate it.
func (s *Suite) testAnnounce(sendConn, receiveConn *Conn, blockAnnouncement *NewBlock) error {
	if err := sendConn.Write(blockAnnouncement); err != nil {
		return fmt.Errorf("could not write to connection: %v", err)
	}
	return s.waitAnnounce(receiveConn, blockAnnouncement)
}

// waitAnnounce waits for a NewBlock or NewBlockHashes announcement from the node.
func (s *Suite) waitAnnounce(conn *Conn, blockAnnouncement *NewBlock) error {
	for {
		switch msg := conn.readAndServe(s.chain, timeout).(type) {
		case *NewBlock:
			if !reflect.DeepEqual(blockAnnouncement.Block.Header(), msg.Block.Header()) {
				return fmt.Errorf("wrong header in block announcement: \nexpected %v "+
					"\ngot %v", blockAnnouncement.Block.Header(), msg.Block.Header())
			}
			if !reflect.DeepEqual(blockAnnouncement.TD, msg.TD) {
				return fmt.Errorf("wrong TD in announcement: expected %v, got %v", blockAnnouncement.TD, msg.TD)
			}
			return nil
		case *NewBlockHashes:
			hashes := *msg
			if blockAnnouncement.Block.Hash() != hashes[0].Hash {
				return fmt.Errorf("wrong block hash in announcement: expected %v, got %v", blockAnnouncement.Block.Hash(), hashes[0].Hash)
			}
			return nil

		// ignore tx announcements from previous tests
		case *NewPooledTransactionHashes66:
			continue
		case *NewPooledTransactionHashes:
			continue
		case *Transactions:
			continue

		default:
			return fmt.Errorf("unexpected: %s", pretty.Sdump(msg))
		}
	}
}

func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block) error {
	defer conn.SetReadDeadline(time.Time{})
	conn.SetReadDeadline(time.Now().Add(20 * time.Second))
	// create request
	req := &GetBlockHeaders{
		GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
			Origin: eth.HashOrNumber{Hash: block.Hash()},
			Amount: 1,
		},
	}

	// loop until BlockHeaders response contains desired block, confirming the
	// node imported the block
	for {
		requestID := uint64(54)
		headers, err := conn.headersRequest(req, s.chain, requestID)
		if err != nil {
			return fmt.Errorf("GetBlockHeader request failed: %v", err)
		}
		// if headers response is empty, node hasn't imported block yet, try again
		if len(headers) == 0 {
			time.Sleep(100 * time.Millisecond)
			continue
		}
		if !reflect.DeepEqual(block.Header(), headers[0]) {
			return fmt.Errorf("wrong header returned: wanted %v, got %v", block.Header(), headers[0])
		}
		return nil
	}
}

func (s *Suite) oldAnnounce() error {
	sendConn, receiveConn, err := s.createSendAndRecvConns()
	if err != nil {
		return err
	}
	defer sendConn.Close()
	defer receiveConn.Close()
	if err := sendConn.peer(s.chain, nil); err != nil {
		return fmt.Errorf("peering failed: %v", err)
	}
	if err := receiveConn.peer(s.chain, nil); err != nil {
		return fmt.Errorf("peering failed: %v", err)
	}
	// create old block announcement
	oldBlockAnnounce := &NewBlock{
		Block: s.chain.blocks[len(s.chain.blocks)/2],
		TD:    s.chain.blocks[len(s.chain.blocks)/2].Difficulty(),
	}
	if err := sendConn.Write(oldBlockAnnounce); err != nil {
		return fmt.Errorf("could not write to connection: %v", err)
	}
	// wait to see if the announcement is propagated
	switch msg := receiveConn.readAndServe(s.chain, time.Second*8).(type) {
	case *NewBlock:
		block := *msg
		if block.Block.Hash() == oldBlockAnnounce.Block.Hash() {
			return fmt.Errorf("unexpected: block propagated: %s", pretty.Sdump(msg))
		}
	case *NewBlockHashes:
		hashes := *msg
		for _, hash := range hashes {
			if hash.Hash == oldBlockAnnounce.Block.Hash() {
				return fmt.Errorf("unexpected: block announced: %s", pretty.Sdump(msg))
			}
		}
	case *Error:
		errMsg := *msg
		// check to make sure error is timeout (propagation didn't come through == test successful)
		if !strings.Contains(errMsg.String(), "timeout") {
			return fmt.Errorf("unexpected error: %v", pretty.Sdump(msg))
		}
	default:
		return fmt.Errorf("unexpected: %s", pretty.Sdump(msg))
	}
	return nil
}

func (s *Suite) maliciousHandshakes(t *utesting.T) error {
	conn, err := s.dial()
	if err != nil {
		return fmt.Errorf("dial failed: %v", err)
	}
	defer conn.Close()

	// write hello to client
	pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:]
	handshakes := []*Hello{
		{
			Version: 5,
			Caps: []p2p.Cap{
				{Name: largeString(2), Version: 64},
			},
			ID: pub0,
		},
		{
			Version: 5,
			Caps: []p2p.Cap{
				{Name: "eth", Version: 64},
				{Name: "eth", Version: 65},
			},
			ID: append(pub0, byte(0)),
		},
		{
			Version: 5,
			Caps: []p2p.Cap{
				{Name: "eth", Version: 64},
				{Name: "eth", Version: 65},
			},
			ID: append(pub0, pub0...),
		},
		{
			Version: 5,
			Caps: []p2p.Cap{
				{Name: "eth", Version: 64},
				{Name: "eth", Version: 65},
			},
			ID: largeBuffer(2),
		},
		{
			Version: 5,
			Caps: []p2p.Cap{
				{Name: largeString(2), Version: 64},
			},
			ID: largeBuffer(2),
		},
	}
	for i, handshake := range handshakes {
		t.Logf("Testing malicious handshake %v\n", i)
		if err := conn.Write(handshake); err != nil {
			return fmt.Errorf("could not write to connection: %v", err)
		}
		// check that the peer disconnected
		for i := 0; i < 2; i++ {
			switch msg := conn.readAndServe(s.chain, 20*time.Second).(type) {
			case *Disconnect:
			case *Error:
			case *Hello:
				// Discard one hello as Hello's are sent concurrently
				continue
			default:
				return fmt.Errorf("unexpected: %s", pretty.Sdump(msg))
			}
		}
		// dial for the next round
		conn, err = s.dial()
		if err != nil {
			return fmt.Errorf("dial failed: %v", err)
		}
	}
	return nil
}

func (s *Suite) maliciousStatus(conn *Conn) error {
	if err := conn.handshake(); err != nil {
		return fmt.Errorf("handshake failed: %v", err)
	}
	status := &Status{
		ProtocolVersion: uint32(conn.negotiatedProtoVersion),
		NetworkID:       s.chain.chainConfig.ChainID.Uint64(),
		TD:              largeNumber(2),
		Head:            s.chain.blocks[s.chain.Len()-1].Hash(),
		Genesis:         s.chain.blocks[0].Hash(),
		ForkID:          s.chain.ForkID(),
	}

	// get status
	msg, err := conn.statusExchange(s.chain, status)
	if err != nil {
		return fmt.Errorf("status exchange failed: %v", err)
	}
	switch msg := msg.(type) {
	case *Status:
	default:
		return fmt.Errorf("expected status, got: %#v ", msg)
	}

	// wait for disconnect
	switch msg := conn.readAndServe(s.chain, timeout).(type) {
	case *Disconnect:
		return nil
	case *Error:
		return nil
	default:
		return fmt.Errorf("expected disconnect, got: %s", pretty.Sdump(msg))
	}
}

func (s *Suite) hashAnnounce() error {
	// create connections
	sendConn, recvConn, err := s.createSendAndRecvConns()
	if err != nil {
		return fmt.Errorf("failed to create connections: %v", err)
	}
	defer sendConn.Close()
	defer recvConn.Close()
	if err := sendConn.peer(s.chain, nil); err != nil {
		return fmt.Errorf("peering failed: %v", err)
	}
	if err := recvConn.peer(s.chain, nil); err != nil {
		return fmt.Errorf("peering failed: %v", err)
	}

	// create NewBlockHashes announcement
	type anno struct {
		Hash   common.Hash // Hash of one particular block being announced
		Number uint64      // Number of one particular block being announced
	}
	nextBlock := s.fullChain.blocks[s.chain.Len()]
	announcement := anno{Hash: nextBlock.Hash(), Number: nextBlock.Number().Uint64()}
	newBlockHash := &NewBlockHashes{announcement}
	if err := sendConn.Write(newBlockHash); err != nil {
		return fmt.Errorf("failed to write to connection: %v", err)
	}

	// Announcement sent, now wait for a header request
	msg := sendConn.Read()
	blockHeaderReq, ok := msg.(*GetBlockHeaders)
	if !ok {
		return fmt.Errorf("unexpected %s", pretty.Sdump(msg))
	}
	if blockHeaderReq.Amount != 1 {
		return fmt.Errorf("unexpected number of block headers requested: %v", blockHeaderReq.Amount)
	}
	if blockHeaderReq.Origin.Hash != announcement.Hash {
		return fmt.Errorf("unexpected block header requested. Announced:\n %v\n Remote request:\n%v",
			pretty.Sdump(announcement),
			pretty.Sdump(blockHeaderReq))
	}
	err = sendConn.Write(&BlockHeaders{
		RequestId:           blockHeaderReq.ReqID(),
		BlockHeadersRequest: eth.BlockHeadersRequest{nextBlock.Header()},
	})
	if err != nil {
		return fmt.Errorf("failed to write to connection: %v", err)
	}

	// wait for block announcement
	msg = recvConn.readAndServe(s.chain, timeout)
	switch msg := msg.(type) {
	case *NewBlockHashes:
		hashes := *msg
		if len(hashes) != 1 {
			return fmt.Errorf("unexpected new block hash announcement: wanted 1 announcement, got %d", len(hashes))
		}
		if nextBlock.Hash() != hashes[0].Hash {
			return fmt.Errorf("unexpected block hash announcement, wanted %v, got %v", nextBlock.Hash(),
				hashes[0].Hash)
		}

	case *NewBlock:
		// node should only propagate NewBlock without having requested the body if the body is empty
		nextBlockBody := nextBlock.Body()
		if len(nextBlockBody.Transactions) != 0 || len(nextBlockBody.Uncles) != 0 {
			return fmt.Errorf("unexpected non-empty new block propagated: %s", pretty.Sdump(msg))
		}
		if msg.Block.Hash() != nextBlock.Hash() {
			return fmt.Errorf("mismatched hash of propagated new block: wanted %v, got %v",
				nextBlock.Hash(), msg.Block.Hash())
		}
		// check to make sure header matches header that was sent to the node
		if !reflect.DeepEqual(nextBlock.Header(), msg.Block.Header()) {
			return fmt.Errorf("incorrect header received: wanted %v, got %v", nextBlock.Header(), msg.Block.Header())
		}
	default:
		return fmt.Errorf("unexpected: %s", pretty.Sdump(msg))
	}
	// confirm node imported block
	if err := s.waitForBlockImport(recvConn, nextBlock); err != nil {
		return fmt.Errorf("error waiting for node to import new block: %v", err)
	}
	// update the chain
	s.chain.blocks = append(s.chain.blocks, nextBlock)
	return nil
}
@@ -1,80 +0,0 @@ (deleted file)
// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package ethtest

import (
	"crypto/rand"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
)

// largeNumber returns a very large big.Int.
func largeNumber(megabytes int) *big.Int {
	buf := make([]byte, megabytes*1024*1024)
	rand.Read(buf)
	bigint := new(big.Int)
	bigint.SetBytes(buf)
	return bigint
}

// largeBuffer returns a very large buffer.
func largeBuffer(megabytes int) []byte {
	buf := make([]byte, megabytes*1024*1024)
	rand.Read(buf)
	return buf
}

// largeString returns a very large string.
func largeString(megabytes int) string {
	buf := make([]byte, megabytes*1024*1024)
	rand.Read(buf)
	return hexutil.Encode(buf)
}

func largeBlock() *types.Block {
	return types.NewBlockWithHeader(largeHeader())
}

// Returns a random hash
func randHash() common.Hash {
	var h common.Hash
	rand.Read(h[:])
	return h
}

func largeHeader() *types.Header {
	return &types.Header{
		MixDigest:   randHash(),
		ReceiptHash: randHash(),
		TxHash:      randHash(),
		Nonce:       types.BlockNonce{},
		Extra:       []byte{},
		Bloom:       types.Bloom{},
		GasUsed:     0,
		Coinbase:    common.Address{},
		GasLimit:    0,
		UncleHash:   types.EmptyUncleHash,
		Time:        1337,
		ParentHash:  randHash(),
		Root:        randHash(),
		Number:      largeNumber(2),
		Difficulty:  largeNumber(2),
	}
}
|
9
cmd/devp2p/internal/ethtest/mkchain.sh
Normal file
9
cmd/devp2p/internal/ethtest/mkchain.sh
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
hivechain generate \
|
||||||
|
--fork-interval 6 \
|
||||||
|
--tx-interval 1 \
|
||||||
|
--length 500 \
|
||||||
|
--outdir testdata \
|
||||||
|
--lastfork cancun \
|
||||||
|
--outputs accounts,genesis,chain,headstate,txinfo,headblock,headfcu,newpayload,forkenv
|
cmd/devp2p/internal/ethtest/protocol.go (new file, 87 lines)
@@ -0,0 +1,87 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package ethtest

import (
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rlp"
)

// Unexported devp2p message codes from p2p/peer.go.
const (
	handshakeMsg = 0x00
	discMsg      = 0x01
	pingMsg      = 0x02
	pongMsg      = 0x03
)

// Unexported devp2p protocol lengths from p2p package.
const (
	baseProtoLen = 16
	ethProtoLen  = 17
	snapProtoLen = 8
)

// Unexported handshake structure from p2p/peer.go.
type protoHandshake struct {
	Version    uint64
	Name       string
	Caps       []p2p.Cap
	ListenPort uint64
	ID         []byte
	Rest       []rlp.RawValue `rlp:"tail"`
}

type Hello = protoHandshake

// Proto is an enum representing devp2p protocol types.
type Proto int

const (
	baseProto Proto = iota
	ethProto
	snapProto
)

// getProto returns the protocol a certain message code is associated with
// (assuming the negotiated capabilities are exactly {eth,snap})
func getProto(code uint64) Proto {
	switch {
	case code < baseProtoLen:
		return baseProto
	case code < baseProtoLen+ethProtoLen:
		return ethProto
	case code < baseProtoLen+ethProtoLen+snapProtoLen:
		return snapProto
	default:
		panic("unhandled msg code beyond last protocol")
	}
}

// protoOffset will return the offset at which the specified protocol's messages
// begin.
func protoOffset(proto Proto) uint64 {
	switch proto {
	case baseProto:
		return 0
	case ethProto:
		return baseProtoLen
	case snapProto:
		return baseProtoLen + ethProtoLen
	default:
		panic("unhandled protocol")
	}
}
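The protoHandshake struct above ends with a `rlp:"tail"` field, which is what lets older test code decode Hello messages from peers that append extra list elements. The following self-contained sketch (not part of the diff; the local Hello type is redeclared only so the example compiles on its own) shows the RLP round-trip that Conn.handshake relies on.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rlp"
)

// Hello mirrors the protoHandshake structure above, redeclared for the sketch.
type Hello struct {
	Version    uint64
	Name       string
	Caps       []p2p.Cap
	ListenPort uint64
	ID         []byte
	Rest       []rlp.RawValue `rlp:"tail"`
}

func main() {
	in := Hello{
		Version: 5,
		Name:    "ethtest",
		Caps:    []p2p.Cap{{Name: "eth", Version: 68}, {Name: "snap", Version: 1}},
		ID:      make([]byte, 64),
	}
	enc, err := rlp.EncodeToBytes(&in)
	if err != nil {
		panic(err)
	}
	var out Hello
	if err := rlp.DecodeBytes(enc, &out); err != nil {
		panic(err)
	}
	// The `rlp:"tail"` field absorbs any extra list elements a newer peer might
	// append, which keeps the handshake forward-compatible.
	fmt.Printf("decoded caps: %v\n", out.Caps)
}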
[File diff suppressed because it is too large]
@@ -1,60 +0,0 @@ (deleted file)
// Copyright 2022 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package ethtest

import "github.com/ethereum/go-ethereum/eth/protocols/snap"

// GetAccountRange represents an account range query.
type GetAccountRange snap.GetAccountRangePacket

func (msg GetAccountRange) Code() int     { return 33 }
func (msg GetAccountRange) ReqID() uint64 { return msg.ID }

type AccountRange snap.AccountRangePacket

func (msg AccountRange) Code() int     { return 34 }
func (msg AccountRange) ReqID() uint64 { return msg.ID }

type GetStorageRanges snap.GetStorageRangesPacket

func (msg GetStorageRanges) Code() int     { return 35 }
func (msg GetStorageRanges) ReqID() uint64 { return msg.ID }

type StorageRanges snap.StorageRangesPacket

func (msg StorageRanges) Code() int     { return 36 }
func (msg StorageRanges) ReqID() uint64 { return msg.ID }

type GetByteCodes snap.GetByteCodesPacket

func (msg GetByteCodes) Code() int     { return 37 }
func (msg GetByteCodes) ReqID() uint64 { return msg.ID }

type ByteCodes snap.ByteCodesPacket

func (msg ByteCodes) Code() int     { return 38 }
func (msg ByteCodes) ReqID() uint64 { return msg.ID }

type GetTrieNodes snap.GetTrieNodesPacket

func (msg GetTrieNodes) Code() int     { return 39 }
func (msg GetTrieNodes) ReqID() uint64 { return msg.ID }

type TrieNodes snap.TrieNodesPacket

func (msg TrieNodes) Code() int     { return 40 }
func (msg TrieNodes) ReqID() uint64 { return msg.ID }
[File diff suppressed because it is too large]
@@ -17,37 +17,53 @@
 package ethtest

 import (
+	crand "crypto/rand"
+	"fmt"
 	"os"
+	"path"
 	"testing"
 	"time"

+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/eth/catalyst"
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
 	"github.com/ethereum/go-ethereum/internal/utesting"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
 )

-var (
-	genesisFile   = "./testdata/genesis.json"
-	halfchainFile = "./testdata/halfchain.rlp"
-	fullchainFile = "./testdata/chain.rlp"
-)
+func makeJWTSecret() (string, [32]byte, error) {
+	var secret [32]byte
+	if _, err := crand.Read(secret[:]); err != nil {
+		return "", secret, fmt.Errorf("failed to create jwt secret: %v", err)
+	}
+	jwtPath := path.Join(os.TempDir(), "jwt_secret")
+	if err := os.WriteFile(jwtPath, []byte(hexutil.Encode(secret[:])), 0600); err != nil {
+		return "", secret, fmt.Errorf("failed to prepare jwt secret file: %v", err)
+	}
+	return jwtPath, secret, nil
+}

 func TestEthSuite(t *testing.T) {
-	geth, err := runGeth()
+	jwtPath, secret, err := makeJWTSecret()
+	if err != nil {
+		t.Fatalf("could not make jwt secret: %v", err)
+	}
+	geth, err := runGeth("./testdata", jwtPath)
 	if err != nil {
 		t.Fatalf("could not run geth: %v", err)
 	}
 	defer geth.Close()

-	suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile)
+	suite, err := NewSuite(geth.Server().Self(), "./testdata", geth.HTTPAuthEndpoint(), common.Bytes2Hex(secret[:]))
 	if err != nil {
 		t.Fatalf("could not create new test suite: %v", err)
 	}
 	for _, test := range suite.EthTests() {
 		t.Run(test.Name, func(t *testing.T) {
-			result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
+			result := utesting.RunTests([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
 			if result[0].Failed {
 				t.Fatal()
 			}
@@ -56,19 +72,23 @@ func TestEthSuite(t *testing.T) {
 }

 func TestSnapSuite(t *testing.T) {
-	geth, err := runGeth()
+	jwtPath, secret, err := makeJWTSecret()
+	if err != nil {
+		t.Fatalf("could not make jwt secret: %v", err)
+	}
+	geth, err := runGeth("./testdata", jwtPath)
 	if err != nil {
 		t.Fatalf("could not run geth: %v", err)
 	}
 	defer geth.Close()

-	suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile)
+	suite, err := NewSuite(geth.Server().Self(), "./testdata", geth.HTTPAuthEndpoint(), common.Bytes2Hex(secret[:]))
 	if err != nil {
 		t.Fatalf("could not create new test suite: %v", err)
 	}
 	for _, test := range suite.SnapTests() {
 		t.Run(test.Name, func(t *testing.T) {
-			result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
+			result := utesting.RunTests([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
 			if result[0].Failed {
 				t.Fatal()
 			}
@@ -77,20 +97,23 @@ func TestSnapSuite(t *testing.T) {
 }

 // runGeth creates and starts a geth node
-func runGeth() (*node.Node, error) {
+func runGeth(dir string, jwtPath string) (*node.Node, error) {
 	stack, err := node.New(&node.Config{
+		AuthAddr: "127.0.0.1",
+		AuthPort: 0,
 		P2P: p2p.Config{
 			ListenAddr:  "127.0.0.1:0",
 			NoDiscovery: true,
 			MaxPeers:    10, // in case a test requires multiple connections, can be changed in the future
 			NoDial:      true,
 		},
+		JWTSecret: jwtPath,
 	})
 	if err != nil {
 		return nil, err
 	}

-	err = setupGeth(stack)
+	err = setupGeth(stack, dir)
 	if err != nil {
 		stack.Close()
 		return nil, err
@@ -102,12 +125,11 @@ func runGeth() (*node.Node, error) {
 	return stack, nil
 }

-func setupGeth(stack *node.Node) error {
-	chain, err := loadChain(halfchainFile, genesisFile)
+func setupGeth(stack *node.Node, dir string) error {
+	chain, err := NewChain(dir)
 	if err != nil {
 		return err
 	}

 	backend, err := eth.New(stack, &ethconfig.Config{
 		Genesis:   &chain.genesis,
 		NetworkId: chain.genesis.Config.ChainID.Uint64(), // 19763
@@ -120,8 +142,9 @@ func setupGeth(stack *node.Node) error {
 	if err != nil {
 		return err
 	}
-	backend.SetSynced()
+	if err := catalyst.Register(stack, backend); err != nil {
+		return fmt.Errorf("failed to register catalyst service: %v", err)
+	}
 	_, err = backend.BlockChain().InsertChain(chain.blocks[1:])
 	return err
 }
62
cmd/devp2p/internal/ethtest/testdata/accounts.json
vendored
Normal file
@ -0,0 +1,62 @@
{
"0x0c2c51a0990aee1d73c1228de158688341557508": {
"key": "0xbfcd0e032489319f4e5ca03e643b2025db624be6cf99cbfed90c4502e3754850"
},
"0x14e46043e63d0e3cdcf2530519f4cfaf35058cb2": {
"key": "0x457075f6822ac29481154792f65c5f1ec335b4fea9ca20f3fea8fa1d78a12c68"
},
"0x16c57edf7fa9d9525378b0b81bf8a3ced0620c1c": {
"key": "0x865898edcf43206d138c93f1bbd86311f4657b057658558888aa5ac4309626a6"
},
"0x1f4924b14f34e24159387c0a4cdbaa32f3ddb0cf": {
"key": "0xee7f7875d826d7443ccc5c174e38b2c436095018774248a8074ee92d8914dcdb"
},
"0x1f5bde34b4afc686f136c7a3cb6ec376f7357759": {
"key": "0x25e6ce8611cefb5cd338aeaa9292ed2139714668d123a4fb156cabb42051b5b7"
},
"0x2d389075be5be9f2246ad654ce152cf05990b209": {
"key": "0x19168cd7767604b3d19b99dc3da1302b9ccb6ee9ad61660859e07acd4a2625dd"
},
"0x3ae75c08b4c907eb63a8960c45b86e1e9ab6123c": {
"key": "0x71aa7d299c7607dabfc3d0e5213d612b5e4a97455b596c2f642daac43fa5eeaa"
},
"0x4340ee1b812acb40a1eb561c019c327b243b92df": {
"key": "0x47f666f20e2175606355acec0ea1b37870c15e5797e962340da7ad7972a537e8"
},
"0x4a0f1452281bcec5bd90c3dce6162a5995bfe9df": {
"key": "0xa88293fefc623644969e2ce6919fb0dbd0fd64f640293b4bf7e1a81c97e7fc7f"
},
"0x4dde844b71bcdf95512fb4dc94e84fb67b512ed8": {
"key": "0x6e1e16a9c15641c73bf6e237f9293ab1d4e7c12b9adf83cfc94bcf969670f72d"
},
"0x5f552da00dfb4d3749d9e62dcee3c918855a86a0": {
"key": "0x41be4e00aac79f7ffbb3455053ec05e971645440d594c047cdcc56a3c7458bd6"
},
"0x654aa64f5fbefb84c270ec74211b81ca8c44a72e": {
"key": "0xc825f31cd8792851e33a290b3d749e553983111fc1f36dfbbdb45f101973f6a9"
},
"0x717f8aa2b982bee0e29f573d31df288663e1ce16": {
"key": "0x8d0faa04ae0f9bc3cd4c890aa025d5f40916f4729538b19471c0beefe11d9e19"
},
"0x7435ed30a8b4aeb0877cef0c6e8cffe834eb865f": {
"key": "0x4552dbe6ca4699322b5d923d0c9bcdd24644f5db8bf89a085b67c6c49b8a1b91"
},
"0x83c7e323d189f18725ac510004fdc2941f8c4a78": {
"key": "0x34391cbbf06956bb506f45ec179cdd84df526aa364e27bbde65db9c15d866d00"
},
"0x84e75c28348fb86acea1a93a39426d7d60f4cc46": {
"key": "0xf6a8f1603b8368f3ca373292b7310c53bec7b508aecacd442554ebc1c5d0c856"
},
"0xc7b99a164efd027a93f147376cc7da7c67c6bbe0": {
"key": "0x8d56bcbcf2c1b7109e1396a28d7a0234e33544ade74ea32c460ce4a443b239b1"
},
"0xd803681e487e6ac18053afc5a6cd813c86ec3e4d": {
"key": "0xfc39d1c9ddbba176d806ebb42d7460189fe56ca163ad3eb6143bfc6beb6f6f72"
},
"0xe7d13f7aa2a838d24c59b40186a0aca1e21cffcc": {
"key": "0x9ee3fd550664b246ad7cdba07162dd25530a3b1d51476dd1d85bbc29f0592684"
},
"0xeda8645ba6948855e3b3cd596bbb07596d59c603": {
"key": "0x14cdde09d1640eb8c3cda063891b0453073f57719583381ff78811efa6d4199f"
}
}
BIN
cmd/devp2p/internal/ethtest/testdata/chain.rlp
vendored
Binary file not shown.
20
cmd/devp2p/internal/ethtest/testdata/forkenv.json
vendored
Normal file
@ -0,0 +1,20 @@
{
"HIVE_CANCUN_TIMESTAMP": "840",
"HIVE_CHAIN_ID": "3503995874084926",
"HIVE_FORK_ARROW_GLACIER": "60",
"HIVE_FORK_BERLIN": "48",
"HIVE_FORK_BYZANTIUM": "18",
"HIVE_FORK_CONSTANTINOPLE": "24",
"HIVE_FORK_GRAY_GLACIER": "66",
"HIVE_FORK_HOMESTEAD": "0",
"HIVE_FORK_ISTANBUL": "36",
"HIVE_FORK_LONDON": "54",
"HIVE_FORK_MUIR_GLACIER": "42",
"HIVE_FORK_PETERSBURG": "30",
"HIVE_FORK_SPURIOUS": "12",
"HIVE_FORK_TANGERINE": "6",
"HIVE_MERGE_BLOCK_ID": "72",
"HIVE_NETWORK_ID": "3503995874084926",
"HIVE_SHANGHAI_TIMESTAMP": "780",
"HIVE_TERMINAL_TOTAL_DIFFICULTY": "9454784"
}
133
cmd/devp2p/internal/ethtest/testdata/genesis.json
vendored
@ -1,27 +1,112 @@
{
"config": {
"chainId": 19763,
"chainId": 3503995874084926,
"homesteadBlock": 0,
"eip150Block": 0,
"eip150Block": 6,
"eip155Block": 0,
"eip155Block": 12,
"eip158Block": 0,
"eip158Block": 12,
"byzantiumBlock": 0,
"byzantiumBlock": 18,
"terminalTotalDifficultyPassed": true,
"ethash": {}
"constantinopleBlock": 24,
"petersburgBlock": 30,
"istanbulBlock": 36,
"muirGlacierBlock": 42,
"berlinBlock": 48,
"londonBlock": 54,
"arrowGlacierBlock": 60,
"grayGlacierBlock": 66,
"mergeNetsplitBlock": 72,
"shanghaiTime": 780,
"cancunTime": 840,
"terminalTotalDifficulty": 9454784,
"terminalTotalDifficultyPassed": true,
"ethash": {}
},
"nonce": "0xdeadbeefdeadbeef",
"nonce": "0x0",
"timestamp": "0x0",
"extraData": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x68697665636861696e",
"gasLimit": "0x80000000",
"gasLimit": "0x23f3e20",
"difficulty": "0x20000",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0x0000000000000000000000000000000000000000",
"alloc": {
"71562b71999873db5b286df957af199ec94617f7": {
"balance": "0xffffffffffffffffffffffffff"
}
"000f3df6d732807ef1319fb7b8bb8522d0beac02": {
"code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500",
"balance": "0x2a"
},
"0c2c51a0990aee1d73c1228de158688341557508": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"14e46043e63d0e3cdcf2530519f4cfaf35058cb2": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"16c57edf7fa9d9525378b0b81bf8a3ced0620c1c": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"1f4924b14f34e24159387c0a4cdbaa32f3ddb0cf": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"1f5bde34b4afc686f136c7a3cb6ec376f7357759": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"2d389075be5be9f2246ad654ce152cf05990b209": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"3ae75c08b4c907eb63a8960c45b86e1e9ab6123c": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"4340ee1b812acb40a1eb561c019c327b243b92df": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"4a0f1452281bcec5bd90c3dce6162a5995bfe9df": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"4dde844b71bcdf95512fb4dc94e84fb67b512ed8": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"5f552da00dfb4d3749d9e62dcee3c918855a86a0": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"654aa64f5fbefb84c270ec74211b81ca8c44a72e": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"717f8aa2b982bee0e29f573d31df288663e1ce16": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"7435ed30a8b4aeb0877cef0c6e8cffe834eb865f": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"83c7e323d189f18725ac510004fdc2941f8c4a78": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"84e75c28348fb86acea1a93a39426d7d60f4cc46": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"8bebc8ba651aee624937e7d897853ac30c95a067": {
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000000000001",
"0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000000002",
"0x0000000000000000000000000000000000000000000000000000000000000003": "0x0000000000000000000000000000000000000000000000000000000000000003"
},
"balance": "0x1",
"nonce": "0x1"
},
"c7b99a164efd027a93f147376cc7da7c67c6bbe0": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"d803681e487e6ac18053afc5a6cd813c86ec3e4d": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"e7d13f7aa2a838d24c59b40186a0aca1e21cffcc": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
},
"eda8645ba6948855e3b3cd596bbb07596d59c603": {
"balance": "0xc097ce7bc90715b34b9f1000000000"
}
},
"number": "0x0",
"gasUsed": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"baseFeePerGas": null,
"excessBlobGas": null,
"blobGasUsed": null
}
BIN
cmd/devp2p/internal/ethtest/testdata/halfchain.rlp
vendored
Binary file not shown.
23
cmd/devp2p/internal/ethtest/testdata/headblock.json
vendored
Normal file
@ -0,0 +1,23 @@
{
"parentHash": "0x96a73007443980c5e0985dfbb45279aa496dadea16918ad42c65c0bf8122ec39",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0xea4c1f4d9fa8664c22574c5b2f948a78c4b1a753cebc1861e7fb5b1aa21c5a94",
"transactionsRoot": "0xecda39025fc4c609ce778d75eed0aa53b65ce1e3d1373b34bad8578cc31e5b48",
"receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x0",
"number": "0x1f4",
"gasLimit": "0x47e7c40",
"gasUsed": "0x5208",
"timestamp": "0x1388",
"extraData": "0x",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"baseFeePerGas": "0x7",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"blobGasUsed": "0x0",
"excessBlobGas": "0x0",
"parentBeaconBlockRoot": "0xf653da50cdff4733f13f7a5e338290e883bdf04adf3f112709728063ea965d6c",
"hash": "0x36a166f0dcd160fc5e5c61c9a7c2d7f236d9175bf27f43aaa2150e291f092ef7"
}
13
cmd/devp2p/internal/ethtest/testdata/headfcu.json
vendored
Normal file
@ -0,0 +1,13 @@
{
"jsonrpc": "2.0",
"id": "fcu500",
"method": "engine_forkchoiceUpdatedV3",
"params": [
{
"headBlockHash": "0x36a166f0dcd160fc5e5c61c9a7c2d7f236d9175bf27f43aaa2150e291f092ef7",
"safeBlockHash": "0x36a166f0dcd160fc5e5c61c9a7c2d7f236d9175bf27f43aaa2150e291f092ef7",
"finalizedBlockHash": "0x36a166f0dcd160fc5e5c61c9a7c2d7f236d9175bf27f43aaa2150e291f092ef7"
},
null
]
}
4204
cmd/devp2p/internal/ethtest/testdata/headstate.json
vendored
Normal file
File diff suppressed because it is too large
13268
cmd/devp2p/internal/ethtest/testdata/newpayload.json
vendored
Normal file
File diff suppressed because it is too large
3018
cmd/devp2p/internal/ethtest/testdata/txinfo.json
vendored
Normal file
File diff suppressed because it is too large
@ -19,429 +19,141 @@ package ethtest
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math/big"
|
"os"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
||||||
"github.com/ethereum/go-ethereum/internal/utesting"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
|
// sendTxs sends the given transactions to the node and
|
||||||
var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
// expects the node to accept and propagate them.
|
||||||
|
func (s *Suite) sendTxs(txs []*types.Transaction) error {
|
||||||
func (s *Suite) sendSuccessfulTxs(t *utesting.T) error {
|
// Open sending conn.
|
||||||
tests := []*types.Transaction{
|
sendConn, err := s.dial()
|
||||||
getNextTxFromChain(s),
|
|
||||||
unknownTx(s),
|
|
||||||
}
|
|
||||||
for i, tx := range tests {
|
|
||||||
if tx == nil {
|
|
||||||
return errors.New("could not find tx to send")
|
|
||||||
}
|
|
||||||
t.Logf("Testing tx propagation %d: sending tx %v %v %v\n", i, tx.Hash().String(), tx.GasPrice(), tx.Gas())
|
|
||||||
// get previous tx if exists for reference in case of old tx propagation
|
|
||||||
var prevTx *types.Transaction
|
|
||||||
if i != 0 {
|
|
||||||
prevTx = tests[i-1]
|
|
||||||
}
|
|
||||||
// write tx to connection
|
|
||||||
if err := sendSuccessfulTx(s, tx, prevTx); err != nil {
|
|
||||||
return fmt.Errorf("send successful tx test failed: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction) error {
|
|
||||||
sendConn, recvConn, err := s.createSendAndRecvConns()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer sendConn.Close()
|
defer sendConn.Close()
|
||||||
defer recvConn.Close()
|
|
||||||
if err = sendConn.peer(s.chain, nil); err != nil {
|
if err = sendConn.peer(s.chain, nil); err != nil {
|
||||||
return fmt.Errorf("peering failed: %v", err)
|
return fmt.Errorf("peering failed: %v", err)
|
||||||
}
|
}
|
||||||
// Send the transaction
|
|
||||||
if err = sendConn.Write(&Transactions{tx}); err != nil {
|
|
||||||
return fmt.Errorf("failed to write to connection: %v", err)
|
|
||||||
}
|
|
||||||
// peer receiving connection to node
|
|
||||||
if err = recvConn.peer(s.chain, nil); err != nil {
|
|
||||||
return fmt.Errorf("peering failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// update last nonce seen
|
// Open receiving conn.
|
||||||
nonce = tx.Nonce()
|
|
||||||
|
|
||||||
// Wait for the transaction announcement
|
|
||||||
for {
|
|
||||||
switch msg := recvConn.readAndServe(s.chain, timeout).(type) {
|
|
||||||
case *Transactions:
|
|
||||||
recTxs := *msg
|
|
||||||
// if you receive an old tx propagation, read from connection again
|
|
||||||
if len(recTxs) == 1 && prevTx != nil {
|
|
||||||
if recTxs[0] == prevTx {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, gotTx := range recTxs {
|
|
||||||
if gotTx.Hash() == tx.Hash() {
|
|
||||||
// Ok
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return fmt.Errorf("missing transaction: got %v missing %v", recTxs, tx.Hash())
|
|
||||||
case *NewPooledTransactionHashes66:
|
|
||||||
txHashes := *msg
|
|
||||||
// if you receive an old tx propagation, read from connection again
|
|
||||||
if len(txHashes) == 1 && prevTx != nil {
|
|
||||||
if txHashes[0] == prevTx.Hash() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, gotHash := range txHashes {
|
|
||||||
if gotHash == tx.Hash() {
|
|
||||||
// Ok
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return fmt.Errorf("missing transaction announcement: got %v missing %v", txHashes, tx.Hash())
|
|
||||||
case *NewPooledTransactionHashes:
|
|
||||||
txHashes := msg.Hashes
|
|
||||||
if len(txHashes) != len(msg.Sizes) {
|
|
||||||
return fmt.Errorf("invalid msg size lengths: hashes: %v sizes: %v", len(txHashes), len(msg.Sizes))
|
|
||||||
}
|
|
||||||
if len(txHashes) != len(msg.Types) {
|
|
||||||
return fmt.Errorf("invalid msg type lengths: hashes: %v types: %v", len(txHashes), len(msg.Types))
|
|
||||||
}
|
|
||||||
// if you receive an old tx propagation, read from connection again
|
|
||||||
if len(txHashes) == 1 && prevTx != nil {
|
|
||||||
if txHashes[0] == prevTx.Hash() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for index, gotHash := range txHashes {
|
|
||||||
if gotHash == tx.Hash() {
|
|
||||||
if msg.Sizes[index] != uint32(tx.Size()) {
|
|
||||||
return fmt.Errorf("invalid tx size: got %v want %v", msg.Sizes[index], tx.Size())
|
|
||||||
}
|
|
||||||
if msg.Types[index] != tx.Type() {
|
|
||||||
return fmt.Errorf("invalid tx type: got %v want %v", msg.Types[index], tx.Type())
|
|
||||||
}
|
|
||||||
// Ok
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return fmt.Errorf("missing transaction announcement: got %v missing %v", txHashes, tx.Hash())
|
|
||||||
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("unexpected message in sendSuccessfulTx: %s", pretty.Sdump(msg))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Suite) sendMaliciousTxs(t *utesting.T) error {
|
|
||||||
badTxs := []*types.Transaction{
|
|
||||||
getOldTxFromChain(s),
|
|
||||||
invalidNonceTx(s),
|
|
||||||
hugeAmount(s),
|
|
||||||
hugeGasPrice(s),
|
|
||||||
hugeData(s),
|
|
||||||
}
|
|
||||||
|
|
||||||
// setup receiving connection before sending malicious txs
|
|
||||||
recvConn, err := s.dial()
|
recvConn, err := s.dial()
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("dial failed: %v", err)
|
|
||||||
}
|
|
||||||
defer recvConn.Close()
|
|
||||||
if err = recvConn.peer(s.chain, nil); err != nil {
|
|
||||||
return fmt.Errorf("peering failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, tx := range badTxs {
|
|
||||||
t.Logf("Testing malicious tx propagation: %v\n", i)
|
|
||||||
if err = sendMaliciousTx(s, tx); err != nil {
|
|
||||||
return fmt.Errorf("malicious tx test failed:\ntx: %v\nerror: %v", tx, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// check to make sure bad txs aren't propagated
|
|
||||||
return checkMaliciousTxPropagation(s, badTxs, recvConn)
|
|
||||||
}
|
|
||||||
|
|
||||||
func sendMaliciousTx(s *Suite, tx *types.Transaction) error {
|
|
||||||
conn, err := s.dial()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("dial failed: %v", err)
|
|
||||||
}
|
|
||||||
defer conn.Close()
|
|
||||||
if err = conn.peer(s.chain, nil); err != nil {
|
|
||||||
return fmt.Errorf("peering failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// write malicious tx
|
|
||||||
if err = conn.Write(&Transactions{tx}); err != nil {
|
|
||||||
return fmt.Errorf("failed to write to connection: %v", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var nonce = uint64(99)
|
|
||||||
|
|
||||||
// sendMultipleSuccessfulTxs sends the given transactions to the node and
|
|
||||||
// expects the node to accept and propagate them.
|
|
||||||
func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, txs []*types.Transaction) error {
|
|
||||||
txMsg := Transactions(txs)
|
|
||||||
t.Logf("sending %d txs\n", len(txs))
|
|
||||||
|
|
||||||
sendConn, recvConn, err := s.createSendAndRecvConns()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer sendConn.Close()
|
|
||||||
defer recvConn.Close()
|
defer recvConn.Close()
|
||||||
if err = sendConn.peer(s.chain, nil); err != nil {
|
|
||||||
return fmt.Errorf("peering failed: %v", err)
|
|
||||||
}
|
|
||||||
if err = recvConn.peer(s.chain, nil); err != nil {
|
if err = recvConn.peer(s.chain, nil); err != nil {
|
||||||
return fmt.Errorf("peering failed: %v", err)
|
return fmt.Errorf("peering failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Send the transactions
|
if err = sendConn.Write(ethProto, eth.TransactionsMsg, eth.TransactionsPacket(txs)); err != nil {
|
||||||
if err = sendConn.Write(&txMsg); err != nil {
|
|
||||||
return fmt.Errorf("failed to write message to connection: %v", err)
|
return fmt.Errorf("failed to write message to connection: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// update nonce
|
var (
|
||||||
nonce = txs[len(txs)-1].Nonce()
|
got = make(map[common.Hash]bool)
|
||||||
|
end = time.Now().Add(timeout)
|
||||||
|
)
|
||||||
|
|
||||||
// Wait for the transaction announcement(s) and make sure all sent txs are being propagated.
|
// Wait for the transaction announcements, make sure all txs ar propagated.
|
||||||
// all txs should be announced within a couple announcements.
|
for time.Now().Before(end) {
|
||||||
recvHashes := make([]common.Hash, 0)
|
msg, err := recvConn.ReadEth()
|
||||||
|
if err != nil {
|
||||||
for i := 0; i < 20; i++ {
|
return fmt.Errorf("failed to read from connection: %w", err)
|
||||||
switch msg := recvConn.readAndServe(s.chain, timeout).(type) {
|
}
|
||||||
case *Transactions:
|
switch msg := msg.(type) {
|
||||||
|
case *eth.TransactionsPacket:
|
||||||
for _, tx := range *msg {
|
for _, tx := range *msg {
|
||||||
recvHashes = append(recvHashes, tx.Hash())
|
got[tx.Hash()] = true
|
||||||
|
}
|
||||||
|
case *eth.NewPooledTransactionHashesPacket68:
|
||||||
|
for _, hash := range msg.Hashes {
|
||||||
|
got[hash] = true
|
||||||
}
|
}
|
||||||
case *NewPooledTransactionHashes66:
|
|
||||||
recvHashes = append(recvHashes, *msg...)
|
|
||||||
case *NewPooledTransactionHashes:
|
|
||||||
recvHashes = append(recvHashes, msg.Hashes...)
|
|
||||||
default:
|
default:
|
||||||
if !strings.Contains(pretty.Sdump(msg), "i/o timeout") {
|
return fmt.Errorf("unexpected eth wire msg: %s", pretty.Sdump(msg))
|
||||||
return fmt.Errorf("unexpected message while waiting to receive txs: %s", pretty.Sdump(msg))
|
}
|
||||||
|
|
||||||
|
// Check if all txs received.
|
||||||
|
allReceived := func() bool {
|
||||||
|
for _, tx := range txs {
|
||||||
|
if !got[tx.Hash()] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
return true
|
||||||
}
|
}
|
||||||
// break once all 2000 txs have been received
|
if allReceived() {
|
||||||
if len(recvHashes) == 2000 {
|
return nil
|
||||||
break
|
|
||||||
}
|
|
||||||
if len(recvHashes) > 0 {
|
|
||||||
_, missingTxs := compareReceivedTxs(recvHashes, txs)
|
|
||||||
if len(missingTxs) > 0 {
|
|
||||||
continue
|
|
||||||
} else {
|
|
||||||
t.Logf("successfully received all %d txs", len(txs))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
_, missingTxs := compareReceivedTxs(recvHashes, txs)
|
|
||||||
if len(missingTxs) > 0 {
|
return fmt.Errorf("timed out waiting for txs")
|
||||||
for _, missing := range missingTxs {
|
|
||||||
t.Logf("missing tx: %v", missing.Hash())
|
|
||||||
}
|
|
||||||
return fmt.Errorf("missing %d txs", len(missingTxs))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkMaliciousTxPropagation checks whether the given malicious transactions were
|
func (s *Suite) sendInvalidTxs(txs []*types.Transaction) error {
|
||||||
// propagated by the node.
|
// Open sending conn.
|
||||||
func checkMaliciousTxPropagation(s *Suite, txs []*types.Transaction, conn *Conn) error {
|
sendConn, err := s.dial()
|
||||||
switch msg := conn.readAndServe(s.chain, time.Second*8).(type) {
|
|
||||||
case *Transactions:
|
|
||||||
// check to see if any of the failing txs were in the announcement
|
|
||||||
recvTxs := make([]common.Hash, len(*msg))
|
|
||||||
for i, recvTx := range *msg {
|
|
||||||
recvTxs[i] = recvTx.Hash()
|
|
||||||
}
|
|
||||||
badTxs, _ := compareReceivedTxs(recvTxs, txs)
|
|
||||||
if len(badTxs) > 0 {
|
|
||||||
return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs)
|
|
||||||
}
|
|
||||||
case *NewPooledTransactionHashes66:
|
|
||||||
badTxs, _ := compareReceivedTxs(*msg, txs)
|
|
||||||
if len(badTxs) > 0 {
|
|
||||||
return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs)
|
|
||||||
}
|
|
||||||
case *NewPooledTransactionHashes:
|
|
||||||
badTxs, _ := compareReceivedTxs(msg.Hashes, txs)
|
|
||||||
if len(badTxs) > 0 {
|
|
||||||
return fmt.Errorf("received %d bad txs: \n%v", len(badTxs), badTxs)
|
|
||||||
}
|
|
||||||
case *Error:
|
|
||||||
// Transaction should not be announced -> wait for timeout
|
|
||||||
return nil
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("unexpected message in sendFailingTx: %s", pretty.Sdump(msg))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// compareReceivedTxs compares the received set of txs against the given set of txs,
|
|
||||||
// returning both the set received txs that were present within the given txs, and
|
|
||||||
// the set of txs that were missing from the set of received txs
|
|
||||||
func compareReceivedTxs(recvTxs []common.Hash, txs []*types.Transaction) (present []*types.Transaction, missing []*types.Transaction) {
|
|
||||||
// create a map of the hashes received from node
|
|
||||||
recvHashes := make(map[common.Hash]common.Hash)
|
|
||||||
for _, hash := range recvTxs {
|
|
||||||
recvHashes[hash] = hash
|
|
||||||
}
|
|
||||||
|
|
||||||
// collect present txs and missing txs separately
|
|
||||||
present = make([]*types.Transaction, 0)
|
|
||||||
missing = make([]*types.Transaction, 0)
|
|
||||||
for _, tx := range txs {
|
|
||||||
if _, exists := recvHashes[tx.Hash()]; exists {
|
|
||||||
present = append(present, tx)
|
|
||||||
} else {
|
|
||||||
missing = append(missing, tx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return present, missing
|
|
||||||
}
|
|
||||||
|
|
||||||
func unknownTx(s *Suite) *types.Transaction {
|
|
||||||
tx := getNextTxFromChain(s)
|
|
||||||
if tx == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var to common.Address
|
|
||||||
if tx.To() != nil {
|
|
||||||
to = *tx.To()
|
|
||||||
}
|
|
||||||
txNew := types.NewTransaction(tx.Nonce()+1, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data())
|
|
||||||
return signWithFaucet(s.chain.chainConfig, txNew)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getNextTxFromChain(s *Suite) *types.Transaction {
|
|
||||||
// Get a new transaction
|
|
||||||
for _, blocks := range s.fullChain.blocks[s.chain.Len():] {
|
|
||||||
txs := blocks.Transactions()
|
|
||||||
if txs.Len() != 0 {
|
|
||||||
return txs[0]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func generateTxs(s *Suite, numTxs int) (map[common.Hash]common.Hash, []*types.Transaction, error) {
|
|
||||||
txHashMap := make(map[common.Hash]common.Hash, numTxs)
|
|
||||||
txs := make([]*types.Transaction, numTxs)
|
|
||||||
|
|
||||||
nextTx := getNextTxFromChain(s)
|
|
||||||
if nextTx == nil {
|
|
||||||
return nil, nil, errors.New("failed to get the next transaction")
|
|
||||||
}
|
|
||||||
gas := nextTx.Gas()
|
|
||||||
|
|
||||||
nonce = nonce + 1
|
|
||||||
// generate txs
|
|
||||||
for i := 0; i < numTxs; i++ {
|
|
||||||
tx := generateTx(s.chain.chainConfig, nonce, gas)
|
|
||||||
if tx == nil {
|
|
||||||
return nil, nil, errors.New("failed to get the next transaction")
|
|
||||||
}
|
|
||||||
txHashMap[tx.Hash()] = tx.Hash()
|
|
||||||
txs[i] = tx
|
|
||||||
nonce = nonce + 1
|
|
||||||
}
|
|
||||||
return txHashMap, txs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func generateTx(chainConfig *params.ChainConfig, nonce uint64, gas uint64) *types.Transaction {
|
|
||||||
var to common.Address
|
|
||||||
tx := types.NewTransaction(nonce, to, big.NewInt(1), gas, big.NewInt(1), []byte{})
|
|
||||||
return signWithFaucet(chainConfig, tx)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getOldTxFromChain(s *Suite) *types.Transaction {
|
|
||||||
for _, blocks := range s.fullChain.blocks[:s.chain.Len()-1] {
|
|
||||||
txs := blocks.Transactions()
|
|
||||||
if txs.Len() != 0 {
|
|
||||||
return txs[0]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func invalidNonceTx(s *Suite) *types.Transaction {
|
|
||||||
tx := getNextTxFromChain(s)
|
|
||||||
if tx == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var to common.Address
|
|
||||||
if tx.To() != nil {
|
|
||||||
to = *tx.To()
|
|
||||||
}
|
|
||||||
txNew := types.NewTransaction(tx.Nonce()-2, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data())
|
|
||||||
return signWithFaucet(s.chain.chainConfig, txNew)
|
|
||||||
}
|
|
||||||
|
|
||||||
func hugeAmount(s *Suite) *types.Transaction {
|
|
||||||
tx := getNextTxFromChain(s)
|
|
||||||
if tx == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
amount := largeNumber(2)
|
|
||||||
var to common.Address
|
|
||||||
if tx.To() != nil {
|
|
||||||
to = *tx.To()
|
|
||||||
}
|
|
||||||
txNew := types.NewTransaction(tx.Nonce(), to, amount, tx.Gas(), tx.GasPrice(), tx.Data())
|
|
||||||
return signWithFaucet(s.chain.chainConfig, txNew)
|
|
||||||
}
|
|
||||||
|
|
||||||
func hugeGasPrice(s *Suite) *types.Transaction {
|
|
||||||
tx := getNextTxFromChain(s)
|
|
||||||
if tx == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
gasPrice := largeNumber(2)
|
|
||||||
var to common.Address
|
|
||||||
if tx.To() != nil {
|
|
||||||
to = *tx.To()
|
|
||||||
}
|
|
||||||
txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), gasPrice, tx.Data())
|
|
||||||
return signWithFaucet(s.chain.chainConfig, txNew)
|
|
||||||
}
|
|
||||||
|
|
||||||
func hugeData(s *Suite) *types.Transaction {
|
|
||||||
tx := getNextTxFromChain(s)
|
|
||||||
if tx == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var to common.Address
|
|
||||||
if tx.To() != nil {
|
|
||||||
to = *tx.To()
|
|
||||||
}
|
|
||||||
txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), tx.GasPrice(), largeBuffer(2))
|
|
||||||
return signWithFaucet(s.chain.chainConfig, txNew)
|
|
||||||
}
|
|
||||||
|
|
||||||
func signWithFaucet(chainConfig *params.ChainConfig, tx *types.Transaction) *types.Transaction {
|
|
||||||
signer := types.LatestSigner(chainConfig)
|
|
||||||
signedTx, err := types.SignTx(tx, signer, faucetKey)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return err
|
||||||
|
}
|
||||||
|
defer sendConn.Close()
|
||||||
|
if err = sendConn.peer(s.chain, nil); err != nil {
|
||||||
|
return fmt.Errorf("peering failed: %v", err)
|
||||||
|
}
|
||||||
|
sendConn.SetDeadline(time.Now().Add(timeout))
|
||||||
|
|
||||||
|
// Open receiving conn.
|
||||||
|
recvConn, err := s.dial()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer recvConn.Close()
|
||||||
|
if err = recvConn.peer(s.chain, nil); err != nil {
|
||||||
|
return fmt.Errorf("peering failed: %v", err)
|
||||||
|
}
|
||||||
|
recvConn.SetDeadline(time.Now().Add(timeout))
|
||||||
|
|
||||||
|
if err = sendConn.Write(ethProto, eth.TransactionsMsg, txs); err != nil {
|
||||||
|
return fmt.Errorf("failed to write message to connection: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make map of invalid txs.
|
||||||
|
invalids := make(map[common.Hash]struct{})
|
||||||
|
for _, tx := range txs {
|
||||||
|
invalids[tx.Hash()] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get responses.
|
||||||
|
recvConn.SetReadDeadline(time.Now().Add(timeout))
|
||||||
|
for {
|
||||||
|
msg, err := recvConn.ReadEth()
|
||||||
|
if errors.Is(err, os.ErrDeadlineExceeded) {
|
||||||
|
// Successful if no invalid txs are propagated before timeout.
|
||||||
|
return nil
|
||||||
|
} else if err != nil {
|
||||||
|
return fmt.Errorf("failed to read from connection: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch msg := msg.(type) {
|
||||||
|
case *eth.TransactionsPacket:
|
||||||
|
for _, tx := range txs {
|
||||||
|
if _, ok := invalids[tx.Hash()]; ok {
|
||||||
|
return fmt.Errorf("received bad tx: %s", tx.Hash())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case *eth.NewPooledTransactionHashesPacket68:
|
||||||
|
for _, hash := range msg.Hashes {
|
||||||
|
if _, ok := invalids[hash]; ok {
|
||||||
|
return fmt.Errorf("received bad tx: %s", hash)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unexpected eth message: %v", pretty.Sdump(msg))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return signedTx
|
|
||||||
}
|
}
|
||||||
|
@ -1,291 +0,0 @@
|
|||||||
// Copyright 2020 The go-ethereum Authors
|
|
||||||
// This file is part of go-ethereum.
|
|
||||||
//
|
|
||||||
// go-ethereum is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// go-ethereum is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU General Public License
|
|
||||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package ethtest
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p/rlpx"
|
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Message interface {
|
|
||||||
Code() int
|
|
||||||
ReqID() uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
type Error struct {
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Error) Unwrap() error { return e.err }
|
|
||||||
func (e *Error) Error() string { return e.err.Error() }
|
|
||||||
func (e *Error) String() string { return e.Error() }
|
|
||||||
|
|
||||||
func (e *Error) Code() int { return -1 }
|
|
||||||
func (e *Error) ReqID() uint64 { return 0 }
|
|
||||||
|
|
||||||
func errorf(format string, args ...interface{}) *Error {
|
|
||||||
return &Error{fmt.Errorf(format, args...)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hello is the RLP structure of the protocol handshake.
|
|
||||||
type Hello struct {
|
|
||||||
Version uint64
|
|
||||||
Name string
|
|
||||||
Caps []p2p.Cap
|
|
||||||
ListenPort uint64
|
|
||||||
ID []byte // secp256k1 public key
|
|
||||||
|
|
||||||
// Ignore additional fields (for forward compatibility).
|
|
||||||
Rest []rlp.RawValue `rlp:"tail"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (msg Hello) Code() int { return 0x00 }
|
|
||||||
func (msg Hello) ReqID() uint64 { return 0 }
|
|
||||||
|
|
||||||
// Disconnect is the RLP structure for a disconnect message.
|
|
||||||
type Disconnect struct {
|
|
||||||
Reason p2p.DiscReason
|
|
||||||
}
|
|
||||||
|
|
||||||
func (msg Disconnect) Code() int { return 0x01 }
|
|
||||||
func (msg Disconnect) ReqID() uint64 { return 0 }
|
|
||||||
|
|
||||||
type Ping struct{}
|
|
||||||
|
|
||||||
func (msg Ping) Code() int { return 0x02 }
|
|
||||||
func (msg Ping) ReqID() uint64 { return 0 }
|
|
||||||
|
|
||||||
type Pong struct{}
|
|
||||||
|
|
||||||
func (msg Pong) Code() int { return 0x03 }
|
|
||||||
func (msg Pong) ReqID() uint64 { return 0 }
|
|
||||||
|
|
||||||
// Status is the network packet for the status message for eth/64 and later.
|
|
||||||
type Status eth.StatusPacket
|
|
||||||
|
|
||||||
func (msg Status) Code() int { return 16 }
|
|
||||||
func (msg Status) ReqID() uint64 { return 0 }
|
|
||||||
|
|
||||||
// NewBlockHashes is the network packet for the block announcements.
|
|
||||||
type NewBlockHashes eth.NewBlockHashesPacket
|
|
||||||
|
|
||||||
func (msg NewBlockHashes) Code() int { return 17 }
|
|
||||||
func (msg NewBlockHashes) ReqID() uint64 { return 0 }
|
|
||||||
|
|
||||||
type Transactions eth.TransactionsPacket
|
|
||||||
|
|
||||||
func (msg Transactions) Code() int { return 18 }
|
|
||||||
func (msg Transactions) ReqID() uint64 { return 18 }
|
|
||||||
|
|
||||||
// GetBlockHeaders represents a block header query.
|
|
||||||
type GetBlockHeaders eth.GetBlockHeadersPacket
|
|
||||||
|
|
||||||
func (msg GetBlockHeaders) Code() int { return 19 }
|
|
||||||
func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId }
|
|
||||||
|
|
||||||
type BlockHeaders eth.BlockHeadersPacket
|
|
||||||
|
|
||||||
func (msg BlockHeaders) Code() int { return 20 }
|
|
||||||
func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId }
|
|
||||||
|
|
||||||
// GetBlockBodies represents a GetBlockBodies request
|
|
||||||
type GetBlockBodies eth.GetBlockBodiesPacket
|
|
||||||
|
|
||||||
func (msg GetBlockBodies) Code() int { return 21 }
|
|
||||||
func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId }
|
|
||||||
|
|
||||||
// BlockBodies is the network packet for block content distribution.
|
|
||||||
type BlockBodies eth.BlockBodiesPacket
|
|
||||||
|
|
||||||
func (msg BlockBodies) Code() int { return 22 }
|
|
||||||
func (msg BlockBodies) ReqID() uint64 { return msg.RequestId }
|
|
||||||
|
|
||||||
// NewBlock is the network packet for the block propagation message.
|
|
||||||
type NewBlock eth.NewBlockPacket
|
|
||||||
|
|
||||||
func (msg NewBlock) Code() int { return 23 }
|
|
||||||
func (msg NewBlock) ReqID() uint64 { return 0 }
|
|
||||||
|
|
||||||
// NewPooledTransactionHashes66 is the network packet for the tx hash propagation message.
|
|
||||||
type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket67
|
|
||||||
|
|
||||||
func (msg NewPooledTransactionHashes66) Code() int { return 24 }
|
|
||||||
func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 }
|
|
||||||
|
|
||||||
// NewPooledTransactionHashes is the network packet for the tx hash propagation message.
|
|
||||||
type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket68
|
|
||||||
|
|
||||||
func (msg NewPooledTransactionHashes) Code() int { return 24 }
|
|
||||||
func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 }
|
|
||||||
|
|
||||||
type GetPooledTransactions eth.GetPooledTransactionsPacket
|
|
||||||
|
|
||||||
func (msg GetPooledTransactions) Code() int { return 25 }
|
|
||||||
func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId }
|
|
||||||
|
|
||||||
type PooledTransactions eth.PooledTransactionsPacket
|
|
||||||
|
|
||||||
func (msg PooledTransactions) Code() int { return 26 }
|
|
||||||
func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId }
|
|
||||||
|
|
||||||
// Conn represents an individual connection with a peer
|
|
||||||
type Conn struct {
|
|
||||||
*rlpx.Conn
|
|
||||||
ourKey *ecdsa.PrivateKey
|
|
||||||
negotiatedProtoVersion uint
|
|
||||||
negotiatedSnapProtoVersion uint
|
|
||||||
ourHighestProtoVersion uint
|
|
||||||
ourHighestSnapProtoVersion uint
|
|
||||||
caps []p2p.Cap
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads an eth66 packet from the connection.
|
|
||||||
func (c *Conn) Read() Message {
|
|
||||||
code, rawData, _, err := c.Conn.Read()
|
|
||||||
if err != nil {
|
|
||||||
return errorf("could not read from connection: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var msg Message
|
|
||||||
switch int(code) {
|
|
||||||
case (Hello{}).Code():
|
|
||||||
msg = new(Hello)
|
|
||||||
case (Ping{}).Code():
|
|
||||||
msg = new(Ping)
|
|
||||||
case (Pong{}).Code():
|
|
||||||
msg = new(Pong)
|
|
||||||
case (Disconnect{}).Code():
|
|
||||||
msg = new(Disconnect)
|
|
||||||
case (Status{}).Code():
|
|
||||||
msg = new(Status)
|
|
||||||
case (GetBlockHeaders{}).Code():
|
|
||||||
ethMsg := new(eth.GetBlockHeadersPacket)
|
|
||||||
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
|
|
||||||
return errorf("could not rlp decode message: %v", err)
|
|
||||||
}
|
|
||||||
return (*GetBlockHeaders)(ethMsg)
|
|
||||||
case (BlockHeaders{}).Code():
|
|
||||||
ethMsg := new(eth.BlockHeadersPacket)
|
|
||||||
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
|
|
||||||
return errorf("could not rlp decode message: %v", err)
|
|
||||||
}
|
|
||||||
return (*BlockHeaders)(ethMsg)
|
|
||||||
case (GetBlockBodies{}).Code():
|
|
||||||
ethMsg := new(eth.GetBlockBodiesPacket)
|
|
||||||
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
|
|
||||||
return errorf("could not rlp decode message: %v", err)
|
|
||||||
}
|
|
||||||
return (*GetBlockBodies)(ethMsg)
|
|
||||||
case (BlockBodies{}).Code():
|
|
||||||
ethMsg := new(eth.BlockBodiesPacket)
|
|
||||||
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
|
|
||||||
return errorf("could not rlp decode message: %v", err)
|
|
||||||
}
|
|
||||||
return (*BlockBodies)(ethMsg)
|
|
||||||
case (NewBlock{}).Code():
|
|
||||||
msg = new(NewBlock)
|
|
||||||
case (NewBlockHashes{}).Code():
|
|
||||||
msg = new(NewBlockHashes)
|
|
||||||
case (Transactions{}).Code():
|
|
||||||
msg = new(Transactions)
|
|
||||||
case (NewPooledTransactionHashes66{}).Code():
|
|
||||||
// Try decoding to eth68
|
|
||||||
ethMsg := new(NewPooledTransactionHashes)
|
|
||||||
if err := rlp.DecodeBytes(rawData, ethMsg); err == nil {
|
|
||||||
return ethMsg
|
|
||||||
}
|
|
||||||
msg = new(NewPooledTransactionHashes66)
|
|
||||||
case (GetPooledTransactions{}.Code()):
|
|
||||||
ethMsg := new(eth.GetPooledTransactionsPacket)
|
|
||||||
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
|
|
||||||
return errorf("could not rlp decode message: %v", err)
|
|
||||||
}
|
|
||||||
return (*GetPooledTransactions)(ethMsg)
|
|
||||||
case (PooledTransactions{}.Code()):
|
|
||||||
ethMsg := new(eth.PooledTransactionsPacket)
|
|
||||||
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
|
|
||||||
return errorf("could not rlp decode message: %v", err)
|
|
||||||
}
|
|
||||||
return (*PooledTransactions)(ethMsg)
|
|
||||||
default:
|
|
||||||
msg = errorf("invalid message code: %d", code)
|
|
||||||
}
|
|
||||||
|
|
||||||
if msg != nil {
|
|
||||||
if err := rlp.DecodeBytes(rawData, msg); err != nil {
|
|
||||||
return errorf("could not rlp decode message: %v", err)
|
|
||||||
}
|
|
||||||
return msg
|
|
||||||
}
|
|
||||||
return errorf("invalid message: %s", string(rawData))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write writes a eth packet to the connection.
|
|
||||||
func (c *Conn) Write(msg Message) error {
|
|
||||||
payload, err := rlp.EncodeToBytes(msg)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = c.Conn.Write(uint64(msg.Code()), payload)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadSnap reads a snap/1 response with the given id from the connection.
|
|
||||||
func (c *Conn) ReadSnap(id uint64) (Message, error) {
|
|
||||||
respId := id + 1
|
|
||||||
start := time.Now()
|
|
||||||
for respId != id && time.Since(start) < timeout {
|
|
||||||
code, rawData, _, err := c.Conn.Read()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("could not read from connection: %v", err)
|
|
||||||
}
|
|
||||||
var snpMsg interface{}
|
|
||||||
switch int(code) {
|
|
||||||
case (GetAccountRange{}).Code():
|
|
||||||
snpMsg = new(GetAccountRange)
|
|
||||||
case (AccountRange{}).Code():
|
|
||||||
snpMsg = new(AccountRange)
|
|
||||||
case (GetStorageRanges{}).Code():
|
|
||||||
snpMsg = new(GetStorageRanges)
|
|
||||||
case (StorageRanges{}).Code():
|
|
||||||
snpMsg = new(StorageRanges)
|
|
||||||
case (GetByteCodes{}).Code():
|
|
||||||
snpMsg = new(GetByteCodes)
|
|
||||||
case (ByteCodes{}).Code():
|
|
||||||
snpMsg = new(ByteCodes)
|
|
||||||
case (GetTrieNodes{}).Code():
|
|
||||||
snpMsg = new(GetTrieNodes)
|
|
||||||
case (TrieNodes{}).Code():
|
|
||||||
snpMsg = new(TrieNodes)
|
|
||||||
default:
|
|
||||||
//return nil, fmt.Errorf("invalid message code: %d", code)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := rlp.DecodeBytes(rawData, snpMsg); err != nil {
|
|
||||||
return nil, fmt.Errorf("could not rlp decode message: %v", err)
|
|
||||||
}
|
|
||||||
return snpMsg.(Message), nil
|
|
||||||
}
|
|
||||||
return nil, errors.New("request timed out")
|
|
||||||
}
|
|
@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/ethtest"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/rlpx"
"github.com/ethereum/go-ethereum/rlp"
"github.com/urfave/cli/v2"
@ -46,22 +47,30 @@ var (
}
rlpxEthTestCommand = &cli.Command{
Name: "eth-test",
Usage: "Runs tests against a node",
Usage: "Runs eth protocol tests against a node",
ArgsUsage: "<node> <chain.rlp> <genesis.json>",
ArgsUsage: "<node>",
Action: rlpxEthTest,
Flags: []cli.Flag{
testPatternFlag,
testTAPFlag,
testChainDirFlag,
testNodeFlag,
testNodeJWTFlag,
testNodeEngineFlag,
},
}
rlpxSnapTestCommand = &cli.Command{
Name: "snap-test",
Usage: "Runs tests against a node",
Usage: "Runs snap protocol tests against a node",
ArgsUsage: "<node> <chain.rlp> <genesis.json>",
ArgsUsage: "",
Action: rlpxSnapTest,
Flags: []cli.Flag{
testPatternFlag,
testTAPFlag,
testChainDirFlag,
testNodeFlag,
testNodeJWTFlag,
testNodeEngineFlag,
},
}
)
@ -103,10 +112,8 @@ func rlpxPing(ctx *cli.Context) error {

// rlpxEthTest runs the eth protocol test suite.
func rlpxEthTest(ctx *cli.Context) error {
if ctx.NArg() < 3 {
exit("missing path to chain.rlp as command-line argument")
}
suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args().Get(1), ctx.Args().Get(2))
p := cliTestParams(ctx)
suite, err := ethtest.NewSuite(p.node, p.chainDir, p.engineAPI, p.jwt)
if err != nil {
exit(err)
}
@ -115,12 +122,44 @@ func rlpxEthTest(ctx *cli.Context) error {

// rlpxSnapTest runs the snap protocol test suite.
func rlpxSnapTest(ctx *cli.Context) error {
if ctx.NArg() < 3 {
exit("missing path to chain.rlp as command-line argument")
}
suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args().Get(1), ctx.Args().Get(2))
p := cliTestParams(ctx)
suite, err := ethtest.NewSuite(p.node, p.chainDir, p.engineAPI, p.jwt)
if err != nil {
exit(err)
}
return runTests(ctx, suite.SnapTests())
}

type testParams struct {
node *enode.Node
engineAPI string
jwt string
chainDir string
}

func cliTestParams(ctx *cli.Context) *testParams {
nodeStr := ctx.String(testNodeFlag.Name)
if nodeStr == "" {
exit(fmt.Errorf("missing -%s", testNodeFlag.Name))
}
node, err := parseNode(nodeStr)
if err != nil {
exit(err)
}
p := testParams{
node: node,
engineAPI: ctx.String(testNodeEngineFlag.Name),
jwt: ctx.String(testNodeJWTFlag.Name),
chainDir: ctx.String(testChainDirFlag.Name),
}
if p.engineAPI == "" {
exit(fmt.Errorf("missing -%s", testNodeEngineFlag.Name))
}
if p.jwt == "" {
exit(fmt.Errorf("missing -%s", testNodeJWTFlag.Name))
}
if p.chainDir == "" {
exit(fmt.Errorf("missing -%s", testChainDirFlag.Name))
}
return &p
}
|
||||||
|
@ -20,6 +20,7 @@ import (
"os"

"github.com/ethereum/go-ethereum/cmd/devp2p/internal/v4test"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/internal/utesting"
"github.com/ethereum/go-ethereum/log"
"github.com/urfave/cli/v2"
@ -27,23 +28,51 @@ import (
var (
testPatternFlag = &cli.StringFlag{
Name: "run",
Usage: "Pattern of test suite(s) to run",
Category: flags.TestingCategory,
}
testTAPFlag = &cli.BoolFlag{
Name: "tap",
Usage: "Output TAP",
Usage: "Output test results in TAP format",
Category: flags.TestingCategory,
}

// for eth/snap tests
testChainDirFlag = &cli.StringFlag{
Name: "chain",
Usage: "Test chain directory (required)",
Category: flags.TestingCategory,
}
testNodeFlag = &cli.StringFlag{
Name: "node",
Usage: "Peer-to-Peer endpoint (ENR) of the test node (required)",
Category: flags.TestingCategory,
}
testNodeJWTFlag = &cli.StringFlag{
Name: "jwtsecret",
Usage: "JWT secret for the engine API of the test node (required)",
Category: flags.TestingCategory,
Value: "0x7365637265747365637265747365637265747365637265747365637265747365",
}
testNodeEngineFlag = &cli.StringFlag{
Name: "engineapi",
Usage: "Engine API endpoint of the test node (required)",
Category: flags.TestingCategory,
}

// These two are specific to the discovery tests.
testListen1Flag = &cli.StringFlag{
Name: "listen1",
Usage: "IP address of the first tester",
Value: v4test.Listen1,
Category: flags.TestingCategory,
}
testListen2Flag = &cli.StringFlag{
Name: "listen2",
Usage: "IP address of the second tester",
Value: v4test.Listen2,
Category: flags.TestingCategory,
}
)
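Taken together, these flags replace the old positional <chain.rlp> <genesis.json> arguments of the rlpx test commands. As a rough, illustrative sketch only (the ENR, endpoint and chain directory are placeholders, not values from this change; the flag names --chain, --node, --engineapi, --jwtsecret, --run and --tap come from the definitions above, and the jwtsecret value shown is the documented default), an invocation might look like:

devp2p rlpx eth-test --chain ./testdata --node "enr:-..." --engineapi http://127.0.0.1:8551 --jwtsecret 0x7365637265747365637265747365637265747365637265747365637265747365 --run TestStatus --tap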
|
||||||
|
|
||||||
@ -54,7 +83,7 @@ func runTests(ctx *cli.Context, tests []utesting.Test) error {
|
|||||||
}
|
}
|
||||||
// Disable logging unless explicitly enabled.
|
// Disable logging unless explicitly enabled.
|
||||||
if !ctx.IsSet("verbosity") && !ctx.IsSet("vmodule") {
|
if !ctx.IsSet("verbosity") && !ctx.IsSet("vmodule") {
|
||||||
log.Root().SetHandler(log.DiscardHandler())
|
log.SetDefault(log.NewLogger(log.DiscardHandler()))
|
||||||
}
|
}
|
||||||
// Run the tests.
|
// Run the tests.
|
||||||
var run = utesting.RunTests
|
var run = utesting.RunTests
|
||||||
|
@ -22,6 +22,7 @@ import (
)

func TestMessageSignVerify(t *testing.T) {
t.Parallel()
tmpdir := t.TempDir()

keyfile := filepath.Join(tmpdir, "the-keyfile")
||||||
|
@ -88,7 +88,7 @@ type Env struct {
CurrentTimestamp uint64 `json:"currentTimestamp"`
Withdrawals []*Withdrawal `json:"withdrawals"`
// optional
CurrentDifficulty *big.Int `json:"currentDifficuly"`
CurrentDifficulty *big.Int `json:"currentDifficulty"`
CurrentRandom *big.Int `json:"currentRandom"`
CurrentBaseFee *big.Int `json:"currentBaseFee"`
ParentDifficulty *big.Int `json:"parentDifficulty"`
||||||
@ -214,7 +214,7 @@ exitcode:3 OK

The chain configuration to be used for a transition is specified via the
`--state.fork` CLI flag. A list of possible values and configurations can be
found in [`tests/init.go`](tests/init.go).
found in [`tests/init.go`](../../tests/init.go).

#### Examples
##### Basic usage
|
||||||
|
@ -24,6 +24,7 @@ import (
"regexp"
"sort"

"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
@ -40,7 +41,7 @@ var RunFlag = &cli.StringFlag{
var blockTestCommand = &cli.Command{
Action: blockTestCmd,
Name: "blocktest",
Usage: "executes the given blockchain tests",
Usage: "Executes the given blockchain tests",
ArgsUsage: "<file>",
Flags: []cli.Flag{RunFlag},
}
@ -85,7 +86,13 @@ func blockTestCmd(ctx *cli.Context) error {
continue
}
test := tests[name]
if err := test.Run(false, rawdb.HashScheme, tracer); err != nil {
if err := test.Run(false, rawdb.HashScheme, tracer, func(res error, chain *core.BlockChain) {
if ctx.Bool(DumpFlag.Name) {
if state, _ := chain.State(); state != nil {
fmt.Println(string(state.Dump(nil)))
}
}
}); err != nil {
return fmt.Errorf("test %v: %w", name, err)
}
}
|
||||||
|
@ -29,7 +29,7 @@ import (
var compileCommand = &cli.Command{
Action: compileCmd,
Name: "compile",
Usage: "compiles easm source to evm binary",
Usage: "Compiles easm source to evm binary",
ArgsUsage: "<file>",
}
|
||||||
|
|
||||||
|
@ -29,7 +29,7 @@ import (
var disasmCommand = &cli.Command{
Action: disasmCmd,
Name: "disasm",
Usage: "disassembles evm binary",
Usage: "Disassembles evm binary",
ArgsUsage: "<file>",
}
|
||||||
|
|
||||||
|
@ -30,7 +30,6 @@ import (
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/urfave/cli/v2"
)
@ -215,11 +214,6 @@ func (i *bbInput) sealClique(block *types.Block) (*types.Block, error) {

// BuildBlock constructs a block from the given inputs.
func BuildBlock(ctx *cli.Context) error {
// Configure the go-ethereum logger
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)

baseDir, err := createBasedir(ctx)
if err != nil {
return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
|
||||||
@@ -117,7 +117,7 @@ type rejectedTx struct {
 // Apply applies a set of transactions to a pre-state
 func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 	txIt txIterator, miningReward int64,
-	getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error)) (*state.StateDB, *ExecutionResult, []byte, error) {
+	getTracerFn func(txIndex int, txHash common.Hash) (vm.EVMLogger, error)) (*state.StateDB, *ExecutionResult, []byte, error) {
 	// Capture errors for BLOCKHASH operation, if we haven't been supplied the
 	// required blockhashes
 	var hashError error
@@ -140,6 +140,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 		rejectedTxs []*rejectedTx
 		includedTxs types.Transactions
 		gasUsed     = uint64(0)
+		blobGasUsed = uint64(0)
 		receipts    = make(types.Receipts, 0)
 		txIndex     = 0
 	)
@@ -189,7 +190,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 		evm := vm.NewEVM(vmContext, vm.TxContext{}, statedb, chainConfig, vmConfig)
 		core.ProcessBeaconBlockRoot(*beaconRoot, evm, statedb)
 	}
-	var blobGasUsed uint64
 
 	for i := 0; txIt.Next(); i++ {
 		tx, err := txIt.Tx()
@@ -210,15 +210,15 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 			rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()})
 			continue
 		}
+		txBlobGas := uint64(0)
 		if tx.Type() == types.BlobTxType {
-			txBlobGas := uint64(params.BlobTxBlobGasPerBlob * len(tx.BlobHashes()))
+			txBlobGas = uint64(params.BlobTxBlobGasPerBlob * len(tx.BlobHashes()))
 			if used, max := blobGasUsed+txBlobGas, uint64(params.MaxBlobGasPerBlock); used > max {
 				err := fmt.Errorf("blob gas (%d) would exceed maximum allowance %d", used, max)
 				log.Warn("rejected tx", "index", i, "err", err)
 				rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()})
 				continue
 			}
-			blobGasUsed += txBlobGas
 		}
 		tracer, err := getTracerFn(txIndex, tx.Hash())
 		if err != nil {
@@ -247,6 +247,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 		if hashError != nil {
 			return nil, nil, nil, NewError(ErrorMissingBlockhash, hashError)
 		}
+		blobGasUsed += txBlobGas
 		gasUsed += msgResult.UsedGas
 
 		// Receipt:
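Aside (not part of the diff): the execution.go hunks above defer the blobGasUsed += txBlobGas accumulation until after the transaction has actually been applied, so rejected transactions no longer consume blob gas. For orientation, the Cancun constants referenced here are params.BlobTxBlobGasPerBlob = 131072 (2^17) and params.MaxBlobGasPerBlock = 786432, i.e. at most six blobs per block. A small sketch of that arithmetic with illustrative function names:

package example

import "github.com/ethereum/go-ethereum/params"

// txBlobGas returns the blob gas for a transaction carrying the given number
// of blob hashes, e.g. 3 blobs -> 3 * 131072 = 393216.
func txBlobGas(blobHashes int) uint64 {
	return uint64(params.BlobTxBlobGasPerBlob * blobHashes)
}

// fitsInBlock reports whether adding txGas to the running total stays within
// the per-block blob gas allowance of 786432 (six blobs).
func fitsInBlock(blobGasUsed, txGas uint64) bool {
	return blobGasUsed+txGas <= params.MaxBlobGasPerBlock
}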
@@ -28,12 +28,15 @@ import (
 var (
 	TraceFlag = &cli.BoolFlag{
 		Name:  "trace",
-		Usage: "Output full trace logs to files <txhash>.jsonl",
+		Usage: "Configures the use of the JSON opcode tracer. This tracer emits traces to files as trace-<txIndex>-<txHash>.jsonl",
 	}
-	TraceDisableMemoryFlag = &cli.BoolFlag{
-		Name:  "trace.nomemory",
-		Value: true,
-		Usage: "Disable full memory dump in traces (deprecated)",
+	TraceTracerFlag = &cli.StringFlag{
+		Name:  "trace.tracer",
+		Usage: "Configures the use of a custom tracer, e.g native or js tracers. Examples are callTracer and 4byteTracer. These tracers emit results into files as trace-<txIndex>-<txHash>.json",
+	}
+	TraceTracerConfigFlag = &cli.StringFlag{
+		Name:  "trace.jsonconfig",
+		Usage: "The configurations for the custom tracer specified by --trace.tracer. If provided, must be in JSON format",
 	}
 	TraceEnableMemoryFlag = &cli.BoolFlag{
 		Name:  "trace.memory",
@@ -43,11 +46,6 @@ var (
 		Name:  "trace.nostack",
 		Usage: "Disable stack output in traces",
 	}
-	TraceDisableReturnDataFlag = &cli.BoolFlag{
-		Name:  "trace.noreturndata",
-		Value: true,
-		Usage: "Disable return data output in traces (deprecated)",
-	}
 	TraceEnableReturnDataFlag = &cli.BoolFlag{
 		Name:  "trace.returndata",
 		Usage: "Enable return data output in traces",
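Aside (not part of the diff): the two flags added above carry a tracer name and an optional JSON config, which transition.go (below) hands to the tracer directory. A hedged sketch of that lookup with illustrative names; an invocation such as evm t8n --trace.tracer callTracer --trace.jsonconfig '{"onlyTopCall":true}' would then write one trace-<txIndex>-<txHash>.json per transaction (onlyTopCall is an existing callTracer option, not something introduced here).

package example

import (
	"encoding/json"

	"github.com/ethereum/go-ethereum/eth/tracers"

	// Force-load the tracer engines so named tracers such as callTracer and
	// 4byteTracer are registered in the default directory.
	_ "github.com/ethereum/go-ethereum/eth/tracers/js"
	_ "github.com/ethereum/go-ethereum/eth/tracers/native"
)

// newNamedTracer resolves a tracer by name and passes through the raw JSON
// configuration taken from --trace.jsonconfig (empty string means no config).
func newNamedTracer(name, jsonCfg string) (tracers.Tracer, error) {
	var cfg json.RawMessage
	if jsonCfg != "" {
		cfg = []byte(jsonCfg)
	}
	return tracers.DefaultDirectory.New(name, nil, cfg)
}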
|
81
cmd/evm/internal/t8ntool/tracewriter.go
Normal file
81
cmd/evm/internal/t8ntool/tracewriter.go
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
// Copyright 2020 The go-ethereum Authors
|
||||||
|
// This file is part of go-ethereum.
|
||||||
|
//
|
||||||
|
// go-ethereum is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// go-ethereum is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package t8ntool
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
"github.com/ethereum/go-ethereum/eth/tracers"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// traceWriter is an vm.EVMLogger which also holds an inner logger/tracer.
|
||||||
|
// When the TxEnd event happens, the inner tracer result is written to the file, and
|
||||||
|
// the file is closed.
|
||||||
|
type traceWriter struct {
|
||||||
|
inner vm.EVMLogger
|
||||||
|
f io.WriteCloser
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compile-time interface check
|
||||||
|
var _ = vm.EVMLogger((*traceWriter)(nil))
|
||||||
|
|
||||||
|
func (t *traceWriter) CaptureTxEnd(restGas uint64) {
|
||||||
|
t.inner.CaptureTxEnd(restGas)
|
||||||
|
defer t.f.Close()
|
||||||
|
|
||||||
|
if tracer, ok := t.inner.(tracers.Tracer); ok {
|
||||||
|
result, err := tracer.GetResult()
|
||||||
|
if err != nil {
|
||||||
|
log.Warn("Error in tracer", "err", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = json.NewEncoder(t.f).Encode(result)
|
||||||
|
if err != nil {
|
||||||
|
log.Warn("Error writing tracer output", "err", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *traceWriter) CaptureTxStart(gasLimit uint64) { t.inner.CaptureTxStart(gasLimit) }
|
||||||
|
func (t *traceWriter) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
|
||||||
|
t.inner.CaptureStart(env, from, to, create, input, gas, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *traceWriter) CaptureEnd(output []byte, gasUsed uint64, err error) {
|
||||||
|
t.inner.CaptureEnd(output, gasUsed, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *traceWriter) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
|
||||||
|
t.inner.CaptureEnter(typ, from, to, input, gas, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *traceWriter) CaptureExit(output []byte, gasUsed uint64, err error) {
|
||||||
|
t.inner.CaptureExit(output, gasUsed, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *traceWriter) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
|
||||||
|
t.inner.CaptureState(pc, op, gas, cost, scope, rData, depth, err)
|
||||||
|
}
|
||||||
|
func (t *traceWriter) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
|
||||||
|
t.inner.CaptureFault(pc, op, gas, cost, scope, depth, err)
|
||||||
|
}
|
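Aside (not part of the diff): the new traceWriter forwards every EVMLogger callback to an inner tracer and, at CaptureTxEnd, encodes the tracer result (if any) into the supplied file before closing it. A minimal usage sketch, assuming it lives inside package t8ntool where traceWriter is declared; the helper name is illustrative, the real code builds the value inline in Transition.

package t8ntool

import (
	"os"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers/logger"
)

// newJSONTraceWriter opens a per-transaction trace file and wraps the JSON
// opcode logger in a traceWriter, so the file is closed when the transaction
// ends.
func newJSONTraceWriter(fileName string, cfg *logger.Config) (vm.EVMLogger, error) {
	f, err := os.Create(fileName)
	if err != nil {
		return nil, err
	}
	return &traceWriter{logger.NewJSONLogger(cfg, f), f}, nil
}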
@@ -28,7 +28,6 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/tests"
@@ -65,11 +64,6 @@ func (r *result) MarshalJSON() ([]byte, error) {
 }
 
 func Transaction(ctx *cli.Context) error {
-	// Configure the go-ethereum logger
-	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
-	glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
-	log.Root().SetHandler(glogger)
-
 	var (
 		err error
 	)
@@ -31,6 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/eth/tracers"
 	"github.com/ethereum/go-ethereum/eth/tracers/logger"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
@@ -80,62 +81,43 @@ type input struct {
 }
 
 func Transition(ctx *cli.Context) error {
-	// Configure the go-ethereum logger
-	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
-	glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
-	log.Root().SetHandler(glogger)
-
-	var (
-		err    error
-		tracer vm.EVMLogger
-	)
-	var getTracer func(txIndex int, txHash common.Hash) (vm.EVMLogger, error)
+	var getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) { return nil, nil }
 
 	baseDir, err := createBasedir(ctx)
 	if err != nil {
 		return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
 	}
-	if ctx.Bool(TraceFlag.Name) {
-		if ctx.IsSet(TraceDisableMemoryFlag.Name) && ctx.IsSet(TraceEnableMemoryFlag.Name) {
-			return NewError(ErrorConfig, fmt.Errorf("can't use both flags --%s and --%s", TraceDisableMemoryFlag.Name, TraceEnableMemoryFlag.Name))
-		}
-		if ctx.IsSet(TraceDisableReturnDataFlag.Name) && ctx.IsSet(TraceEnableReturnDataFlag.Name) {
-			return NewError(ErrorConfig, fmt.Errorf("can't use both flags --%s and --%s", TraceDisableReturnDataFlag.Name, TraceEnableReturnDataFlag.Name))
-		}
-		if ctx.IsSet(TraceDisableMemoryFlag.Name) {
-			log.Warn(fmt.Sprintf("--%s has been deprecated in favour of --%s", TraceDisableMemoryFlag.Name, TraceEnableMemoryFlag.Name))
-		}
-		if ctx.IsSet(TraceDisableReturnDataFlag.Name) {
-			log.Warn(fmt.Sprintf("--%s has been deprecated in favour of --%s", TraceDisableReturnDataFlag.Name, TraceEnableReturnDataFlag.Name))
-		}
+
+	if ctx.Bool(TraceFlag.Name) { // JSON opcode tracing
 		// Configure the EVM logger
 		logConfig := &logger.Config{
 			DisableStack:     ctx.Bool(TraceDisableStackFlag.Name),
-			EnableMemory:     !ctx.Bool(TraceDisableMemoryFlag.Name) || ctx.Bool(TraceEnableMemoryFlag.Name),
-			EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name) || ctx.Bool(TraceEnableReturnDataFlag.Name),
+			EnableMemory:     ctx.Bool(TraceEnableMemoryFlag.Name),
+			EnableReturnData: ctx.Bool(TraceEnableReturnDataFlag.Name),
 			Debug:            true,
 		}
-		var prevFile *os.File
-		// This one closes the last file
-		defer func() {
-			if prevFile != nil {
-				prevFile.Close()
-			}
-		}()
 		getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) {
-			if prevFile != nil {
-				prevFile.Close()
-			}
 			traceFile, err := os.Create(path.Join(baseDir, fmt.Sprintf("trace-%d-%v.jsonl", txIndex, txHash.String())))
 			if err != nil {
 				return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
 			}
-			prevFile = traceFile
-			return logger.NewJSONLogger(logConfig, traceFile), nil
+			return &traceWriter{logger.NewJSONLogger(logConfig, traceFile), traceFile}, nil
 		}
-	} else {
-		getTracer = func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error) {
-			return nil, nil
+	} else if ctx.IsSet(TraceTracerFlag.Name) {
+		var config json.RawMessage
+		if ctx.IsSet(TraceTracerConfigFlag.Name) {
+			config = []byte(ctx.String(TraceTracerConfigFlag.Name))
+		}
+		getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) {
+			traceFile, err := os.Create(path.Join(baseDir, fmt.Sprintf("trace-%d-%v.json", txIndex, txHash.String())))
+			if err != nil {
+				return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
+			}
+			tracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name), nil, config)
+			if err != nil {
+				return nil, NewError(ErrorConfig, fmt.Errorf("failed instantiating tracer: %w", err))
+			}
+			return &traceWriter{tracer, traceFile}, nil
 		}
 	}
 	// We need to load three things: alloc, env and transactions. May be either in
@@ -174,9 +156,7 @@ func Transition(ctx *cli.Context) error {
 	}
 	prestate.Env = *inputData.Env
 
-	vmConfig := vm.Config{
-		Tracer: tracer,
-	}
+	vmConfig := vm.Config{}
 	// Construct the chainconfig
 	var chainConfig *params.ChainConfig
 	if cConf, extraEips, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil {
@@ -26,6 +26,10 @@ import (
 	"github.com/ethereum/go-ethereum/internal/debug"
 	"github.com/ethereum/go-ethereum/internal/flags"
 	"github.com/urfave/cli/v2"
+
+	// Force-load the tracer engines to trigger registration
+	_ "github.com/ethereum/go-ethereum/eth/tracers/js"
+	_ "github.com/ethereum/go-ethereum/eth/tracers/native"
 )
 
 var (
@@ -139,14 +143,14 @@ var (
 var stateTransitionCommand = &cli.Command{
 	Name:    "transition",
 	Aliases: []string{"t8n"},
-	Usage:   "executes a full state transition",
+	Usage:   "Executes a full state transition",
 	Action:  t8ntool.Transition,
 	Flags: []cli.Flag{
 		t8ntool.TraceFlag,
-		t8ntool.TraceDisableMemoryFlag,
+		t8ntool.TraceTracerFlag,
+		t8ntool.TraceTracerConfigFlag,
 		t8ntool.TraceEnableMemoryFlag,
 		t8ntool.TraceDisableStackFlag,
-		t8ntool.TraceDisableReturnDataFlag,
 		t8ntool.TraceEnableReturnDataFlag,
 		t8ntool.OutputBasedir,
 		t8ntool.OutputAllocFlag,
@@ -158,27 +162,25 @@ var stateTransitionCommand = &cli.Command{
 		t8ntool.ForknameFlag,
 		t8ntool.ChainIDFlag,
 		t8ntool.RewardFlag,
-		t8ntool.VerbosityFlag,
 	},
 }
 
 var transactionCommand = &cli.Command{
 	Name:    "transaction",
 	Aliases: []string{"t9n"},
-	Usage:   "performs transaction validation",
+	Usage:   "Performs transaction validation",
 	Action:  t8ntool.Transaction,
 	Flags: []cli.Flag{
 		t8ntool.InputTxsFlag,
 		t8ntool.ChainIDFlag,
 		t8ntool.ForknameFlag,
-		t8ntool.VerbosityFlag,
 	},
 }
 
 var blockBuilderCommand = &cli.Command{
 	Name:    "block-builder",
 	Aliases: []string{"b11r"},
-	Usage:   "builds a block",
+	Usage:   "Builds a block",
 	Action:  t8ntool.BuildBlock,
 	Flags: []cli.Flag{
 		t8ntool.OutputBasedir,
@@ -188,7 +190,6 @@ var blockBuilderCommand = &cli.Command{
 		t8ntool.InputWithdrawalsFlag,
 		t8ntool.InputTxsRlpFlag,
 		t8ntool.SealCliqueFlag,
-		t8ntool.VerbosityFlag,
 	},
 }
 
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user