Merge commit 'da6cdaf63' into merge/geth-v1.13.6
This commit is contained in:
commit
ac499a7ff1
23
.github/workflows/go.yml
vendored
Normal file
23
.github/workflows/go.yml
vendored
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
name: i386 linux tests
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ master ]
|
||||||
|
pull_request:
|
||||||
|
branches: [ master ]
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
runs-on: self-hosted
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: 1.21.4
|
||||||
|
- name: Run tests
|
||||||
|
run: go test ./...
|
||||||
|
env:
|
||||||
|
GOOS: linux
|
||||||
|
GOARCH: 386
|
@ -4,7 +4,7 @@ ARG VERSION=""
|
|||||||
ARG BUILDNUM=""
|
ARG BUILDNUM=""
|
||||||
|
|
||||||
# Build Geth in a stock Go builder container
|
# Build Geth in a stock Go builder container
|
||||||
FROM golang:1.20-alpine as builder
|
FROM golang:1.21-alpine as builder
|
||||||
|
|
||||||
RUN apk add --no-cache gcc musl-dev linux-headers git
|
RUN apk add --no-cache gcc musl-dev linux-headers git
|
||||||
|
|
||||||
|
@ -251,7 +251,7 @@ var revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4]
|
|||||||
var panicSelector = crypto.Keccak256([]byte("Panic(uint256)"))[:4]
|
var panicSelector = crypto.Keccak256([]byte("Panic(uint256)"))[:4]
|
||||||
|
|
||||||
// panicReasons map is for readable panic codes
|
// panicReasons map is for readable panic codes
|
||||||
// see this linkage for the deails
|
// see this linkage for the details
|
||||||
// https://docs.soliditylang.org/en/v0.8.21/control-structures.html#panic-via-assert-and-error-via-require
|
// https://docs.soliditylang.org/en/v0.8.21/control-structures.html#panic-via-assert-and-error-via-require
|
||||||
// the reason string list is copied from ether.js
|
// the reason string list is copied from ether.js
|
||||||
// https://github.com/ethers-io/ethers.js/blob/fa3a883ff7c88611ce766f58bdd4b8ac90814470/src.ts/abi/interface.ts#L207-L218
|
// https://github.com/ethers-io/ethers.js/blob/fa3a883ff7c88611ce766f58bdd4b8ac90814470/src.ts/abi/interface.ts#L207-L218
|
||||||
|
@ -120,6 +120,7 @@ var methods = map[string]Method{
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestReader(t *testing.T) {
|
func TestReader(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
abi := ABI{
|
abi := ABI{
|
||||||
Methods: methods,
|
Methods: methods,
|
||||||
}
|
}
|
||||||
@ -151,6 +152,7 @@ func TestReader(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestInvalidABI(t *testing.T) {
|
func TestInvalidABI(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
json := `[{ "type" : "function", "name" : "", "constant" : fals }]`
|
json := `[{ "type" : "function", "name" : "", "constant" : fals }]`
|
||||||
_, err := JSON(strings.NewReader(json))
|
_, err := JSON(strings.NewReader(json))
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@ -170,6 +172,7 @@ func TestInvalidABI(t *testing.T) {
|
|||||||
// constructor(uint256 a, uint256 b) public{}
|
// constructor(uint256 a, uint256 b) public{}
|
||||||
// }
|
// }
|
||||||
func TestConstructor(t *testing.T) {
|
func TestConstructor(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
json := `[{ "inputs": [{"internalType": "uint256","name": "a","type": "uint256" },{ "internalType": "uint256","name": "b","type": "uint256"}],"stateMutability": "nonpayable","type": "constructor"}]`
|
json := `[{ "inputs": [{"internalType": "uint256","name": "a","type": "uint256" },{ "internalType": "uint256","name": "b","type": "uint256"}],"stateMutability": "nonpayable","type": "constructor"}]`
|
||||||
method := NewMethod("", "", Constructor, "nonpayable", false, false, []Argument{{"a", Uint256, false}, {"b", Uint256, false}}, nil)
|
method := NewMethod("", "", Constructor, "nonpayable", false, false, []Argument{{"a", Uint256, false}, {"b", Uint256, false}}, nil)
|
||||||
// Test from JSON
|
// Test from JSON
|
||||||
@ -199,6 +202,7 @@ func TestConstructor(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestTestNumbers(t *testing.T) {
|
func TestTestNumbers(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
abi, err := JSON(strings.NewReader(jsondata))
|
abi, err := JSON(strings.NewReader(jsondata))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -236,6 +240,7 @@ func TestTestNumbers(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMethodSignature(t *testing.T) {
|
func TestMethodSignature(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
m := NewMethod("foo", "foo", Function, "", false, false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil)
|
m := NewMethod("foo", "foo", Function, "", false, false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil)
|
||||||
exp := "foo(string,string)"
|
exp := "foo(string,string)"
|
||||||
if m.Sig != exp {
|
if m.Sig != exp {
|
||||||
@ -274,6 +279,7 @@ func TestMethodSignature(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestOverloadedMethodSignature(t *testing.T) {
|
func TestOverloadedMethodSignature(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
json := `[{"constant":true,"inputs":[{"name":"i","type":"uint256"},{"name":"j","type":"uint256"}],"name":"foo","outputs":[],"payable":false,"stateMutability":"pure","type":"function"},{"constant":true,"inputs":[{"name":"i","type":"uint256"}],"name":"foo","outputs":[],"payable":false,"stateMutability":"pure","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"i","type":"uint256"}],"name":"bar","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"i","type":"uint256"},{"indexed":false,"name":"j","type":"uint256"}],"name":"bar","type":"event"}]`
|
json := `[{"constant":true,"inputs":[{"name":"i","type":"uint256"},{"name":"j","type":"uint256"}],"name":"foo","outputs":[],"payable":false,"stateMutability":"pure","type":"function"},{"constant":true,"inputs":[{"name":"i","type":"uint256"}],"name":"foo","outputs":[],"payable":false,"stateMutability":"pure","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"i","type":"uint256"}],"name":"bar","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"i","type":"uint256"},{"indexed":false,"name":"j","type":"uint256"}],"name":"bar","type":"event"}]`
|
||||||
abi, err := JSON(strings.NewReader(json))
|
abi, err := JSON(strings.NewReader(json))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -297,6 +303,7 @@ func TestOverloadedMethodSignature(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCustomErrors(t *testing.T) {
|
func TestCustomErrors(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
json := `[{ "inputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ],"name": "MyError", "type": "error"} ]`
|
json := `[{ "inputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ],"name": "MyError", "type": "error"} ]`
|
||||||
abi, err := JSON(strings.NewReader(json))
|
abi, err := JSON(strings.NewReader(json))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -311,6 +318,7 @@ func TestCustomErrors(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiPack(t *testing.T) {
|
func TestMultiPack(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
abi, err := JSON(strings.NewReader(jsondata))
|
abi, err := JSON(strings.NewReader(jsondata))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -348,6 +356,7 @@ func ExampleJSON() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestInputVariableInputLength(t *testing.T) {
|
func TestInputVariableInputLength(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const definition = `[
|
const definition = `[
|
||||||
{ "type" : "function", "name" : "strOne", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" } ] },
|
{ "type" : "function", "name" : "strOne", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" } ] },
|
||||||
{ "type" : "function", "name" : "bytesOne", "constant" : true, "inputs" : [ { "name" : "str", "type" : "bytes" } ] },
|
{ "type" : "function", "name" : "bytesOne", "constant" : true, "inputs" : [ { "name" : "str", "type" : "bytes" } ] },
|
||||||
@ -476,6 +485,7 @@ func TestInputVariableInputLength(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestInputFixedArrayAndVariableInputLength(t *testing.T) {
|
func TestInputFixedArrayAndVariableInputLength(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
abi, err := JSON(strings.NewReader(jsondata))
|
abi, err := JSON(strings.NewReader(jsondata))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
@ -650,6 +660,7 @@ func TestInputFixedArrayAndVariableInputLength(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestDefaultFunctionParsing(t *testing.T) {
|
func TestDefaultFunctionParsing(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const definition = `[{ "name" : "balance", "type" : "function" }]`
|
const definition = `[{ "name" : "balance", "type" : "function" }]`
|
||||||
|
|
||||||
abi, err := JSON(strings.NewReader(definition))
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
@ -663,6 +674,7 @@ func TestDefaultFunctionParsing(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBareEvents(t *testing.T) {
|
func TestBareEvents(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const definition = `[
|
const definition = `[
|
||||||
{ "type" : "event", "name" : "balance" },
|
{ "type" : "event", "name" : "balance" },
|
||||||
{ "type" : "event", "name" : "anon", "anonymous" : true},
|
{ "type" : "event", "name" : "anon", "anonymous" : true},
|
||||||
@ -739,6 +751,7 @@ func TestBareEvents(t *testing.T) {
|
|||||||
//
|
//
|
||||||
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
|
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
|
||||||
func TestUnpackEvent(t *testing.T) {
|
func TestUnpackEvent(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]`
|
const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]`
|
||||||
abi, err := JSON(strings.NewReader(abiJSON))
|
abi, err := JSON(strings.NewReader(abiJSON))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -777,6 +790,7 @@ func TestUnpackEvent(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnpackEventIntoMap(t *testing.T) {
|
func TestUnpackEventIntoMap(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]`
|
const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]`
|
||||||
abi, err := JSON(strings.NewReader(abiJSON))
|
abi, err := JSON(strings.NewReader(abiJSON))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -827,6 +841,7 @@ func TestUnpackEventIntoMap(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnpackMethodIntoMap(t *testing.T) {
|
func TestUnpackMethodIntoMap(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[],"name":"send","outputs":[{"name":"amount","type":"uint256"}],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[{"name":"addr","type":"address"}],"name":"get","outputs":[{"name":"hash","type":"bytes"}],"payable":true,"stateMutability":"payable","type":"function"}]`
|
const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[],"name":"send","outputs":[{"name":"amount","type":"uint256"}],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[{"name":"addr","type":"address"}],"name":"get","outputs":[{"name":"hash","type":"bytes"}],"payable":true,"stateMutability":"payable","type":"function"}]`
|
||||||
abi, err := JSON(strings.NewReader(abiJSON))
|
abi, err := JSON(strings.NewReader(abiJSON))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -877,6 +892,7 @@ func TestUnpackMethodIntoMap(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnpackIntoMapNamingConflict(t *testing.T) {
|
func TestUnpackIntoMapNamingConflict(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// Two methods have the same name
|
// Two methods have the same name
|
||||||
var abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"get","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[],"name":"send","outputs":[{"name":"amount","type":"uint256"}],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[{"name":"addr","type":"address"}],"name":"get","outputs":[{"name":"hash","type":"bytes"}],"payable":true,"stateMutability":"payable","type":"function"}]`
|
var abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"get","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[],"name":"send","outputs":[{"name":"amount","type":"uint256"}],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[{"name":"addr","type":"address"}],"name":"get","outputs":[{"name":"hash","type":"bytes"}],"payable":true,"stateMutability":"payable","type":"function"}]`
|
||||||
abi, err := JSON(strings.NewReader(abiJSON))
|
abi, err := JSON(strings.NewReader(abiJSON))
|
||||||
@ -960,6 +976,7 @@ func TestUnpackIntoMapNamingConflict(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestABI_MethodById(t *testing.T) {
|
func TestABI_MethodById(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
abi, err := JSON(strings.NewReader(jsondata))
|
abi, err := JSON(strings.NewReader(jsondata))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -992,6 +1009,7 @@ func TestABI_MethodById(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestABI_EventById(t *testing.T) {
|
func TestABI_EventById(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
json string
|
json string
|
||||||
@ -1058,6 +1076,7 @@ func TestABI_EventById(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestABI_ErrorByID(t *testing.T) {
|
func TestABI_ErrorByID(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
abi, err := JSON(strings.NewReader(`[
|
abi, err := JSON(strings.NewReader(`[
|
||||||
{"inputs":[{"internalType":"uint256","name":"x","type":"uint256"}],"name":"MyError1","type":"error"},
|
{"inputs":[{"internalType":"uint256","name":"x","type":"uint256"}],"name":"MyError1","type":"error"},
|
||||||
{"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"string","name":"b","type":"string"},{"internalType":"address","name":"c","type":"address"}],"internalType":"struct MyError.MyStruct","name":"x","type":"tuple"},{"internalType":"address","name":"y","type":"address"},{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"string","name":"b","type":"string"},{"internalType":"address","name":"c","type":"address"}],"internalType":"struct MyError.MyStruct","name":"z","type":"tuple"}],"name":"MyError2","type":"error"},
|
{"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"string","name":"b","type":"string"},{"internalType":"address","name":"c","type":"address"}],"internalType":"struct MyError.MyStruct","name":"x","type":"tuple"},{"internalType":"address","name":"y","type":"address"},{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"string","name":"b","type":"string"},{"internalType":"address","name":"c","type":"address"}],"internalType":"struct MyError.MyStruct","name":"z","type":"tuple"}],"name":"MyError2","type":"error"},
|
||||||
@ -1088,6 +1107,7 @@ func TestABI_ErrorByID(t *testing.T) {
|
|||||||
// TestDoubleDuplicateMethodNames checks that if transfer0 already exists, there won't be a name
|
// TestDoubleDuplicateMethodNames checks that if transfer0 already exists, there won't be a name
|
||||||
// conflict and that the second transfer method will be renamed transfer1.
|
// conflict and that the second transfer method will be renamed transfer1.
|
||||||
func TestDoubleDuplicateMethodNames(t *testing.T) {
|
func TestDoubleDuplicateMethodNames(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
abiJSON := `[{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"}],"name":"transfer0","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"},{"name":"customFallback","type":"string"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]`
|
abiJSON := `[{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"}],"name":"transfer0","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"},{"name":"data","type":"bytes"},{"name":"customFallback","type":"string"}],"name":"transfer","outputs":[{"name":"ok","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]`
|
||||||
contractAbi, err := JSON(strings.NewReader(abiJSON))
|
contractAbi, err := JSON(strings.NewReader(abiJSON))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -1117,6 +1137,7 @@ func TestDoubleDuplicateMethodNames(t *testing.T) {
|
|||||||
// event send();
|
// event send();
|
||||||
// }
|
// }
|
||||||
func TestDoubleDuplicateEventNames(t *testing.T) {
|
func TestDoubleDuplicateEventNames(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
abiJSON := `[{"anonymous": false,"inputs": [{"indexed": false,"internalType": "uint256","name": "a","type": "uint256"}],"name": "send","type": "event"},{"anonymous": false,"inputs": [],"name": "send0","type": "event"},{ "anonymous": false, "inputs": [],"name": "send","type": "event"}]`
|
abiJSON := `[{"anonymous": false,"inputs": [{"indexed": false,"internalType": "uint256","name": "a","type": "uint256"}],"name": "send","type": "event"},{"anonymous": false,"inputs": [],"name": "send0","type": "event"},{ "anonymous": false, "inputs": [],"name": "send","type": "event"}]`
|
||||||
contractAbi, err := JSON(strings.NewReader(abiJSON))
|
contractAbi, err := JSON(strings.NewReader(abiJSON))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -1144,6 +1165,7 @@ func TestDoubleDuplicateEventNames(t *testing.T) {
|
|||||||
// event send(uint256, uint256);
|
// event send(uint256, uint256);
|
||||||
// }
|
// }
|
||||||
func TestUnnamedEventParam(t *testing.T) {
|
func TestUnnamedEventParam(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
abiJSON := `[{ "anonymous": false, "inputs": [{ "indexed": false,"internalType": "uint256", "name": "","type": "uint256"},{"indexed": false,"internalType": "uint256","name": "","type": "uint256"}],"name": "send","type": "event"}]`
|
abiJSON := `[{ "anonymous": false, "inputs": [{ "indexed": false,"internalType": "uint256", "name": "","type": "uint256"},{"indexed": false,"internalType": "uint256","name": "","type": "uint256"}],"name": "send","type": "event"}]`
|
||||||
contractAbi, err := JSON(strings.NewReader(abiJSON))
|
contractAbi, err := JSON(strings.NewReader(abiJSON))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -1177,7 +1199,9 @@ func TestUnpackRevert(t *testing.T) {
|
|||||||
{"4e487b7100000000000000000000000000000000000000000000000000000000000000ff", "unknown panic code: 0xff", nil},
|
{"4e487b7100000000000000000000000000000000000000000000000000000000000000ff", "unknown panic code: 0xff", nil},
|
||||||
}
|
}
|
||||||
for index, c := range cases {
|
for index, c := range cases {
|
||||||
|
index, c := index, c
|
||||||
t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {
|
t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
got, err := UnpackRevert(common.Hex2Bytes(c.input))
|
got, err := UnpackRevert(common.Hex2Bytes(c.input))
|
||||||
if c.expectErr != nil {
|
if c.expectErr != nil {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
@ -22,33 +22,32 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
|
||||||
fuzz "github.com/google/gofuzz"
|
fuzz "github.com/google/gofuzz"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestReplicate can be used to replicate crashers from the fuzzing tests.
|
// TestReplicate can be used to replicate crashers from the fuzzing tests.
|
||||||
// Just replace testString with the data in .quoted
|
// Just replace testString with the data in .quoted
|
||||||
func TestReplicate(t *testing.T) {
|
func TestReplicate(t *testing.T) {
|
||||||
testString := "\x20\x20\x20\x20\x20\x20\x20\x20\x80\x00\x00\x00\x20\x20\x20\x20\x00"
|
t.Parallel()
|
||||||
data := []byte(testString)
|
//t.Skip("Test only useful for reproducing issues")
|
||||||
fuzzAbi(data)
|
fuzzAbi([]byte("\x20\x20\x20\x20\x20\x20\x20\x20\x80\x00\x00\x00\x20\x20\x20\x20\x00"))
|
||||||
|
//fuzzAbi([]byte("asdfasdfkadsf;lasdf;lasd;lfk"))
|
||||||
}
|
}
|
||||||
|
|
||||||
func Fuzz(f *testing.F) {
|
// FuzzABI is the main entrypoint for fuzzing
|
||||||
|
func FuzzABI(f *testing.F) {
|
||||||
f.Fuzz(func(t *testing.T, data []byte) {
|
f.Fuzz(func(t *testing.T, data []byte) {
|
||||||
fuzzAbi(data)
|
fuzzAbi(data)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
names = []string{"_name", "name", "NAME", "name_", "__", "_name_", "n"}
|
names = []string{"_name", "name", "NAME", "name_", "__", "_name_", "n"}
|
||||||
stateMut = []string{"", "pure", "view", "payable"}
|
stateMut = []string{"pure", "view", "payable"}
|
||||||
stateMutabilites = []*string{&stateMut[0], &stateMut[1], &stateMut[2], &stateMut[3]}
|
pays = []string{"true", "false"}
|
||||||
pays = []string{"", "true", "false"}
|
vNames = []string{"a", "b", "c", "d", "e", "f", "g"}
|
||||||
payables = []*string{&pays[0], &pays[1]}
|
varNames = append(vNames, names...)
|
||||||
vNames = []string{"a", "b", "c", "d", "e", "f", "g"}
|
varTypes = []string{"bool", "address", "bytes", "string",
|
||||||
varNames = append(vNames, names...)
|
|
||||||
varTypes = []string{"bool", "address", "bytes", "string",
|
|
||||||
"uint8", "int8", "uint8", "int8", "uint16", "int16",
|
"uint8", "int8", "uint8", "int8", "uint16", "int16",
|
||||||
"uint24", "int24", "uint32", "int32", "uint40", "int40", "uint48", "int48", "uint56", "int56",
|
"uint24", "int24", "uint32", "int32", "uint40", "int40", "uint48", "int48", "uint56", "int56",
|
||||||
"uint64", "int64", "uint72", "int72", "uint80", "int80", "uint88", "int88", "uint96", "int96",
|
"uint64", "int64", "uint72", "int72", "uint80", "int80", "uint88", "int88", "uint96", "int96",
|
||||||
@ -62,7 +61,7 @@ var (
|
|||||||
"bytes32", "bytes"}
|
"bytes32", "bytes"}
|
||||||
)
|
)
|
||||||
|
|
||||||
func unpackPack(abi abi.ABI, method string, input []byte) ([]interface{}, bool) {
|
func unpackPack(abi ABI, method string, input []byte) ([]interface{}, bool) {
|
||||||
if out, err := abi.Unpack(method, input); err == nil {
|
if out, err := abi.Unpack(method, input); err == nil {
|
||||||
_, err := abi.Pack(method, out...)
|
_, err := abi.Pack(method, out...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -78,7 +77,7 @@ func unpackPack(abi abi.ABI, method string, input []byte) ([]interface{}, bool)
|
|||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
|
|
||||||
func packUnpack(abi abi.ABI, method string, input *[]interface{}) bool {
|
func packUnpack(abi ABI, method string, input *[]interface{}) bool {
|
||||||
if packed, err := abi.Pack(method, input); err == nil {
|
if packed, err := abi.Pack(method, input); err == nil {
|
||||||
outptr := reflect.New(reflect.TypeOf(input))
|
outptr := reflect.New(reflect.TypeOf(input))
|
||||||
err := abi.UnpackIntoInterface(outptr.Interface(), method, packed)
|
err := abi.UnpackIntoInterface(outptr.Interface(), method, packed)
|
||||||
@ -94,12 +93,12 @@ func packUnpack(abi abi.ABI, method string, input *[]interface{}) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
type args struct {
|
type arg struct {
|
||||||
name string
|
name string
|
||||||
typ string
|
typ string
|
||||||
}
|
}
|
||||||
|
|
||||||
func createABI(name string, stateMutability, payable *string, inputs []args) (abi.ABI, error) {
|
func createABI(name string, stateMutability, payable *string, inputs []arg) (ABI, error) {
|
||||||
sig := fmt.Sprintf(`[{ "type" : "function", "name" : "%v" `, name)
|
sig := fmt.Sprintf(`[{ "type" : "function", "name" : "%v" `, name)
|
||||||
if stateMutability != nil {
|
if stateMutability != nil {
|
||||||
sig += fmt.Sprintf(`, "stateMutability": "%v" `, *stateMutability)
|
sig += fmt.Sprintf(`, "stateMutability": "%v" `, *stateMutability)
|
||||||
@ -126,56 +125,55 @@ func createABI(name string, stateMutability, payable *string, inputs []args) (ab
|
|||||||
sig += "} ]"
|
sig += "} ]"
|
||||||
}
|
}
|
||||||
sig += `}]`
|
sig += `}]`
|
||||||
|
//fmt.Printf("sig: %s\n", sig)
|
||||||
return abi.JSON(strings.NewReader(sig))
|
return JSON(strings.NewReader(sig))
|
||||||
}
|
}
|
||||||
|
|
||||||
func fuzzAbi(input []byte) int {
|
func fuzzAbi(input []byte) {
|
||||||
good := false
|
var (
|
||||||
fuzzer := fuzz.NewFromGoFuzz(input)
|
fuzzer = fuzz.NewFromGoFuzz(input)
|
||||||
|
name = oneOf(fuzzer, names)
|
||||||
name := names[getUInt(fuzzer)%len(names)]
|
stateM = oneOfOrNil(fuzzer, stateMut)
|
||||||
stateM := stateMutabilites[getUInt(fuzzer)%len(stateMutabilites)]
|
payable = oneOfOrNil(fuzzer, pays)
|
||||||
payable := payables[getUInt(fuzzer)%len(payables)]
|
arguments []arg
|
||||||
maxLen := 5
|
)
|
||||||
for k := 1; k < maxLen; k++ {
|
for i := 0; i < upTo(fuzzer, 10); i++ {
|
||||||
var arg []args
|
argName := oneOf(fuzzer, varNames)
|
||||||
for i := k; i > 0; i-- {
|
argTyp := oneOf(fuzzer, varTypes)
|
||||||
argName := varNames[i]
|
switch upTo(fuzzer, 10) {
|
||||||
argTyp := varTypes[getUInt(fuzzer)%len(varTypes)]
|
case 0: // 10% chance to make it a slice
|
||||||
if getUInt(fuzzer)%10 == 0 {
|
argTyp += "[]"
|
||||||
argTyp += "[]"
|
case 1: // 10% chance to make it an array
|
||||||
} else if getUInt(fuzzer)%10 == 0 {
|
argTyp += fmt.Sprintf("[%d]", 1+upTo(fuzzer, 30))
|
||||||
arrayArgs := getUInt(fuzzer)%30 + 1
|
default:
|
||||||
argTyp += fmt.Sprintf("[%d]", arrayArgs)
|
|
||||||
}
|
|
||||||
arg = append(arg, args{
|
|
||||||
name: argName,
|
|
||||||
typ: argTyp,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
abi, err := createABI(name, stateM, payable, arg)
|
arguments = append(arguments, arg{name: argName, typ: argTyp})
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
structs, b := unpackPack(abi, name, input)
|
|
||||||
c := packUnpack(abi, name, &structs)
|
|
||||||
good = good || b || c
|
|
||||||
}
|
}
|
||||||
if good {
|
abi, err := createABI(name, stateM, payable, arguments)
|
||||||
return 1
|
if err != nil {
|
||||||
|
//fmt.Printf("err: %v\n", err)
|
||||||
|
panic(err)
|
||||||
}
|
}
|
||||||
return 0
|
structs, _ := unpackPack(abi, name, input)
|
||||||
|
_ = packUnpack(abi, name, &structs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getUInt(fuzzer *fuzz.Fuzzer) int {
|
func upTo(fuzzer *fuzz.Fuzzer, max int) int {
|
||||||
var i int
|
var i int
|
||||||
fuzzer.Fuzz(&i)
|
fuzzer.Fuzz(&i)
|
||||||
if i < 0 {
|
if i < 0 {
|
||||||
i = -i
|
return (-1 - i) % max
|
||||||
if i < 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return i
|
return i % max
|
||||||
|
}
|
||||||
|
|
||||||
|
func oneOf(fuzzer *fuzz.Fuzzer, options []string) string {
|
||||||
|
return options[upTo(fuzzer, len(options))]
|
||||||
|
}
|
||||||
|
|
||||||
|
func oneOfOrNil(fuzzer *fuzz.Fuzzer, options []string) *string {
|
||||||
|
if i := upTo(fuzzer, len(options)+1); i < len(options) {
|
||||||
|
return &options[i]
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
@ -80,7 +80,7 @@ func (arguments Arguments) isTuple() bool {
|
|||||||
func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
|
func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
|
||||||
if len(data) == 0 {
|
if len(data) == 0 {
|
||||||
if len(arguments.NonIndexed()) != 0 {
|
if len(arguments.NonIndexed()) != 0 {
|
||||||
return nil, errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
|
return nil, errors.New("abi: attempting to unmarshal an empty string while arguments are expected")
|
||||||
}
|
}
|
||||||
return make([]interface{}, 0), nil
|
return make([]interface{}, 0), nil
|
||||||
}
|
}
|
||||||
@ -95,7 +95,7 @@ func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte)
|
|||||||
}
|
}
|
||||||
if len(data) == 0 {
|
if len(data) == 0 {
|
||||||
if len(arguments.NonIndexed()) != 0 {
|
if len(arguments.NonIndexed()) != 0 {
|
||||||
return errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
|
return errors.New("abi: attempting to unmarshal an empty string while arguments are expected")
|
||||||
}
|
}
|
||||||
return nil // Nothing to unmarshal, return
|
return nil // Nothing to unmarshal, return
|
||||||
}
|
}
|
||||||
|
@ -56,7 +56,7 @@ func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewKeyStoreTransactor is a utility method to easily create a transaction signer from
|
// NewKeyStoreTransactor is a utility method to easily create a transaction signer from
|
||||||
// an decrypted key from a keystore.
|
// a decrypted key from a keystore.
|
||||||
//
|
//
|
||||||
// Deprecated: Use NewKeyStoreTransactorWithChainID instead.
|
// Deprecated: Use NewKeyStoreTransactorWithChainID instead.
|
||||||
func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account) (*TransactOpts, error) {
|
func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account) (*TransactOpts, error) {
|
||||||
@ -117,7 +117,7 @@ func NewTransactorWithChainID(keyin io.Reader, passphrase string, chainID *big.I
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewKeyStoreTransactorWithChainID is a utility method to easily create a transaction signer from
|
// NewKeyStoreTransactorWithChainID is a utility method to easily create a transaction signer from
|
||||||
// an decrypted key from a keystore.
|
// a decrypted key from a keystore.
|
||||||
func NewKeyStoreTransactorWithChainID(keystore *keystore.KeyStore, account accounts.Account, chainID *big.Int) (*TransactOpts, error) {
|
func NewKeyStoreTransactorWithChainID(keystore *keystore.KeyStore, account accounts.Account, chainID *big.Int) (*TransactOpts, error) {
|
||||||
if chainID == nil {
|
if chainID == nil {
|
||||||
return nil, ErrNoChainID
|
return nil, ErrNoChainID
|
||||||
|
@ -75,7 +75,7 @@ type BlockHashContractCaller interface {
|
|||||||
// CodeAtHash returns the code of the given account in the state at the specified block hash.
|
// CodeAtHash returns the code of the given account in the state at the specified block hash.
|
||||||
CodeAtHash(ctx context.Context, contract common.Address, blockHash common.Hash) ([]byte, error)
|
CodeAtHash(ctx context.Context, contract common.Address, blockHash common.Hash) ([]byte, error)
|
||||||
|
|
||||||
// CallContractAtHash executes an Ethereum contract all against the state at the specified block hash.
|
// CallContractAtHash executes an Ethereum contract call against the state at the specified block hash.
|
||||||
CallContractAtHash(ctx context.Context, call ethereum.CallMsg, blockHash common.Hash) ([]byte, error)
|
CallContractAtHash(ctx context.Context, call ethereum.CallMsg, blockHash common.Hash) ([]byte, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -846,7 +846,7 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
|
|||||||
defer b.mu.Unlock()
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
if len(b.pendingBlock.Transactions()) != 0 {
|
if len(b.pendingBlock.Transactions()) != 0 {
|
||||||
return errors.New("Could not adjust time on non-empty block")
|
return errors.New("could not adjust time on non-empty block")
|
||||||
}
|
}
|
||||||
// Get the last block
|
// Get the last block
|
||||||
block := b.blockchain.GetBlockByHash(b.pendingBlock.ParentHash())
|
block := b.blockchain.GetBlockByHash(b.pendingBlock.ParentHash())
|
||||||
|
@ -38,6 +38,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestSimulatedBackend(t *testing.T) {
|
func TestSimulatedBackend(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var gasLimit uint64 = 8000029
|
var gasLimit uint64 = 8000029
|
||||||
key, _ := crypto.GenerateKey() // nolint: gosec
|
key, _ := crypto.GenerateKey() // nolint: gosec
|
||||||
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
|
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
|
||||||
@ -121,6 +122,7 @@ func simTestBackend(testAddr common.Address) *SimulatedBackend {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewSimulatedBackend(t *testing.T) {
|
func TestNewSimulatedBackend(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
expectedBal := big.NewInt(10000000000000000)
|
expectedBal := big.NewInt(10000000000000000)
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
@ -142,6 +144,7 @@ func TestNewSimulatedBackend(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestAdjustTime(t *testing.T) {
|
func TestAdjustTime(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
sim := NewSimulatedBackend(
|
sim := NewSimulatedBackend(
|
||||||
core.GenesisAlloc{}, 10000000,
|
core.GenesisAlloc{}, 10000000,
|
||||||
)
|
)
|
||||||
@ -159,6 +162,7 @@ func TestAdjustTime(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewAdjustTimeFail(t *testing.T) {
|
func TestNewAdjustTimeFail(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
defer sim.blockchain.Stop()
|
defer sim.blockchain.Stop()
|
||||||
@ -202,6 +206,7 @@ func TestNewAdjustTimeFail(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBalanceAt(t *testing.T) {
|
func TestBalanceAt(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
expectedBal := big.NewInt(10000000000000000)
|
expectedBal := big.NewInt(10000000000000000)
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
@ -219,6 +224,7 @@ func TestBalanceAt(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBlockByHash(t *testing.T) {
|
func TestBlockByHash(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
sim := NewSimulatedBackend(
|
sim := NewSimulatedBackend(
|
||||||
core.GenesisAlloc{}, 10000000,
|
core.GenesisAlloc{}, 10000000,
|
||||||
)
|
)
|
||||||
@ -240,6 +246,7 @@ func TestBlockByHash(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBlockByNumber(t *testing.T) {
|
func TestBlockByNumber(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
sim := NewSimulatedBackend(
|
sim := NewSimulatedBackend(
|
||||||
core.GenesisAlloc{}, 10000000,
|
core.GenesisAlloc{}, 10000000,
|
||||||
)
|
)
|
||||||
@ -275,6 +282,7 @@ func TestBlockByNumber(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNonceAt(t *testing.T) {
|
func TestNonceAt(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
|
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
@ -328,6 +336,7 @@ func TestNonceAt(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestSendTransaction(t *testing.T) {
|
func TestSendTransaction(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
|
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
@ -362,6 +371,7 @@ func TestSendTransaction(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestTransactionByHash(t *testing.T) {
|
func TestTransactionByHash(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
|
|
||||||
sim := NewSimulatedBackend(
|
sim := NewSimulatedBackend(
|
||||||
@ -416,6 +426,7 @@ func TestTransactionByHash(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestEstimateGas(t *testing.T) {
|
func TestEstimateGas(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
/*
|
/*
|
||||||
pragma solidity ^0.6.4;
|
pragma solidity ^0.6.4;
|
||||||
contract GasEstimation {
|
contract GasEstimation {
|
||||||
@ -535,6 +546,7 @@ func TestEstimateGas(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestEstimateGasWithPrice(t *testing.T) {
|
func TestEstimateGasWithPrice(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
key, _ := crypto.GenerateKey()
|
key, _ := crypto.GenerateKey()
|
||||||
addr := crypto.PubkeyToAddress(key.PublicKey)
|
addr := crypto.PubkeyToAddress(key.PublicKey)
|
||||||
|
|
||||||
@ -625,6 +637,7 @@ func TestEstimateGasWithPrice(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestHeaderByHash(t *testing.T) {
|
func TestHeaderByHash(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
|
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
@ -646,6 +659,7 @@ func TestHeaderByHash(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestHeaderByNumber(t *testing.T) {
|
func TestHeaderByNumber(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
|
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
@ -692,6 +706,7 @@ func TestHeaderByNumber(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestTransactionCount(t *testing.T) {
|
func TestTransactionCount(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
|
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
@ -744,6 +759,7 @@ func TestTransactionCount(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestTransactionInBlock(t *testing.T) {
|
func TestTransactionInBlock(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
|
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
@ -809,6 +825,7 @@ func TestTransactionInBlock(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestPendingNonceAt(t *testing.T) {
|
func TestPendingNonceAt(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
|
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
@ -874,6 +891,7 @@ func TestPendingNonceAt(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestTransactionReceipt(t *testing.T) {
|
func TestTransactionReceipt(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
|
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
@ -908,6 +926,7 @@ func TestTransactionReceipt(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestSuggestGasPrice(t *testing.T) {
|
func TestSuggestGasPrice(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
sim := NewSimulatedBackend(
|
sim := NewSimulatedBackend(
|
||||||
core.GenesisAlloc{},
|
core.GenesisAlloc{},
|
||||||
10000000,
|
10000000,
|
||||||
@ -924,6 +943,7 @@ func TestSuggestGasPrice(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestPendingCodeAt(t *testing.T) {
|
func TestPendingCodeAt(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
defer sim.Close()
|
defer sim.Close()
|
||||||
@ -960,6 +980,7 @@ func TestPendingCodeAt(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCodeAt(t *testing.T) {
|
func TestCodeAt(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
defer sim.Close()
|
defer sim.Close()
|
||||||
@ -997,6 +1018,7 @@ func TestCodeAt(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCodeAtHash(t *testing.T) {
|
func TestCodeAtHash(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
defer sim.Close()
|
defer sim.Close()
|
||||||
@ -1037,6 +1059,7 @@ func TestCodeAtHash(t *testing.T) {
|
|||||||
//
|
//
|
||||||
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
|
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
|
||||||
func TestPendingAndCallContract(t *testing.T) {
|
func TestPendingAndCallContract(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
defer sim.Close()
|
defer sim.Close()
|
||||||
@ -1138,6 +1161,7 @@ contract Reverter {
|
|||||||
}
|
}
|
||||||
}*/
|
}*/
|
||||||
func TestCallContractRevert(t *testing.T) {
|
func TestCallContractRevert(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
defer sim.Close()
|
defer sim.Close()
|
||||||
@ -1233,6 +1257,7 @@ func TestCallContractRevert(t *testing.T) {
|
|||||||
// Since Commit() was called 2n+1 times in total,
|
// Since Commit() was called 2n+1 times in total,
|
||||||
// having a chain length of just n+1 means that a reorg occurred.
|
// having a chain length of just n+1 means that a reorg occurred.
|
||||||
func TestFork(t *testing.T) {
|
func TestFork(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
defer sim.Close()
|
defer sim.Close()
|
||||||
@ -1286,6 +1311,7 @@ const callableBin = "6080604052348015600f57600080fd5b5060998061001e6000396000f3f
|
|||||||
// 9. Re-send the transaction and mine a block.
|
// 9. Re-send the transaction and mine a block.
|
||||||
// 10. Check that the event was reborn.
|
// 10. Check that the event was reborn.
|
||||||
func TestForkLogsReborn(t *testing.T) {
|
func TestForkLogsReborn(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
defer sim.Close()
|
defer sim.Close()
|
||||||
@ -1359,6 +1385,7 @@ func TestForkLogsReborn(t *testing.T) {
|
|||||||
// 5. Mine a block, Re-send the transaction and mine another one.
|
// 5. Mine a block, Re-send the transaction and mine another one.
|
||||||
// 6. Check that the TX is now included in block 2.
|
// 6. Check that the TX is now included in block 2.
|
||||||
func TestForkResendTx(t *testing.T) {
|
func TestForkResendTx(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
defer sim.Close()
|
defer sim.Close()
|
||||||
@ -1395,6 +1422,7 @@ func TestForkResendTx(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCommitReturnValue(t *testing.T) {
|
func TestCommitReturnValue(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
defer sim.Close()
|
defer sim.Close()
|
||||||
@ -1436,6 +1464,7 @@ func TestCommitReturnValue(t *testing.T) {
|
|||||||
// TestAdjustTimeAfterFork ensures that after a fork, AdjustTime uses the pending fork
|
// TestAdjustTimeAfterFork ensures that after a fork, AdjustTime uses the pending fork
|
||||||
// block's parent rather than the canonical head's parent.
|
// block's parent rather than the canonical head's parent.
|
||||||
func TestAdjustTimeAfterFork(t *testing.T) {
|
func TestAdjustTimeAfterFork(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
|
||||||
sim := simTestBackend(testAddr)
|
sim := simTestBackend(testAddr)
|
||||||
defer sim.Close()
|
defer sim.Close()
|
||||||
|
@ -238,7 +238,7 @@ func (c *BoundContract) Transact(opts *TransactOpts, method string, params ...in
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// todo(rjl493456442) check the method is payable or not,
|
// todo(rjl493456442) check whether the method is payable or not,
|
||||||
// reject invalid transaction at the first place
|
// reject invalid transaction at the first place
|
||||||
return c.transact(opts, &c.address, input)
|
return c.transact(opts, &c.address, input)
|
||||||
}
|
}
|
||||||
@ -246,7 +246,7 @@ func (c *BoundContract) Transact(opts *TransactOpts, method string, params ...in
|
|||||||
// RawTransact initiates a transaction with the given raw calldata as the input.
|
// RawTransact initiates a transaction with the given raw calldata as the input.
|
||||||
// It's usually used to initiate transactions for invoking **Fallback** function.
|
// It's usually used to initiate transactions for invoking **Fallback** function.
|
||||||
func (c *BoundContract) RawTransact(opts *TransactOpts, calldata []byte) (*types.Transaction, error) {
|
func (c *BoundContract) RawTransact(opts *TransactOpts, calldata []byte) (*types.Transaction, error) {
|
||||||
// todo(rjl493456442) check the method is payable or not,
|
// todo(rjl493456442) check whether the method is payable or not,
|
||||||
// reject invalid transaction at the first place
|
// reject invalid transaction at the first place
|
||||||
return c.transact(opts, &c.address, calldata)
|
return c.transact(opts, &c.address, calldata)
|
||||||
}
|
}
|
||||||
|
@ -135,6 +135,7 @@ func (mc *mockBlockHashCaller) CallContractAtHash(ctx context.Context, call ethe
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestPassingBlockNumber(t *testing.T) {
|
func TestPassingBlockNumber(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
mc := &mockPendingCaller{
|
mc := &mockPendingCaller{
|
||||||
mockCaller: &mockCaller{
|
mockCaller: &mockCaller{
|
||||||
codeAtBytes: []byte{1, 2, 3},
|
codeAtBytes: []byte{1, 2, 3},
|
||||||
@ -186,6 +187,7 @@ func TestPassingBlockNumber(t *testing.T) {
|
|||||||
const hexData = "0x000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158"
|
const hexData = "0x000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158"
|
||||||
|
|
||||||
func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) {
|
func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
hash := crypto.Keccak256Hash([]byte("testName"))
|
hash := crypto.Keccak256Hash([]byte("testName"))
|
||||||
topics := []common.Hash{
|
topics := []common.Hash{
|
||||||
crypto.Keccak256Hash([]byte("received(string,address,uint256,bytes)")),
|
crypto.Keccak256Hash([]byte("received(string,address,uint256,bytes)")),
|
||||||
@ -207,6 +209,7 @@ func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnpackAnonymousLogIntoMap(t *testing.T) {
|
func TestUnpackAnonymousLogIntoMap(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
mockLog := newMockLog(nil, common.HexToHash("0x0"))
|
mockLog := newMockLog(nil, common.HexToHash("0x0"))
|
||||||
|
|
||||||
abiString := `[{"anonymous":false,"inputs":[{"indexed":false,"name":"amount","type":"uint256"}],"name":"received","type":"event"}]`
|
abiString := `[{"anonymous":false,"inputs":[{"indexed":false,"name":"amount","type":"uint256"}],"name":"received","type":"event"}]`
|
||||||
@ -224,6 +227,7 @@ func TestUnpackAnonymousLogIntoMap(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
|
func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
sliceBytes, err := rlp.EncodeToBytes([]string{"name1", "name2", "name3", "name4"})
|
sliceBytes, err := rlp.EncodeToBytes([]string{"name1", "name2", "name3", "name4"})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -249,6 +253,7 @@ func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) {
|
func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
arrBytes, err := rlp.EncodeToBytes([2]common.Address{common.HexToAddress("0x0"), common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2")})
|
arrBytes, err := rlp.EncodeToBytes([2]common.Address{common.HexToAddress("0x0"), common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2")})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -274,6 +279,7 @@ func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) {
|
func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
mockAddress := common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2")
|
mockAddress := common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2")
|
||||||
addrBytes := mockAddress.Bytes()
|
addrBytes := mockAddress.Bytes()
|
||||||
hash := crypto.Keccak256Hash([]byte("mockFunction(address,uint)"))
|
hash := crypto.Keccak256Hash([]byte("mockFunction(address,uint)"))
|
||||||
@ -300,6 +306,7 @@ func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) {
|
func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
bytes := []byte{1, 2, 3, 4, 5}
|
bytes := []byte{1, 2, 3, 4, 5}
|
||||||
hash := crypto.Keccak256Hash(bytes)
|
hash := crypto.Keccak256Hash(bytes)
|
||||||
topics := []common.Hash{
|
topics := []common.Hash{
|
||||||
@ -322,6 +329,7 @@ func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestTransactGasFee(t *testing.T) {
|
func TestTransactGasFee(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
assert := assert.New(t)
|
assert := assert.New(t)
|
||||||
|
|
||||||
// GasTipCap and GasFeeCap
|
// GasTipCap and GasFeeCap
|
||||||
@ -397,6 +405,7 @@ func newMockLog(topics []common.Hash, txHash common.Hash) types.Log {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCall(t *testing.T) {
|
func TestCall(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var method, methodWithArg = "something", "somethingArrrrg"
|
var method, methodWithArg = "something", "somethingArrrrg"
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name, method string
|
name, method string
|
||||||
@ -572,6 +581,7 @@ func TestCall(t *testing.T) {
|
|||||||
|
|
||||||
// TestCrashers contains some strings which previously caused the abi codec to crash.
|
// TestCrashers contains some strings which previously caused the abi codec to crash.
|
||||||
func TestCrashers(t *testing.T) {
|
func TestCrashers(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"_1"}]}]}]`))
|
abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"_1"}]}]}]`))
|
||||||
abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"&"}]}]}]`))
|
abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"&"}]}]}]`))
|
||||||
abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"----"}]}]}]`))
|
abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"----"}]}]}]`))
|
||||||
|
@ -79,7 +79,7 @@ func isKeyWord(arg string) bool {
|
|||||||
|
|
||||||
// Bind generates a Go wrapper around a contract ABI. This wrapper isn't meant
|
// Bind generates a Go wrapper around a contract ABI. This wrapper isn't meant
|
||||||
// to be used as is in client code, but rather as an intermediate struct which
|
// to be used as is in client code, but rather as an intermediate struct which
|
||||||
// enforces compile time type safety and naming convention opposed to having to
|
// enforces compile time type safety and naming convention as opposed to having to
|
||||||
// manually maintain hard coded strings that break on runtime.
|
// manually maintain hard coded strings that break on runtime.
|
||||||
func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]string, pkg string, lang Lang, libs map[string]string, aliases map[string]string) (string, error) {
|
func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]string, pkg string, lang Lang, libs map[string]string, aliases map[string]string) (string, error) {
|
||||||
var (
|
var (
|
||||||
@ -363,7 +363,7 @@ func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
|
|||||||
// parameters that are not value types i.e. arrays and structs are not
|
// parameters that are not value types i.e. arrays and structs are not
|
||||||
// stored directly but instead a keccak256-hash of an encoding is stored.
|
// stored directly but instead a keccak256-hash of an encoding is stored.
|
||||||
//
|
//
|
||||||
// We only convert stringS and bytes to hash, still need to deal with
|
// We only convert strings and bytes to hash, still need to deal with
|
||||||
// array(both fixed-size and dynamic-size) and struct.
|
// array(both fixed-size and dynamic-size) and struct.
|
||||||
if bound == "string" || bound == "[]byte" {
|
if bound == "string" || bound == "[]byte" {
|
||||||
bound = "common.Hash"
|
bound = "common.Hash"
|
||||||
|
@ -1677,7 +1677,7 @@ var bindTests = []struct {
|
|||||||
}
|
}
|
||||||
sim.Commit()
|
sim.Commit()
|
||||||
|
|
||||||
// This test the existence of the free retreiver call for view and pure functions
|
// This test the existence of the free retriever call for view and pure functions
|
||||||
if num, err := pav.PureFunc(nil); err != nil {
|
if num, err := pav.PureFunc(nil); err != nil {
|
||||||
t.Fatalf("Failed to call anonymous field retriever: %v", err)
|
t.Fatalf("Failed to call anonymous field retriever: %v", err)
|
||||||
} else if num.Cmp(big.NewInt(42)) != 0 {
|
} else if num.Cmp(big.NewInt(42)) != 0 {
|
||||||
@ -2067,6 +2067,7 @@ var bindTests = []struct {
|
|||||||
// Tests that packages generated by the binder can be successfully compiled and
|
// Tests that packages generated by the binder can be successfully compiled and
|
||||||
// the requested tester run against it.
|
// the requested tester run against it.
|
||||||
func TestGolangBindings(t *testing.T) {
|
func TestGolangBindings(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// Skip the test if no Go command can be found
|
// Skip the test if no Go command can be found
|
||||||
gocmd := runtime.GOROOT() + "/bin/go"
|
gocmd := runtime.GOROOT() + "/bin/go"
|
||||||
if !common.FileExist(gocmd) {
|
if !common.FileExist(gocmd) {
|
||||||
|
@ -53,6 +53,7 @@ var waitDeployedTests = map[string]struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestWaitDeployed(t *testing.T) {
|
func TestWaitDeployed(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
for name, test := range waitDeployedTests {
|
for name, test := range waitDeployedTests {
|
||||||
backend := backends.NewSimulatedBackend(
|
backend := backends.NewSimulatedBackend(
|
||||||
core.GenesisAlloc{
|
core.GenesisAlloc{
|
||||||
@ -100,6 +101,7 @@ func TestWaitDeployed(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestWaitDeployedCornerCases(t *testing.T) {
|
func TestWaitDeployedCornerCases(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
backend := backends.NewSimulatedBackend(
|
backend := backends.NewSimulatedBackend(
|
||||||
core.GenesisAlloc{
|
core.GenesisAlloc{
|
||||||
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
|
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
|
||||||
@ -119,9 +121,9 @@ func TestWaitDeployedCornerCases(t *testing.T) {
|
|||||||
defer cancel()
|
defer cancel()
|
||||||
backend.SendTransaction(ctx, tx)
|
backend.SendTransaction(ctx, tx)
|
||||||
backend.Commit()
|
backend.Commit()
|
||||||
notContentCreation := errors.New("tx is not contract creation")
|
notContractCreation := errors.New("tx is not contract creation")
|
||||||
if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != notContentCreation.Error() {
|
if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != notContractCreation.Error() {
|
||||||
t.Errorf("error missmatch: want %q, got %q, ", notContentCreation, err)
|
t.Errorf("error mismatch: want %q, got %q, ", notContractCreation, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a transaction that is not mined.
|
// Create a transaction that is not mined.
|
||||||
@ -131,7 +133,7 @@ func TestWaitDeployedCornerCases(t *testing.T) {
|
|||||||
go func() {
|
go func() {
|
||||||
contextCanceled := errors.New("context canceled")
|
contextCanceled := errors.New("context canceled")
|
||||||
if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != contextCanceled.Error() {
|
if _, err := bind.WaitDeployed(ctx, backend, tx); err.Error() != contextCanceled.Error() {
|
||||||
t.Errorf("error missmatch: want %q, got %q, ", contextCanceled, err)
|
t.Errorf("error mismatch: want %q, got %q, ", contextCanceled, err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
@ -18,7 +18,6 @@ package abi
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
@ -84,10 +83,10 @@ func (e Error) String() string {
|
|||||||
|
|
||||||
func (e *Error) Unpack(data []byte) (interface{}, error) {
|
func (e *Error) Unpack(data []byte) (interface{}, error) {
|
||||||
if len(data) < 4 {
|
if len(data) < 4 {
|
||||||
return "", errors.New("invalid data for unpacking")
|
return "", fmt.Errorf("insufficient data for unpacking: have %d, want at least 4", len(data))
|
||||||
}
|
}
|
||||||
if !bytes.Equal(data[:4], e.ID[:4]) {
|
if !bytes.Equal(data[:4], e.ID[:4]) {
|
||||||
return "", errors.New("invalid data for unpacking")
|
return "", fmt.Errorf("invalid identifier, have %#x want %#x", data[:4], e.ID[:4])
|
||||||
}
|
}
|
||||||
return e.Inputs.Unpack(data[4:])
|
return e.Inputs.Unpack(data[4:])
|
||||||
}
|
}
|
||||||
|
@ -81,6 +81,7 @@ var pledgeData1 = "00000000000000000000000000ce0d46d924cc8437c806721496599fc3ffa
|
|||||||
var mixedCaseData1 = "00000000000000000000000000000000000000000000000000000000000f42400000000000000000000000000000000000000000000000000000020489e8000000000000000000000000000000000000000000000000000000000000000f4241"
|
var mixedCaseData1 = "00000000000000000000000000000000000000000000000000000000000f42400000000000000000000000000000000000000000000000000000020489e8000000000000000000000000000000000000000000000000000000000000000f4241"
|
||||||
|
|
||||||
func TestEventId(t *testing.T) {
|
func TestEventId(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var table = []struct {
|
var table = []struct {
|
||||||
definition string
|
definition string
|
||||||
expectations map[string]common.Hash
|
expectations map[string]common.Hash
|
||||||
@ -112,6 +113,7 @@ func TestEventId(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestEventString(t *testing.T) {
|
func TestEventString(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var table = []struct {
|
var table = []struct {
|
||||||
definition string
|
definition string
|
||||||
expectations map[string]string
|
expectations map[string]string
|
||||||
@ -146,6 +148,7 @@ func TestEventString(t *testing.T) {
|
|||||||
|
|
||||||
// TestEventMultiValueWithArrayUnpack verifies that array fields will be counted after parsing array.
|
// TestEventMultiValueWithArrayUnpack verifies that array fields will be counted after parsing array.
|
||||||
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
|
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
|
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
|
||||||
abi, err := JSON(strings.NewReader(definition))
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@ -161,6 +164,7 @@ func TestEventMultiValueWithArrayUnpack(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestEventTupleUnpack(t *testing.T) {
|
func TestEventTupleUnpack(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
type EventTransfer struct {
|
type EventTransfer struct {
|
||||||
Value *big.Int
|
Value *big.Int
|
||||||
}
|
}
|
||||||
@ -351,6 +355,7 @@ func unpackTestEventData(dest interface{}, hexData string, jsonEvent []byte, ass
|
|||||||
|
|
||||||
// TestEventUnpackIndexed verifies that indexed field will be skipped by event decoder.
|
// TestEventUnpackIndexed verifies that indexed field will be skipped by event decoder.
|
||||||
func TestEventUnpackIndexed(t *testing.T) {
|
func TestEventUnpackIndexed(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
|
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
|
||||||
type testStruct struct {
|
type testStruct struct {
|
||||||
Value1 uint8 // indexed
|
Value1 uint8 // indexed
|
||||||
@ -368,6 +373,7 @@ func TestEventUnpackIndexed(t *testing.T) {
|
|||||||
|
|
||||||
// TestEventIndexedWithArrayUnpack verifies that decoder will not overflow when static array is indexed input.
|
// TestEventIndexedWithArrayUnpack verifies that decoder will not overflow when static array is indexed input.
|
||||||
func TestEventIndexedWithArrayUnpack(t *testing.T) {
|
func TestEventIndexedWithArrayUnpack(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"string"}]}]`
|
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"string"}]}]`
|
||||||
type testStruct struct {
|
type testStruct struct {
|
||||||
Value1 [2]uint8 // indexed
|
Value1 [2]uint8 // indexed
|
||||||
|
@ -117,15 +117,6 @@ func NewMethod(name string, rawName string, funType FunctionType, mutability str
|
|||||||
sig = fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ","))
|
sig = fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ","))
|
||||||
id = crypto.Keccak256([]byte(sig))[:4]
|
id = crypto.Keccak256([]byte(sig))[:4]
|
||||||
}
|
}
|
||||||
// Extract meaningful state mutability of solidity method.
|
|
||||||
// If it's default value, never print it.
|
|
||||||
state := mutability
|
|
||||||
if state == "nonpayable" {
|
|
||||||
state = ""
|
|
||||||
}
|
|
||||||
if state != "" {
|
|
||||||
state = state + " "
|
|
||||||
}
|
|
||||||
identity := fmt.Sprintf("function %v", rawName)
|
identity := fmt.Sprintf("function %v", rawName)
|
||||||
switch funType {
|
switch funType {
|
||||||
case Fallback:
|
case Fallback:
|
||||||
@ -135,7 +126,14 @@ func NewMethod(name string, rawName string, funType FunctionType, mutability str
|
|||||||
case Constructor:
|
case Constructor:
|
||||||
identity = "constructor"
|
identity = "constructor"
|
||||||
}
|
}
|
||||||
str := fmt.Sprintf("%v(%v) %sreturns(%v)", identity, strings.Join(inputNames, ", "), state, strings.Join(outputNames, ", "))
|
var str string
|
||||||
|
// Extract meaningful state mutability of solidity method.
|
||||||
|
// If it's empty string or default value "nonpayable", never print it.
|
||||||
|
if mutability == "" || mutability == "nonpayable" {
|
||||||
|
str = fmt.Sprintf("%v(%v) returns(%v)", identity, strings.Join(inputNames, ", "), strings.Join(outputNames, ", "))
|
||||||
|
} else {
|
||||||
|
str = fmt.Sprintf("%v(%v) %s returns(%v)", identity, strings.Join(inputNames, ", "), mutability, strings.Join(outputNames, ", "))
|
||||||
|
}
|
||||||
|
|
||||||
return Method{
|
return Method{
|
||||||
Name: name,
|
Name: name,
|
||||||
|
@ -35,6 +35,7 @@ const methoddata = `
|
|||||||
]`
|
]`
|
||||||
|
|
||||||
func TestMethodString(t *testing.T) {
|
func TestMethodString(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var table = []struct {
|
var table = []struct {
|
||||||
method string
|
method string
|
||||||
expectation string
|
expectation string
|
||||||
@ -99,6 +100,7 @@ func TestMethodString(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMethodSig(t *testing.T) {
|
func TestMethodSig(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var cases = []struct {
|
var cases = []struct {
|
||||||
method string
|
method string
|
||||||
expect string
|
expect string
|
||||||
|
@ -57,7 +57,7 @@ func packElement(t Type, reflectValue reflect.Value) ([]byte, error) {
|
|||||||
reflectValue = mustArrayToByteSlice(reflectValue)
|
reflectValue = mustArrayToByteSlice(reflectValue)
|
||||||
}
|
}
|
||||||
if reflectValue.Type() != reflect.TypeOf([]byte{}) {
|
if reflectValue.Type() != reflect.TypeOf([]byte{}) {
|
||||||
return []byte{}, errors.New("Bytes type is neither slice nor array")
|
return []byte{}, errors.New("bytes type is neither slice nor array")
|
||||||
}
|
}
|
||||||
return packBytesSlice(reflectValue.Bytes(), reflectValue.Len()), nil
|
return packBytesSlice(reflectValue.Bytes(), reflectValue.Len()), nil
|
||||||
case FixedBytesTy, FunctionTy:
|
case FixedBytesTy, FunctionTy:
|
||||||
@ -66,7 +66,7 @@ func packElement(t Type, reflectValue reflect.Value) ([]byte, error) {
|
|||||||
}
|
}
|
||||||
return common.RightPadBytes(reflectValue.Bytes(), 32), nil
|
return common.RightPadBytes(reflectValue.Bytes(), 32), nil
|
||||||
default:
|
default:
|
||||||
return []byte{}, fmt.Errorf("Could not pack element, unknown type: %v", t.T)
|
return []byte{}, fmt.Errorf("could not pack element, unknown type: %v", t.T)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -32,8 +32,11 @@ import (
|
|||||||
|
|
||||||
// TestPack tests the general pack/unpack tests in packing_test.go
|
// TestPack tests the general pack/unpack tests in packing_test.go
|
||||||
func TestPack(t *testing.T) {
|
func TestPack(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
for i, test := range packUnpackTests {
|
for i, test := range packUnpackTests {
|
||||||
|
i, test := i, test
|
||||||
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
encb, err := hex.DecodeString(test.packed)
|
encb, err := hex.DecodeString(test.packed)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("invalid hex %s: %v", test.packed, err)
|
t.Fatalf("invalid hex %s: %v", test.packed, err)
|
||||||
@ -57,6 +60,7 @@ func TestPack(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMethodPack(t *testing.T) {
|
func TestMethodPack(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
abi, err := JSON(strings.NewReader(jsondata))
|
abi, err := JSON(strings.NewReader(jsondata))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -177,6 +181,7 @@ func TestMethodPack(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestPackNumber(t *testing.T) {
|
func TestPackNumber(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
value reflect.Value
|
value reflect.Value
|
||||||
packed []byte
|
packed []byte
|
||||||
|
@ -134,7 +134,7 @@ func setSlice(dst, src reflect.Value) error {
|
|||||||
dst.Set(slice)
|
dst.Set(slice)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return errors.New("Cannot set slice, destination not settable")
|
return errors.New("cannot set slice, destination not settable")
|
||||||
}
|
}
|
||||||
|
|
||||||
func setArray(dst, src reflect.Value) error {
|
func setArray(dst, src reflect.Value) error {
|
||||||
@ -155,7 +155,7 @@ func setArray(dst, src reflect.Value) error {
|
|||||||
dst.Set(array)
|
dst.Set(array)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return errors.New("Cannot set array, destination not settable")
|
return errors.New("cannot set array, destination not settable")
|
||||||
}
|
}
|
||||||
|
|
||||||
func setStruct(dst, src reflect.Value) error {
|
func setStruct(dst, src reflect.Value) error {
|
||||||
@ -163,7 +163,7 @@ func setStruct(dst, src reflect.Value) error {
|
|||||||
srcField := src.Field(i)
|
srcField := src.Field(i)
|
||||||
dstField := dst.Field(i)
|
dstField := dst.Field(i)
|
||||||
if !dstField.IsValid() || !srcField.IsValid() {
|
if !dstField.IsValid() || !srcField.IsValid() {
|
||||||
return fmt.Errorf("Could not find src field: %v value: %v in destination", srcField.Type().Name(), srcField)
|
return fmt.Errorf("could not find src field: %v value: %v in destination", srcField.Type().Name(), srcField)
|
||||||
}
|
}
|
||||||
if err := set(dstField, srcField); err != nil {
|
if err := set(dstField, srcField); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -170,8 +170,11 @@ var reflectTests = []reflectTest{
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestReflectNameToStruct(t *testing.T) {
|
func TestReflectNameToStruct(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
for _, test := range reflectTests {
|
for _, test := range reflectTests {
|
||||||
|
test := test
|
||||||
t.Run(test.name, func(t *testing.T) {
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))
|
m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))
|
||||||
if len(test.err) > 0 {
|
if len(test.err) > 0 {
|
||||||
if err == nil || err.Error() != test.err {
|
if err == nil || err.Error() != test.err {
|
||||||
@ -192,6 +195,7 @@ func TestReflectNameToStruct(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestConvertType(t *testing.T) {
|
func TestConvertType(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// Test Basic Struct
|
// Test Basic Struct
|
||||||
type T struct {
|
type T struct {
|
||||||
X *big.Int
|
X *big.Int
|
||||||
|
@ -24,6 +24,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestParseSelector(t *testing.T) {
|
func TestParseSelector(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
mkType := func(types ...interface{}) []ArgumentMarshaling {
|
mkType := func(types ...interface{}) []ArgumentMarshaling {
|
||||||
var result []ArgumentMarshaling
|
var result []ArgumentMarshaling
|
||||||
for i, typeOrComponents := range types {
|
for i, typeOrComponents := range types {
|
||||||
|
@ -75,7 +75,7 @@ func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) {
|
|||||||
copy(topic[:], hash[:])
|
copy(topic[:], hash[:])
|
||||||
|
|
||||||
default:
|
default:
|
||||||
// todo(rjl493456442) according solidity documentation, indexed event
|
// todo(rjl493456442) according to solidity documentation, indexed event
|
||||||
// parameters that are not value types i.e. arrays and structs are not
|
// parameters that are not value types i.e. arrays and structs are not
|
||||||
// stored directly but instead a keccak256-hash of an encoding is stored.
|
// stored directly but instead a keccak256-hash of an encoding is stored.
|
||||||
//
|
//
|
||||||
|
@ -26,6 +26,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestMakeTopics(t *testing.T) {
|
func TestMakeTopics(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
type args struct {
|
type args struct {
|
||||||
query [][]interface{}
|
query [][]interface{}
|
||||||
}
|
}
|
||||||
@ -117,7 +118,9 @@ func TestMakeTopics(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
|
tt := tt
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
got, err := MakeTopics(tt.args.query...)
|
got, err := MakeTopics(tt.args.query...)
|
||||||
if (err != nil) != tt.wantErr {
|
if (err != nil) != tt.wantErr {
|
||||||
t.Errorf("makeTopics() error = %v, wantErr %v", err, tt.wantErr)
|
t.Errorf("makeTopics() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
@ -347,10 +350,13 @@ func setupTopicsTests() []topicTest {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestParseTopics(t *testing.T) {
|
func TestParseTopics(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tests := setupTopicsTests()
|
tests := setupTopicsTests()
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
|
tt := tt
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
createObj := tt.args.createObj()
|
createObj := tt.args.createObj()
|
||||||
if err := ParseTopics(createObj, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
|
if err := ParseTopics(createObj, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
|
||||||
t.Errorf("parseTopics() error = %v, wantErr %v", err, tt.wantErr)
|
t.Errorf("parseTopics() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
@ -364,10 +370,13 @@ func TestParseTopics(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestParseTopicsIntoMap(t *testing.T) {
|
func TestParseTopicsIntoMap(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tests := setupTopicsTests()
|
tests := setupTopicsTests()
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
|
tt := tt
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
outMap := make(map[string]interface{})
|
outMap := make(map[string]interface{})
|
||||||
if err := ParseTopicsIntoMap(outMap, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
|
if err := ParseTopicsIntoMap(outMap, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
|
||||||
t.Errorf("parseTopicsIntoMap() error = %v, wantErr %v", err, tt.wantErr)
|
t.Errorf("parseTopicsIntoMap() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
|
@ -31,6 +31,7 @@ type typeWithoutStringer Type
|
|||||||
|
|
||||||
// Tests that all allowed types get recognized by the type parser.
|
// Tests that all allowed types get recognized by the type parser.
|
||||||
func TestTypeRegexp(t *testing.T) {
|
func TestTypeRegexp(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
blob string
|
blob string
|
||||||
components []ArgumentMarshaling
|
components []ArgumentMarshaling
|
||||||
@ -117,6 +118,7 @@ func TestTypeRegexp(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestTypeCheck(t *testing.T) {
|
func TestTypeCheck(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
for i, test := range []struct {
|
for i, test := range []struct {
|
||||||
typ string
|
typ string
|
||||||
components []ArgumentMarshaling
|
components []ArgumentMarshaling
|
||||||
@ -308,6 +310,7 @@ func TestTypeCheck(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestInternalType(t *testing.T) {
|
func TestInternalType(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
components := []ArgumentMarshaling{{Name: "a", Type: "int64"}}
|
components := []ArgumentMarshaling{{Name: "a", Type: "int64"}}
|
||||||
internalType := "struct a.b[]"
|
internalType := "struct a.b[]"
|
||||||
kind := Type{
|
kind := Type{
|
||||||
@ -332,6 +335,7 @@ func TestInternalType(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestGetTypeSize(t *testing.T) {
|
func TestGetTypeSize(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var testCases = []struct {
|
var testCases = []struct {
|
||||||
typ string
|
typ string
|
||||||
components []ArgumentMarshaling
|
components []ArgumentMarshaling
|
||||||
@ -368,6 +372,7 @@ func TestGetTypeSize(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewFixedBytesOver32(t *testing.T) {
|
func TestNewFixedBytesOver32(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, err := NewType("bytes4096", "", nil)
|
_, err := NewType("bytes4096", "", nil)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Errorf("fixed bytes with size over 32 is not spec'd")
|
t.Errorf("fixed bytes with size over 32 is not spec'd")
|
||||||
|
@ -33,6 +33,7 @@ import (
|
|||||||
|
|
||||||
// TestUnpack tests the general pack/unpack tests in packing_test.go
|
// TestUnpack tests the general pack/unpack tests in packing_test.go
|
||||||
func TestUnpack(t *testing.T) {
|
func TestUnpack(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
for i, test := range packUnpackTests {
|
for i, test := range packUnpackTests {
|
||||||
t.Run(strconv.Itoa(i)+" "+test.def, func(t *testing.T) {
|
t.Run(strconv.Itoa(i)+" "+test.def, func(t *testing.T) {
|
||||||
//Unpack
|
//Unpack
|
||||||
@ -206,13 +207,13 @@ var unpackTests = []unpackTest{
|
|||||||
def: `[{"type":"bool"}]`,
|
def: `[{"type":"bool"}]`,
|
||||||
enc: "",
|
enc: "",
|
||||||
want: false,
|
want: false,
|
||||||
err: "abi: attempting to unmarshall an empty string while arguments are expected",
|
err: "abi: attempting to unmarshal an empty string while arguments are expected",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
def: `[{"type":"bytes32","indexed":true},{"type":"uint256","indexed":false}]`,
|
def: `[{"type":"bytes32","indexed":true},{"type":"uint256","indexed":false}]`,
|
||||||
enc: "",
|
enc: "",
|
||||||
want: false,
|
want: false,
|
||||||
err: "abi: attempting to unmarshall an empty string while arguments are expected",
|
err: "abi: attempting to unmarshal an empty string while arguments are expected",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
def: `[{"type":"bool","indexed":true},{"type":"uint64","indexed":true}]`,
|
def: `[{"type":"bool","indexed":true},{"type":"uint64","indexed":true}]`,
|
||||||
@ -224,6 +225,7 @@ var unpackTests = []unpackTest{
|
|||||||
// TestLocalUnpackTests runs test specially designed only for unpacking.
|
// TestLocalUnpackTests runs test specially designed only for unpacking.
|
||||||
// All test cases that can be used to test packing and unpacking should move to packing_test.go
|
// All test cases that can be used to test packing and unpacking should move to packing_test.go
|
||||||
func TestLocalUnpackTests(t *testing.T) {
|
func TestLocalUnpackTests(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
for i, test := range unpackTests {
|
for i, test := range unpackTests {
|
||||||
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
||||||
//Unpack
|
//Unpack
|
||||||
@ -251,6 +253,7 @@ func TestLocalUnpackTests(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnpackIntoInterfaceSetDynamicArrayOutput(t *testing.T) {
|
func TestUnpackIntoInterfaceSetDynamicArrayOutput(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
abi, err := JSON(strings.NewReader(`[{"constant":true,"inputs":[],"name":"testDynamicFixedBytes15","outputs":[{"name":"","type":"bytes15[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"testDynamicFixedBytes32","outputs":[{"name":"","type":"bytes32[]"}],"payable":false,"stateMutability":"view","type":"function"}]`))
|
abi, err := JSON(strings.NewReader(`[{"constant":true,"inputs":[],"name":"testDynamicFixedBytes15","outputs":[{"name":"","type":"bytes15[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"testDynamicFixedBytes32","outputs":[{"name":"","type":"bytes32[]"}],"payable":false,"stateMutability":"view","type":"function"}]`))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -321,6 +324,7 @@ func methodMultiReturn(require *require.Assertions) (ABI, []byte, methodMultiOut
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMethodMultiReturn(t *testing.T) {
|
func TestMethodMultiReturn(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
type reversed struct {
|
type reversed struct {
|
||||||
String string
|
String string
|
||||||
Int *big.Int
|
Int *big.Int
|
||||||
@ -400,6 +404,7 @@ func TestMethodMultiReturn(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiReturnWithArray(t *testing.T) {
|
func TestMultiReturnWithArray(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"type": "uint64[3]"}, {"type": "uint64"}]}]`
|
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"type": "uint64[3]"}, {"type": "uint64"}]}]`
|
||||||
abi, err := JSON(strings.NewReader(definition))
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -423,6 +428,7 @@ func TestMultiReturnWithArray(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiReturnWithStringArray(t *testing.T) {
|
func TestMultiReturnWithStringArray(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]`
|
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]`
|
||||||
abi, err := JSON(strings.NewReader(definition))
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -453,6 +459,7 @@ func TestMultiReturnWithStringArray(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiReturnWithStringSlice(t *testing.T) {
|
func TestMultiReturnWithStringSlice(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]`
|
const definition = `[{"name" : "multi", "type": "function", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]`
|
||||||
abi, err := JSON(strings.NewReader(definition))
|
abi, err := JSON(strings.NewReader(definition))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -485,6 +492,7 @@ func TestMultiReturnWithStringSlice(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
|
func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// Similar to TestMultiReturnWithArray, but with a special case in mind:
|
// Similar to TestMultiReturnWithArray, but with a special case in mind:
|
||||||
// values of nested static arrays count towards the size as well, and any element following
|
// values of nested static arrays count towards the size as well, and any element following
|
||||||
// after such nested array argument should be read with the correct offset,
|
// after such nested array argument should be read with the correct offset,
|
||||||
@ -525,6 +533,7 @@ func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnmarshal(t *testing.T) {
|
func TestUnmarshal(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const definition = `[
|
const definition = `[
|
||||||
{ "name" : "int", "type": "function", "outputs": [ { "type": "uint256" } ] },
|
{ "name" : "int", "type": "function", "outputs": [ { "type": "uint256" } ] },
|
||||||
{ "name" : "bool", "type": "function", "outputs": [ { "type": "bool" } ] },
|
{ "name" : "bool", "type": "function", "outputs": [ { "type": "bool" } ] },
|
||||||
@ -774,6 +783,7 @@ func TestUnmarshal(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnpackTuple(t *testing.T) {
|
func TestUnpackTuple(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
const simpleTuple = `[{"name":"tuple","type":"function","outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]`
|
const simpleTuple = `[{"name":"tuple","type":"function","outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]`
|
||||||
abi, err := JSON(strings.NewReader(simpleTuple))
|
abi, err := JSON(strings.NewReader(simpleTuple))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -876,6 +886,7 @@ func TestUnpackTuple(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestOOMMaliciousInput(t *testing.T) {
|
func TestOOMMaliciousInput(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
oomTests := []unpackTest{
|
oomTests := []unpackTest{
|
||||||
{
|
{
|
||||||
def: `[{"type": "uint8[]"}]`,
|
def: `[{"type": "uint8[]"}]`,
|
||||||
@ -946,6 +957,7 @@ func TestOOMMaliciousInput(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestPackAndUnpackIncompatibleNumber(t *testing.T) {
|
func TestPackAndUnpackIncompatibleNumber(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var encodeABI Arguments
|
var encodeABI Arguments
|
||||||
uint256Ty, err := NewType("uint256", "", nil)
|
uint256Ty, err := NewType("uint256", "", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -24,6 +24,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestTextHash(t *testing.T) {
|
func TestTextHash(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
hash := TextHash([]byte("Hello Joe"))
|
hash := TextHash([]byte("Hello Joe"))
|
||||||
want := hexutil.MustDecode("0xa080337ae51c4e064c189e113edd0ba391df9206e2f49db658bb32cf2911730b")
|
want := hexutil.MustDecode("0xa080337ae51c4e064c189e113edd0ba391df9206e2f49db658bb32cf2911730b")
|
||||||
if !bytes.Equal(hash, want) {
|
if !bytes.Equal(hash, want) {
|
||||||
|
@ -25,6 +25,7 @@ import (
|
|||||||
// Tests that HD derivation paths can be correctly parsed into our internal binary
|
// Tests that HD derivation paths can be correctly parsed into our internal binary
|
||||||
// representation.
|
// representation.
|
||||||
func TestHDPathParsing(t *testing.T) {
|
func TestHDPathParsing(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
input string
|
input string
|
||||||
output DerivationPath
|
output DerivationPath
|
||||||
@ -89,6 +90,7 @@ func testDerive(t *testing.T, next func() DerivationPath, expected []string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestHdPathIteration(t *testing.T) {
|
func TestHdPathIteration(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testDerive(t, DefaultIterator(DefaultBaseDerivationPath),
|
testDerive(t, DefaultIterator(DefaultBaseDerivationPath),
|
||||||
[]string{
|
[]string{
|
||||||
"m/44'/60'/0'/0/0", "m/44'/60'/0'/0/1",
|
"m/44'/60'/0'/0/0", "m/44'/60'/0'/0/1",
|
||||||
|
@ -68,7 +68,7 @@ func waitWatcherStart(ks *KeyStore) bool {
|
|||||||
|
|
||||||
func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
|
func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
|
||||||
var list []accounts.Account
|
var list []accounts.Account
|
||||||
for t0 := time.Now(); time.Since(t0) < 5*time.Second; time.Sleep(200 * time.Millisecond) {
|
for t0 := time.Now(); time.Since(t0) < 5*time.Second; time.Sleep(100 * time.Millisecond) {
|
||||||
list = ks.Accounts()
|
list = ks.Accounts()
|
||||||
if reflect.DeepEqual(list, wantAccounts) {
|
if reflect.DeepEqual(list, wantAccounts) {
|
||||||
// ks should have also received change notifications
|
// ks should have also received change notifications
|
||||||
@ -152,6 +152,7 @@ func TestWatchNoDir(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCacheInitialReload(t *testing.T) {
|
func TestCacheInitialReload(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
cache, _ := newAccountCache(cachetestDir)
|
cache, _ := newAccountCache(cachetestDir)
|
||||||
accounts := cache.accounts()
|
accounts := cache.accounts()
|
||||||
if !reflect.DeepEqual(accounts, cachetestAccounts) {
|
if !reflect.DeepEqual(accounts, cachetestAccounts) {
|
||||||
@ -160,6 +161,7 @@ func TestCacheInitialReload(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCacheAddDeleteOrder(t *testing.T) {
|
func TestCacheAddDeleteOrder(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
cache, _ := newAccountCache("testdata/no-such-dir")
|
cache, _ := newAccountCache("testdata/no-such-dir")
|
||||||
cache.watcher.running = true // prevent unexpected reloads
|
cache.watcher.running = true // prevent unexpected reloads
|
||||||
|
|
||||||
@ -244,6 +246,7 @@ func TestCacheAddDeleteOrder(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCacheFind(t *testing.T) {
|
func TestCacheFind(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
dir := filepath.Join("testdata", "dir")
|
dir := filepath.Join("testdata", "dir")
|
||||||
cache, _ := newAccountCache(dir)
|
cache, _ := newAccountCache(dir)
|
||||||
cache.watcher.running = true // prevent unexpected reloads
|
cache.watcher.running = true // prevent unexpected reloads
|
||||||
@ -350,7 +353,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
// needed so that modTime of `file` is different to its current value after forceCopyFile
|
// needed so that modTime of `file` is different to its current value after forceCopyFile
|
||||||
time.Sleep(time.Second)
|
os.Chtimes(file, time.Now().Add(-time.Second), time.Now().Add(-time.Second))
|
||||||
|
|
||||||
// Now replace file contents
|
// Now replace file contents
|
||||||
if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
|
if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
|
||||||
@ -366,7 +369,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// needed so that modTime of `file` is different to its current value after forceCopyFile
|
// needed so that modTime of `file` is different to its current value after forceCopyFile
|
||||||
time.Sleep(time.Second)
|
os.Chtimes(file, time.Now().Add(-time.Second), time.Now().Add(-time.Second))
|
||||||
|
|
||||||
// Now replace file contents again
|
// Now replace file contents again
|
||||||
if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
|
if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
|
||||||
@ -382,7 +385,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// needed so that modTime of `file` is different to its current value after os.WriteFile
|
// needed so that modTime of `file` is different to its current value after os.WriteFile
|
||||||
time.Sleep(time.Second)
|
os.Chtimes(file, time.Now().Add(-time.Second), time.Now().Add(-time.Second))
|
||||||
|
|
||||||
// Now replace file contents with crap
|
// Now replace file contents with crap
|
||||||
if err := os.WriteFile(file, []byte("foo"), 0600); err != nil {
|
if err := os.WriteFile(file, []byte("foo"), 0600); err != nil {
|
||||||
|
@ -16,10 +16,19 @@
|
|||||||
|
|
||||||
package keystore
|
package keystore
|
||||||
|
|
||||||
import "testing"
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
func Fuzz(f *testing.F) {
|
func FuzzPassword(f *testing.F) {
|
||||||
f.Fuzz(func(t *testing.T, data []byte) {
|
f.Fuzz(func(t *testing.T, password string) {
|
||||||
fuzz(data)
|
ks := NewKeyStore(t.TempDir(), LightScryptN, LightScryptP)
|
||||||
|
a, err := ks.NewAccount(password)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := ks.Unlock(a, password); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
@ -36,6 +36,7 @@ import (
|
|||||||
var testSigData = make([]byte, 32)
|
var testSigData = make([]byte, 32)
|
||||||
|
|
||||||
func TestKeyStore(t *testing.T) {
|
func TestKeyStore(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
dir, ks := tmpKeyStore(t, true)
|
dir, ks := tmpKeyStore(t, true)
|
||||||
|
|
||||||
a, err := ks.NewAccount("foo")
|
a, err := ks.NewAccount("foo")
|
||||||
@ -70,6 +71,7 @@ func TestKeyStore(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestSign(t *testing.T) {
|
func TestSign(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStore(t, true)
|
_, ks := tmpKeyStore(t, true)
|
||||||
|
|
||||||
pass := "" // not used but required by API
|
pass := "" // not used but required by API
|
||||||
@ -86,6 +88,7 @@ func TestSign(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestSignWithPassphrase(t *testing.T) {
|
func TestSignWithPassphrase(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStore(t, true)
|
_, ks := tmpKeyStore(t, true)
|
||||||
|
|
||||||
pass := "passwd"
|
pass := "passwd"
|
||||||
@ -280,6 +283,7 @@ type walletEvent struct {
|
|||||||
// Tests that wallet notifications and correctly fired when accounts are added
|
// Tests that wallet notifications and correctly fired when accounts are added
|
||||||
// or deleted from the keystore.
|
// or deleted from the keystore.
|
||||||
func TestWalletNotifications(t *testing.T) {
|
func TestWalletNotifications(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStore(t, false)
|
_, ks := tmpKeyStore(t, false)
|
||||||
|
|
||||||
// Subscribe to the wallet feed and collect events.
|
// Subscribe to the wallet feed and collect events.
|
||||||
@ -341,6 +345,7 @@ func TestWalletNotifications(t *testing.T) {
|
|||||||
|
|
||||||
// TestImportExport tests the import functionality of a keystore.
|
// TestImportExport tests the import functionality of a keystore.
|
||||||
func TestImportECDSA(t *testing.T) {
|
func TestImportECDSA(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStore(t, true)
|
_, ks := tmpKeyStore(t, true)
|
||||||
key, err := crypto.GenerateKey()
|
key, err := crypto.GenerateKey()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -359,6 +364,7 @@ func TestImportECDSA(t *testing.T) {
|
|||||||
|
|
||||||
// TestImportECDSA tests the import and export functionality of a keystore.
|
// TestImportECDSA tests the import and export functionality of a keystore.
|
||||||
func TestImportExport(t *testing.T) {
|
func TestImportExport(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStore(t, true)
|
_, ks := tmpKeyStore(t, true)
|
||||||
acc, err := ks.NewAccount("old")
|
acc, err := ks.NewAccount("old")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -387,6 +393,7 @@ func TestImportExport(t *testing.T) {
|
|||||||
// TestImportRace tests the keystore on races.
|
// TestImportRace tests the keystore on races.
|
||||||
// This test should fail under -race if importing races.
|
// This test should fail under -race if importing races.
|
||||||
func TestImportRace(t *testing.T) {
|
func TestImportRace(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStore(t, true)
|
_, ks := tmpKeyStore(t, true)
|
||||||
acc, err := ks.NewAccount("old")
|
acc, err := ks.NewAccount("old")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -30,6 +30,7 @@ const (
|
|||||||
|
|
||||||
// Tests that a json key file can be decrypted and encrypted in multiple rounds.
|
// Tests that a json key file can be decrypted and encrypted in multiple rounds.
|
||||||
func TestKeyEncryptDecrypt(t *testing.T) {
|
func TestKeyEncryptDecrypt(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
keyjson, err := os.ReadFile("testdata/very-light-scrypt.json")
|
keyjson, err := os.ReadFile("testdata/very-light-scrypt.json")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -54,7 +55,7 @@ func TestKeyEncryptDecrypt(t *testing.T) {
|
|||||||
// Recrypt with a new password and start over
|
// Recrypt with a new password and start over
|
||||||
password += "new data appended" // nolint: gosec
|
password += "new data appended" // nolint: gosec
|
||||||
if keyjson, err = EncryptKey(key, password, veryLightScryptN, veryLightScryptP); err != nil {
|
if keyjson, err = EncryptKey(key, password, veryLightScryptN, veryLightScryptP); err != nil {
|
||||||
t.Errorf("test %d: failed to recrypt key %v", i, err)
|
t.Errorf("test %d: failed to re-encrypt key %v", i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -40,6 +40,7 @@ func tmpKeyStoreIface(t *testing.T, encrypted bool) (dir string, ks keyStore) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestKeyStorePlain(t *testing.T) {
|
func TestKeyStorePlain(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStoreIface(t, false)
|
_, ks := tmpKeyStoreIface(t, false)
|
||||||
|
|
||||||
pass := "" // not used but required by API
|
pass := "" // not used but required by API
|
||||||
@ -60,6 +61,7 @@ func TestKeyStorePlain(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestKeyStorePassphrase(t *testing.T) {
|
func TestKeyStorePassphrase(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStoreIface(t, true)
|
_, ks := tmpKeyStoreIface(t, true)
|
||||||
|
|
||||||
pass := "foo"
|
pass := "foo"
|
||||||
@ -80,6 +82,7 @@ func TestKeyStorePassphrase(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestKeyStorePassphraseDecryptionFail(t *testing.T) {
|
func TestKeyStorePassphraseDecryptionFail(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, ks := tmpKeyStoreIface(t, true)
|
_, ks := tmpKeyStoreIface(t, true)
|
||||||
|
|
||||||
pass := "foo"
|
pass := "foo"
|
||||||
@ -93,6 +96,7 @@ func TestKeyStorePassphraseDecryptionFail(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestImportPreSaleKey(t *testing.T) {
|
func TestImportPreSaleKey(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
dir, ks := tmpKeyStoreIface(t, true)
|
dir, ks := tmpKeyStoreIface(t, true)
|
||||||
|
|
||||||
// file content of a presale key file generated with:
|
// file content of a presale key file generated with:
|
||||||
|
@ -125,7 +125,7 @@ func (w *watcher) loop() {
|
|||||||
if !ok {
|
if !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
log.Info("Filsystem watcher error", "err", err)
|
log.Info("Filesystem watcher error", "err", err)
|
||||||
case <-debounce.C:
|
case <-debounce.C:
|
||||||
w.ac.scanAccounts()
|
w.ac.scanAccounts()
|
||||||
rescanTriggered = false
|
rescanTriggered = false
|
||||||
|
@ -776,16 +776,16 @@ func (w *Wallet) findAccountPath(account accounts.Account) (accounts.DerivationP
|
|||||||
return nil, fmt.Errorf("scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
|
return nil, fmt.Errorf("scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
|
||||||
}
|
}
|
||||||
|
|
||||||
parts := strings.SplitN(account.URL.Path, "/", 2)
|
url, path, found := strings.Cut(account.URL.Path, "/")
|
||||||
if len(parts) != 2 {
|
if !found {
|
||||||
return nil, fmt.Errorf("invalid URL format: %s", account.URL)
|
return nil, fmt.Errorf("invalid URL format: %s", account.URL)
|
||||||
}
|
}
|
||||||
|
|
||||||
if parts[0] != fmt.Sprintf("%x", w.PublicKey[1:3]) {
|
if url != fmt.Sprintf("%x", w.PublicKey[1:3]) {
|
||||||
return nil, fmt.Errorf("URL %s is not for this wallet", account.URL)
|
return nil, fmt.Errorf("URL %s is not for this wallet", account.URL)
|
||||||
}
|
}
|
||||||
|
|
||||||
return accounts.ParseDerivationPath(parts[1])
|
return accounts.ParseDerivationPath(path)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Session represents a secured communication session with the wallet.
|
// Session represents a secured communication session with the wallet.
|
||||||
|
@ -21,6 +21,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestURLParsing(t *testing.T) {
|
func TestURLParsing(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
url, err := parseURL("https://ethereum.org")
|
url, err := parseURL("https://ethereum.org")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("unexpected error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
@ -40,6 +41,7 @@ func TestURLParsing(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestURLString(t *testing.T) {
|
func TestURLString(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
url := URL{Scheme: "https", Path: "ethereum.org"}
|
url := URL{Scheme: "https", Path: "ethereum.org"}
|
||||||
if url.String() != "https://ethereum.org" {
|
if url.String() != "https://ethereum.org" {
|
||||||
t.Errorf("expected: %v, got: %v", "https://ethereum.org", url.String())
|
t.Errorf("expected: %v, got: %v", "https://ethereum.org", url.String())
|
||||||
@ -52,10 +54,11 @@ func TestURLString(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestURLMarshalJSON(t *testing.T) {
|
func TestURLMarshalJSON(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
url := URL{Scheme: "https", Path: "ethereum.org"}
|
url := URL{Scheme: "https", Path: "ethereum.org"}
|
||||||
json, err := url.MarshalJSON()
|
json, err := url.MarshalJSON()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("unexpcted error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
}
|
}
|
||||||
if string(json) != "\"https://ethereum.org\"" {
|
if string(json) != "\"https://ethereum.org\"" {
|
||||||
t.Errorf("expected: %v, got: %v", "\"https://ethereum.org\"", string(json))
|
t.Errorf("expected: %v, got: %v", "\"https://ethereum.org\"", string(json))
|
||||||
@ -63,10 +66,11 @@ func TestURLMarshalJSON(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestURLUnmarshalJSON(t *testing.T) {
|
func TestURLUnmarshalJSON(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
url := &URL{}
|
url := &URL{}
|
||||||
err := url.UnmarshalJSON([]byte("\"https://ethereum.org\""))
|
err := url.UnmarshalJSON([]byte("\"https://ethereum.org\""))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("unexpcted error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
}
|
}
|
||||||
if url.Scheme != "https" {
|
if url.Scheme != "https" {
|
||||||
t.Errorf("expected: %v, got: %v", "https", url.Scheme)
|
t.Errorf("expected: %v, got: %v", "https", url.Scheme)
|
||||||
@ -77,6 +81,7 @@ func TestURLUnmarshalJSON(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestURLComparison(t *testing.T) {
|
func TestURLComparison(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
urlA URL
|
urlA URL
|
||||||
urlB URL
|
urlB URL
|
||||||
|
125
beacon/light/canonical.go
Normal file
125
beacon/light/canonical.go
Normal file
@ -0,0 +1,125 @@
|
|||||||
|
// Copyright 2023 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package light
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common/lru"
|
||||||
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// canonicalStore stores instances of the given type in a database and caches
|
||||||
|
// them in memory, associated with a continuous range of period numbers.
|
||||||
|
// Note: canonicalStore is not thread safe and it is the caller's responsibility
|
||||||
|
// to avoid concurrent access.
|
||||||
|
type canonicalStore[T any] struct {
|
||||||
|
keyPrefix []byte
|
||||||
|
periods periodRange
|
||||||
|
cache *lru.Cache[uint64, T]
|
||||||
|
}
|
||||||
|
|
||||||
|
// newCanonicalStore creates a new canonicalStore and loads all keys associated
|
||||||
|
// with the keyPrefix in order to determine the ranges available in the database.
|
||||||
|
func newCanonicalStore[T any](db ethdb.Iteratee, keyPrefix []byte) (*canonicalStore[T], error) {
|
||||||
|
cs := &canonicalStore[T]{
|
||||||
|
keyPrefix: keyPrefix,
|
||||||
|
cache: lru.NewCache[uint64, T](100),
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
iter = db.NewIterator(keyPrefix, nil)
|
||||||
|
kl = len(keyPrefix)
|
||||||
|
first = true
|
||||||
|
)
|
||||||
|
defer iter.Release()
|
||||||
|
|
||||||
|
for iter.Next() {
|
||||||
|
if len(iter.Key()) != kl+8 {
|
||||||
|
log.Warn("Invalid key length in the canonical chain database", "key", fmt.Sprintf("%#x", iter.Key()))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
period := binary.BigEndian.Uint64(iter.Key()[kl : kl+8])
|
||||||
|
if first {
|
||||||
|
cs.periods.Start = period
|
||||||
|
} else if cs.periods.End != period {
|
||||||
|
return nil, fmt.Errorf("gap in the canonical chain database between periods %d and %d", cs.periods.End, period-1)
|
||||||
|
}
|
||||||
|
first = false
|
||||||
|
cs.periods.End = period + 1
|
||||||
|
}
|
||||||
|
return cs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// databaseKey returns the database key belonging to the given period.
|
||||||
|
func (cs *canonicalStore[T]) databaseKey(period uint64) []byte {
|
||||||
|
return binary.BigEndian.AppendUint64(append([]byte{}, cs.keyPrefix...), period)
|
||||||
|
}
|
||||||
|
|
||||||
|
// add adds the given item to the database. It also ensures that the range remains
|
||||||
|
// continuous. Can be used either with a batch or database backend.
|
||||||
|
func (cs *canonicalStore[T]) add(backend ethdb.KeyValueWriter, period uint64, value T) error {
|
||||||
|
if !cs.periods.canExpand(period) {
|
||||||
|
return fmt.Errorf("period expansion is not allowed, first: %d, next: %d, period: %d", cs.periods.Start, cs.periods.End, period)
|
||||||
|
}
|
||||||
|
enc, err := rlp.EncodeToBytes(value)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := backend.Put(cs.databaseKey(period), enc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cs.cache.Add(period, value)
|
||||||
|
cs.periods.expand(period)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// deleteFrom removes items starting from the given period.
|
||||||
|
func (cs *canonicalStore[T]) deleteFrom(db ethdb.KeyValueWriter, fromPeriod uint64) (deleted periodRange) {
|
||||||
|
keepRange, deleteRange := cs.periods.split(fromPeriod)
|
||||||
|
deleteRange.each(func(period uint64) {
|
||||||
|
db.Delete(cs.databaseKey(period))
|
||||||
|
cs.cache.Remove(period)
|
||||||
|
})
|
||||||
|
cs.periods = keepRange
|
||||||
|
return deleteRange
|
||||||
|
}
|
||||||
|
|
||||||
|
// get returns the item at the given period or the null value of the given type
|
||||||
|
// if no item is present.
|
||||||
|
func (cs *canonicalStore[T]) get(backend ethdb.KeyValueReader, period uint64) (T, bool) {
|
||||||
|
var null, value T
|
||||||
|
if !cs.periods.contains(period) {
|
||||||
|
return null, false
|
||||||
|
}
|
||||||
|
if value, ok := cs.cache.Get(period); ok {
|
||||||
|
return value, true
|
||||||
|
}
|
||||||
|
enc, err := backend.Get(cs.databaseKey(period))
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Canonical store value not found", "period", period, "start", cs.periods.Start, "end", cs.periods.End)
|
||||||
|
return null, false
|
||||||
|
}
|
||||||
|
if err := rlp.DecodeBytes(enc, &value); err != nil {
|
||||||
|
log.Error("Error decoding canonical store value", "error", err)
|
||||||
|
return null, false
|
||||||
|
}
|
||||||
|
cs.cache.Add(period, value)
|
||||||
|
return value, true
|
||||||
|
}
|
514
beacon/light/committee_chain.go
Normal file
514
beacon/light/committee_chain.go
Normal file
@ -0,0 +1,514 @@
|
|||||||
|
// Copyright 2023 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package light
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/beacon/params"
|
||||||
|
"github.com/ethereum/go-ethereum/beacon/types"
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/common/lru"
|
||||||
|
"github.com/ethereum/go-ethereum/common/mclock"
|
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrNeedCommittee = errors.New("sync committee required")
|
||||||
|
ErrInvalidUpdate = errors.New("invalid committee update")
|
||||||
|
ErrInvalidPeriod = errors.New("invalid update period")
|
||||||
|
ErrWrongCommitteeRoot = errors.New("wrong committee root")
|
||||||
|
ErrCannotReorg = errors.New("can not reorg committee chain")
|
||||||
|
)
|
||||||
|
|
||||||
|
// CommitteeChain is a passive data structure that can validate, hold and update
|
||||||
|
// a chain of beacon light sync committees and updates. It requires at least one
|
||||||
|
// externally set fixed committee root at the beginning of the chain which can
|
||||||
|
// be set either based on a BootstrapData or a trusted source (a local beacon
|
||||||
|
// full node). This makes the structure useful for both light client and light
|
||||||
|
// server setups.
|
||||||
|
//
|
||||||
|
// It always maintains the following consistency constraints:
|
||||||
|
// - a committee can only be present if its root hash matches an existing fixed
|
||||||
|
// root or if it is proven by an update at the previous period
|
||||||
|
// - an update can only be present if a committee is present at the same period
|
||||||
|
// and the update signature is valid and has enough participants.
|
||||||
|
// The committee at the next period (proven by the update) should also be
|
||||||
|
// present (note that this means they can only be added together if neither
|
||||||
|
// is present yet). If a fixed root is present at the next period then the
|
||||||
|
// update can only be present if it proves the same committee root.
|
||||||
|
//
|
||||||
|
// Once synced to the current sync period, CommitteeChain can also validate
|
||||||
|
// signed beacon headers.
|
||||||
|
type CommitteeChain struct {
|
||||||
|
// chainmu guards against concurrent access to the canonicalStore structures
|
||||||
|
// (updates, committees, fixedCommitteeRoots) and ensures that they stay consistent
|
||||||
|
// with each other and with committeeCache.
|
||||||
|
chainmu sync.RWMutex
|
||||||
|
db ethdb.KeyValueStore
|
||||||
|
updates *canonicalStore[*types.LightClientUpdate]
|
||||||
|
committees *canonicalStore[*types.SerializedSyncCommittee]
|
||||||
|
fixedCommitteeRoots *canonicalStore[common.Hash]
|
||||||
|
committeeCache *lru.Cache[uint64, syncCommittee] // cache deserialized committees
|
||||||
|
|
||||||
|
clock mclock.Clock // monotonic clock (simulated clock in tests)
|
||||||
|
unixNano func() int64 // system clock (simulated clock in tests)
|
||||||
|
sigVerifier committeeSigVerifier // BLS sig verifier (dummy verifier in tests)
|
||||||
|
|
||||||
|
config *types.ChainConfig
|
||||||
|
signerThreshold int
|
||||||
|
minimumUpdateScore types.UpdateScore
|
||||||
|
enforceTime bool // enforceTime specifies whether the age of a signed header should be checked
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCommitteeChain creates a new CommitteeChain.
|
||||||
|
func NewCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool) *CommitteeChain {
|
||||||
|
return newCommitteeChain(db, config, signerThreshold, enforceTime, blsVerifier{}, &mclock.System{}, func() int64 { return time.Now().UnixNano() })
|
||||||
|
}
|
||||||
|
|
||||||
|
// newCommitteeChain creates a new CommitteeChain with the option of replacing the
|
||||||
|
// clock source and signature verification for testing purposes.
|
||||||
|
func newCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool, sigVerifier committeeSigVerifier, clock mclock.Clock, unixNano func() int64) *CommitteeChain {
|
||||||
|
s := &CommitteeChain{
|
||||||
|
committeeCache: lru.NewCache[uint64, syncCommittee](10),
|
||||||
|
db: db,
|
||||||
|
sigVerifier: sigVerifier,
|
||||||
|
clock: clock,
|
||||||
|
unixNano: unixNano,
|
||||||
|
config: config,
|
||||||
|
signerThreshold: signerThreshold,
|
||||||
|
enforceTime: enforceTime,
|
||||||
|
minimumUpdateScore: types.UpdateScore{
|
||||||
|
SignerCount: uint32(signerThreshold),
|
||||||
|
SubPeriodIndex: params.SyncPeriodLength / 16,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var err1, err2, err3 error
|
||||||
|
if s.fixedCommitteeRoots, err1 = newCanonicalStore[common.Hash](db, rawdb.FixedCommitteeRootKey); err1 != nil {
|
||||||
|
log.Error("Error creating fixed committee root store", "error", err1)
|
||||||
|
}
|
||||||
|
if s.committees, err2 = newCanonicalStore[*types.SerializedSyncCommittee](db, rawdb.SyncCommitteeKey); err2 != nil {
|
||||||
|
log.Error("Error creating committee store", "error", err2)
|
||||||
|
}
|
||||||
|
if s.updates, err3 = newCanonicalStore[*types.LightClientUpdate](db, rawdb.BestUpdateKey); err3 != nil {
|
||||||
|
log.Error("Error creating update store", "error", err3)
|
||||||
|
}
|
||||||
|
if err1 != nil || err2 != nil || err3 != nil || !s.checkConstraints() {
|
||||||
|
log.Info("Resetting invalid committee chain")
|
||||||
|
s.Reset()
|
||||||
|
}
|
||||||
|
// roll back invalid updates (might be necessary if forks have been changed since last time)
|
||||||
|
for !s.updates.periods.isEmpty() {
|
||||||
|
update, ok := s.updates.get(s.db, s.updates.periods.End-1)
|
||||||
|
if !ok {
|
||||||
|
log.Error("Sync committee update missing", "period", s.updates.periods.End-1)
|
||||||
|
s.Reset()
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if valid, err := s.verifyUpdate(update); err != nil {
|
||||||
|
log.Error("Error validating update", "period", s.updates.periods.End-1, "error", err)
|
||||||
|
} else if valid {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err := s.rollback(s.updates.periods.End); err != nil {
|
||||||
|
log.Error("Error writing batch into chain database", "error", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !s.committees.periods.isEmpty() {
|
||||||
|
log.Trace("Sync committee chain loaded", "first period", s.committees.periods.Start, "last period", s.committees.periods.End-1)
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkConstraints checks committee chain validity constraints
|
||||||
|
func (s *CommitteeChain) checkConstraints() bool {
|
||||||
|
isNotInFixedCommitteeRootRange := func(r periodRange) bool {
|
||||||
|
return s.fixedCommitteeRoots.periods.isEmpty() ||
|
||||||
|
r.Start < s.fixedCommitteeRoots.periods.Start ||
|
||||||
|
r.Start >= s.fixedCommitteeRoots.periods.End
|
||||||
|
}
|
||||||
|
|
||||||
|
valid := true
|
||||||
|
if !s.updates.periods.isEmpty() {
|
||||||
|
if isNotInFixedCommitteeRootRange(s.updates.periods) {
|
||||||
|
log.Error("Start update is not in the fixed roots range")
|
||||||
|
valid = false
|
||||||
|
}
|
||||||
|
if s.committees.periods.Start > s.updates.periods.Start || s.committees.periods.End <= s.updates.periods.End {
|
||||||
|
log.Error("Missing committees in update range")
|
||||||
|
valid = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !s.committees.periods.isEmpty() {
|
||||||
|
if isNotInFixedCommitteeRootRange(s.committees.periods) {
|
||||||
|
log.Error("Start committee is not in the fixed roots range")
|
||||||
|
valid = false
|
||||||
|
}
|
||||||
|
if s.committees.periods.End > s.fixedCommitteeRoots.periods.End && s.committees.periods.End > s.updates.periods.End+1 {
|
||||||
|
log.Error("Last committee is neither in the fixed roots range nor proven by updates")
|
||||||
|
valid = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return valid
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset resets the committee chain.
|
||||||
|
func (s *CommitteeChain) Reset() {
|
||||||
|
s.chainmu.Lock()
|
||||||
|
defer s.chainmu.Unlock()
|
||||||
|
|
||||||
|
if err := s.rollback(0); err != nil {
|
||||||
|
log.Error("Error writing batch into chain database", "error", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckpointInit initializes a CommitteeChain based on the checkpoint.
|
||||||
|
// Note: if the chain is already initialized and the committees proven by the
|
||||||
|
// checkpoint do match the existing chain then the chain is retained and the
|
||||||
|
// new checkpoint becomes fixed.
|
||||||
|
func (s *CommitteeChain) CheckpointInit(bootstrap *types.BootstrapData) error {
|
||||||
|
s.chainmu.Lock()
|
||||||
|
defer s.chainmu.Unlock()
|
||||||
|
|
||||||
|
if err := bootstrap.Validate(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
period := bootstrap.Header.SyncPeriod()
|
||||||
|
if err := s.deleteFixedCommitteeRootsFrom(period + 2); err != nil {
|
||||||
|
s.Reset()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if s.addFixedCommitteeRoot(period, bootstrap.CommitteeRoot) != nil {
|
||||||
|
s.Reset()
|
||||||
|
if err := s.addFixedCommitteeRoot(period, bootstrap.CommitteeRoot); err != nil {
|
||||||
|
s.Reset()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := s.addFixedCommitteeRoot(period+1, common.Hash(bootstrap.CommitteeBranch[0])); err != nil {
|
||||||
|
s.Reset()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.addCommittee(period, bootstrap.Committee); err != nil {
|
||||||
|
s.Reset()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// addFixedCommitteeRoot sets a fixed committee root at the given period.
// Note that the period where the first committee is added has to have a fixed
// root which can either come from a BootstrapData or a trusted source.
//
// It returns ErrWrongCommitteeRoot for a zero root, ErrInvalidPeriod if the
// root cannot be connected to the already fixed range, and propagates any
// error from rollback or the database batch write.
func (s *CommitteeChain) addFixedCommitteeRoot(period uint64, root common.Hash) error {
	if root == (common.Hash{}) {
		return ErrWrongCommitteeRoot
	}

	batch := s.db.NewBatch()
	oldRoot := s.getCommitteeRoot(period)
	if !s.fixedCommitteeRoots.periods.canExpand(period) {
		// Note: the fixed committee root range should always be continuous and
		// therefore the expected syncing method is to forward sync and optionally
		// backward sync periods one by one, starting from a checkpoint. The only
		// case when a root that is not adjacent to the already fixed ones can be
		// fixed is when the same root has already been proven by an update chain.
		// In this case the all roots in between can and should be fixed.
		// This scenario makes sense when a new trusted checkpoint is added to an
		// existing chain, ensuring that it will not be rolled back (might be
		// important in case of low signer participation rate).
		if root != oldRoot {
			return ErrInvalidPeriod
		}
		// if the old root exists and matches the new one then it is guaranteed
		// that the given period is after the existing fixed range and the roots
		// in between can also be fixed.
		for p := s.fixedCommitteeRoots.periods.End; p < period; p++ {
			if err := s.fixedCommitteeRoots.add(batch, p, s.getCommitteeRoot(p)); err != nil {
				return err
			}
		}
	}
	if oldRoot != (common.Hash{}) && (oldRoot != root) {
		// existing old root was different, we have to reorg the chain
		if err := s.rollback(period); err != nil {
			return err
		}
	}
	if err := s.fixedCommitteeRoots.add(batch, period, root); err != nil {
		return err
	}
	// All root additions above went into the same batch; commit atomically.
	if err := batch.Write(); err != nil {
		log.Error("Error writing batch into chain database", "error", err)
		return err
	}
	return nil
}
|
||||||
|
|
||||||
|
// deleteFixedCommitteeRootsFrom deletes fixed roots starting from the given period.
// It also maintains chain consistency, meaning that it also deletes updates and
// committees if they are no longer supported by a valid update chain.
func (s *CommitteeChain) deleteFixedCommitteeRootsFrom(period uint64) error {
	if period >= s.fixedCommitteeRoots.periods.End {
		return nil // nothing fixed at or after the given period
	}
	batch := s.db.NewBatch()
	s.fixedCommitteeRoots.deleteFrom(batch, period)
	if s.updates.periods.isEmpty() || period <= s.updates.periods.Start {
		// Note: the first period of the update chain should always be fixed so if
		// the fixed root at the first update is removed then the entire update chain
		// and the proven committees have to be removed. Earlier committees in the
		// remaining fixed root range can stay.
		s.updates.deleteFrom(batch, period)
		s.deleteCommitteesFrom(batch, period)
	} else {
		// The update chain stays intact, some previously fixed committee roots might
		// get unfixed but are still proven by the update chain. If there were
		// committees present after the range proven by updates, those should be
		// removed if the belonging fixed roots are also removed.
		fromPeriod := s.updates.periods.End + 1 // not proven by updates
		if period > fromPeriod {
			fromPeriod = period // also not justified by fixed roots
		}
		s.deleteCommitteesFrom(batch, fromPeriod)
	}
	// Commit all deletions atomically so the chain stays consistent on disk.
	if err := batch.Write(); err != nil {
		log.Error("Error writing batch into chain database", "error", err)
		return err
	}
	return nil
}
|
||||||
|
|
||||||
|
// deleteCommitteesFrom deletes committees starting from the given period.
|
||||||
|
func (s *CommitteeChain) deleteCommitteesFrom(batch ethdb.Batch, period uint64) {
|
||||||
|
deleted := s.committees.deleteFrom(batch, period)
|
||||||
|
for period := deleted.Start; period < deleted.End; period++ {
|
||||||
|
s.committeeCache.Remove(period)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// addCommittee adds a committee at the given period if possible.
|
||||||
|
func (s *CommitteeChain) addCommittee(period uint64, committee *types.SerializedSyncCommittee) error {
|
||||||
|
if !s.committees.periods.canExpand(period) {
|
||||||
|
return ErrInvalidPeriod
|
||||||
|
}
|
||||||
|
root := s.getCommitteeRoot(period)
|
||||||
|
if root == (common.Hash{}) {
|
||||||
|
return ErrInvalidPeriod
|
||||||
|
}
|
||||||
|
if root != committee.Root() {
|
||||||
|
return ErrWrongCommitteeRoot
|
||||||
|
}
|
||||||
|
if !s.committees.periods.contains(period) {
|
||||||
|
if err := s.committees.add(s.db, period, committee); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s.committeeCache.Remove(period)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertUpdate adds a new update if possible.
//
// Possible errors:
//   - ErrInvalidPeriod: the update does not extend the update chain or the
//     committee for its period is not stored yet
//   - ErrInvalidUpdate: the update's score is below the required minimum or
//     it does not pass verification
//   - ErrCannotReorg: the update proves a different next committee root than
//     an already known one that cannot be replaced
//   - ErrNeedCommittee / ErrWrongCommitteeRoot: the next committee is needed
//     but missing, or does not match the proven root
func (s *CommitteeChain) InsertUpdate(update *types.LightClientUpdate, nextCommittee *types.SerializedSyncCommittee) error {
	s.chainmu.Lock()
	defer s.chainmu.Unlock()

	period := update.AttestedHeader.Header.SyncPeriod()
	if !s.updates.periods.canExpand(period) || !s.committees.periods.contains(period) {
		return ErrInvalidPeriod
	}
	if s.minimumUpdateScore.BetterThan(update.Score()) {
		return ErrInvalidUpdate
	}
	// A reorg is needed if the next committee root proven by this update
	// differs from the root already known for period+1.
	oldRoot := s.getCommitteeRoot(period + 1)
	reorg := oldRoot != (common.Hash{}) && oldRoot != update.NextSyncCommitteeRoot
	if oldUpdate, ok := s.updates.get(s.db, period); ok && !update.Score().BetterThan(oldUpdate.Score()) {
		// a better or equal update already exists; no changes, only fail if new one tried to reorg
		if reorg {
			return ErrCannotReorg
		}
		return nil
	}
	// A fixed root at period+1 can never be reorged away.
	if s.fixedCommitteeRoots.periods.contains(period+1) && reorg {
		return ErrCannotReorg
	}
	if ok, err := s.verifyUpdate(update); err != nil {
		return err
	} else if !ok {
		return ErrInvalidUpdate
	}
	// The next committee has to be supplied by the caller if it is not stored
	// yet, or if the stored one is being replaced by the reorg.
	addCommittee := !s.committees.periods.contains(period+1) || reorg
	if addCommittee {
		if nextCommittee == nil {
			return ErrNeedCommittee
		}
		if nextCommittee.Root() != update.NextSyncCommitteeRoot {
			return ErrWrongCommitteeRoot
		}
	}
	if reorg {
		// Drop everything from period+1 on before applying the new update.
		if err := s.rollback(period + 1); err != nil {
			return err
		}
	}
	// Write the new committee and update atomically in one batch.
	batch := s.db.NewBatch()
	if addCommittee {
		if err := s.committees.add(batch, period+1, nextCommittee); err != nil {
			return err
		}
		s.committeeCache.Remove(period + 1)
	}
	if err := s.updates.add(batch, period, update); err != nil {
		return err
	}
	if err := batch.Write(); err != nil {
		log.Error("Error writing batch into chain database", "error", err)
		return err
	}
	log.Info("Inserted new committee update", "period", period, "next committee root", update.NextSyncCommitteeRoot)
	return nil
}
|
||||||
|
|
||||||
|
// NextSyncPeriod returns the next period where an update can be added and also
|
||||||
|
// whether the chain is initialized at all.
|
||||||
|
func (s *CommitteeChain) NextSyncPeriod() (uint64, bool) {
|
||||||
|
s.chainmu.RLock()
|
||||||
|
defer s.chainmu.RUnlock()
|
||||||
|
|
||||||
|
if s.committees.periods.isEmpty() {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
if !s.updates.periods.isEmpty() {
|
||||||
|
return s.updates.periods.End, true
|
||||||
|
}
|
||||||
|
return s.committees.periods.End - 1, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// rollback removes all committees and fixed roots from the given period and updates
|
||||||
|
// starting from the previous period.
|
||||||
|
func (s *CommitteeChain) rollback(period uint64) error {
|
||||||
|
max := s.updates.periods.End + 1
|
||||||
|
if s.committees.periods.End > max {
|
||||||
|
max = s.committees.periods.End
|
||||||
|
}
|
||||||
|
if s.fixedCommitteeRoots.periods.End > max {
|
||||||
|
max = s.fixedCommitteeRoots.periods.End
|
||||||
|
}
|
||||||
|
for max > period {
|
||||||
|
max--
|
||||||
|
batch := s.db.NewBatch()
|
||||||
|
s.deleteCommitteesFrom(batch, max)
|
||||||
|
s.fixedCommitteeRoots.deleteFrom(batch, max)
|
||||||
|
if max > 0 {
|
||||||
|
s.updates.deleteFrom(batch, max-1)
|
||||||
|
}
|
||||||
|
if err := batch.Write(); err != nil {
|
||||||
|
log.Error("Error writing batch into chain database", "error", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getCommitteeRoot returns the committee root at the given period, either fixed,
|
||||||
|
// proven by a previous update or both. It returns an empty hash if the committee
|
||||||
|
// root is unknown.
|
||||||
|
func (s *CommitteeChain) getCommitteeRoot(period uint64) common.Hash {
|
||||||
|
if root, ok := s.fixedCommitteeRoots.get(s.db, period); ok || period == 0 {
|
||||||
|
return root
|
||||||
|
}
|
||||||
|
if update, ok := s.updates.get(s.db, period-1); ok {
|
||||||
|
return update.NextSyncCommitteeRoot
|
||||||
|
}
|
||||||
|
return common.Hash{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getSyncCommittee returns the deserialized sync committee at the given period.
|
||||||
|
func (s *CommitteeChain) getSyncCommittee(period uint64) (syncCommittee, error) {
|
||||||
|
if c, ok := s.committeeCache.Get(period); ok {
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
if sc, ok := s.committees.get(s.db, period); ok {
|
||||||
|
c, err := s.sigVerifier.deserializeSyncCommittee(sc)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Sync committee #%d deserialization error: %v", period, err)
|
||||||
|
}
|
||||||
|
s.committeeCache.Add(period, c)
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("Missing serialized sync committee #%d", period)
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifySignedHeader returns true if the given signed header has a valid signature
// according to the local committee chain. The caller should ensure that the
// committees advertised by the same source where the signed header came from are
// synced before verifying the signature.
// The age of the header is also returned (the time elapsed since the beginning
// of the given slot, according to the local system clock). If enforceTime is
// true then negative age (future) headers are rejected.
func (s *CommitteeChain) VerifySignedHeader(head types.SignedHeader) (bool, time.Duration, error) {
	// Verification is read-only with respect to the chain, so only the read
	// lock is taken; the actual work is done by the unlocked internal helper.
	s.chainmu.RLock()
	defer s.chainmu.RUnlock()

	return s.verifySignedHeader(head)
}
|
||||||
|
|
||||||
|
// verifySignedHeader implements VerifySignedHeader; the caller must hold
// chainmu (read or write).
func (s *CommitteeChain) verifySignedHeader(head types.SignedHeader) (bool, time.Duration, error) {
	var age time.Duration
	now := s.unixNano()
	// Compute the header's age relative to the local clock. The condition is
	// arranged so that the subtraction producing 'age' cannot overflow int64;
	// headers whose slot is too far ahead get the sentinel math.MinInt64
	// (maximally negative = far future) instead. The /12 and *12 factors
	// presumably encode 12-second slots -- confirm against the spec if the
	// slot time ever changes.
	if head.Header.Slot < (uint64(now-math.MinInt64)/uint64(time.Second)-s.config.GenesisTime)/12 {
		age = time.Duration(now - int64(time.Second)*int64(s.config.GenesisTime+head.Header.Slot*12))
	} else {
		age = time.Duration(math.MinInt64)
	}
	// Future headers (negative age) are only rejected when enforceTime is set.
	if s.enforceTime && age < 0 {
		return false, age, nil
	}
	// The verifying committee is selected by the signature slot, not the
	// header slot.
	committee, err := s.getSyncCommittee(types.SyncPeriod(head.SignatureSlot))
	if err != nil {
		return false, 0, err
	}
	if committee == nil {
		return false, age, nil
	}
	if signingRoot, err := s.config.Forks.SigningRoot(head.Header); err == nil {
		return s.sigVerifier.verifySignature(committee, signingRoot, &head.Signature), age, nil
	}
	// Signing root could not be computed (e.g. unknown fork): report invalid.
	return false, age, nil
}
|
||||||
|
|
||||||
|
// verifyUpdate checks whether the header signature is correct and the update
// fits into the specified constraints (assumes that the update has been
// successfully validated previously)
func (s *CommitteeChain) verifyUpdate(update *types.LightClientUpdate) (bool, error) {
	// Note: SignatureSlot determines the sync period of the committee used for signature
	// verification. Though in reality SignatureSlot is always bigger than update.Header.Slot,
	// setting them as equal here enforces the rule that they have to be in the same sync
	// period in order for the light client update proof to be meaningful.
	ok, age, err := s.verifySignedHeader(update.AttestedHeader)
	if age < 0 {
		// Future-dated update: logged here; verifySignedHeader has already
		// rejected it (ok == false) if enforceTime is enabled.
		log.Warn("Future committee update received", "age", age)
	}
	return ok, err
}
|
356
beacon/light/committee_chain_test.go
Normal file
356
beacon/light/committee_chain_test.go
Normal file
@ -0,0 +1,356 @@
|
|||||||
|
// Copyright 2022 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package light
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/beacon/params"
|
||||||
|
"github.com/ethereum/go-ethereum/beacon/types"
|
||||||
|
"github.com/ethereum/go-ethereum/common/mclock"
|
||||||
|
"github.com/ethereum/go-ethereum/ethdb/memorydb"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// Two independent random genesis configurations.
	testGenesis  = newTestGenesis()
	testGenesis2 = newTestGenesis()

	// Fork configurations: tfBase is the default single-fork config,
	// tfAlternative adds a second fork at epoch 0x700, and tfAnotherGenesis
	// uses a different genesis validators root.
	tfBase = newTestForks(testGenesis, types.Forks{
		&types.Fork{Epoch: 0, Version: []byte{0}},
	})
	tfAlternative = newTestForks(testGenesis, types.Forks{
		&types.Fork{Epoch: 0, Version: []byte{0}},
		&types.Fork{Epoch: 0x700, Version: []byte{1}},
	})
	tfAnotherGenesis = newTestForks(testGenesis2, types.Forks{
		&types.Fork{Epoch: 0, Version: []byte{0}},
	})

	// Pre-generated reference chains; arguments are (parent, forks,
	// newCommittees, begin, end, signerCount, finalizedHeader) -- see
	// newTestCommitteeChain.
	tcBase                      = newTestCommitteeChain(nil, tfBase, true, 0, 10, 400, false)
	tcBaseWithInvalidUpdates    = newTestCommitteeChain(tcBase, tfBase, false, 5, 10, 200, false) // signer count too low
	tcBaseWithBetterUpdates     = newTestCommitteeChain(tcBase, tfBase, false, 5, 10, 440, false)
	tcReorgWithWorseUpdates     = newTestCommitteeChain(tcBase, tfBase, true, 5, 10, 400, false)
	tcReorgWithWorseUpdates2    = newTestCommitteeChain(tcBase, tfBase, true, 5, 10, 380, false)
	tcReorgWithBetterUpdates    = newTestCommitteeChain(tcBase, tfBase, true, 5, 10, 420, false)
	tcReorgWithFinalizedUpdates = newTestCommitteeChain(tcBase, tfBase, true, 5, 10, 400, true)
	tcFork                      = newTestCommitteeChain(tcBase, tfAlternative, true, 7, 10, 400, false)
	tcAnotherGenesis            = newTestCommitteeChain(nil, tfAnotherGenesis, true, 0, 10, 400, false)
)
|
||||||
|
|
||||||
|
// TestCommitteeChainFixedCommitteeRoots checks that fixed committee roots can
// only form a continuous range and that committees can only be added where a
// matching root is already fixed, with and without a database reload.
func TestCommitteeChainFixedCommitteeRoots(t *testing.T) {
	for _, reload := range []bool{false, true} {
		c := newCommitteeChainTest(t, tfBase, 300, true)
		c.setClockPeriod(7)
		c.addFixedCommitteeRoot(tcBase, 4, nil)
		c.addFixedCommitteeRoot(tcBase, 5, nil)
		c.addFixedCommitteeRoot(tcBase, 6, nil)
		c.addFixedCommitteeRoot(tcBase, 8, ErrInvalidPeriod) // range has to be continuous
		c.addFixedCommitteeRoot(tcBase, 3, nil)
		c.addFixedCommitteeRoot(tcBase, 2, nil)
		if reload {
			c.reloadChain()
		}
		c.addCommittee(tcBase, 4, nil)
		c.addCommittee(tcBase, 6, ErrInvalidPeriod) // range has to be continuous
		c.addCommittee(tcBase, 5, nil)
		c.addCommittee(tcBase, 6, nil)
		c.addCommittee(tcAnotherGenesis, 3, ErrWrongCommitteeRoot)
		c.addCommittee(tcBase, 3, nil)
		if reload {
			c.reloadChain()
		}
		c.verifyRange(tcBase, 3, 6)
	}
}
|
||||||
|
|
||||||
|
// TestCommitteeChainCheckpointSync walks through a forward sync starting from
// fixed roots, then a reverse sync, exercising the time-enforcement and
// database-reload combinations.
func TestCommitteeChainCheckpointSync(t *testing.T) {
	for _, enforceTime := range []bool{false, true} {
		for _, reload := range []bool{false, true} {
			c := newCommitteeChainTest(t, tfBase, 300, enforceTime)
			if enforceTime {
				c.setClockPeriod(6)
			}
			c.insertUpdate(tcBase, 3, true, ErrInvalidPeriod)
			c.addFixedCommitteeRoot(tcBase, 3, nil)
			c.addFixedCommitteeRoot(tcBase, 4, nil)
			c.insertUpdate(tcBase, 4, true, ErrInvalidPeriod) // still no committee
			c.addCommittee(tcBase, 3, nil)
			c.addCommittee(tcBase, 4, nil)
			if reload {
				c.reloadChain()
			}
			c.verifyRange(tcBase, 3, 4)
			c.insertUpdate(tcBase, 3, false, nil)              // update can be added without committee here
			c.insertUpdate(tcBase, 4, false, ErrNeedCommittee) // but not here as committee 5 is not there yet
			c.insertUpdate(tcBase, 4, true, nil)
			c.verifyRange(tcBase, 3, 5)
			c.insertUpdate(tcBaseWithInvalidUpdates, 5, true, ErrInvalidUpdate) // signer count too low
			c.insertUpdate(tcBase, 5, true, nil)
			if reload {
				c.reloadChain()
			}
			if enforceTime {
				c.insertUpdate(tcBase, 6, true, ErrInvalidUpdate) // future update rejected
				c.setClockPeriod(7)
			}
			c.insertUpdate(tcBase, 6, true, nil) // when the time comes it's accepted
			if reload {
				c.reloadChain()
			}
			if enforceTime {
				c.verifyRange(tcBase, 3, 6) // committee 7 is there but still in the future
				c.setClockPeriod(8)
			}
			c.verifyRange(tcBase, 3, 7) // now period 7 can also be verified
			// try reverse syncing an update
			c.insertUpdate(tcBase, 2, false, ErrInvalidPeriod) // fixed committee is needed first
			c.addFixedCommitteeRoot(tcBase, 2, nil)
			c.addCommittee(tcBase, 2, nil)
			c.insertUpdate(tcBase, 2, false, nil)
			c.verifyRange(tcBase, 2, 7)
		}
	}
}
|
||||||
|
|
||||||
|
// TestCommitteeChainReorg builds a base chain and then attempts reorgs with
// worse, better and finalized competing update chains, checking that only
// higher-scored (or finalized) updates can replace the stored ones.
func TestCommitteeChainReorg(t *testing.T) {
	for _, reload := range []bool{false, true} {
		for _, addBetterUpdates := range []bool{false, true} {
			c := newCommitteeChainTest(t, tfBase, 300, true)
			c.setClockPeriod(11)
			c.addFixedCommitteeRoot(tcBase, 3, nil)
			c.addFixedCommitteeRoot(tcBase, 4, nil)
			c.addCommittee(tcBase, 3, nil)
			for period := uint64(3); period < 10; period++ {
				c.insertUpdate(tcBase, period, true, nil)
			}
			if reload {
				c.reloadChain()
			}
			c.verifyRange(tcBase, 3, 10)
			c.insertUpdate(tcReorgWithWorseUpdates, 5, true, ErrCannotReorg)
			c.insertUpdate(tcReorgWithWorseUpdates2, 5, true, ErrCannotReorg)
			if addBetterUpdates {
				// add better updates for the base chain and expect first reorg to fail
				// (only add updates as committees should be the same)
				for period := uint64(5); period < 10; period++ {
					c.insertUpdate(tcBaseWithBetterUpdates, period, false, nil)
				}
				if reload {
					c.reloadChain()
				}
				c.verifyRange(tcBase, 3, 10) // still on the same chain
				c.insertUpdate(tcReorgWithBetterUpdates, 5, true, ErrCannotReorg)
			} else {
				// reorg with better updates
				c.insertUpdate(tcReorgWithBetterUpdates, 5, false, ErrNeedCommittee)
				c.verifyRange(tcBase, 3, 10) // no success yet, still on the base chain
				c.verifyRange(tcReorgWithBetterUpdates, 3, 5)
				c.insertUpdate(tcReorgWithBetterUpdates, 5, true, nil)
				// successful reorg, base chain should only match before the reorg period
				if reload {
					c.reloadChain()
				}
				c.verifyRange(tcBase, 3, 5)
				c.verifyRange(tcReorgWithBetterUpdates, 3, 6)
				for period := uint64(6); period < 10; period++ {
					c.insertUpdate(tcReorgWithBetterUpdates, period, true, nil)
				}
				c.verifyRange(tcReorgWithBetterUpdates, 3, 10)
			}
			// reorg with finalized updates; should succeed even if base chain updates
			// have been improved because a finalized update beats everything else
			c.insertUpdate(tcReorgWithFinalizedUpdates, 5, false, ErrNeedCommittee)
			c.insertUpdate(tcReorgWithFinalizedUpdates, 5, true, nil)
			if reload {
				c.reloadChain()
			}
			c.verifyRange(tcReorgWithFinalizedUpdates, 3, 6)
			for period := uint64(6); period < 10; period++ {
				c.insertUpdate(tcReorgWithFinalizedUpdates, period, true, nil)
			}
			c.verifyRange(tcReorgWithFinalizedUpdates, 3, 10)
		}
	}
}
|
||||||
|
|
||||||
|
// TestCommitteeChainFork syncs the shared section of two forks, verifies that
// updates signed on the wrong fork are rejected, then switches the chain
// config to the other fork and checks that the divergent updates roll back.
func TestCommitteeChainFork(t *testing.T) {
	c := newCommitteeChainTest(t, tfAlternative, 300, true)
	c.setClockPeriod(11)
	// trying to sync a chain on an alternative fork with the base chain data
	c.addFixedCommitteeRoot(tcBase, 0, nil)
	c.addFixedCommitteeRoot(tcBase, 1, nil)
	c.addCommittee(tcBase, 0, nil)
	// shared section should sync without errors
	for period := uint64(0); period < 7; period++ {
		c.insertUpdate(tcBase, period, true, nil)
	}
	c.insertUpdate(tcBase, 7, true, ErrInvalidUpdate) // wrong fork
	// committee root #7 is still the same but signatures are already signed with
	// a different fork id so period 7 should only verify on the alternative fork
	c.verifyRange(tcBase, 0, 6)
	c.verifyRange(tcFork, 0, 7)
	for period := uint64(7); period < 10; period++ {
		c.insertUpdate(tcFork, period, true, nil)
	}
	c.verifyRange(tcFork, 0, 10)
	// reload the chain while switching to the base fork
	c.config = tfBase
	c.reloadChain()
	// updates 7..9 should be rolled back now
	c.verifyRange(tcFork, 0, 6) // again, period 7 only verifies on the right fork
	c.verifyRange(tcBase, 0, 7)
	c.insertUpdate(tcFork, 7, true, ErrInvalidUpdate) // wrong fork
	for period := uint64(7); period < 10; period++ {
		c.insertUpdate(tcBase, period, true, nil)
	}
	c.verifyRange(tcBase, 0, 10)
}
|
||||||
|
|
||||||
|
// committeeChainTest is a test harness wrapping a CommitteeChain under test
// together with its backing in-memory database and a simulated clock.
type committeeChainTest struct {
	t               *testing.T
	db              *memorydb.Database // persists across reloadChain calls
	clock           *mclock.Simulated  // drives header age / future-header checks
	config          types.ChainConfig  // fork/genesis config; may be swapped before reloadChain
	signerThreshold int                // signer threshold passed to newCommitteeChain
	enforceTime     bool               // whether the chain rejects future headers
	chain           *CommitteeChain
}
|
||||||
|
|
||||||
|
func newCommitteeChainTest(t *testing.T, config types.ChainConfig, signerThreshold int, enforceTime bool) *committeeChainTest {
|
||||||
|
c := &committeeChainTest{
|
||||||
|
t: t,
|
||||||
|
db: memorydb.New(),
|
||||||
|
clock: &mclock.Simulated{},
|
||||||
|
config: config,
|
||||||
|
signerThreshold: signerThreshold,
|
||||||
|
enforceTime: enforceTime,
|
||||||
|
}
|
||||||
|
c.chain = newCommitteeChain(c.db, &config, signerThreshold, enforceTime, dummyVerifier{}, c.clock, func() int64 { return int64(c.clock.Now()) })
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// reloadChain recreates the committee chain from the persisted database,
// simulating a restart of the client with the current c.config.
func (c *committeeChainTest) reloadChain() {
	c.chain = newCommitteeChain(c.db, &c.config, c.signerThreshold, c.enforceTime, dummyVerifier{}, c.clock, func() int64 { return int64(c.clock.Now()) })
}
|
||||||
|
|
||||||
|
func (c *committeeChainTest) setClockPeriod(period float64) {
|
||||||
|
target := mclock.AbsTime(period * float64(time.Second*12*params.SyncPeriodLength))
|
||||||
|
wait := time.Duration(target - c.clock.Now())
|
||||||
|
if wait < 0 {
|
||||||
|
c.t.Fatalf("Invalid setClockPeriod")
|
||||||
|
}
|
||||||
|
c.clock.Run(wait)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *committeeChainTest) addFixedCommitteeRoot(tc *testCommitteeChain, period uint64, expErr error) {
|
||||||
|
if err := c.chain.addFixedCommitteeRoot(period, tc.periods[period].committee.Root()); err != expErr {
|
||||||
|
c.t.Errorf("Incorrect error output from addFixedCommitteeRoot at period %d (expected %v, got %v)", period, expErr, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *committeeChainTest) addCommittee(tc *testCommitteeChain, period uint64, expErr error) {
|
||||||
|
if err := c.chain.addCommittee(period, tc.periods[period].committee); err != expErr {
|
||||||
|
c.t.Errorf("Incorrect error output from addCommittee at period %d (expected %v, got %v)", period, expErr, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *committeeChainTest) insertUpdate(tc *testCommitteeChain, period uint64, addCommittee bool, expErr error) {
|
||||||
|
var committee *types.SerializedSyncCommittee
|
||||||
|
if addCommittee {
|
||||||
|
committee = tc.periods[period+1].committee
|
||||||
|
}
|
||||||
|
if err := c.chain.InsertUpdate(tc.periods[period].update, committee); err != expErr {
|
||||||
|
c.t.Errorf("Incorrect error output from InsertUpdate at period %d (expected %v, got %v)", period, expErr, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *committeeChainTest) verifySignedHeader(tc *testCommitteeChain, period float64, expOk bool) {
|
||||||
|
slot := uint64(period * float64(params.SyncPeriodLength))
|
||||||
|
signedHead := GenerateTestSignedHeader(types.Header{Slot: slot}, &tc.config, tc.periods[types.SyncPeriod(slot)].committee, slot+1, 400)
|
||||||
|
if ok, _, _ := c.chain.VerifySignedHeader(signedHead); ok != expOk {
|
||||||
|
c.t.Errorf("Incorrect output from VerifySignedHeader at period %f (expected %v, got %v)", period, expOk, ok)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *committeeChainTest) verifyRange(tc *testCommitteeChain, begin, end uint64) {
|
||||||
|
if begin > 0 {
|
||||||
|
c.verifySignedHeader(tc, float64(begin)-0.5, false)
|
||||||
|
}
|
||||||
|
for period := begin; period <= end; period++ {
|
||||||
|
c.verifySignedHeader(tc, float64(period)+0.5, true)
|
||||||
|
}
|
||||||
|
c.verifySignedHeader(tc, float64(end)+1.5, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// newTestGenesis returns a chain config with a random genesis validators root.
// NOTE(review): the error returned by rand.Read is ignored; crypto/rand is
// not expected to fail here, but confirm or handle the error explicitly.
func newTestGenesis() types.ChainConfig {
	var config types.ChainConfig
	rand.Read(config.GenesisValidatorsRoot[:])
	return config
}
|
||||||
|
|
||||||
|
func newTestForks(config types.ChainConfig, forks types.Forks) types.ChainConfig {
|
||||||
|
for _, fork := range forks {
|
||||||
|
config.AddFork(fork.Name, fork.Epoch, fork.Version)
|
||||||
|
}
|
||||||
|
return config
|
||||||
|
}
|
||||||
|
|
||||||
|
// newTestCommitteeChain generates a reference chain for periods begin..end,
// optionally derived from a parent chain. If newCommittees is true, fresh
// committees are generated for the range (diverging from the parent after the
// fork point); otherwise the parent's committees are kept and only updates
// are regenerated, e.g. to produce variants with different signer counts.
func newTestCommitteeChain(parent *testCommitteeChain, config types.ChainConfig, newCommittees bool, begin, end int, signerCount int, finalizedHeader bool) *testCommitteeChain {
	tc := &testCommitteeChain{
		config: config,
	}
	if parent != nil {
		// Copy the parent's periods so the new chain can diverge without
		// mutating the parent.
		tc.periods = make([]testPeriod, len(parent.periods))
		copy(tc.periods, parent.periods)
	}
	if newCommittees {
		if begin == 0 {
			tc.fillCommittees(begin, end+1)
		} else {
			// Keep the parent's committee at the fork point (period 'begin');
			// only the periods after it get new committees.
			tc.fillCommittees(begin+1, end+1)
		}
	}
	tc.fillUpdates(begin, end, signerCount, finalizedHeader)
	return tc
}
|
||||||
|
|
||||||
|
// testPeriod holds the generated data belonging to a single sync period.
type testPeriod struct {
	committee *types.SerializedSyncCommittee // committee of this period
	update    *types.LightClientUpdate      // update linking to the next period's committee
}
|
||||||
|
|
||||||
|
// testCommitteeChain is a generated reference chain that the chain under test
// is synced against.
type testCommitteeChain struct {
	periods []testPeriod      // per-period committees and updates, indexed by period number
	config  types.ChainConfig // fork/genesis config the test data is signed under
}
|
||||||
|
|
||||||
|
func (tc *testCommitteeChain) fillCommittees(begin, end int) {
|
||||||
|
if len(tc.periods) <= end {
|
||||||
|
tc.periods = append(tc.periods, make([]testPeriod, end+1-len(tc.periods))...)
|
||||||
|
}
|
||||||
|
for i := begin; i <= end; i++ {
|
||||||
|
tc.periods[i].committee = GenerateTestCommittee()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tc *testCommitteeChain) fillUpdates(begin, end int, signerCount int, finalizedHeader bool) {
|
||||||
|
for i := begin; i <= end; i++ {
|
||||||
|
tc.periods[i].update = GenerateTestUpdate(&tc.config, uint64(i), tc.periods[i].committee, tc.periods[i+1].committee, signerCount, finalizedHeader)
|
||||||
|
}
|
||||||
|
}
|
78
beacon/light/range.go
Normal file
78
beacon/light/range.go
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
// Copyright 2023 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package light
|
||||||
|
|
||||||
|
// periodRange represents a (possibly zero-length) range of integers (sync
// periods), covering the half-open interval [Start, End).
type periodRange struct {
	Start, End uint64
}

// isEmpty returns true if the length of the range is zero.
func (a periodRange) isEmpty() bool {
	return a.Start == a.End
}

// contains returns true if the range includes the given period.
func (a periodRange) contains(period uint64) bool {
	return a.Start <= period && period < a.End
}

// canExpand returns true if the range includes or can be expanded with the given
// period (either the range is empty or the given period is inside, right before or
// right after the range).
func (a periodRange) canExpand(period uint64) bool {
	if a.isEmpty() {
		return true
	}
	// One period of slack on either side: period may also be Start-1 or End.
	return period+1 >= a.Start && period <= a.End
}

// expand expands the range with the given period.
// This method assumes that canExpand returned true: otherwise this is a no-op.
func (a *periodRange) expand(period uint64) {
	switch {
	case a.isEmpty():
		*a = periodRange{Start: period, End: period + 1}
	case period+1 == a.Start:
		// Adjacent below: extend the range backwards by one.
		a.Start = period
	case period == a.End:
		// Adjacent above: extend the range forwards by one.
		a.End = period + 1
	}
}

// split splits the range into two ranges. The 'fromPeriod' will be the first
// element in the second range (if present).
// The original range is unchanged by this operation.
func (a *periodRange) split(fromPeriod uint64) (periodRange, periodRange) {
	switch {
	case fromPeriod <= a.Start:
		// First range empty, everything in second range.
		return periodRange{}, *a
	case fromPeriod >= a.End:
		// Second range empty, everything in first range.
		return *a, periodRange{}
	default:
		return periodRange{Start: a.Start, End: fromPeriod}, periodRange{Start: fromPeriod, End: a.End}
	}
}

// each invokes the supplied function fn once per period in range.
func (a *periodRange) each(fn func(uint64)) {
	for period := a.Start; period < a.End; period++ {
		fn(period)
	}
}
|
152
beacon/light/test_helpers.go
Normal file
152
beacon/light/test_helpers.go
Normal file
@ -0,0 +1,152 @@
|
|||||||
|
// Copyright 2023 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package light
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
|
mrand "math/rand"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/beacon/merkle"
|
||||||
|
"github.com/ethereum/go-ethereum/beacon/params"
|
||||||
|
"github.com/ethereum/go-ethereum/beacon/types"
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
)
|
||||||
|
|
||||||
|
func GenerateTestCommittee() *types.SerializedSyncCommittee {
|
||||||
|
s := new(types.SerializedSyncCommittee)
|
||||||
|
rand.Read(s[:32])
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func GenerateTestUpdate(config *types.ChainConfig, period uint64, committee, nextCommittee *types.SerializedSyncCommittee, signerCount int, finalizedHeader bool) *types.LightClientUpdate {
|
||||||
|
update := new(types.LightClientUpdate)
|
||||||
|
update.NextSyncCommitteeRoot = nextCommittee.Root()
|
||||||
|
var attestedHeader types.Header
|
||||||
|
if finalizedHeader {
|
||||||
|
update.FinalizedHeader = new(types.Header)
|
||||||
|
*update.FinalizedHeader, update.NextSyncCommitteeBranch = makeTestHeaderWithMerkleProof(types.SyncPeriodStart(period)+100, params.StateIndexNextSyncCommittee, merkle.Value(update.NextSyncCommitteeRoot))
|
||||||
|
attestedHeader, update.FinalityBranch = makeTestHeaderWithMerkleProof(types.SyncPeriodStart(period)+200, params.StateIndexFinalBlock, merkle.Value(update.FinalizedHeader.Hash()))
|
||||||
|
} else {
|
||||||
|
attestedHeader, update.NextSyncCommitteeBranch = makeTestHeaderWithMerkleProof(types.SyncPeriodStart(period)+2000, params.StateIndexNextSyncCommittee, merkle.Value(update.NextSyncCommitteeRoot))
|
||||||
|
}
|
||||||
|
update.AttestedHeader = GenerateTestSignedHeader(attestedHeader, config, committee, attestedHeader.Slot+1, signerCount)
|
||||||
|
return update
|
||||||
|
}
|
||||||
|
|
||||||
|
func GenerateTestSignedHeader(header types.Header, config *types.ChainConfig, committee *types.SerializedSyncCommittee, signatureSlot uint64, signerCount int) types.SignedHeader {
|
||||||
|
bitmask := makeBitmask(signerCount)
|
||||||
|
signingRoot, _ := config.Forks.SigningRoot(header)
|
||||||
|
c, _ := dummyVerifier{}.deserializeSyncCommittee(committee)
|
||||||
|
return types.SignedHeader{
|
||||||
|
Header: header,
|
||||||
|
Signature: types.SyncAggregate{
|
||||||
|
Signers: bitmask,
|
||||||
|
Signature: makeDummySignature(c.(dummySyncCommittee), signingRoot, bitmask),
|
||||||
|
},
|
||||||
|
SignatureSlot: signatureSlot,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func GenerateTestCheckpoint(period uint64, committee *types.SerializedSyncCommittee) *types.BootstrapData {
|
||||||
|
header, branch := makeTestHeaderWithMerkleProof(types.SyncPeriodStart(period)+200, params.StateIndexSyncCommittee, merkle.Value(committee.Root()))
|
||||||
|
return &types.BootstrapData{
|
||||||
|
Header: header,
|
||||||
|
Committee: committee,
|
||||||
|
CommitteeRoot: committee.Root(),
|
||||||
|
CommitteeBranch: branch,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeBitmask(signerCount int) (bitmask [params.SyncCommitteeBitmaskSize]byte) {
|
||||||
|
for i := 0; i < params.SyncCommitteeSize; i++ {
|
||||||
|
if mrand.Intn(params.SyncCommitteeSize-i) < signerCount {
|
||||||
|
bitmask[i/8] += byte(1) << (i & 7)
|
||||||
|
signerCount--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeTestHeaderWithMerkleProof(slot, index uint64, value merkle.Value) (types.Header, merkle.Values) {
|
||||||
|
var branch merkle.Values
|
||||||
|
hasher := sha256.New()
|
||||||
|
for index > 1 {
|
||||||
|
var proofHash merkle.Value
|
||||||
|
rand.Read(proofHash[:])
|
||||||
|
hasher.Reset()
|
||||||
|
if index&1 == 0 {
|
||||||
|
hasher.Write(value[:])
|
||||||
|
hasher.Write(proofHash[:])
|
||||||
|
} else {
|
||||||
|
hasher.Write(proofHash[:])
|
||||||
|
hasher.Write(value[:])
|
||||||
|
}
|
||||||
|
hasher.Sum(value[:0])
|
||||||
|
index >>= 1
|
||||||
|
branch = append(branch, proofHash)
|
||||||
|
}
|
||||||
|
return types.Header{Slot: slot, StateRoot: common.Hash(value)}, branch
|
||||||
|
}
|
||||||
|
|
||||||
|
// syncCommittee holds either a blsSyncCommittee or a fake dummySyncCommittee used for testing
|
||||||
|
type syncCommittee interface{}
|
||||||
|
|
||||||
|
// committeeSigVerifier verifies sync committee signatures (either proper BLS
|
||||||
|
// signatures or fake signatures used for testing)
|
||||||
|
type committeeSigVerifier interface {
|
||||||
|
deserializeSyncCommittee(s *types.SerializedSyncCommittee) (syncCommittee, error)
|
||||||
|
verifySignature(committee syncCommittee, signedRoot common.Hash, aggregate *types.SyncAggregate) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// blsVerifier implements committeeSigVerifier
|
||||||
|
type blsVerifier struct{}
|
||||||
|
|
||||||
|
// deserializeSyncCommittee implements committeeSigVerifier
|
||||||
|
func (blsVerifier) deserializeSyncCommittee(s *types.SerializedSyncCommittee) (syncCommittee, error) {
|
||||||
|
return s.Deserialize()
|
||||||
|
}
|
||||||
|
|
||||||
|
// verifySignature implements committeeSigVerifier
|
||||||
|
func (blsVerifier) verifySignature(committee syncCommittee, signingRoot common.Hash, aggregate *types.SyncAggregate) bool {
|
||||||
|
return committee.(*types.SyncCommittee).VerifySignature(signingRoot, aggregate)
|
||||||
|
}
|
||||||
|
|
||||||
|
type dummySyncCommittee [32]byte
|
||||||
|
|
||||||
|
// dummyVerifier implements committeeSigVerifier
|
||||||
|
type dummyVerifier struct{}
|
||||||
|
|
||||||
|
// deserializeSyncCommittee implements committeeSigVerifier
|
||||||
|
func (dummyVerifier) deserializeSyncCommittee(s *types.SerializedSyncCommittee) (syncCommittee, error) {
|
||||||
|
var sc dummySyncCommittee
|
||||||
|
copy(sc[:], s[:32])
|
||||||
|
return sc, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// verifySignature implements committeeSigVerifier
|
||||||
|
func (dummyVerifier) verifySignature(committee syncCommittee, signingRoot common.Hash, aggregate *types.SyncAggregate) bool {
|
||||||
|
return aggregate.Signature == makeDummySignature(committee.(dummySyncCommittee), signingRoot, aggregate.Signers)
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeDummySignature(committee dummySyncCommittee, signingRoot common.Hash, bitmask [params.SyncCommitteeBitmaskSize]byte) (sig [params.BLSSignatureSize]byte) {
|
||||||
|
for i, b := range committee[:] {
|
||||||
|
sig[i] = b ^ signingRoot[i]
|
||||||
|
}
|
||||||
|
copy(sig[32:], bitmask[:])
|
||||||
|
return
|
||||||
|
}
|
@ -25,6 +25,24 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// BootstrapData contains a sync committee where light sync can be started,
|
||||||
|
// together with a proof through a beacon header and corresponding state.
|
||||||
|
// Note: BootstrapData is fetched from a server based on a known checkpoint hash.
|
||||||
|
type BootstrapData struct {
|
||||||
|
Header Header
|
||||||
|
CommitteeRoot common.Hash
|
||||||
|
Committee *SerializedSyncCommittee `rlp:"-"`
|
||||||
|
CommitteeBranch merkle.Values
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate verifies the proof included in BootstrapData.
|
||||||
|
func (c *BootstrapData) Validate() error {
|
||||||
|
if c.CommitteeRoot != c.Committee.Root() {
|
||||||
|
return errors.New("wrong committee root")
|
||||||
|
}
|
||||||
|
return merkle.VerifyProof(c.Header.StateRoot, params.StateIndexSyncCommittee, c.CommitteeBranch, merkle.Value(c.CommitteeRoot))
|
||||||
|
}
|
||||||
|
|
||||||
// LightClientUpdate is a proof of the next sync committee root based on a header
|
// LightClientUpdate is a proof of the next sync committee root based on a header
|
||||||
// signed by the sync committee of the given period. Optionally, the update can
|
// signed by the sync committee of the given period. Optionally, the update can
|
||||||
// prove quasi-finality by the signed header referring to a previous, finalized
|
// prove quasi-finality by the signed header referring to a previous, finalized
|
@ -5,22 +5,22 @@
|
|||||||
# https://github.com/ethereum/execution-spec-tests/releases/download/v1.0.6/
|
# https://github.com/ethereum/execution-spec-tests/releases/download/v1.0.6/
|
||||||
485af7b66cf41eb3a8c1bd46632913b8eb95995df867cf665617bbc9b4beedd1 fixtures_develop.tar.gz
|
485af7b66cf41eb3a8c1bd46632913b8eb95995df867cf665617bbc9b4beedd1 fixtures_develop.tar.gz
|
||||||
|
|
||||||
# version:golang 1.21.4
|
# version:golang 1.21.5
|
||||||
# https://go.dev/dl/
|
# https://go.dev/dl/
|
||||||
47b26a83d2b65a3c1c1bcace273b69bee49a7a7b5168a7604ded3d26a37bd787 go1.21.4.src.tar.gz
|
285cbbdf4b6e6e62ed58f370f3f6d8c30825d6e56c5853c66d3c23bcdb09db19 go1.21.5.src.tar.gz
|
||||||
cd3bdcc802b759b70e8418bc7afbc4a65ca73a3fe576060af9fc8a2a5e71c3b8 go1.21.4.darwin-amd64.tar.gz
|
a2e1d5743e896e5fe1e7d96479c0a769254aed18cf216cf8f4c3a2300a9b3923 go1.21.5.darwin-amd64.tar.gz
|
||||||
8b7caf2ac60bdff457dba7d4ff2a01def889592b834453431ae3caecf884f6a5 go1.21.4.darwin-arm64.tar.gz
|
d0f8ac0c4fb3efc223a833010901d02954e3923cfe2c9a2ff0e4254a777cc9cc go1.21.5.darwin-arm64.tar.gz
|
||||||
f1e685d086eb36f4be5b8b953b52baf7752bc6235400d84bb7d87e500b65f03e go1.21.4.freebsd-386.tar.gz
|
2c05bbe0dc62456b90b7ddd354a54f373b7c377a98f8b22f52ab694b4f6cca58 go1.21.5.freebsd-386.tar.gz
|
||||||
59f9b32187efb98d344a3818a631d3815ebb5c7bbefc367bab6515caaca544e9 go1.21.4.freebsd-amd64.tar.gz
|
30b6c64e9a77129605bc12f836422bf09eec577a8c899ee46130aeff81567003 go1.21.5.freebsd-amd64.tar.gz
|
||||||
64d3e5d295806e137c9e39d1e1f10b00a30fcd5c2f230d72b3298f579bb3c89a go1.21.4.linux-386.tar.gz
|
8f4dba9cf5c61757bbd7e9ebdb93b6a30a1b03f4a636a1ba0cc2f27b907ab8e1 go1.21.5.linux-386.tar.gz
|
||||||
73cac0215254d0c7d1241fa40837851f3b9a8a742d0b54714cbdfb3feaf8f0af go1.21.4.linux-amd64.tar.gz
|
e2bc0b3e4b64111ec117295c088bde5f00eeed1567999ff77bc859d7df70078e go1.21.5.linux-amd64.tar.gz
|
||||||
ce1983a7289856c3a918e1fd26d41e072cc39f928adfb11ba1896440849b95da go1.21.4.linux-arm64.tar.gz
|
841cced7ecda9b2014f139f5bab5ae31785f35399f236b8b3e75dff2a2978d96 go1.21.5.linux-arm64.tar.gz
|
||||||
6c62e89113750cc77c498194d13a03fadfda22bd2c7d44e8a826fd354db60252 go1.21.4.linux-armv6l.tar.gz
|
837f4bf4e22fcdf920ffeaa4abf3d02d1314e03725431065f4d44c46a01b42fe go1.21.5.linux-armv6l.tar.gz
|
||||||
2c63b36d2adcfb22013102a2ee730f058ec2f93b9f27479793c80b2e3641783f go1.21.4.linux-ppc64le.tar.gz
|
907b8c6ec4be9b184952e5d3493be66b1746442394a8bc78556c56834cd7c38b go1.21.5.linux-ppc64le.tar.gz
|
||||||
7a75ba4afc7a96058ca65903d994cd862381825d7dca12b2183f087c757c26c0 go1.21.4.linux-s390x.tar.gz
|
9c4a81b72ebe44368813cd03684e1080a818bf915d84163abae2ed325a1b2dc0 go1.21.5.linux-s390x.tar.gz
|
||||||
870a0e462b94671dc2d6cac707e9e19f7524fdc3c90711e6cd4450c3713a8ce0 go1.21.4.windows-386.zip
|
6da2418889dfb37763d0eb149c4a8d728c029e12f0cd54fbca0a31ae547e2d34 go1.21.5.windows-386.zip
|
||||||
79e5428e068c912d9cfa6cd115c13549856ec689c1332eac17f5d6122e19d595 go1.21.4.windows-amd64.zip
|
bbe603cde7c9dee658f45164b4d06de1eff6e6e6b800100824e7c00d56a9a92f go1.21.5.windows-amd64.zip
|
||||||
58bc7c6f4d4c72da2df4d2650c8222fe03c9978070eb3c66be8bbaa2a4757ac1 go1.21.4.windows-arm64.zip
|
9b7acca50e674294e43202df4fbc26d5af4d8bc3170a3342a1514f09a2dab5e9 go1.21.5.windows-arm64.zip
|
||||||
|
|
||||||
# version:golangci 1.51.1
|
# version:golangci 1.51.1
|
||||||
# https://github.com/golangci/golangci-lint/releases/
|
# https://github.com/golangci/golangci-lint/releases/
|
||||||
|
@ -232,7 +232,7 @@ func abigen(c *cli.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
|
log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
|
||||||
|
|
||||||
if err := app.Run(os.Args); err != nil {
|
if err := app.Run(os.Args); err != nil {
|
||||||
fmt.Fprintln(os.Stderr, err)
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
@ -8,6 +8,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestNameFilter(t *testing.T) {
|
func TestNameFilter(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
_, err := newNameFilter("Foo")
|
_, err := newNameFilter("Foo")
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
_, err = newNameFilter("too/many:colons:Foo")
|
_, err = newNameFilter("too/many:colons:Foo")
|
||||||
|
@ -44,7 +44,7 @@ func main() {
|
|||||||
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|pmp:<IP>|extip:<IP>)")
|
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|pmp:<IP>|extip:<IP>)")
|
||||||
netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
|
netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
|
||||||
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
|
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
|
||||||
verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-5)")
|
verbosity = flag.Int("verbosity", 3, "log verbosity (0-5)")
|
||||||
vmodule = flag.String("vmodule", "", "log verbosity pattern")
|
vmodule = flag.String("vmodule", "", "log verbosity pattern")
|
||||||
|
|
||||||
nodeKey *ecdsa.PrivateKey
|
nodeKey *ecdsa.PrivateKey
|
||||||
@ -52,10 +52,11 @@ func main() {
|
|||||||
)
|
)
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
glogger := log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false))
|
||||||
glogger.Verbosity(log.Lvl(*verbosity))
|
slogVerbosity := log.FromLegacyLevel(*verbosity)
|
||||||
|
glogger.Verbosity(slogVerbosity)
|
||||||
glogger.Vmodule(*vmodule)
|
glogger.Vmodule(*vmodule)
|
||||||
log.Root().SetHandler(glogger)
|
log.SetDefault(log.NewLogger(glogger))
|
||||||
|
|
||||||
natm, err := nat.Parse(*natdesc)
|
natm, err := nat.Parse(*natdesc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
Clef can be used to sign transactions and data and is meant as a(n eventual) replacement for Geth's account management. This allows DApps to not depend on Geth's account management. When a DApp wants to sign data (or a transaction), it can send the content to Clef, which will then provide the user with context and asks for permission to sign the content. If the users grants the signing request, Clef will send the signature back to the DApp.
|
Clef can be used to sign transactions and data and is meant as a(n eventual) replacement for Geth's account management. This allows DApps to not depend on Geth's account management. When a DApp wants to sign data (or a transaction), it can send the content to Clef, which will then provide the user with context and asks for permission to sign the content. If the users grants the signing request, Clef will send the signature back to the DApp.
|
||||||
|
|
||||||
This setup allows a DApp to connect to a remote Ethereum node and send transactions that are locally signed. This can help in situations when a DApp is connected to an untrusted remote Ethereum node, because a local one is not available, not synchronised with the chain, or is a node that has no built-in (or limited) account management.
|
This setup allows a DApp to connect to a remote Ethereum node and send transactions that are locally signed. This can help in situations when a DApp is connected to an untrusted remote Ethereum node, because a local one is not available, not synchronized with the chain, or is a node that has no built-in (or limited) account management.
|
||||||
|
|
||||||
Clef can run as a daemon on the same machine, off a usb-stick like [USB armory](https://inversepath.com/usbarmory), or even a separate VM in a [QubesOS](https://www.qubes-os.org/) type setup.
|
Clef can run as a daemon on the same machine, off a usb-stick like [USB armory](https://inversepath.com/usbarmory), or even a separate VM in a [QubesOS](https://www.qubes-os.org/) type setup.
|
||||||
|
|
||||||
|
@ -26,12 +26,13 @@ import (
|
|||||||
|
|
||||||
// TestImportRaw tests clef --importraw
|
// TestImportRaw tests clef --importraw
|
||||||
func TestImportRaw(t *testing.T) {
|
func TestImportRaw(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
|
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
|
||||||
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
|
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
|
||||||
t.Cleanup(func() { os.Remove(keyPath) })
|
t.Cleanup(func() { os.Remove(keyPath) })
|
||||||
|
|
||||||
t.Parallel()
|
|
||||||
t.Run("happy-path", func(t *testing.T) {
|
t.Run("happy-path", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// Run clef importraw
|
// Run clef importraw
|
||||||
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
|
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
|
||||||
clef.input("myverylongpassword").input("myverylongpassword")
|
clef.input("myverylongpassword").input("myverylongpassword")
|
||||||
@ -43,6 +44,7 @@ func TestImportRaw(t *testing.T) {
|
|||||||
})
|
})
|
||||||
// tests clef --importraw with mismatched passwords.
|
// tests clef --importraw with mismatched passwords.
|
||||||
t.Run("pw-mismatch", func(t *testing.T) {
|
t.Run("pw-mismatch", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// Run clef importraw
|
// Run clef importraw
|
||||||
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
|
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
|
||||||
clef.input("myverylongpassword1").input("myverylongpassword2").WaitExit()
|
clef.input("myverylongpassword1").input("myverylongpassword2").WaitExit()
|
||||||
@ -52,6 +54,7 @@ func TestImportRaw(t *testing.T) {
|
|||||||
})
|
})
|
||||||
// tests clef --importraw with a too short password.
|
// tests clef --importraw with a too short password.
|
||||||
t.Run("short-pw", func(t *testing.T) {
|
t.Run("short-pw", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// Run clef importraw
|
// Run clef importraw
|
||||||
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
|
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
|
||||||
clef.input("shorty").input("shorty").WaitExit()
|
clef.input("shorty").input("shorty").WaitExit()
|
||||||
@ -64,12 +67,13 @@ func TestImportRaw(t *testing.T) {
|
|||||||
|
|
||||||
// TestListAccounts tests clef --list-accounts
|
// TestListAccounts tests clef --list-accounts
|
||||||
func TestListAccounts(t *testing.T) {
|
func TestListAccounts(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
|
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
|
||||||
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
|
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
|
||||||
t.Cleanup(func() { os.Remove(keyPath) })
|
t.Cleanup(func() { os.Remove(keyPath) })
|
||||||
|
|
||||||
t.Parallel()
|
|
||||||
t.Run("no-accounts", func(t *testing.T) {
|
t.Run("no-accounts", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-accounts")
|
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-accounts")
|
||||||
if out := string(clef.Output()); !strings.Contains(out, "The keystore is empty.") {
|
if out := string(clef.Output()); !strings.Contains(out, "The keystore is empty.") {
|
||||||
t.Logf("Output\n%v", out)
|
t.Logf("Output\n%v", out)
|
||||||
@ -77,6 +81,7 @@ func TestListAccounts(t *testing.T) {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
t.Run("one-account", func(t *testing.T) {
|
t.Run("one-account", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// First, we need to import
|
// First, we need to import
|
||||||
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
|
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
|
||||||
clef.input("myverylongpassword").input("myverylongpassword").WaitExit()
|
clef.input("myverylongpassword").input("myverylongpassword").WaitExit()
|
||||||
@ -91,12 +96,13 @@ func TestListAccounts(t *testing.T) {
|
|||||||
|
|
||||||
// TestListWallets tests clef --list-wallets
|
// TestListWallets tests clef --list-wallets
|
||||||
func TestListWallets(t *testing.T) {
|
func TestListWallets(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
|
keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name()))
|
||||||
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
|
os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777)
|
||||||
t.Cleanup(func() { os.Remove(keyPath) })
|
t.Cleanup(func() { os.Remove(keyPath) })
|
||||||
|
|
||||||
t.Parallel()
|
|
||||||
t.Run("no-accounts", func(t *testing.T) {
|
t.Run("no-accounts", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-wallets")
|
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-wallets")
|
||||||
if out := string(clef.Output()); !strings.Contains(out, "There are no wallets.") {
|
if out := string(clef.Output()); !strings.Contains(out, "There are no wallets.") {
|
||||||
t.Logf("Output\n%v", out)
|
t.Logf("Output\n%v", out)
|
||||||
@ -104,6 +110,7 @@ func TestListWallets(t *testing.T) {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
t.Run("one-account", func(t *testing.T) {
|
t.Run("one-account", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// First, we need to import
|
// First, we need to import
|
||||||
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
|
clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath)
|
||||||
clef.input("myverylongpassword").input("myverylongpassword").WaitExit()
|
clef.input("myverylongpassword").input("myverylongpassword").WaitExit()
|
||||||
|
@ -492,7 +492,8 @@ func initialize(c *cli.Context) error {
|
|||||||
if usecolor {
|
if usecolor {
|
||||||
output = colorable.NewColorable(logOutput)
|
output = colorable.NewColorable(logOutput)
|
||||||
}
|
}
|
||||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(c.Int(logLevelFlag.Name)), log.StreamHandler(output, log.TerminalFormat(usecolor))))
|
verbosity := log.FromLegacyLevel(c.Int(logLevelFlag.Name))
|
||||||
|
log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(output, verbosity, usecolor)))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -581,6 +582,7 @@ func accountImport(c *cli.Context) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if first != second {
|
if first != second {
|
||||||
|
//lint:ignore ST1005 This is a message for the user
|
||||||
return errors.New("Passwords do not match")
|
return errors.New("Passwords do not match")
|
||||||
}
|
}
|
||||||
acc, err := internalApi.ImportRawKey(hex.EncodeToString(crypto.FromECDSA(pKey)), first)
|
acc, err := internalApi.ImportRawKey(hex.EncodeToString(crypto.FromECDSA(pKey)), first)
|
||||||
|
@ -91,7 +91,7 @@ class StdIOHandler:
|
|||||||
{"jsonrpc":"2.0","id":20,"method":"ui_approveTx","params":[{"transaction":{"from":"0xDEADbEeF000000000000000000000000DeaDbeEf","to":"0xDEADbEeF000000000000000000000000DeaDbeEf","gas":"0x3e8","gasPrice":"0x5","maxFeePerGas":null,"maxPriorityFeePerGas":null,"value":"0x6","nonce":"0x1","data":"0x"},"call_info":null,"meta":{"remote":"clef binary","local":"main","scheme":"in-proc","User-Agent":"","Origin":""}}]}
|
{"jsonrpc":"2.0","id":20,"method":"ui_approveTx","params":[{"transaction":{"from":"0xDEADbEeF000000000000000000000000DeaDbeEf","to":"0xDEADbEeF000000000000000000000000DeaDbeEf","gas":"0x3e8","gasPrice":"0x5","maxFeePerGas":null,"maxPriorityFeePerGas":null,"value":"0x6","nonce":"0x1","data":"0x"},"call_info":null,"meta":{"remote":"clef binary","local":"main","scheme":"in-proc","User-Agent":"","Origin":""}}]}
|
||||||
|
|
||||||
:param transaction: transaction info
|
:param transaction: transaction info
|
||||||
:param call_info: info abou the call, e.g. if ABI info could not be
|
:param call_info: info about the call, e.g. if ABI info could not be
|
||||||
:param meta: metadata about the request, e.g. where the call comes from
|
:param meta: metadata about the request, e.g. where the call comes from
|
||||||
:return:
|
:return:
|
||||||
""" # noqa: E501
|
""" # noqa: E501
|
||||||
|
@ -236,7 +236,7 @@ func discv4Crawl(ctx *cli.Context) error {
|
|||||||
func discv4Test(ctx *cli.Context) error {
|
func discv4Test(ctx *cli.Context) error {
|
||||||
// Configure test package globals.
|
// Configure test package globals.
|
||||||
if !ctx.IsSet(remoteEnodeFlag.Name) {
|
if !ctx.IsSet(remoteEnodeFlag.Name) {
|
||||||
return fmt.Errorf("Missing -%v", remoteEnodeFlag.Name)
|
return fmt.Errorf("missing -%v", remoteEnodeFlag.Name)
|
||||||
}
|
}
|
||||||
v4test.Remote = ctx.String(remoteEnodeFlag.Name)
|
v4test.Remote = ctx.String(remoteEnodeFlag.Name)
|
||||||
v4test.Listen1 = ctx.String(testListen1Flag.Name)
|
v4test.Listen1 = ctx.String(testListen1Flag.Name)
|
||||||
|
@ -26,6 +26,7 @@ import (
|
|||||||
// This test checks that computeChanges/splitChanges create DNS changes in
|
// This test checks that computeChanges/splitChanges create DNS changes in
|
||||||
// leaf-added -> root-changed -> leaf-deleted order.
|
// leaf-added -> root-changed -> leaf-deleted order.
|
||||||
func TestRoute53ChangeSort(t *testing.T) {
|
func TestRoute53ChangeSort(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testTree0 := map[string]recordSet{
|
testTree0 := map[string]recordSet{
|
||||||
"2kfjogvxdqtxxugbh7gs7naaai.n": {ttl: 3333, values: []string{
|
"2kfjogvxdqtxxugbh7gs7naaai.n": {ttl: 3333, values: []string{
|
||||||
`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`,
|
`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`,
|
||||||
@ -164,6 +165,7 @@ func TestRoute53ChangeSort(t *testing.T) {
|
|||||||
|
|
||||||
// This test checks that computeChanges compares the quoted value of the records correctly.
|
// This test checks that computeChanges compares the quoted value of the records correctly.
|
||||||
func TestRoute53NoChange(t *testing.T) {
|
func TestRoute53NoChange(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// Existing record set.
|
// Existing record set.
|
||||||
testTree0 := map[string]recordSet{
|
testTree0 := map[string]recordSet{
|
||||||
"n": {ttl: rootTTL, values: []string{
|
"n": {ttl: rootTTL, values: []string{
|
||||||
|
@ -30,6 +30,7 @@ import (
|
|||||||
// TestEthProtocolNegotiation tests whether the test suite
|
// TestEthProtocolNegotiation tests whether the test suite
|
||||||
// can negotiate the highest eth protocol in a status message exchange
|
// can negotiate the highest eth protocol in a status message exchange
|
||||||
func TestEthProtocolNegotiation(t *testing.T) {
|
func TestEthProtocolNegotiation(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var tests = []struct {
|
var tests = []struct {
|
||||||
conn *Conn
|
conn *Conn
|
||||||
caps []p2p.Cap
|
caps []p2p.Cap
|
||||||
@ -125,6 +126,7 @@ func TestEthProtocolNegotiation(t *testing.T) {
|
|||||||
// TestChain_GetHeaders tests whether the test suite can correctly
|
// TestChain_GetHeaders tests whether the test suite can correctly
|
||||||
// respond to a GetBlockHeaders request from a node.
|
// respond to a GetBlockHeaders request from a node.
|
||||||
func TestChain_GetHeaders(t *testing.T) {
|
func TestChain_GetHeaders(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
chainFile, err := filepath.Abs("./testdata/chain.rlp")
|
chainFile, err := filepath.Abs("./testdata/chain.rlp")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@ -461,7 +461,7 @@ func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
|
|||||||
common.HexToHash("0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790"),
|
common.HexToHash("0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}[7:] {
|
} {
|
||||||
tc := tc
|
tc := tc
|
||||||
if err := s.snapGetTrieNodes(t, &tc); err != nil {
|
if err := s.snapGetTrieNodes(t, &tc); err != nil {
|
||||||
t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err)
|
t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err)
|
||||||
@ -683,7 +683,7 @@ func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error {
|
|||||||
hash := make([]byte, 32)
|
hash := make([]byte, 32)
|
||||||
trienodes := res.Nodes
|
trienodes := res.Nodes
|
||||||
if got, want := len(trienodes), len(tc.expHashes); got != want {
|
if got, want := len(trienodes), len(tc.expHashes); got != want {
|
||||||
return fmt.Errorf("wrong trienode count, got %d, want %d\n", got, want)
|
return fmt.Errorf("wrong trienode count, got %d, want %d", got, want)
|
||||||
}
|
}
|
||||||
for i, trienode := range trienodes {
|
for i, trienode := range trienodes {
|
||||||
hasher.Reset()
|
hasher.Reset()
|
||||||
|
@ -35,6 +35,7 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestEthSuite(t *testing.T) {
|
func TestEthSuite(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
geth, err := runGeth()
|
geth, err := runGeth()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("could not run geth: %v", err)
|
t.Fatalf("could not run geth: %v", err)
|
||||||
@ -56,6 +57,7 @@ func TestEthSuite(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestSnapSuite(t *testing.T) {
|
func TestSnapSuite(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
geth, err := runGeth()
|
geth, err := runGeth()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("could not run geth: %v", err)
|
t.Fatalf("could not run geth: %v", err)
|
||||||
|
@ -54,7 +54,7 @@ func runTests(ctx *cli.Context, tests []utesting.Test) error {
|
|||||||
}
|
}
|
||||||
// Disable logging unless explicitly enabled.
|
// Disable logging unless explicitly enabled.
|
||||||
if !ctx.IsSet("verbosity") && !ctx.IsSet("vmodule") {
|
if !ctx.IsSet("verbosity") && !ctx.IsSet("vmodule") {
|
||||||
log.Root().SetHandler(log.DiscardHandler())
|
log.SetDefault(log.NewLogger(log.DiscardHandler()))
|
||||||
}
|
}
|
||||||
// Run the tests.
|
// Run the tests.
|
||||||
var run = utesting.RunTests
|
var run = utesting.RunTests
|
||||||
|
@ -22,6 +22,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestMessageSignVerify(t *testing.T) {
|
func TestMessageSignVerify(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tmpdir := t.TempDir()
|
tmpdir := t.TempDir()
|
||||||
|
|
||||||
keyfile := filepath.Join(tmpdir, "the-keyfile")
|
keyfile := filepath.Join(tmpdir, "the-keyfile")
|
||||||
|
@ -88,7 +88,7 @@ type Env struct {
|
|||||||
CurrentTimestamp uint64 `json:"currentTimestamp"`
|
CurrentTimestamp uint64 `json:"currentTimestamp"`
|
||||||
Withdrawals []*Withdrawal `json:"withdrawals"`
|
Withdrawals []*Withdrawal `json:"withdrawals"`
|
||||||
// optional
|
// optional
|
||||||
CurrentDifficulty *big.Int `json:"currentDifficuly"`
|
CurrentDifficulty *big.Int `json:"currentDifficulty"`
|
||||||
CurrentRandom *big.Int `json:"currentRandom"`
|
CurrentRandom *big.Int `json:"currentRandom"`
|
||||||
CurrentBaseFee *big.Int `json:"currentBaseFee"`
|
CurrentBaseFee *big.Int `json:"currentBaseFee"`
|
||||||
ParentDifficulty *big.Int `json:"parentDifficulty"`
|
ParentDifficulty *big.Int `json:"parentDifficulty"`
|
||||||
|
@ -24,6 +24,7 @@ import (
|
|||||||
"regexp"
|
"regexp"
|
||||||
"sort"
|
"sort"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/eth/tracers/logger"
|
"github.com/ethereum/go-ethereum/eth/tracers/logger"
|
||||||
@ -40,7 +41,7 @@ var RunFlag = &cli.StringFlag{
|
|||||||
var blockTestCommand = &cli.Command{
|
var blockTestCommand = &cli.Command{
|
||||||
Action: blockTestCmd,
|
Action: blockTestCmd,
|
||||||
Name: "blocktest",
|
Name: "blocktest",
|
||||||
Usage: "executes the given blockchain tests",
|
Usage: "Executes the given blockchain tests",
|
||||||
ArgsUsage: "<file>",
|
ArgsUsage: "<file>",
|
||||||
Flags: []cli.Flag{RunFlag},
|
Flags: []cli.Flag{RunFlag},
|
||||||
}
|
}
|
||||||
@ -85,7 +86,13 @@ func blockTestCmd(ctx *cli.Context) error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
test := tests[name]
|
test := tests[name]
|
||||||
if err := test.Run(false, rawdb.HashScheme, tracer); err != nil {
|
if err := test.Run(false, rawdb.HashScheme, tracer, func(res error, chain *core.BlockChain) {
|
||||||
|
if ctx.Bool(DumpFlag.Name) {
|
||||||
|
if state, _ := chain.State(); state != nil {
|
||||||
|
fmt.Println(string(state.Dump(nil)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}); err != nil {
|
||||||
return fmt.Errorf("test %v: %w", name, err)
|
return fmt.Errorf("test %v: %w", name, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -29,7 +29,7 @@ import (
|
|||||||
var compileCommand = &cli.Command{
|
var compileCommand = &cli.Command{
|
||||||
Action: compileCmd,
|
Action: compileCmd,
|
||||||
Name: "compile",
|
Name: "compile",
|
||||||
Usage: "compiles easm source to evm binary",
|
Usage: "Compiles easm source to evm binary",
|
||||||
ArgsUsage: "<file>",
|
ArgsUsage: "<file>",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -29,7 +29,7 @@ import (
|
|||||||
var disasmCommand = &cli.Command{
|
var disasmCommand = &cli.Command{
|
||||||
Action: disasmCmd,
|
Action: disasmCmd,
|
||||||
Name: "disasm",
|
Name: "disasm",
|
||||||
Usage: "disassembles evm binary",
|
Usage: "Disassembles evm binary",
|
||||||
ArgsUsage: "<file>",
|
ArgsUsage: "<file>",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -30,7 +30,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/consensus/clique"
|
"github.com/ethereum/go-ethereum/consensus/clique"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
)
|
)
|
||||||
@ -215,11 +214,6 @@ func (i *bbInput) sealClique(block *types.Block) (*types.Block, error) {
|
|||||||
|
|
||||||
// BuildBlock constructs a block from the given inputs.
|
// BuildBlock constructs a block from the given inputs.
|
||||||
func BuildBlock(ctx *cli.Context) error {
|
func BuildBlock(ctx *cli.Context) error {
|
||||||
// Configure the go-ethereum logger
|
|
||||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
|
||||||
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
|
|
||||||
log.Root().SetHandler(glogger)
|
|
||||||
|
|
||||||
baseDir, err := createBasedir(ctx)
|
baseDir, err := createBasedir(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
|
return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
|
||||||
|
@ -117,7 +117,7 @@ type rejectedTx struct {
|
|||||||
// Apply applies a set of transactions to a pre-state
|
// Apply applies a set of transactions to a pre-state
|
||||||
func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||||
txIt txIterator, miningReward int64,
|
txIt txIterator, miningReward int64,
|
||||||
getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error)) (*state.StateDB, *ExecutionResult, []byte, error) {
|
getTracerFn func(txIndex int, txHash common.Hash) (vm.EVMLogger, error)) (*state.StateDB, *ExecutionResult, []byte, error) {
|
||||||
// Capture errors for BLOCKHASH operation, if we haven't been supplied the
|
// Capture errors for BLOCKHASH operation, if we haven't been supplied the
|
||||||
// required blockhashes
|
// required blockhashes
|
||||||
var hashError error
|
var hashError error
|
||||||
|
@ -28,12 +28,15 @@ import (
|
|||||||
var (
|
var (
|
||||||
TraceFlag = &cli.BoolFlag{
|
TraceFlag = &cli.BoolFlag{
|
||||||
Name: "trace",
|
Name: "trace",
|
||||||
Usage: "Output full trace logs to files <txhash>.jsonl",
|
Usage: "Configures the use of the JSON opcode tracer. This tracer emits traces to files as trace-<txIndex>-<txHash>.jsonl",
|
||||||
}
|
}
|
||||||
TraceDisableMemoryFlag = &cli.BoolFlag{
|
TraceTracerFlag = &cli.StringFlag{
|
||||||
Name: "trace.nomemory",
|
Name: "trace.tracer",
|
||||||
Value: true,
|
Usage: "Configures the use of a custom tracer, e.g native or js tracers. Examples are callTracer and 4byteTracer. These tracers emit results into files as trace-<txIndex>-<txHash>.json",
|
||||||
Usage: "Disable full memory dump in traces (deprecated)",
|
}
|
||||||
|
TraceTracerConfigFlag = &cli.StringFlag{
|
||||||
|
Name: "trace.jsonconfig",
|
||||||
|
Usage: "The configurations for the custom tracer specified by --trace.tracer. If provided, must be in JSON format",
|
||||||
}
|
}
|
||||||
TraceEnableMemoryFlag = &cli.BoolFlag{
|
TraceEnableMemoryFlag = &cli.BoolFlag{
|
||||||
Name: "trace.memory",
|
Name: "trace.memory",
|
||||||
@ -43,11 +46,6 @@ var (
|
|||||||
Name: "trace.nostack",
|
Name: "trace.nostack",
|
||||||
Usage: "Disable stack output in traces",
|
Usage: "Disable stack output in traces",
|
||||||
}
|
}
|
||||||
TraceDisableReturnDataFlag = &cli.BoolFlag{
|
|
||||||
Name: "trace.noreturndata",
|
|
||||||
Value: true,
|
|
||||||
Usage: "Disable return data output in traces (deprecated)",
|
|
||||||
}
|
|
||||||
TraceEnableReturnDataFlag = &cli.BoolFlag{
|
TraceEnableReturnDataFlag = &cli.BoolFlag{
|
||||||
Name: "trace.returndata",
|
Name: "trace.returndata",
|
||||||
Usage: "Enable return data output in traces",
|
Usage: "Enable return data output in traces",
|
||||||
|
81
cmd/evm/internal/t8ntool/tracewriter.go
Normal file
81
cmd/evm/internal/t8ntool/tracewriter.go
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
// Copyright 2020 The go-ethereum Authors
|
||||||
|
// This file is part of go-ethereum.
|
||||||
|
//
|
||||||
|
// go-ethereum is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// go-ethereum is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package t8ntool
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
"github.com/ethereum/go-ethereum/eth/tracers"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// traceWriter is an vm.EVMLogger which also holds an inner logger/tracer.
|
||||||
|
// When the TxEnd event happens, the inner tracer result is written to the file, and
|
||||||
|
// the file is closed.
|
||||||
|
type traceWriter struct {
|
||||||
|
inner vm.EVMLogger
|
||||||
|
f io.WriteCloser
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compile-time interface check
|
||||||
|
var _ = vm.EVMLogger((*traceWriter)(nil))
|
||||||
|
|
||||||
|
func (t *traceWriter) CaptureTxEnd(restGas uint64) {
|
||||||
|
t.inner.CaptureTxEnd(restGas)
|
||||||
|
defer t.f.Close()
|
||||||
|
|
||||||
|
if tracer, ok := t.inner.(tracers.Tracer); ok {
|
||||||
|
result, err := tracer.GetResult()
|
||||||
|
if err != nil {
|
||||||
|
log.Warn("Error in tracer", "err", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = json.NewEncoder(t.f).Encode(result)
|
||||||
|
if err != nil {
|
||||||
|
log.Warn("Error writing tracer output", "err", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *traceWriter) CaptureTxStart(gasLimit uint64) { t.inner.CaptureTxStart(gasLimit) }
|
||||||
|
func (t *traceWriter) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
|
||||||
|
t.inner.CaptureStart(env, from, to, create, input, gas, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *traceWriter) CaptureEnd(output []byte, gasUsed uint64, err error) {
|
||||||
|
t.inner.CaptureEnd(output, gasUsed, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *traceWriter) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
|
||||||
|
t.inner.CaptureEnter(typ, from, to, input, gas, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *traceWriter) CaptureExit(output []byte, gasUsed uint64, err error) {
|
||||||
|
t.inner.CaptureExit(output, gasUsed, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *traceWriter) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
|
||||||
|
t.inner.CaptureState(pc, op, gas, cost, scope, rData, depth, err)
|
||||||
|
}
|
||||||
|
func (t *traceWriter) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
|
||||||
|
t.inner.CaptureFault(pc, op, gas, cost, scope, depth, err)
|
||||||
|
}
|
@ -28,7 +28,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/tests"
|
"github.com/ethereum/go-ethereum/tests"
|
||||||
@ -65,11 +64,6 @@ func (r *result) MarshalJSON() ([]byte, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func Transaction(ctx *cli.Context) error {
|
func Transaction(ctx *cli.Context) error {
|
||||||
// Configure the go-ethereum logger
|
|
||||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
|
||||||
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
|
|
||||||
log.Root().SetHandler(glogger)
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
|
@ -31,6 +31,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
"github.com/ethereum/go-ethereum/eth/tracers"
|
||||||
"github.com/ethereum/go-ethereum/eth/tracers/logger"
|
"github.com/ethereum/go-ethereum/eth/tracers/logger"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
@ -80,62 +81,43 @@ type input struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func Transition(ctx *cli.Context) error {
|
func Transition(ctx *cli.Context) error {
|
||||||
// Configure the go-ethereum logger
|
var getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) { return nil, nil }
|
||||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
|
|
||||||
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
|
|
||||||
log.Root().SetHandler(glogger)
|
|
||||||
|
|
||||||
var (
|
|
||||||
err error
|
|
||||||
tracer vm.EVMLogger
|
|
||||||
)
|
|
||||||
var getTracer func(txIndex int, txHash common.Hash) (vm.EVMLogger, error)
|
|
||||||
|
|
||||||
baseDir, err := createBasedir(ctx)
|
baseDir, err := createBasedir(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
|
return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
|
||||||
}
|
}
|
||||||
if ctx.Bool(TraceFlag.Name) {
|
|
||||||
if ctx.IsSet(TraceDisableMemoryFlag.Name) && ctx.IsSet(TraceEnableMemoryFlag.Name) {
|
if ctx.Bool(TraceFlag.Name) { // JSON opcode tracing
|
||||||
return NewError(ErrorConfig, fmt.Errorf("can't use both flags --%s and --%s", TraceDisableMemoryFlag.Name, TraceEnableMemoryFlag.Name))
|
|
||||||
}
|
|
||||||
if ctx.IsSet(TraceDisableReturnDataFlag.Name) && ctx.IsSet(TraceEnableReturnDataFlag.Name) {
|
|
||||||
return NewError(ErrorConfig, fmt.Errorf("can't use both flags --%s and --%s", TraceDisableReturnDataFlag.Name, TraceEnableReturnDataFlag.Name))
|
|
||||||
}
|
|
||||||
if ctx.IsSet(TraceDisableMemoryFlag.Name) {
|
|
||||||
log.Warn(fmt.Sprintf("--%s has been deprecated in favour of --%s", TraceDisableMemoryFlag.Name, TraceEnableMemoryFlag.Name))
|
|
||||||
}
|
|
||||||
if ctx.IsSet(TraceDisableReturnDataFlag.Name) {
|
|
||||||
log.Warn(fmt.Sprintf("--%s has been deprecated in favour of --%s", TraceDisableReturnDataFlag.Name, TraceEnableReturnDataFlag.Name))
|
|
||||||
}
|
|
||||||
// Configure the EVM logger
|
// Configure the EVM logger
|
||||||
logConfig := &logger.Config{
|
logConfig := &logger.Config{
|
||||||
DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
|
DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
|
||||||
EnableMemory: !ctx.Bool(TraceDisableMemoryFlag.Name) || ctx.Bool(TraceEnableMemoryFlag.Name),
|
EnableMemory: ctx.Bool(TraceEnableMemoryFlag.Name),
|
||||||
EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name) || ctx.Bool(TraceEnableReturnDataFlag.Name),
|
EnableReturnData: ctx.Bool(TraceEnableReturnDataFlag.Name),
|
||||||
Debug: true,
|
Debug: true,
|
||||||
}
|
}
|
||||||
var prevFile *os.File
|
|
||||||
// This one closes the last file
|
|
||||||
defer func() {
|
|
||||||
if prevFile != nil {
|
|
||||||
prevFile.Close()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) {
|
getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) {
|
||||||
if prevFile != nil {
|
|
||||||
prevFile.Close()
|
|
||||||
}
|
|
||||||
traceFile, err := os.Create(path.Join(baseDir, fmt.Sprintf("trace-%d-%v.jsonl", txIndex, txHash.String())))
|
traceFile, err := os.Create(path.Join(baseDir, fmt.Sprintf("trace-%d-%v.jsonl", txIndex, txHash.String())))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
|
return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
|
||||||
}
|
}
|
||||||
prevFile = traceFile
|
return &traceWriter{logger.NewJSONLogger(logConfig, traceFile), traceFile}, nil
|
||||||
return logger.NewJSONLogger(logConfig, traceFile), nil
|
|
||||||
}
|
}
|
||||||
} else {
|
} else if ctx.IsSet(TraceTracerFlag.Name) {
|
||||||
getTracer = func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error) {
|
var config json.RawMessage
|
||||||
return nil, nil
|
if ctx.IsSet(TraceTracerConfigFlag.Name) {
|
||||||
|
config = []byte(ctx.String(TraceTracerConfigFlag.Name))
|
||||||
|
}
|
||||||
|
getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) {
|
||||||
|
traceFile, err := os.Create(path.Join(baseDir, fmt.Sprintf("trace-%d-%v.json", txIndex, txHash.String())))
|
||||||
|
if err != nil {
|
||||||
|
return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
|
||||||
|
}
|
||||||
|
tracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name), nil, config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, NewError(ErrorConfig, fmt.Errorf("failed instantiating tracer: %w", err))
|
||||||
|
}
|
||||||
|
return &traceWriter{tracer, traceFile}, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// We need to load three things: alloc, env and transactions. May be either in
|
// We need to load three things: alloc, env and transactions. May be either in
|
||||||
@ -174,9 +156,7 @@ func Transition(ctx *cli.Context) error {
|
|||||||
}
|
}
|
||||||
prestate.Env = *inputData.Env
|
prestate.Env = *inputData.Env
|
||||||
|
|
||||||
vmConfig := vm.Config{
|
vmConfig := vm.Config{}
|
||||||
Tracer: tracer,
|
|
||||||
}
|
|
||||||
// Construct the chainconfig
|
// Construct the chainconfig
|
||||||
var chainConfig *params.ChainConfig
|
var chainConfig *params.ChainConfig
|
||||||
if cConf, extraEips, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil {
|
if cConf, extraEips, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil {
|
||||||
|
@ -26,6 +26,10 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/internal/debug"
|
"github.com/ethereum/go-ethereum/internal/debug"
|
||||||
"github.com/ethereum/go-ethereum/internal/flags"
|
"github.com/ethereum/go-ethereum/internal/flags"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
|
|
||||||
|
// Force-load the tracer engines to trigger registration
|
||||||
|
_ "github.com/ethereum/go-ethereum/eth/tracers/js"
|
||||||
|
_ "github.com/ethereum/go-ethereum/eth/tracers/native"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -139,14 +143,14 @@ var (
|
|||||||
var stateTransitionCommand = &cli.Command{
|
var stateTransitionCommand = &cli.Command{
|
||||||
Name: "transition",
|
Name: "transition",
|
||||||
Aliases: []string{"t8n"},
|
Aliases: []string{"t8n"},
|
||||||
Usage: "executes a full state transition",
|
Usage: "Executes a full state transition",
|
||||||
Action: t8ntool.Transition,
|
Action: t8ntool.Transition,
|
||||||
Flags: []cli.Flag{
|
Flags: []cli.Flag{
|
||||||
t8ntool.TraceFlag,
|
t8ntool.TraceFlag,
|
||||||
t8ntool.TraceDisableMemoryFlag,
|
t8ntool.TraceTracerFlag,
|
||||||
|
t8ntool.TraceTracerConfigFlag,
|
||||||
t8ntool.TraceEnableMemoryFlag,
|
t8ntool.TraceEnableMemoryFlag,
|
||||||
t8ntool.TraceDisableStackFlag,
|
t8ntool.TraceDisableStackFlag,
|
||||||
t8ntool.TraceDisableReturnDataFlag,
|
|
||||||
t8ntool.TraceEnableReturnDataFlag,
|
t8ntool.TraceEnableReturnDataFlag,
|
||||||
t8ntool.OutputBasedir,
|
t8ntool.OutputBasedir,
|
||||||
t8ntool.OutputAllocFlag,
|
t8ntool.OutputAllocFlag,
|
||||||
@ -158,27 +162,25 @@ var stateTransitionCommand = &cli.Command{
|
|||||||
t8ntool.ForknameFlag,
|
t8ntool.ForknameFlag,
|
||||||
t8ntool.ChainIDFlag,
|
t8ntool.ChainIDFlag,
|
||||||
t8ntool.RewardFlag,
|
t8ntool.RewardFlag,
|
||||||
t8ntool.VerbosityFlag,
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
var transactionCommand = &cli.Command{
|
var transactionCommand = &cli.Command{
|
||||||
Name: "transaction",
|
Name: "transaction",
|
||||||
Aliases: []string{"t9n"},
|
Aliases: []string{"t9n"},
|
||||||
Usage: "performs transaction validation",
|
Usage: "Performs transaction validation",
|
||||||
Action: t8ntool.Transaction,
|
Action: t8ntool.Transaction,
|
||||||
Flags: []cli.Flag{
|
Flags: []cli.Flag{
|
||||||
t8ntool.InputTxsFlag,
|
t8ntool.InputTxsFlag,
|
||||||
t8ntool.ChainIDFlag,
|
t8ntool.ChainIDFlag,
|
||||||
t8ntool.ForknameFlag,
|
t8ntool.ForknameFlag,
|
||||||
t8ntool.VerbosityFlag,
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
var blockBuilderCommand = &cli.Command{
|
var blockBuilderCommand = &cli.Command{
|
||||||
Name: "block-builder",
|
Name: "block-builder",
|
||||||
Aliases: []string{"b11r"},
|
Aliases: []string{"b11r"},
|
||||||
Usage: "builds a block",
|
Usage: "Builds a block",
|
||||||
Action: t8ntool.BuildBlock,
|
Action: t8ntool.BuildBlock,
|
||||||
Flags: []cli.Flag{
|
Flags: []cli.Flag{
|
||||||
t8ntool.OutputBasedir,
|
t8ntool.OutputBasedir,
|
||||||
@ -188,7 +190,6 @@ var blockBuilderCommand = &cli.Command{
|
|||||||
t8ntool.InputWithdrawalsFlag,
|
t8ntool.InputWithdrawalsFlag,
|
||||||
t8ntool.InputTxsRlpFlag,
|
t8ntool.InputTxsRlpFlag,
|
||||||
t8ntool.SealCliqueFlag,
|
t8ntool.SealCliqueFlag,
|
||||||
t8ntool.VerbosityFlag,
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -46,7 +46,7 @@ import (
|
|||||||
var runCommand = &cli.Command{
|
var runCommand = &cli.Command{
|
||||||
Action: runCmd,
|
Action: runCmd,
|
||||||
Name: "run",
|
Name: "run",
|
||||||
Usage: "run arbitrary evm binary",
|
Usage: "Run arbitrary evm binary",
|
||||||
ArgsUsage: "<code>",
|
ArgsUsage: "<code>",
|
||||||
Description: `The run command runs arbitrary EVM code.`,
|
Description: `The run command runs arbitrary EVM code.`,
|
||||||
Flags: flags.Merge(vmFlags, traceFlags),
|
Flags: flags.Merge(vmFlags, traceFlags),
|
||||||
@ -144,7 +144,7 @@ func runCmd(ctx *cli.Context) error {
|
|||||||
initialGas = genesisConfig.GasLimit
|
initialGas = genesisConfig.GasLimit
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
genesisConfig.Config = params.AllEthashProtocolChanges
|
genesisConfig.Config = params.AllDevChainProtocolChanges
|
||||||
}
|
}
|
||||||
|
|
||||||
db := rawdb.NewMemoryDatabase()
|
db := rawdb.NewMemoryDatabase()
|
||||||
|
@ -100,18 +100,19 @@ func runStateTest(fname string, cfg vm.Config, jsonOut, dump bool) error {
|
|||||||
for _, st := range test.Subtests() {
|
for _, st := range test.Subtests() {
|
||||||
// Run the test and aggregate the result
|
// Run the test and aggregate the result
|
||||||
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
|
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
|
||||||
test.Run(st, cfg, false, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) {
|
test.Run(st, cfg, false, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, statedb *state.StateDB) {
|
||||||
if state != nil {
|
var root common.Hash
|
||||||
root := state.IntermediateRoot(false)
|
if statedb != nil {
|
||||||
|
root = statedb.IntermediateRoot(false)
|
||||||
result.Root = &root
|
result.Root = &root
|
||||||
if jsonOut {
|
if jsonOut {
|
||||||
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root)
|
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root)
|
||||||
}
|
}
|
||||||
}
|
if dump { // Dump any state to aid debugging
|
||||||
// Dump any state to aid debugging
|
cpy, _ := state.New(root, statedb.Database(), nil)
|
||||||
if dump {
|
dump := cpy.RawDump(nil)
|
||||||
dump := state.RawDump(nil)
|
result.State = &dump
|
||||||
result.State = &dump
|
}
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Test failed, mark as so
|
// Test failed, mark as so
|
||||||
|
@ -106,6 +106,7 @@ func (args *t8nOutput) get() (out []string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestT8n(t *testing.T) {
|
func TestT8n(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tt := new(testT8n)
|
tt := new(testT8n)
|
||||||
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
|
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
|
||||||
for i, tc := range []struct {
|
for i, tc := range []struct {
|
||||||
@ -338,6 +339,7 @@ func (args *t9nInput) get(base string) []string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestT9n(t *testing.T) {
|
func TestT9n(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tt := new(testT8n)
|
tt := new(testT8n)
|
||||||
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
|
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
|
||||||
for i, tc := range []struct {
|
for i, tc := range []struct {
|
||||||
@ -473,6 +475,7 @@ func (args *b11rInput) get(base string) []string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestB11r(t *testing.T) {
|
func TestB11r(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tt := new(testT8n)
|
tt := new(testT8n)
|
||||||
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
|
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
|
||||||
for i, tc := range []struct {
|
for i, tc := range []struct {
|
||||||
|
@ -1,52 +0,0 @@
|
|||||||
# Faucet
|
|
||||||
|
|
||||||
The `faucet` is a simplistic web application with the goal of distributing small amounts of Ether in private and test networks.
|
|
||||||
|
|
||||||
Users need to post their Ethereum addresses to fund in a Twitter status update or public Facebook post and share the link to the faucet. The faucet will in turn deduplicate user requests and send the Ether. After a funding round, the faucet prevents the same user from requesting again for a pre-configured amount of time, proportional to the amount of Ether requested.
|
|
||||||
|
|
||||||
## Operation
|
|
||||||
|
|
||||||
The `faucet` is a single binary app (everything included) with all configurations set via command line flags and a few files.
|
|
||||||
|
|
||||||
First things first, the `faucet` needs to connect to an Ethereum network, for which it needs the necessary genesis and network infos. Each of the following flags must be set:
|
|
||||||
|
|
||||||
- `-genesis` is a path to a file containing the network `genesis.json`. or using:
|
|
||||||
- `-goerli` with the faucet with Görli network config
|
|
||||||
- `-sepolia` with the faucet with Sepolia network config
|
|
||||||
- `-network` is the devp2p network id used during connection
|
|
||||||
- `-bootnodes` is a list of `enode://` ids to join the network through
|
|
||||||
|
|
||||||
The `faucet` will use the `les` protocol to join the configured Ethereum network and will store its data in `$HOME/.faucet` (currently not configurable).
|
|
||||||
|
|
||||||
## Funding
|
|
||||||
|
|
||||||
To be able to distribute funds, the `faucet` needs access to an already funded Ethereum account. This can be configured via:
|
|
||||||
|
|
||||||
- `-account.json` is a path to the Ethereum account's JSON key file
|
|
||||||
- `-account.pass` is a path to a text file with the decryption passphrase
|
|
||||||
|
|
||||||
The faucet is able to distribute various amounts of Ether in exchange for various timeouts. These can be configured via:
|
|
||||||
|
|
||||||
- `-faucet.amount` is the number of Ethers to send by default
|
|
||||||
- `-faucet.minutes` is the time to wait before allowing a rerequest
|
|
||||||
- `-faucet.tiers` is the funding tiers to support (x3 time, x2.5 funds)
|
|
||||||
|
|
||||||
## Sybil protection
|
|
||||||
|
|
||||||
To prevent the same user from exhausting funds in a loop, the `faucet` ties requests to social networks and captcha resolvers.
|
|
||||||
|
|
||||||
Captcha protection uses Google's invisible ReCaptcha, thus the `faucet` needs to run on a live domain. The domain needs to be registered in Google's systems to retrieve the captcha API token and secrets. After doing so, captcha protection may be enabled via:
|
|
||||||
|
|
||||||
- `-captcha.token` is the API token for ReCaptcha
|
|
||||||
- `-captcha.secret` is the API secret for ReCaptcha
|
|
||||||
|
|
||||||
Sybil protection via Twitter requires an API key as of 15th December, 2020. To obtain it, a Twitter user must be upgraded to developer status and a new Twitter App deployed with it. The app's `Bearer` token is required by the faucet to retrieve tweet data:
|
|
||||||
|
|
||||||
- `-twitter.token` is the Bearer token for `v2` API access
|
|
||||||
- `-twitter.token.v1` is the Bearer token for `v1` API access
|
|
||||||
|
|
||||||
Sybil protection via Facebook uses the website to directly download post data thus does not currently require an API configuration.
|
|
||||||
|
|
||||||
## Miscellaneous
|
|
||||||
|
|
||||||
Beside the above - mostly essential - CLI flags, there are a number that can be used to fine-tune the `faucet`'s operation. Please see `faucet --help` for a full list.
|
|
@ -1,891 +0,0 @@
|
|||||||
// Copyright 2017 The go-ethereum Authors
|
|
||||||
// This file is part of go-ethereum.
|
|
||||||
//
|
|
||||||
// go-ethereum is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// go-ethereum is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU General Public License
|
|
||||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
// faucet is an Ether faucet backed by a light client.
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
_ "embed"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"html/template"
|
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
"math/big"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/accounts"
|
|
||||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
|
||||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
"github.com/ethereum/go-ethereum/core"
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
|
||||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
|
||||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
|
||||||
"github.com/ethereum/go-ethereum/ethclient"
|
|
||||||
"github.com/ethereum/go-ethereum/ethstats"
|
|
||||||
"github.com/ethereum/go-ethereum/internal/version"
|
|
||||||
"github.com/ethereum/go-ethereum/les"
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
|
||||||
"github.com/ethereum/go-ethereum/node"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
|
||||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
|
||||||
"github.com/gorilla/websocket"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Command-line flags controlling the faucet's chain connection, payout
// policy, funding account and Sybil-protection integrations.
var (
	// Chain and networking configuration.
	genesisFlag = flag.String("genesis", "", "Genesis json file to seed the chain with")
	apiPortFlag = flag.Int("apiport", 8080, "Listener port for the HTTP API connection")
	ethPortFlag = flag.Int("ethport", 30303, "Listener port for the devp2p connection")
	bootFlag    = flag.String("bootnodes", "", "Comma separated bootnode enode URLs to seed with")
	netFlag     = flag.Uint64("network", 0, "Network ID to use for the Ethereum protocol")
	statsFlag   = flag.String("ethstats", "", "Ethstats network monitoring auth string")

	// Faucet payout behaviour (amounts, cooldowns and tier count).
	netnameFlag = flag.String("faucet.name", "", "Network name to assign to the faucet")
	payoutFlag  = flag.Int("faucet.amount", 1, "Number of Ethers to pay out per user request")
	minutesFlag = flag.Int("faucet.minutes", 1440, "Number of minutes to wait between funding rounds")
	tiersFlag   = flag.Int("faucet.tiers", 3, "Number of funding tiers to enable (x3 time, x2.5 funds)")

	// Credentials of the account the faucet pays out from.
	accJSONFlag = flag.String("account.json", "", "Key json file to fund user requests with")
	accPassFlag = flag.String("account.pass", "", "Decryption password to access faucet funds")

	// ReCaptcha keys; captcha checking is enabled only when the site key is set.
	captchaToken  = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side")
	captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side")

	noauthFlag = flag.Bool("noauth", false, "Enables funding requests without authentication")
	logFlag    = flag.Int("loglevel", 3, "Log level to use for Ethereum and the faucet")

	// Twitter API bearer tokens for Sybil protection (v1.1 takes precedence).
	twitterTokenFlag   = flag.String("twitter.token", "", "Bearer token to authenticate with the v2 Twitter API")
	twitterTokenV1Flag = flag.String("twitter.token.v1", "", "Bearer token to authenticate with the v1.1 Twitter API")

	// Preset public-testnet configurations.
	goerliFlag  = flag.Bool("goerli", false, "Initializes the faucet with Görli network config")
	sepoliaFlag = flag.Bool("sepolia", false, "Initializes the faucet with Sepolia network config")
)
|
|
||||||
|
|
||||||
// ether is the number of Wei in one Ether (10^18), used to scale the
// human-readable payout amounts into on-chain transaction values.
var (
	ether = new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
)

// websiteTmpl is the faucet's single-page web UI, embedded at build time from
// faucet.html and rendered once at startup with the configured payout tiers.
//go:embed faucet.html
var websiteTmpl string
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
// Parse the flags and set up the logger to print everything requested
|
|
||||||
flag.Parse()
|
|
||||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*logFlag), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
|
|
||||||
|
|
||||||
// Construct the payout tiers
|
|
||||||
amounts := make([]string, *tiersFlag)
|
|
||||||
periods := make([]string, *tiersFlag)
|
|
||||||
for i := 0; i < *tiersFlag; i++ {
|
|
||||||
// Calculate the amount for the next tier and format it
|
|
||||||
amount := float64(*payoutFlag) * math.Pow(2.5, float64(i))
|
|
||||||
amounts[i] = fmt.Sprintf("%s Ethers", strconv.FormatFloat(amount, 'f', -1, 64))
|
|
||||||
if amount == 1 {
|
|
||||||
amounts[i] = strings.TrimSuffix(amounts[i], "s")
|
|
||||||
}
|
|
||||||
// Calculate the period for the next tier and format it
|
|
||||||
period := *minutesFlag * int(math.Pow(3, float64(i)))
|
|
||||||
periods[i] = fmt.Sprintf("%d mins", period)
|
|
||||||
if period%60 == 0 {
|
|
||||||
period /= 60
|
|
||||||
periods[i] = fmt.Sprintf("%d hours", period)
|
|
||||||
|
|
||||||
if period%24 == 0 {
|
|
||||||
period /= 24
|
|
||||||
periods[i] = fmt.Sprintf("%d days", period)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if period == 1 {
|
|
||||||
periods[i] = strings.TrimSuffix(periods[i], "s")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
website := new(bytes.Buffer)
|
|
||||||
err := template.Must(template.New("").Parse(websiteTmpl)).Execute(website, map[string]interface{}{
|
|
||||||
"Network": *netnameFlag,
|
|
||||||
"Amounts": amounts,
|
|
||||||
"Periods": periods,
|
|
||||||
"Recaptcha": *captchaToken,
|
|
||||||
"NoAuth": *noauthFlag,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to render the faucet template", "err", err)
|
|
||||||
}
|
|
||||||
// Load and parse the genesis block requested by the user
|
|
||||||
genesis, err := getGenesis(*genesisFlag, *goerliFlag, *sepoliaFlag)
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to parse genesis config", "err", err)
|
|
||||||
}
|
|
||||||
// Convert the bootnodes to internal enode representations
|
|
||||||
var enodes []*enode.Node
|
|
||||||
for _, boot := range strings.Split(*bootFlag, ",") {
|
|
||||||
if url, err := enode.Parse(enode.ValidSchemes, boot); err == nil {
|
|
||||||
enodes = append(enodes, url)
|
|
||||||
} else {
|
|
||||||
log.Error("Failed to parse bootnode URL", "url", boot, "err", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Load up the account key and decrypt its password
|
|
||||||
blob, err := os.ReadFile(*accPassFlag)
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to read account password contents", "file", *accPassFlag, "err", err)
|
|
||||||
}
|
|
||||||
pass := strings.TrimSuffix(string(blob), "\n")
|
|
||||||
|
|
||||||
ks := keystore.NewKeyStore(filepath.Join(os.Getenv("HOME"), ".faucet", "keys"), keystore.StandardScryptN, keystore.StandardScryptP)
|
|
||||||
if blob, err = os.ReadFile(*accJSONFlag); err != nil {
|
|
||||||
log.Crit("Failed to read account key contents", "file", *accJSONFlag, "err", err)
|
|
||||||
}
|
|
||||||
acc, err := ks.Import(blob, pass, pass)
|
|
||||||
if err != nil && err != keystore.ErrAccountAlreadyExists {
|
|
||||||
log.Crit("Failed to import faucet signer account", "err", err)
|
|
||||||
}
|
|
||||||
if err := ks.Unlock(acc, pass); err != nil {
|
|
||||||
log.Crit("Failed to unlock faucet signer account", "err", err)
|
|
||||||
}
|
|
||||||
// Assemble and start the faucet light service
|
|
||||||
faucet, err := newFaucet(genesis, *ethPortFlag, enodes, *netFlag, *statsFlag, ks, website.Bytes())
|
|
||||||
if err != nil {
|
|
||||||
log.Crit("Failed to start faucet", "err", err)
|
|
||||||
}
|
|
||||||
defer faucet.close()
|
|
||||||
|
|
||||||
if err := faucet.listenAndServe(*apiPortFlag); err != nil {
|
|
||||||
log.Crit("Failed to launch faucet API", "err", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// request represents an accepted funding request. Instances are queued on the
// faucet and streamed to all connected web clients for display.
type request struct {
	Avatar  string             `json:"avatar"`  // Avatar URL to make the UI nicer
	Account common.Address     `json:"account"` // Ethereum address being funded
	Time    time.Time          `json:"time"`    // Timestamp when the request was accepted
	Tx      *types.Transaction `json:"tx"`      // Transaction funding the account
}
|
|
||||||
|
|
||||||
// faucet represents a crypto faucet backed by an Ethereum light client.
// All mutable fields below the client handles are guarded by lock.
type faucet struct {
	config *params.ChainConfig // Chain configurations for signing
	stack  *node.Node          // Ethereum protocol stack
	client *ethclient.Client   // Client connection to the Ethereum chain
	index  []byte              // Index page to serve up on the web

	keystore *keystore.KeyStore // Keystore containing the single signer
	account  accounts.Account   // Account funding user faucet requests
	head     *types.Header      // Current head header of the faucet
	balance  *big.Int           // Current balance of the faucet
	nonce    uint64             // Current pending nonce of the faucet
	price    *big.Int           // Current gas price to issue funds with

	conns    []*wsConn            // Currently live websocket connections
	timeouts map[string]time.Time // History of users and their funding timeouts
	reqs     []*request           // Currently pending funding requests
	update   chan struct{}        // Channel to signal request updates

	lock sync.RWMutex // Lock protecting the faucet's internals
}
|
|
||||||
|
|
||||||
// wsConn wraps a websocket connection with a write mutex as the underlying
// websocket library does not synchronize access to the stream. All writes
// must go through send(), which takes wlock.
type wsConn struct {
	conn  *websocket.Conn // Underlying websocket stream
	wlock sync.Mutex      // Serializes concurrent writers on the stream
}
|
|
||||||
|
|
||||||
// newFaucet assembles a devp2p node running an embedded light client (plus
// optional ethstats reporting), starts it, force-adds the bootnodes as static
// peers and returns the whole thing wrapped as a faucet service.
func newFaucet(genesis *core.Genesis, port int, enodes []*enode.Node, network uint64, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) {
	// Assemble the raw devp2p protocol stack
	git, _ := version.VCS()
	stack, err := node.New(&node.Config{
		Name:    "geth",
		Version: params.VersionWithCommit(git.Commit, git.Date),
		DataDir: filepath.Join(os.Getenv("HOME"), ".faucet"),
		P2P: p2p.Config{
			NAT:              nat.Any(),
			NoDiscovery:      true, // discovery v4 off; v5 below is enough for LES
			DiscoveryV5:      true,
			ListenAddr:       fmt.Sprintf(":%d", port),
			MaxPeers:         25,
			BootstrapNodesV5: enodes,
		},
	})
	if err != nil {
		return nil, err
	}

	// Assemble the Ethereum light client protocol
	cfg := ethconfig.Defaults
	cfg.SyncMode = downloader.LightSync
	cfg.NetworkId = network
	cfg.Genesis = genesis
	utils.SetDNSDiscoveryDefaults(&cfg, genesis.ToBlock().Hash())

	lesBackend, err := les.New(stack, &cfg)
	if err != nil {
		return nil, fmt.Errorf("Failed to register the Ethereum service: %w", err)
	}

	// Assemble the ethstats monitoring and reporting service
	if stats != "" {
		if err := ethstats.New(stack, lesBackend.ApiBackend, lesBackend.Engine(), stats); err != nil {
			return nil, err
		}
	}
	// Boot up the client and ensure it connects to bootnodes
	if err := stack.Start(); err != nil {
		return nil, err
	}
	// Re-parse each bootnode and add it as a static peer so connections are
	// attempted immediately instead of waiting for discovery.
	for _, boot := range enodes {
		old, err := enode.Parse(enode.ValidSchemes, boot.String())
		if err == nil {
			stack.Server().AddPeer(old)
		}
	}
	// Attach to the in-process node and wrap it in an ethclient for RPC use
	api := stack.Attach()
	client := ethclient.NewClient(api)

	return &faucet{
		config:   genesis.Config,
		stack:    stack,
		client:   client,
		index:    index,
		keystore: ks,
		account:  ks.Accounts()[0], // assumes the key was imported beforehand (main does this) — panics on an empty keystore
		timeouts: make(map[string]time.Time),
		update:   make(chan struct{}, 1),
	}, nil
}
|
|
||||||
|
|
||||||
// close terminates the Ethereum connection and tears down the faucet by
// shutting down the embedded node stack.
func (f *faucet) close() error {
	return f.stack.Close()
}
|
|
||||||
|
|
||||||
// listenAndServe registers the HTTP handlers for the faucet and boots it up
|
|
||||||
// for service user funding requests.
|
|
||||||
func (f *faucet) listenAndServe(port int) error {
|
|
||||||
go f.loop()
|
|
||||||
|
|
||||||
http.HandleFunc("/", f.webHandler)
|
|
||||||
http.HandleFunc("/api", f.apiHandler)
|
|
||||||
return http.ListenAndServe(fmt.Sprintf(":%d", port), nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// webHandler handles all non-api requests, simply flattening and returning the
|
|
||||||
// faucet website.
|
|
||||||
func (f *faucet) webHandler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.Write(f.index)
|
|
||||||
}
|
|
||||||
|
|
||||||
// apiHandler handles requests for Ether grants and transaction statuses.
// It upgrades the HTTP request to a websocket, pushes the faucet's current
// stats, then loops reading funding requests: each request is authenticated
// (captcha + social network), rate-limited per user, and if accepted a signed
// funding transaction is broadcast and streamed back to all clients.
func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
	upgrader := websocket.Upgrader{}
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return
	}

	// Start tracking the connection and drop at the end
	defer conn.Close()

	f.lock.Lock()
	wsconn := &wsConn{conn: conn}
	f.conns = append(f.conns, wsconn)
	f.lock.Unlock()

	// On exit, unregister this connection from the broadcast list.
	defer func() {
		f.lock.Lock()
		for i, c := range f.conns {
			if c.conn == conn {
				f.conns = append(f.conns[:i], f.conns[i+1:]...)
				break
			}
		}
		f.lock.Unlock()
	}()
	// Gather the initial stats from the network to report; spin (with a
	// 3s back-off) until the background refresher has populated them.
	var (
		head    *types.Header
		balance *big.Int
		nonce   uint64
	)
	for head == nil || balance == nil {
		// Retrieve the current stats cached by the faucet
		f.lock.RLock()
		if f.head != nil {
			head = types.CopyHeader(f.head)
		}
		if f.balance != nil {
			balance = new(big.Int).Set(f.balance)
		}
		nonce = f.nonce
		f.lock.RUnlock()

		if head == nil || balance == nil {
			// Report the faucet offline until initial stats are ready
			//lint:ignore ST1005 This error is to be displayed in the browser
			if err = sendError(wsconn, errors.New("Faucet offline")); err != nil {
				log.Warn("Failed to send faucet error to client", "err", err)
				return
			}
			time.Sleep(3 * time.Second)
		}
	}
	// Send over the initial stats and the latest header
	f.lock.RLock()
	reqs := f.reqs
	f.lock.RUnlock()
	if err = send(wsconn, map[string]interface{}{
		"funds":    new(big.Int).Div(balance, ether),
		"funded":   nonce,
		"peers":    f.stack.Server().PeerCount(),
		"requests": reqs,
	}, 3*time.Second); err != nil {
		log.Warn("Failed to send initial stats to client", "err", err)
		return
	}
	if err = send(wsconn, head, 3*time.Second); err != nil {
		log.Warn("Failed to send initial header to client", "err", err)
		return
	}
	// Keep reading requests from the websocket until the connection breaks
	for {
		// Fetch the next funding request and validate it against the
		// supported social networks.
		var msg struct {
			URL     string `json:"url"`
			Tier    uint   `json:"tier"`
			Captcha string `json:"captcha"`
		}
		if err = conn.ReadJSON(&msg); err != nil {
			return
		}
		if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://twitter.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") {
			if err = sendError(wsconn, errors.New("URL doesn't link to supported services")); err != nil {
				log.Warn("Failed to send URL error to client", "err", err)
				return
			}
			continue
		}
		if msg.Tier >= uint(*tiersFlag) {
			//lint:ignore ST1005 This error is to be displayed in the browser
			if err = sendError(wsconn, errors.New("Invalid funding tier requested")); err != nil {
				log.Warn("Failed to send tier error to client", "err", err)
				return
			}
			continue
		}
		log.Info("Faucet funds requested", "url", msg.URL, "tier", msg.Tier)

		// If captcha verifications are enabled, make sure we're not dealing with a robot
		if *captchaToken != "" {
			form := url.Values{}
			form.Add("secret", *captchaSecret)
			form.Add("response", msg.Captcha)

			res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form)
			if err != nil {
				if err = sendError(wsconn, err); err != nil {
					log.Warn("Failed to send captcha post error to client", "err", err)
					return
				}
				continue
			}
			var result struct {
				Success bool            `json:"success"`
				Errors  json.RawMessage `json:"error-codes"`
			}
			err = json.NewDecoder(res.Body).Decode(&result)
			res.Body.Close()
			if err != nil {
				if err = sendError(wsconn, err); err != nil {
					log.Warn("Failed to send captcha decode error to client", "err", err)
					return
				}
				continue
			}
			if !result.Success {
				log.Warn("Captcha verification failed", "err", string(result.Errors))
				//lint:ignore ST1005 it's funny and the robot won't mind
				if err = sendError(wsconn, errors.New("Beep-bop, you're a robot!")); err != nil {
					log.Warn("Failed to send captcha failure to client", "err", err)
					return
				}
				continue
			}
		}
		// Retrieve the Ethereum address to fund, the requesting user and a profile picture
		var (
			id       string
			username string
			avatar   string
			address  common.Address
		)
		switch {
		case strings.HasPrefix(msg.URL, "https://twitter.com/"):
			id, username, avatar, address, err = authTwitter(msg.URL, *twitterTokenV1Flag, *twitterTokenFlag)
		case strings.HasPrefix(msg.URL, "https://www.facebook.com/"):
			username, avatar, address, err = authFacebook(msg.URL)
			id = username
		case *noauthFlag:
			username, avatar, address, err = authNoAuth(msg.URL)
			id = username
		default:
			//lint:ignore ST1005 This error is to be displayed in the browser
			err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues")
		}
		if err != nil {
			if err = sendError(wsconn, err); err != nil {
				log.Warn("Failed to send prefix error to client", "err", err)
				return
			}
			continue
		}
		log.Info("Faucet request valid", "url", msg.URL, "tier", msg.Tier, "user", username, "address", address)

		// Ensure the user didn't request funds too recently. The write lock
		// is held through signing and submission so the nonce bookkeeping
		// (f.nonce + len(f.reqs)) stays consistent across concurrent clients.
		f.lock.Lock()
		var (
			fund    bool
			timeout time.Time
		)
		if timeout = f.timeouts[id]; time.Now().After(timeout) {
			// User wasn't funded recently, create the funding transaction:
			// payout * 2.5^tier, computed in integers as *5^tier / 2^tier.
			amount := new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether)
			amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil))
			amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil))

			tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil)
			signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainID)
			if err != nil {
				f.lock.Unlock()
				if err = sendError(wsconn, err); err != nil {
					log.Warn("Failed to send transaction creation error to client", "err", err)
					return
				}
				continue
			}
			// Submit the transaction and mark as funded if successful
			if err := f.client.SendTransaction(context.Background(), signed); err != nil {
				f.lock.Unlock()
				if err = sendError(wsconn, err); err != nil {
					log.Warn("Failed to send transaction transmission error to client", "err", err)
					return
				}
				continue
			}
			f.reqs = append(f.reqs, &request{
				Avatar:  avatar,
				Account: address,
				Time:    time.Now(),
				Tx:      signed,
			})
			// Next allowance = tier cooldown minus a small grace window so a
			// user returning "exactly one day later" isn't rejected.
			timeout := time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute
			grace := timeout / 288 // 24h timeout => 5m grace

			f.timeouts[id] = time.Now().Add(timeout - grace)
			fund = true
		}
		f.lock.Unlock()

		// Send an error if too frequent funding, otherwise a success
		if !fund {
			if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple
				log.Warn("Failed to send funding error to client", "err", err)
				return
			}
			continue
		}
		if err = sendSuccess(wsconn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil {
			log.Warn("Failed to send funding success to client", "err", err)
			return
		}
		// Non-blocking nudge of the broadcast loop; a pending signal suffices.
		select {
		case f.update <- struct{}{}:
		default:
		}
	}
}
|
|
||||||
|
|
||||||
// refresh attempts to retrieve the latest header from the chain and extract the
// associated faucet balance, nonce and gas price for connectivity caching.
// It also drops queued requests whose transactions have been mined (nonce
// below the chain's account nonce). A nil head means "use the current head".
func (f *faucet) refresh(head *types.Header) error {
	// Ensure a state update does not run for too long
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// If no header was specified, use the current chain head
	var err error
	if head == nil {
		if head, err = f.client.HeaderByNumber(ctx, nil); err != nil {
			return err
		}
	}
	// Retrieve the balance, nonce and gas price from the current head
	var (
		balance *big.Int
		nonce   uint64
		price   *big.Int
	)
	if balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number); err != nil {
		return err
	}
	if nonce, err = f.client.NonceAt(ctx, f.account.Address, head.Number); err != nil {
		return err
	}
	if price, err = f.client.SuggestGasPrice(ctx); err != nil {
		return err
	}
	// Everything succeeded, update the cached stats and eject old requests
	f.lock.Lock()
	f.head, f.balance = head, balance
	f.price, f.nonce = price, nonce
	for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce {
		f.reqs = f.reqs[1:]
	}
	f.lock.Unlock()

	return nil
}
|
|
||||||
|
|
||||||
// loop keeps waiting for interesting events (new chain heads and pending
// request updates) and pushes them out to all connected websockets. It runs
// for the lifetime of the faucet and terminates the process if the head
// subscription cannot be established.
func (f *faucet) loop() {
	// Wait for chain events and push them to clients
	heads := make(chan *types.Header, 16)
	sub, err := f.client.SubscribeNewHead(context.Background(), heads)
	if err != nil {
		log.Crit("Failed to subscribe to head events", "err", err)
	}
	defer sub.Unsubscribe()

	// Start a goroutine to update the state from head notifications in the background
	update := make(chan *types.Header)

	go func() {
		for head := range update {
			// New chain head arrived, query the current stats and stream to clients
			timestamp := time.Unix(int64(head.Time), 0)
			if time.Since(timestamp) > time.Hour {
				// Still syncing: refreshing state against a stale head is pointless.
				log.Warn("Skipping faucet refresh, head too old", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp))
				continue
			}
			if err := f.refresh(head); err != nil {
				log.Warn("Failed to update faucet state", "block", head.Number, "hash", head.Hash(), "err", err)
				continue
			}
			// Faucet state retrieved, update locally and send to clients
			f.lock.RLock()
			log.Info("Updated faucet state", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp), "balance", f.balance, "nonce", f.nonce, "price", f.price)

			balance := new(big.Int).Div(f.balance, ether)
			peers := f.stack.Server().PeerCount()

			for _, conn := range f.conns {
				if err := send(conn, map[string]interface{}{
					"funds":    balance,
					"funded":   f.nonce,
					"peers":    peers,
					"requests": f.reqs,
				}, time.Second); err != nil {
					log.Warn("Failed to send stats to client", "err", err)
					conn.conn.Close()
					continue
				}
				if err := send(conn, head, time.Second); err != nil {
					log.Warn("Failed to send header to client", "err", err)
					conn.conn.Close()
				}
			}
			f.lock.RUnlock()
		}
	}()
	// Wait for various events and assign them to the appropriate background threads
	for {
		select {
		case head := <-heads:
			// New head arrived, send it for state update if there's none running
			// (non-blocking send: drop the head if a refresh is in progress).
			select {
			case update <- head:
			default:
			}

		case <-f.update:
			// Pending requests updated, stream to clients
			f.lock.RLock()
			for _, conn := range f.conns {
				if err := send(conn, map[string]interface{}{"requests": f.reqs}, time.Second); err != nil {
					log.Warn("Failed to send requests to client", "err", err)
					conn.conn.Close()
				}
			}
			f.lock.RUnlock()
		}
	}
}
|
|
||||||
|
|
||||||
// send transmits a data packet to the remote end of the websocket, but also
// sets a write deadline to prevent waiting forever on the node.
func send(conn *wsConn, value interface{}, timeout time.Duration) error {
	// A zero timeout means "no urgency": fall back to a generous default.
	if timeout == 0 {
		timeout = 60 * time.Second
	}
	// Serialize writers; the websocket library does not synchronize writes.
	conn.wlock.Lock()
	defer conn.wlock.Unlock()
	conn.conn.SetWriteDeadline(time.Now().Add(timeout))
	return conn.conn.WriteJSON(value)
}
|
|
||||||
|
|
||||||
// sendError transmits an error to the remote end of the websocket, also setting
|
|
||||||
// the write deadline to 1 second to prevent waiting forever.
|
|
||||||
func sendError(conn *wsConn, err error) error {
|
|
||||||
return send(conn, map[string]string{"error": err.Error()}, time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
// sendSuccess transmits a success message to the remote end of the websocket, also
|
|
||||||
// setting the write deadline to 1 second to prevent waiting forever.
|
|
||||||
func sendSuccess(conn *wsConn, msg string) error {
|
|
||||||
return send(conn, map[string]string{"success": msg}, time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
// authTwitter tries to authenticate a faucet request using Twitter posts, returning
// the uniqueness identifier (user id/username), username, avatar URL and Ethereum
// address to fund on success. With a bearer token it goes through the Twitter API
// (v1.1 preferred over v2); without one it falls back to scraping the mobile site.
func authTwitter(url string, tokenV1, tokenV2 string) (string, string, string, common.Address, error) {
	// Ensure the user specified a meaningful URL, no fancy nonsense
	parts := strings.Split(url, "/")
	if len(parts) < 4 || parts[len(parts)-2] != "status" {
		//lint:ignore ST1005 This error is to be displayed in the browser
		return "", "", "", common.Address{}, errors.New("Invalid Twitter status URL")
	}
	// Strip any query parameters from the tweet id and ensure it's numeric
	// NOTE(review): these regexps are compiled on every call; hoisting them to
	// package scope would avoid the repeated compilation.
	tweetID := strings.Split(parts[len(parts)-1], "?")[0]
	if !regexp.MustCompile("^[0-9]+$").MatchString(tweetID) {
		return "", "", "", common.Address{}, errors.New("Invalid Tweet URL")
	}
	// Twitter's API isn't really friendly with direct links.
	// It is restricted to 300 queries / 15 minute with an app api key.
	// Anything more will require read only authorization from the users and that we want to avoid.

	// If Twitter bearer token is provided, use the API, selecting the version
	// the user would prefer (currently there's a limit of 1 v2 app / developer
	// but unlimited v1.1 apps).
	switch {
	case tokenV1 != "":
		return authTwitterWithTokenV1(tweetID, tokenV1)
	case tokenV2 != "":
		return authTwitterWithTokenV2(tweetID, tokenV2)
	}
	// Twitter API token isn't provided so we just load the public posts
	// and scrape it for the Ethereum address and profile URL. We need to load
	// the mobile page though since the main page loads tweet contents via JS.
	url = strings.Replace(url, "https://twitter.com/", "https://mobile.twitter.com/", 1)

	// NOTE(review): http.Get uses the default client with no timeout; a slow
	// remote can stall this handler — consider a client with a timeout.
	res, err := http.Get(url)
	if err != nil {
		return "", "", "", common.Address{}, err
	}
	defer res.Body.Close()

	// Resolve the username from the final redirect, no intermediate junk
	parts = strings.Split(res.Request.URL.String(), "/")
	if len(parts) < 4 || parts[len(parts)-2] != "status" {
		//lint:ignore ST1005 This error is to be displayed in the browser
		return "", "", "", common.Address{}, errors.New("Invalid Twitter status URL")
	}
	username := parts[len(parts)-3]

	body, err := io.ReadAll(res.Body)
	if err != nil {
		return "", "", "", common.Address{}, err
	}
	// The first 0x-prefixed 40-hex-char token found anywhere in the page is
	// taken as the address to fund.
	address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
	if address == (common.Address{}) {
		//lint:ignore ST1005 This error is to be displayed in the browser
		return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund")
	}
	// Best effort avatar scrape; an empty avatar is acceptable.
	var avatar string
	if parts = regexp.MustCompile(`src="([^"]+twimg\.com/profile_images[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 {
		avatar = parts[1]
	}
	return username + "@twitter", username, avatar, address, nil
}
|
|
||||||
|
|
||||||
// authTwitterWithTokenV1 tries to authenticate a faucet request using Twitter's v1
|
|
||||||
// API, returning the user id, username, avatar URL and Ethereum address to fund on
|
|
||||||
// success.
|
|
||||||
func authTwitterWithTokenV1(tweetID string, token string) (string, string, string, common.Address, error) {
|
|
||||||
// Query the tweet details from Twitter
|
|
||||||
url := fmt.Sprintf("https://api.twitter.com/1.1/statuses/show.json?id=%s", tweetID)
|
|
||||||
req, err := http.NewRequest(http.MethodGet, url, nil)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", "", common.Address{}, err
|
|
||||||
}
|
|
||||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
|
|
||||||
res, err := http.DefaultClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", "", common.Address{}, err
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
|
|
||||||
var result struct {
|
|
||||||
Text string `json:"text"`
|
|
||||||
User struct {
|
|
||||||
ID string `json:"id_str"`
|
|
||||||
Username string `json:"screen_name"`
|
|
||||||
Avatar string `json:"profile_image_url"`
|
|
||||||
} `json:"user"`
|
|
||||||
}
|
|
||||||
err = json.NewDecoder(res.Body).Decode(&result)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", "", common.Address{}, err
|
|
||||||
}
|
|
||||||
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(result.Text))
|
|
||||||
if address == (common.Address{}) {
|
|
||||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
|
||||||
return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
|
||||||
}
|
|
||||||
return result.User.ID + "@twitter", result.User.Username, result.User.Avatar, address, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// authTwitterWithTokenV2 tries to authenticate a faucet request using Twitter's v2
|
|
||||||
// API, returning the user id, username, avatar URL and Ethereum address to fund on
|
|
||||||
// success.
|
|
||||||
func authTwitterWithTokenV2(tweetID string, token string) (string, string, string, common.Address, error) {
|
|
||||||
// Query the tweet details from Twitter
|
|
||||||
url := fmt.Sprintf("https://api.twitter.com/2/tweets/%s?expansions=author_id&user.fields=profile_image_url", tweetID)
|
|
||||||
req, err := http.NewRequest(http.MethodGet, url, nil)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", "", common.Address{}, err
|
|
||||||
}
|
|
||||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
|
|
||||||
res, err := http.DefaultClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", "", common.Address{}, err
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
|
|
||||||
var result struct {
|
|
||||||
Data struct {
|
|
||||||
AuthorID string `json:"author_id"`
|
|
||||||
Text string `json:"text"`
|
|
||||||
} `json:"data"`
|
|
||||||
Includes struct {
|
|
||||||
Users []struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
Username string `json:"username"`
|
|
||||||
Avatar string `json:"profile_image_url"`
|
|
||||||
} `json:"users"`
|
|
||||||
} `json:"includes"`
|
|
||||||
}
|
|
||||||
|
|
||||||
err = json.NewDecoder(res.Body).Decode(&result)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", "", common.Address{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(result.Data.Text))
|
|
||||||
if address == (common.Address{}) {
|
|
||||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
|
||||||
return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
|
||||||
}
|
|
||||||
return result.Data.AuthorID + "@twitter", result.Includes.Users[0].Username, result.Includes.Users[0].Avatar, address, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// authFacebook tries to authenticate a faucet request using Facebook posts,
|
|
||||||
// returning the username, avatar URL and Ethereum address to fund on success.
|
|
||||||
func authFacebook(url string) (string, string, common.Address, error) {
|
|
||||||
// Ensure the user specified a meaningful URL, no fancy nonsense
|
|
||||||
parts := strings.Split(strings.Split(url, "?")[0], "/")
|
|
||||||
if parts[len(parts)-1] == "" {
|
|
||||||
parts = parts[0 : len(parts)-1]
|
|
||||||
}
|
|
||||||
if len(parts) < 4 || parts[len(parts)-2] != "posts" {
|
|
||||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
|
||||||
return "", "", common.Address{}, errors.New("Invalid Facebook post URL")
|
|
||||||
}
|
|
||||||
username := parts[len(parts)-3]
|
|
||||||
|
|
||||||
// Facebook's Graph API isn't really friendly with direct links. Still, we don't
|
|
||||||
// want to do ask read permissions from users, so just load the public posts and
|
|
||||||
// scrape it for the Ethereum address and profile URL.
|
|
||||||
//
|
|
||||||
// Facebook recently changed their desktop webpage to use AJAX for loading post
|
|
||||||
// content, so switch over to the mobile site for now. Will probably end up having
|
|
||||||
// to use the API eventually.
|
|
||||||
crawl := strings.Replace(url, "www.facebook.com", "m.facebook.com", 1)
|
|
||||||
|
|
||||||
res, err := http.Get(crawl)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", common.Address{}, err
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
|
|
||||||
body, err := io.ReadAll(res.Body)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", common.Address{}, err
|
|
||||||
}
|
|
||||||
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
|
|
||||||
if address == (common.Address{}) {
|
|
||||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
|
||||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund. Please check the post URL and verify that it can be viewed publicly.")
|
|
||||||
}
|
|
||||||
var avatar string
|
|
||||||
if parts = regexp.MustCompile(`src="([^"]+fbcdn\.net[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 {
|
|
||||||
avatar = parts[1]
|
|
||||||
}
|
|
||||||
return username + "@facebook", avatar, address, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// authNoAuth tries to interpret a faucet request as a plain Ethereum address,
|
|
||||||
// without actually performing any remote authentication. This mode is prone to
|
|
||||||
// Byzantine attack, so only ever use for truly private networks.
|
|
||||||
func authNoAuth(url string) (string, string, common.Address, error) {
|
|
||||||
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(url))
|
|
||||||
if address == (common.Address{}) {
|
|
||||||
//lint:ignore ST1005 This error is to be displayed in the browser
|
|
||||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
|
||||||
}
|
|
||||||
return address.Hex() + "@noauth", "", address, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getGenesis returns a genesis based on input args
|
|
||||||
func getGenesis(genesisFlag string, goerliFlag bool, sepoliaFlag bool) (*core.Genesis, error) {
|
|
||||||
switch {
|
|
||||||
case genesisFlag != "":
|
|
||||||
var genesis core.Genesis
|
|
||||||
err := common.LoadJSON(genesisFlag, &genesis)
|
|
||||||
return &genesis, err
|
|
||||||
case goerliFlag:
|
|
||||||
return core.DefaultGoerliGenesisBlock(), nil
|
|
||||||
case sepoliaFlag:
|
|
||||||
return core.DefaultSepoliaGenesisBlock(), nil
|
|
||||||
default:
|
|
||||||
return nil, errors.New("no genesis flag provided")
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,233 +0,0 @@
|
|||||||
<!DOCTYPE html>
|
|
||||||
<html lang="en">
|
|
||||||
<head>
|
|
||||||
<meta charset="utf-8">
|
|
||||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
|
||||||
|
|
||||||
<title>{{.Network}}: Authenticated Faucet</title>
|
|
||||||
|
|
||||||
<link href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet" />
|
|
||||||
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css" rel="stylesheet" />
|
|
||||||
|
|
||||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
|
|
||||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery-noty/2.4.1/packaged/jquery.noty.packaged.min.js"></script>
|
|
||||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/js/bootstrap.min.js"></script>
|
|
||||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.18.0/moment.min.js"></script>
|
|
||||||
|
|
||||||
<style>
|
|
||||||
.vertical-center {
|
|
||||||
min-height: 100%;
|
|
||||||
min-height: 100vh;
|
|
||||||
display: flex;
|
|
||||||
align-items: center;
|
|
||||||
}
|
|
||||||
.progress {
|
|
||||||
position: relative;
|
|
||||||
}
|
|
||||||
.progress span {
|
|
||||||
position: absolute;
|
|
||||||
display: block;
|
|
||||||
width: 100%;
|
|
||||||
color: white;
|
|
||||||
}
|
|
||||||
pre {
|
|
||||||
padding: 6px;
|
|
||||||
margin: 0;
|
|
||||||
}
|
|
||||||
</style>
|
|
||||||
</head>
|
|
||||||
|
|
||||||
<body>
|
|
||||||
<div class="vertical-center">
|
|
||||||
<div class="container">
|
|
||||||
<div class="row" style="margin-bottom: 16px;">
|
|
||||||
<div class="col-lg-12">
|
|
||||||
<h1 style="text-align: center;"><i class="fa fa-bath" aria-hidden="true"></i> {{.Network}} Authenticated Faucet</h1>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div class="row">
|
|
||||||
<div class="col-lg-8 col-lg-offset-2">
|
|
||||||
<div class="input-group">
|
|
||||||
<input id="url" name="url" type="text" class="form-control" placeholder="Social network URL containing your Ethereum address..."/>
|
|
||||||
<span class="input-group-btn">
|
|
||||||
<button class="btn btn-default dropdown-toggle" type="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Give me Ether <i class="fa fa-caret-down" aria-hidden="true"></i></button>
|
|
||||||
<ul class="dropdown-menu dropdown-menu-right">{{range $idx, $amount := .Amounts}}
|
|
||||||
<li><a style="text-align: center;" onclick="tier={{$idx}}; {{if $.Recaptcha}}grecaptcha.execute(){{else}}submit({{$idx}}){{end}}">{{$amount}} / {{index $.Periods $idx}}</a></li>{{end}}
|
|
||||||
</ul>
|
|
||||||
</span>
|
|
||||||
</div>{{if .Recaptcha}}
|
|
||||||
<div class="g-recaptcha" data-sitekey="{{.Recaptcha}}" data-callback="submit" data-size="invisible"></div>{{end}}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div class="row" style="margin-top: 32px;">
|
|
||||||
<div class="col-lg-6 col-lg-offset-3">
|
|
||||||
<div class="panel panel-small panel-default">
|
|
||||||
<div class="panel-body" style="padding: 0; overflow: auto; max-height: 300px;">
|
|
||||||
<table id="requests" class="table table-condensed" style="margin: 0;"></table>
|
|
||||||
</div>
|
|
||||||
<div class="panel-footer">
|
|
||||||
<table style="width: 100%"><tr>
|
|
||||||
<td style="text-align: center;"><i class="fa fa-rss" aria-hidden="true"></i> <span id="peers"></span> peers</td>
|
|
||||||
<td style="text-align: center;"><i class="fa fa-database" aria-hidden="true"></i> <span id="block"></span> blocks</td>
|
|
||||||
<td style="text-align: center;"><i class="fa fa-heartbeat" aria-hidden="true"></i> <span id="funds"></span> Ethers</td>
|
|
||||||
<td style="text-align: center;"><i class="fa fa-university" aria-hidden="true"></i> <span id="funded"></span> funded</td>
|
|
||||||
</tr></table>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div class="row" style="margin-top: 32px;">
|
|
||||||
<div class="col-lg-12">
|
|
||||||
<h3>How does this work?</h3>
|
|
||||||
<p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to common 3rd party social network accounts. Anyone having a Twitter or Facebook account may request funds within the permitted limits.</p>
|
|
||||||
<dl class="dl-horizontal">
|
|
||||||
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-twitter" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
|
||||||
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Twitter, make a <a href="https://twitter.com/intent/tweet?text=Requesting%20faucet%20funds%20into%200x0000000000000000000000000000000000000000%20on%20the%20%23{{.Network}}%20%23Ethereum%20test%20network." target="_about:blank">tweet</a> with your Ethereum address pasted into the contents (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://support.twitter.com/articles/80586" target="_about:blank">tweets URL</a> into the above input box and fire away!</dd>
|
|
||||||
|
|
||||||
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-facebook" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
|
||||||
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Facebook, publish a new <strong>public</strong> post with your Ethereum address embedded into the content (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://www.facebook.com/help/community/question/?id=282662498552845" target="_about:blank">posts URL</a> into the above input box and fire away!</dd>
|
|
||||||
|
|
||||||
{{if .NoAuth}}
|
|
||||||
<dt class="text-danger" style="width: auto; margin-left: 40px;"><i class="fa fa-unlock-alt" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
|
||||||
<dd class="text-danger" style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds <strong>without authentication</strong>, simply copy-paste your Ethereum address into the above input box (surrounding text doesn't matter) and fire away.<br/>This mode is susceptible to Byzantine attacks. Only use for debugging or private networks!</dd>
|
|
||||||
{{end}}
|
|
||||||
</dl>
|
|
||||||
<p>You can track the current pending requests below the input field to see how much you have to wait until your turn comes.</p>
|
|
||||||
{{if .Recaptcha}}<em>The faucet is running invisible reCaptcha protection against bots.</em>{{end}}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<script>
|
|
||||||
// Global variables to hold the current status of the faucet
|
|
||||||
var attempt = 0;
|
|
||||||
var server;
|
|
||||||
var tier = 0;
|
|
||||||
var requests = [];
|
|
||||||
|
|
||||||
// Define a function that creates closures to drop old requests
|
|
||||||
var dropper = function(hash) {
|
|
||||||
return function() {
|
|
||||||
for (var i=0; i<requests.length; i++) {
|
|
||||||
if (requests[i].tx.hash == hash) {
|
|
||||||
requests.splice(i, 1);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
// Define the function that submits a gist url to the server
|
|
||||||
var submit = function({{if .Recaptcha}}captcha{{end}}) {
|
|
||||||
server.send(JSON.stringify({url: $("#url")[0].value, tier: tier{{if .Recaptcha}}, captcha: captcha{{end}}}));{{if .Recaptcha}}
|
|
||||||
grecaptcha.reset();{{end}}
|
|
||||||
};
|
|
||||||
// Define a method to reconnect upon server loss
|
|
||||||
var reconnect = function() {
|
|
||||||
server = new WebSocket(((window.location.protocol === "https:") ? "wss://" : "ws://") + window.location.host + "/api");
|
|
||||||
|
|
||||||
server.onmessage = function(event) {
|
|
||||||
var msg = JSON.parse(event.data);
|
|
||||||
if (msg === null) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (msg.funds !== undefined) {
|
|
||||||
$("#funds").text(msg.funds);
|
|
||||||
}
|
|
||||||
if (msg.funded !== undefined) {
|
|
||||||
$("#funded").text(msg.funded);
|
|
||||||
}
|
|
||||||
if (msg.peers !== undefined) {
|
|
||||||
$("#peers").text(msg.peers);
|
|
||||||
}
|
|
||||||
if (msg.number !== undefined) {
|
|
||||||
$("#block").text(parseInt(msg.number, 16));
|
|
||||||
}
|
|
||||||
if (msg.error !== undefined) {
|
|
||||||
noty({layout: 'topCenter', text: msg.error, type: 'error', timeout: 5000, progressBar: true});
|
|
||||||
}
|
|
||||||
if (msg.success !== undefined) {
|
|
||||||
noty({layout: 'topCenter', text: msg.success, type: 'success', timeout: 5000, progressBar: true});
|
|
||||||
}
|
|
||||||
if (msg.requests !== undefined && msg.requests !== null) {
|
|
||||||
// Mark all previous requests missing as done
|
|
||||||
for (var i=0; i<requests.length; i++) {
|
|
||||||
if (msg.requests.length > 0 && msg.requests[0].tx.hash == requests[i].tx.hash) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (requests[i].time != "") {
|
|
||||||
requests[i].time = "";
|
|
||||||
setTimeout(dropper(requests[i].tx.hash), 3000);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Append any new requests into our local collection
|
|
||||||
var common = -1;
|
|
||||||
if (requests.length > 0) {
|
|
||||||
for (var i=0; i<msg.requests.length; i++) {
|
|
||||||
if (requests[requests.length-1].tx.hash == msg.requests[i].tx.hash) {
|
|
||||||
common = i;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for (var i=common+1; i<msg.requests.length; i++) {
|
|
||||||
requests.push(msg.requests[i]);
|
|
||||||
}
|
|
||||||
// Iterate over our entire local collection and re-render the funding table
|
|
||||||
var content = "";
|
|
||||||
for (var i=requests.length-1; i >= 0; i--) {
|
|
||||||
var done = requests[i].time == "";
|
|
||||||
var elapsed = moment().unix()-moment(requests[i].time).unix();
|
|
||||||
|
|
||||||
content += "<tr id='" + requests[i].tx.hash + "'>";
|
|
||||||
content += " <td><div style=\"background: url('" + requests[i].avatar + "'); background-size: cover; width:32px; height: 32px; border-radius: 4px;\"></div></td>";
|
|
||||||
content += " <td><pre>" + requests[i].account + "</pre></td>";
|
|
||||||
content += " <td style=\"width: 100%; text-align: center; vertical-align: middle;\">";
|
|
||||||
if (done) {
|
|
||||||
content += " funded";
|
|
||||||
} else {
|
|
||||||
content += " <span id='time-" + i + "' class='timer'>" + moment.duration(-elapsed, 'seconds').humanize(true) + "</span>";
|
|
||||||
}
|
|
||||||
content += " <div class='progress' style='height: 4px; margin: 0;'>";
|
|
||||||
if (done) {
|
|
||||||
content += " <div class='progress-bar progress-bar-success' role='progressbar' aria-valuenow='30' style='width:100%;'></div>";
|
|
||||||
} else if (elapsed > 30) {
|
|
||||||
content += " <div class='progress-bar progress-bar-danger progress-bar-striped active' role='progressbar' aria-valuenow='30' style='width:100%;'></div>";
|
|
||||||
} else {
|
|
||||||
content += " <div class='progress-bar progress-bar-striped active' role='progressbar' aria-valuenow='" + elapsed + "' style='width:" + (elapsed * 100 / 30) + "%;'></div>";
|
|
||||||
}
|
|
||||||
content += " </div>";
|
|
||||||
content += " </td>";
|
|
||||||
content += "</tr>";
|
|
||||||
}
|
|
||||||
$("#requests").html("<tbody>" + content + "</tbody>");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
server.onclose = function() { setTimeout(reconnect, 3000); };
|
|
||||||
}
|
|
||||||
// Start a UI updater to push the progress bars forward until they are done
|
|
||||||
setInterval(function() {
|
|
||||||
$('.progress-bar').each(function() {
|
|
||||||
var progress = Number($(this).attr('aria-valuenow')) + 1;
|
|
||||||
if (progress < 30) {
|
|
||||||
$(this).attr('aria-valuenow', progress);
|
|
||||||
$(this).css('width', (progress * 100 / 30) + '%');
|
|
||||||
} else if (progress == 30) {
|
|
||||||
$(this).css('width', '100%');
|
|
||||||
$(this).addClass("progress-bar-danger");
|
|
||||||
}
|
|
||||||
})
|
|
||||||
$('.timer').each(function() {
|
|
||||||
var index = Number($(this).attr('id').substring(5));
|
|
||||||
$(this).html(moment.duration(moment(requests[index].time).unix()-moment().unix(), 'seconds').humanize(true));
|
|
||||||
})
|
|
||||||
}, 1000);
|
|
||||||
|
|
||||||
// Establish a websocket connection to the API server
|
|
||||||
reconnect();
|
|
||||||
</script>{{if .Recaptcha}}
|
|
||||||
<script src="https://www.google.com/recaptcha/api.js" async defer></script>{{end}}
|
|
||||||
</body>
|
|
||||||
</html>
|
|
@ -1,45 +0,0 @@
|
|||||||
// Copyright 2021 The go-ethereum Authors
|
|
||||||
// This file is part of go-ethereum.
|
|
||||||
//
|
|
||||||
// go-ethereum is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// go-ethereum is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU General Public License
|
|
||||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestFacebook(t *testing.T) {
|
|
||||||
// TODO: Remove facebook auth or implement facebook api, which seems to require an API key
|
|
||||||
t.Skipf("The facebook access is flaky, needs to be reimplemented or removed")
|
|
||||||
for _, tt := range []struct {
|
|
||||||
url string
|
|
||||||
want common.Address
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
"https://www.facebook.com/fooz.gazonk/posts/2837228539847129",
|
|
||||||
common.HexToAddress("0xDeadDeaDDeaDbEefbEeFbEEfBeeFBeefBeeFbEEF"),
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
_, _, gotAddress, err := authFacebook(tt.url)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if gotAddress != tt.want {
|
|
||||||
t.Fatalf("address wrong, have %v want %v", gotAddress, tt.want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -43,11 +43,13 @@ func tmpDatadirWithKeystore(t *testing.T) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestAccountListEmpty(t *testing.T) {
|
func TestAccountListEmpty(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
geth := runGeth(t, "account", "list")
|
geth := runGeth(t, "account", "list")
|
||||||
geth.ExpectExit()
|
geth.ExpectExit()
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccountList(t *testing.T) {
|
func TestAccountList(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
datadir := tmpDatadirWithKeystore(t)
|
datadir := tmpDatadirWithKeystore(t)
|
||||||
var want = `
|
var want = `
|
||||||
Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
|
Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
|
||||||
@ -74,6 +76,7 @@ Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}\k
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestAccountNew(t *testing.T) {
|
func TestAccountNew(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
geth := runGeth(t, "account", "new", "--lightkdf")
|
geth := runGeth(t, "account", "new", "--lightkdf")
|
||||||
defer geth.ExpectExit()
|
defer geth.ExpectExit()
|
||||||
geth.Expect(`
|
geth.Expect(`
|
||||||
@ -96,6 +99,7 @@ Path of the secret key file: .*UTC--.+--[0-9a-f]{40}
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestAccountImport(t *testing.T) {
|
func TestAccountImport(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tests := []struct{ name, key, output string }{
|
tests := []struct{ name, key, output string }{
|
||||||
{
|
{
|
||||||
name: "correct account",
|
name: "correct account",
|
||||||
@ -118,6 +122,7 @@ func TestAccountImport(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestAccountHelp(t *testing.T) {
|
func TestAccountHelp(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
geth := runGeth(t, "account", "-h")
|
geth := runGeth(t, "account", "-h")
|
||||||
geth.WaitExit()
|
geth.WaitExit()
|
||||||
if have, want := geth.ExitStatus(), 0; have != want {
|
if have, want := geth.ExitStatus(), 0; have != want {
|
||||||
@ -147,6 +152,7 @@ func importAccountWithExpect(t *testing.T, key string, expected string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestAccountNewBadRepeat(t *testing.T) {
|
func TestAccountNewBadRepeat(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
geth := runGeth(t, "account", "new", "--lightkdf")
|
geth := runGeth(t, "account", "new", "--lightkdf")
|
||||||
defer geth.ExpectExit()
|
defer geth.ExpectExit()
|
||||||
geth.Expect(`
|
geth.Expect(`
|
||||||
@ -159,6 +165,7 @@ Fatal: Passwords do not match
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestAccountUpdate(t *testing.T) {
|
func TestAccountUpdate(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
datadir := tmpDatadirWithKeystore(t)
|
datadir := tmpDatadirWithKeystore(t)
|
||||||
geth := runGeth(t, "account", "update",
|
geth := runGeth(t, "account", "update",
|
||||||
"--datadir", datadir, "--lightkdf",
|
"--datadir", datadir, "--lightkdf",
|
||||||
@ -175,6 +182,7 @@ Repeat password: {{.InputLine "foobar2"}}
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestWalletImport(t *testing.T) {
|
func TestWalletImport(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
|
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
|
||||||
defer geth.ExpectExit()
|
defer geth.ExpectExit()
|
||||||
geth.Expect(`
|
geth.Expect(`
|
||||||
@ -190,6 +198,7 @@ Address: {d4584b5f6229b7be90727b0fc8c6b91bb427821f}
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestWalletImportBadPassword(t *testing.T) {
|
func TestWalletImportBadPassword(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
|
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
|
||||||
defer geth.ExpectExit()
|
defer geth.ExpectExit()
|
||||||
geth.Expect(`
|
geth.Expect(`
|
||||||
@ -200,6 +209,7 @@ Fatal: could not decrypt key with given password
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnlockFlag(t *testing.T) {
|
func TestUnlockFlag(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
||||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "console", "--exec", "loadScript('testdata/empty.js')")
|
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "console", "--exec", "loadScript('testdata/empty.js')")
|
||||||
geth.Expect(`
|
geth.Expect(`
|
||||||
@ -222,6 +232,7 @@ undefined
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnlockFlagWrongPassword(t *testing.T) {
|
func TestUnlockFlagWrongPassword(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
||||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "console", "--exec", "loadScript('testdata/empty.js')")
|
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "console", "--exec", "loadScript('testdata/empty.js')")
|
||||||
|
|
||||||
@ -240,6 +251,7 @@ Fatal: Failed to unlock account f466859ead1932d743d622cb74fc058882e8648a (could
|
|||||||
|
|
||||||
// https://github.com/ethereum/go-ethereum/issues/1785
|
// https://github.com/ethereum/go-ethereum/issues/1785
|
||||||
func TestUnlockFlagMultiIndex(t *testing.T) {
|
func TestUnlockFlagMultiIndex(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
||||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--unlock", "0,2", "console", "--exec", "loadScript('testdata/empty.js')")
|
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--unlock", "0,2", "console", "--exec", "loadScript('testdata/empty.js')")
|
||||||
|
|
||||||
@ -266,6 +278,7 @@ undefined
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnlockFlagPasswordFile(t *testing.T) {
|
func TestUnlockFlagPasswordFile(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
||||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password", "testdata/passwords.txt", "--unlock", "0,2", "console", "--exec", "loadScript('testdata/empty.js')")
|
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password", "testdata/passwords.txt", "--unlock", "0,2", "console", "--exec", "loadScript('testdata/empty.js')")
|
||||||
|
|
||||||
@ -287,6 +300,7 @@ undefined
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) {
|
func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
||||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password",
|
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password",
|
||||||
"testdata/wrong-passwords.txt", "--unlock", "0,2")
|
"testdata/wrong-passwords.txt", "--unlock", "0,2")
|
||||||
@ -297,6 +311,7 @@ Fatal: Failed to unlock account 0 (could not decrypt key with given password)
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnlockFlagAmbiguous(t *testing.T) {
|
func TestUnlockFlagAmbiguous(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
|
store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
|
||||||
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
||||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore",
|
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore",
|
||||||
@ -336,6 +351,7 @@ undefined
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) {
|
func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
|
store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
|
||||||
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t),
|
||||||
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore",
|
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore",
|
||||||
|
@ -137,20 +137,7 @@ The import-preimages command imports hash preimages from an RLP encoded stream.
|
|||||||
It's deprecated, please use "geth db import" instead.
|
It's deprecated, please use "geth db import" instead.
|
||||||
`,
|
`,
|
||||||
}
|
}
|
||||||
exportPreimagesCommand = &cli.Command{
|
|
||||||
Action: exportPreimages,
|
|
||||||
Name: "export-preimages",
|
|
||||||
Usage: "Export the preimage database into an RLP stream",
|
|
||||||
ArgsUsage: "<dumpfile>",
|
|
||||||
Flags: flags.Merge([]cli.Flag{
|
|
||||||
utils.CacheFlag,
|
|
||||||
utils.SyncModeFlag,
|
|
||||||
}, utils.DatabaseFlags),
|
|
||||||
Description: `
|
|
||||||
The export-preimages command exports hash preimages to an RLP encoded stream.
|
|
||||||
It's deprecated, please use "geth db export" instead.
|
|
||||||
`,
|
|
||||||
}
|
|
||||||
dumpCommand = &cli.Command{
|
dumpCommand = &cli.Command{
|
||||||
Action: dump,
|
Action: dump,
|
||||||
Name: "dump",
|
Name: "dump",
|
||||||
@ -211,7 +198,7 @@ func initGenesis(ctx *cli.Context) error {
|
|||||||
}
|
}
|
||||||
defer chaindb.Close()
|
defer chaindb.Close()
|
||||||
|
|
||||||
triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false)
|
triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
|
||||||
defer triedb.Close()
|
defer triedb.Close()
|
||||||
|
|
||||||
_, hash, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides)
|
_, hash, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides)
|
||||||
@ -224,14 +211,21 @@ func initGenesis(ctx *cli.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func dumpGenesis(ctx *cli.Context) error {
|
func dumpGenesis(ctx *cli.Context) error {
|
||||||
// if there is a testnet preset enabled, dump that
|
// check if there is a testnet preset enabled
|
||||||
|
var genesis *core.Genesis
|
||||||
if utils.IsNetworkPreset(ctx) {
|
if utils.IsNetworkPreset(ctx) {
|
||||||
genesis := utils.MakeGenesis(ctx)
|
genesis = utils.MakeGenesis(ctx)
|
||||||
|
} else if ctx.IsSet(utils.DeveloperFlag.Name) && !ctx.IsSet(utils.DataDirFlag.Name) {
|
||||||
|
genesis = core.DeveloperGenesisBlock(11_500_000, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
if genesis != nil {
|
||||||
if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
|
if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
|
||||||
utils.Fatalf("could not encode genesis: %s", err)
|
utils.Fatalf("could not encode genesis: %s", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// dump whatever already exists in the datadir
|
// dump whatever already exists in the datadir
|
||||||
stack, _ := makeConfigNode(ctx)
|
stack, _ := makeConfigNode(ctx)
|
||||||
for _, name := range []string{"chaindata", "lightchaindata"} {
|
for _, name := range []string{"chaindata", "lightchaindata"} {
|
||||||
@ -256,7 +250,7 @@ func dumpGenesis(ctx *cli.Context) error {
|
|||||||
if ctx.IsSet(utils.DataDirFlag.Name) {
|
if ctx.IsSet(utils.DataDirFlag.Name) {
|
||||||
utils.Fatalf("no existing datadir at %s", stack.Config().DataDir)
|
utils.Fatalf("no existing datadir at %s", stack.Config().DataDir)
|
||||||
}
|
}
|
||||||
utils.Fatalf("no network preset provided, no existing genesis in the default datadir")
|
utils.Fatalf("no network preset provided, and no genesis exists in the default datadir")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -379,6 +373,9 @@ func exportChain(ctx *cli.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// importPreimages imports preimage data from the specified file.
|
// importPreimages imports preimage data from the specified file.
|
||||||
|
// it is deprecated, and the export function has been removed, but
|
||||||
|
// the import function is kept around for the time being so that
|
||||||
|
// older file formats can still be imported.
|
||||||
func importPreimages(ctx *cli.Context) error {
|
func importPreimages(ctx *cli.Context) error {
|
||||||
if ctx.Args().Len() < 1 {
|
if ctx.Args().Len() < 1 {
|
||||||
utils.Fatalf("This command requires an argument.")
|
utils.Fatalf("This command requires an argument.")
|
||||||
@ -398,25 +395,6 @@ func importPreimages(ctx *cli.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// exportPreimages dumps the preimage data to specified json file in streaming way.
|
|
||||||
func exportPreimages(ctx *cli.Context) error {
|
|
||||||
if ctx.Args().Len() < 1 {
|
|
||||||
utils.Fatalf("This command requires an argument.")
|
|
||||||
}
|
|
||||||
stack, _ := makeConfigNode(ctx)
|
|
||||||
defer stack.Close()
|
|
||||||
|
|
||||||
db := utils.MakeChainDatabase(ctx, stack, true)
|
|
||||||
defer db.Close()
|
|
||||||
start := time.Now()
|
|
||||||
|
|
||||||
if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
|
|
||||||
utils.Fatalf("Export error: %v\n", err)
|
|
||||||
}
|
|
||||||
fmt.Printf("Export done in %v\n", time.Since(start))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
|
func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
|
||||||
db := utils.MakeChainDatabase(ctx, stack, true)
|
db := utils.MakeChainDatabase(ctx, stack, true)
|
||||||
defer db.Close()
|
defer db.Close()
|
||||||
@ -485,7 +463,7 @@ func dump(ctx *cli.Context) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
triedb := utils.MakeTrieDatabase(ctx, db, true, true) // always enable preimage lookup
|
triedb := utils.MakeTrieDatabase(ctx, db, true, true, false) // always enable preimage lookup
|
||||||
defer triedb.Close()
|
defer triedb.Close()
|
||||||
|
|
||||||
state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)
|
state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)
|
||||||
@ -495,11 +473,6 @@ func dump(ctx *cli.Context) error {
|
|||||||
if ctx.Bool(utils.IterativeOutputFlag.Name) {
|
if ctx.Bool(utils.IterativeOutputFlag.Name) {
|
||||||
state.IterativeDump(conf, json.NewEncoder(os.Stdout))
|
state.IterativeDump(conf, json.NewEncoder(os.Stdout))
|
||||||
} else {
|
} else {
|
||||||
if conf.OnlyWithAddresses {
|
|
||||||
fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
|
|
||||||
" otherwise the accounts will overwrite each other in the resulting mapping.")
|
|
||||||
return errors.New("incompatible options")
|
|
||||||
}
|
|
||||||
fmt.Println(string(state.Dump(conf)))
|
fmt.Println(string(state.Dump(conf)))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -35,7 +35,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
"github.com/ethereum/go-ethereum/eth/catalyst"
|
"github.com/ethereum/go-ethereum/eth/catalyst"
|
||||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
|
||||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||||
"github.com/ethereum/go-ethereum/internal/flags"
|
"github.com/ethereum/go-ethereum/internal/flags"
|
||||||
@ -222,7 +221,7 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
|
|||||||
}
|
}
|
||||||
catalyst.RegisterSimulatedBeaconAPIs(stack, simBeacon)
|
catalyst.RegisterSimulatedBeaconAPIs(stack, simBeacon)
|
||||||
stack.RegisterLifecycle(simBeacon)
|
stack.RegisterLifecycle(simBeacon)
|
||||||
} else if cfg.Eth.SyncMode != downloader.LightSync {
|
} else {
|
||||||
err := catalyst.Register(stack, eth)
|
err := catalyst.Register(stack, eth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("failed to register catalyst service: %v", err)
|
utils.Fatalf("failed to register catalyst service: %v", err)
|
||||||
|
@ -50,6 +50,7 @@ func runMinimalGeth(t *testing.T, args ...string) *testgeth {
|
|||||||
// Tests that a node embedded within a console can be started up properly and
|
// Tests that a node embedded within a console can be started up properly and
|
||||||
// then terminated by closing the input stream.
|
// then terminated by closing the input stream.
|
||||||
func TestConsoleWelcome(t *testing.T) {
|
func TestConsoleWelcome(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
|
||||||
|
|
||||||
// Start a geth console, make sure it's cleaned up and terminate the console
|
// Start a geth console, make sure it's cleaned up and terminate the console
|
||||||
|
@ -482,7 +482,7 @@ func dbDumpTrie(ctx *cli.Context) error {
|
|||||||
db := utils.MakeChainDatabase(ctx, stack, true)
|
db := utils.MakeChainDatabase(ctx, stack, true)
|
||||||
defer db.Close()
|
defer db.Close()
|
||||||
|
|
||||||
triedb := utils.MakeTrieDatabase(ctx, db, false, true)
|
triedb := utils.MakeTrieDatabase(ctx, db, false, true, false)
|
||||||
defer triedb.Close()
|
defer triedb.Close()
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -27,6 +27,7 @@ import (
|
|||||||
|
|
||||||
// TestExport does a basic test of "geth export", exporting the test-genesis.
|
// TestExport does a basic test of "geth export", exporting the test-genesis.
|
||||||
func TestExport(t *testing.T) {
|
func TestExport(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
outfile := fmt.Sprintf("%v/testExport.out", os.TempDir())
|
outfile := fmt.Sprintf("%v/testExport.out", os.TempDir())
|
||||||
defer os.Remove(outfile)
|
defer os.Remove(outfile)
|
||||||
geth := runGeth(t, "--datadir", initGeth(t), "export", outfile)
|
geth := runGeth(t, "--datadir", initGeth(t), "export", outfile)
|
||||||
|
@ -1,205 +0,0 @@
|
|||||||
// Copyright 2020 The go-ethereum Authors
|
|
||||||
// This file is part of go-ethereum.
|
|
||||||
//
|
|
||||||
// go-ethereum is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// go-ethereum is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU General Public License
|
|
||||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"sync/atomic"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
|
||||||
)
|
|
||||||
|
|
||||||
type gethrpc struct {
|
|
||||||
name string
|
|
||||||
rpc *rpc.Client
|
|
||||||
geth *testgeth
|
|
||||||
nodeInfo *p2p.NodeInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *gethrpc) killAndWait() {
|
|
||||||
g.geth.Kill()
|
|
||||||
g.geth.WaitExit()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *gethrpc) callRPC(result interface{}, method string, args ...interface{}) {
|
|
||||||
if err := g.rpc.Call(&result, method, args...); err != nil {
|
|
||||||
g.geth.Fatalf("callRPC %v: %v", method, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *gethrpc) addPeer(peer *gethrpc) {
|
|
||||||
g.geth.Logf("%v.addPeer(%v)", g.name, peer.name)
|
|
||||||
enode := peer.getNodeInfo().Enode
|
|
||||||
peerCh := make(chan *p2p.PeerEvent)
|
|
||||||
sub, err := g.rpc.Subscribe(context.Background(), "admin", peerCh, "peerEvents")
|
|
||||||
if err != nil {
|
|
||||||
g.geth.Fatalf("subscribe %v: %v", g.name, err)
|
|
||||||
}
|
|
||||||
defer sub.Unsubscribe()
|
|
||||||
g.callRPC(nil, "admin_addPeer", enode)
|
|
||||||
dur := 14 * time.Second
|
|
||||||
timeout := time.After(dur)
|
|
||||||
select {
|
|
||||||
case ev := <-peerCh:
|
|
||||||
g.geth.Logf("%v received event: type=%v, peer=%v", g.name, ev.Type, ev.Peer)
|
|
||||||
case err := <-sub.Err():
|
|
||||||
g.geth.Fatalf("%v sub error: %v", g.name, err)
|
|
||||||
case <-timeout:
|
|
||||||
g.geth.Error("timeout adding peer after", dur)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use this function instead of `g.nodeInfo` directly
|
|
||||||
func (g *gethrpc) getNodeInfo() *p2p.NodeInfo {
|
|
||||||
if g.nodeInfo != nil {
|
|
||||||
return g.nodeInfo
|
|
||||||
}
|
|
||||||
g.nodeInfo = &p2p.NodeInfo{}
|
|
||||||
g.callRPC(&g.nodeInfo, "admin_nodeInfo")
|
|
||||||
return g.nodeInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into
|
|
||||||
// account the set data folders as well as the designated platform we're currently
|
|
||||||
// running on.
|
|
||||||
func ipcEndpoint(ipcPath, datadir string) string {
|
|
||||||
// On windows we can only use plain top-level pipes
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
if strings.HasPrefix(ipcPath, `\\.\pipe\`) {
|
|
||||||
return ipcPath
|
|
||||||
}
|
|
||||||
return `\\.\pipe\` + ipcPath
|
|
||||||
}
|
|
||||||
// Resolve names into the data directory full paths otherwise
|
|
||||||
if filepath.Base(ipcPath) == ipcPath {
|
|
||||||
if datadir == "" {
|
|
||||||
return filepath.Join(os.TempDir(), ipcPath)
|
|
||||||
}
|
|
||||||
return filepath.Join(datadir, ipcPath)
|
|
||||||
}
|
|
||||||
return ipcPath
|
|
||||||
}
|
|
||||||
|
|
||||||
// nextIPC ensures that each ipc pipe gets a unique name.
|
|
||||||
// On linux, it works well to use ipc pipes all over the filesystem (in datadirs),
|
|
||||||
// but windows require pipes to sit in "\\.\pipe\". Therefore, to run several
|
|
||||||
// nodes simultaneously, we need to distinguish between them, which we do by
|
|
||||||
// the pipe filename instead of folder.
|
|
||||||
var nextIPC atomic.Uint32
|
|
||||||
|
|
||||||
func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc {
|
|
||||||
ipcName := fmt.Sprintf("geth-%d.ipc", nextIPC.Add(1))
|
|
||||||
args = append([]string{"--networkid=42", "--port=0", "--authrpc.port", "0", "--ipcpath", ipcName}, args...)
|
|
||||||
t.Logf("Starting %v with rpc: %v", name, args)
|
|
||||||
|
|
||||||
g := &gethrpc{
|
|
||||||
name: name,
|
|
||||||
geth: runGeth(t, args...),
|
|
||||||
}
|
|
||||||
ipcpath := ipcEndpoint(ipcName, g.geth.Datadir)
|
|
||||||
// We can't know exactly how long geth will take to start, so we try 10
|
|
||||||
// times over a 5 second period.
|
|
||||||
var err error
|
|
||||||
for i := 0; i < 10; i++ {
|
|
||||||
time.Sleep(500 * time.Millisecond)
|
|
||||||
if g.rpc, err = rpc.Dial(ipcpath); err == nil {
|
|
||||||
return g
|
|
||||||
}
|
|
||||||
}
|
|
||||||
t.Fatalf("%v rpc connect to %v: %v", name, ipcpath, err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func initGeth(t *testing.T) string {
|
|
||||||
args := []string{"--networkid=42", "init", "./testdata/clique.json"}
|
|
||||||
t.Logf("Initializing geth: %v ", args)
|
|
||||||
g := runGeth(t, args...)
|
|
||||||
datadir := g.Datadir
|
|
||||||
g.WaitExit()
|
|
||||||
return datadir
|
|
||||||
}
|
|
||||||
|
|
||||||
func startLightServer(t *testing.T) *gethrpc {
|
|
||||||
datadir := initGeth(t)
|
|
||||||
t.Logf("Importing keys to geth")
|
|
||||||
runGeth(t, "account", "import", "--datadir", datadir, "--password", "./testdata/password.txt", "--lightkdf", "./testdata/key.prv").WaitExit()
|
|
||||||
account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105"
|
|
||||||
server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--miner.etherbase=0x02f0d131f1f97aef08aec6e3291b957d9efe7105", "--mine", "--light.serve=100", "--light.maxpeers=1", "--discv4=false", "--nat=extip:127.0.0.1", "--verbosity=4")
|
|
||||||
return server
|
|
||||||
}
|
|
||||||
|
|
||||||
func startClient(t *testing.T, name string) *gethrpc {
|
|
||||||
datadir := initGeth(t)
|
|
||||||
return startGethWithIpc(t, name, "--datadir", datadir, "--discv4=false", "--syncmode=light", "--nat=extip:127.0.0.1", "--verbosity=4")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPriorityClient(t *testing.T) {
|
|
||||||
lightServer := startLightServer(t)
|
|
||||||
defer lightServer.killAndWait()
|
|
||||||
|
|
||||||
// Start client and add lightServer as peer
|
|
||||||
freeCli := startClient(t, "freeCli")
|
|
||||||
defer freeCli.killAndWait()
|
|
||||||
freeCli.addPeer(lightServer)
|
|
||||||
|
|
||||||
var peers []*p2p.PeerInfo
|
|
||||||
freeCli.callRPC(&peers, "admin_peers")
|
|
||||||
if len(peers) != 1 {
|
|
||||||
t.Errorf("Expected: # of client peers == 1, actual: %v", len(peers))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set up priority client, get its nodeID, increase its balance on the lightServer
|
|
||||||
prioCli := startClient(t, "prioCli")
|
|
||||||
defer prioCli.killAndWait()
|
|
||||||
// 3_000_000_000 once we move to Go 1.13
|
|
||||||
tokens := uint64(3000000000)
|
|
||||||
lightServer.callRPC(nil, "les_addBalance", prioCli.getNodeInfo().ID, tokens)
|
|
||||||
prioCli.addPeer(lightServer)
|
|
||||||
|
|
||||||
// Check if priority client is actually syncing and the regular client got kicked out
|
|
||||||
prioCli.callRPC(&peers, "admin_peers")
|
|
||||||
if len(peers) != 1 {
|
|
||||||
t.Errorf("Expected: # of prio peers == 1, actual: %v", len(peers))
|
|
||||||
}
|
|
||||||
|
|
||||||
nodes := map[string]*gethrpc{
|
|
||||||
lightServer.getNodeInfo().ID: lightServer,
|
|
||||||
freeCli.getNodeInfo().ID: freeCli,
|
|
||||||
prioCli.getNodeInfo().ID: prioCli,
|
|
||||||
}
|
|
||||||
time.Sleep(1 * time.Second)
|
|
||||||
lightServer.callRPC(&peers, "admin_peers")
|
|
||||||
peersWithNames := make(map[string]string)
|
|
||||||
for _, p := range peers {
|
|
||||||
peersWithNames[nodes[p.ID].name] = p.ID
|
|
||||||
}
|
|
||||||
if _, freeClientFound := peersWithNames[freeCli.name]; freeClientFound {
|
|
||||||
t.Error("client is still a peer of lightServer", peersWithNames)
|
|
||||||
}
|
|
||||||
if _, prioClientFound := peersWithNames[prioCli.name]; !prioClientFound {
|
|
||||||
t.Error("prio client is not among lightServer peers", peersWithNames)
|
|
||||||
}
|
|
||||||
}
|
|
@ -21,6 +21,7 @@ package main
|
|||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
@ -58,6 +59,7 @@ func censor(input string, start, end int) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestLogging(t *testing.T) {
|
func TestLogging(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
testConsoleLogging(t, "terminal", 6, 24)
|
testConsoleLogging(t, "terminal", 6, 24)
|
||||||
testConsoleLogging(t, "logfmt", 2, 26)
|
testConsoleLogging(t, "logfmt", 2, 26)
|
||||||
}
|
}
|
||||||
@ -97,7 +99,55 @@ func testConsoleLogging(t *testing.T, format string, tStart, tEnd int) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestJsonLogging(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
haveB, err := runSelf("--log.format", "json", "logtest")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
readFile, err := os.Open("testdata/logging/logtest-json.txt")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
wantLines := split(readFile)
|
||||||
|
haveLines := split(bytes.NewBuffer(haveB))
|
||||||
|
for i, wantLine := range wantLines {
|
||||||
|
if i > len(haveLines)-1 {
|
||||||
|
t.Fatalf("format %v, line %d missing, want:%v", "json", i, wantLine)
|
||||||
|
}
|
||||||
|
haveLine := haveLines[i]
|
||||||
|
for strings.Contains(haveLine, "Unknown config environment variable") {
|
||||||
|
// This can happen on CI runs. Drop it.
|
||||||
|
haveLines = append(haveLines[:i], haveLines[i+1:]...)
|
||||||
|
haveLine = haveLines[i]
|
||||||
|
}
|
||||||
|
var have, want []byte
|
||||||
|
{
|
||||||
|
var h map[string]any
|
||||||
|
if err := json.Unmarshal([]byte(haveLine), &h); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
h["t"] = "xxx"
|
||||||
|
have, _ = json.Marshal(h)
|
||||||
|
}
|
||||||
|
{
|
||||||
|
var w map[string]any
|
||||||
|
if err := json.Unmarshal([]byte(wantLine), &w); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
w["t"] = "xxx"
|
||||||
|
want, _ = json.Marshal(w)
|
||||||
|
}
|
||||||
|
if !bytes.Equal(have, want) {
|
||||||
|
// show an intelligent diff
|
||||||
|
t.Logf(nicediff(have, want))
|
||||||
|
t.Errorf("file content wrong")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestVmodule(t *testing.T) {
|
func TestVmodule(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
checkOutput := func(level int, want, wantNot string) {
|
checkOutput := func(level int, want, wantNot string) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
output, err := runSelf("--log.format", "terminal", "--verbosity=0", "--log.vmodule", fmt.Sprintf("logtestcmd_active.go=%d", level), "logtest")
|
output, err := runSelf("--log.format", "terminal", "--verbosity=0", "--log.vmodule", fmt.Sprintf("logtestcmd_active.go=%d", level), "logtest")
|
||||||
@ -145,6 +195,7 @@ func nicediff(have, want []byte) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestFileOut(t *testing.T) {
|
func TestFileOut(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var (
|
var (
|
||||||
have, want []byte
|
have, want []byte
|
||||||
err error
|
err error
|
||||||
@ -165,6 +216,7 @@ func TestFileOut(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestRotatingFileOut(t *testing.T) {
|
func TestRotatingFileOut(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var (
|
var (
|
||||||
have, want []byte
|
have, want []byte
|
||||||
err error
|
err error
|
||||||
|
@ -19,12 +19,14 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"math/big"
|
"math/big"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/internal/debug"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/holiman/uint256"
|
"github.com/holiman/uint256"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
@ -39,10 +41,19 @@ var logTestCommand = &cli.Command{
|
|||||||
This command is only meant for testing.
|
This command is only meant for testing.
|
||||||
`}
|
`}
|
||||||
|
|
||||||
|
type customQuotedStringer struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c customQuotedStringer) String() string {
|
||||||
|
return "output with 'quotes'"
|
||||||
|
}
|
||||||
|
|
||||||
// logTest is an entry point which spits out some logs. This is used by testing
|
// logTest is an entry point which spits out some logs. This is used by testing
|
||||||
// to verify expected outputs
|
// to verify expected outputs
|
||||||
func logTest(ctx *cli.Context) error {
|
func logTest(ctx *cli.Context) error {
|
||||||
log.ResetGlobalState()
|
// clear field padding map
|
||||||
|
debug.ResetLogging()
|
||||||
|
|
||||||
{ // big.Int
|
{ // big.Int
|
||||||
ba, _ := new(big.Int).SetString("111222333444555678999", 10) // "111,222,333,444,555,678,999"
|
ba, _ := new(big.Int).SetString("111222333444555678999", 10) // "111,222,333,444,555,678,999"
|
||||||
bb, _ := new(big.Int).SetString("-111222333444555678999", 10) // "-111,222,333,444,555,678,999"
|
bb, _ := new(big.Int).SetString("-111222333444555678999", 10) // "-111,222,333,444,555,678,999"
|
||||||
@ -83,12 +94,13 @@ func logTest(ctx *cli.Context) error {
|
|||||||
|
|
||||||
colored := fmt.Sprintf("\u001B[%dmColored\u001B[0m[", 35)
|
colored := fmt.Sprintf("\u001B[%dmColored\u001B[0m[", 35)
|
||||||
log.Info(colored, colored, colored)
|
log.Info(colored, colored, colored)
|
||||||
|
err := errors.New("this is an 'error'")
|
||||||
|
log.Info("an error message with quotes", "error", err)
|
||||||
}
|
}
|
||||||
{ // Custom Stringer() - type
|
{ // Custom Stringer() - type
|
||||||
log.Info("Custom Stringer value", "2562047h47m16.854s", common.PrettyDuration(time.Duration(9223372036854775807)))
|
log.Info("Custom Stringer value", "2562047h47m16.854s", common.PrettyDuration(time.Duration(9223372036854775807)))
|
||||||
}
|
var c customQuotedStringer
|
||||||
{ // Lazy eval
|
log.Info("a custom stringer that emits quoted text", "output", c)
|
||||||
log.Info("Lazy evaluation of value", "key", log.Lazy{Fn: func() interface{} { return "lazy value" }})
|
|
||||||
}
|
}
|
||||||
{ // Multi-line message
|
{ // Multi-line message
|
||||||
log.Info("A message with wonky \U0001F4A9 characters")
|
log.Info("A message with wonky \U0001F4A9 characters")
|
||||||
@ -150,6 +162,10 @@ func logTest(ctx *cli.Context) error {
|
|||||||
{ // Logging with 'reserved' keys
|
{ // Logging with 'reserved' keys
|
||||||
log.Info("Using keys 't', 'lvl', 'time', 'level' and 'msg'", "t", "t", "time", "time", "lvl", "lvl", "level", "level", "msg", "msg")
|
log.Info("Using keys 't', 'lvl', 'time', 'level' and 'msg'", "t", "t", "time", "time", "lvl", "lvl", "level", "level", "msg", "msg")
|
||||||
}
|
}
|
||||||
|
{ // Logging with wrong attr-value pairs
|
||||||
|
log.Info("Odd pair (1 attr)", "key")
|
||||||
|
log.Info("Odd pair (3 attr)", "key", "value", "key2")
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -69,7 +69,7 @@ var (
|
|||||||
utils.MinFreeDiskSpaceFlag,
|
utils.MinFreeDiskSpaceFlag,
|
||||||
utils.KeyStoreDirFlag,
|
utils.KeyStoreDirFlag,
|
||||||
utils.ExternalSignerFlag,
|
utils.ExternalSignerFlag,
|
||||||
utils.NoUSBFlag,
|
utils.NoUSBFlag, // deprecated
|
||||||
utils.USBFlag,
|
utils.USBFlag,
|
||||||
utils.SmartCardDaemonPathFlag,
|
utils.SmartCardDaemonPathFlag,
|
||||||
utils.OverrideCancun,
|
utils.OverrideCancun,
|
||||||
@ -94,24 +94,24 @@ var (
|
|||||||
utils.ExitWhenSyncedFlag,
|
utils.ExitWhenSyncedFlag,
|
||||||
utils.GCModeFlag,
|
utils.GCModeFlag,
|
||||||
utils.SnapshotFlag,
|
utils.SnapshotFlag,
|
||||||
utils.TxLookupLimitFlag,
|
utils.TxLookupLimitFlag, // deprecated
|
||||||
utils.TransactionHistoryFlag,
|
utils.TransactionHistoryFlag,
|
||||||
utils.StateHistoryFlag,
|
utils.StateHistoryFlag,
|
||||||
utils.LightServeFlag,
|
utils.LightServeFlag, // deprecated
|
||||||
utils.LightIngressFlag,
|
utils.LightIngressFlag, // deprecated
|
||||||
utils.LightEgressFlag,
|
utils.LightEgressFlag, // deprecated
|
||||||
utils.LightMaxPeersFlag,
|
utils.LightMaxPeersFlag, // deprecated
|
||||||
utils.LightNoPruneFlag,
|
utils.LightNoPruneFlag, // deprecated
|
||||||
utils.LightKDFFlag,
|
utils.LightKDFFlag,
|
||||||
utils.LightNoSyncServeFlag,
|
utils.LightNoSyncServeFlag, // deprecated
|
||||||
utils.EthRequiredBlocksFlag,
|
utils.EthRequiredBlocksFlag,
|
||||||
utils.LegacyWhitelistFlag,
|
utils.LegacyWhitelistFlag, // deprecated
|
||||||
utils.BloomFilterSizeFlag,
|
utils.BloomFilterSizeFlag,
|
||||||
utils.CacheFlag,
|
utils.CacheFlag,
|
||||||
utils.CacheDatabaseFlag,
|
utils.CacheDatabaseFlag,
|
||||||
utils.CacheTrieFlag,
|
utils.CacheTrieFlag,
|
||||||
utils.CacheTrieJournalFlag,
|
utils.CacheTrieJournalFlag, // deprecated
|
||||||
utils.CacheTrieRejournalFlag,
|
utils.CacheTrieRejournalFlag, // deprecated
|
||||||
utils.CacheGCFlag,
|
utils.CacheGCFlag,
|
||||||
utils.CacheSnapshotFlag,
|
utils.CacheSnapshotFlag,
|
||||||
utils.CacheNoPrefetchFlag,
|
utils.CacheNoPrefetchFlag,
|
||||||
@ -134,7 +134,7 @@ var (
|
|||||||
utils.NoDiscoverFlag,
|
utils.NoDiscoverFlag,
|
||||||
utils.DiscoveryV4Flag,
|
utils.DiscoveryV4Flag,
|
||||||
utils.DiscoveryV5Flag,
|
utils.DiscoveryV5Flag,
|
||||||
utils.LegacyDiscoveryV5Flag,
|
utils.LegacyDiscoveryV5Flag, // deprecated
|
||||||
utils.NetrestrictFlag,
|
utils.NetrestrictFlag,
|
||||||
utils.NodeKeyFileFlag,
|
utils.NodeKeyFileFlag,
|
||||||
utils.NodeKeyHexFlag,
|
utils.NodeKeyHexFlag,
|
||||||
@ -151,6 +151,8 @@ var (
|
|||||||
utils.GpoMaxGasPriceFlag,
|
utils.GpoMaxGasPriceFlag,
|
||||||
utils.GpoIgnoreGasPriceFlag,
|
utils.GpoIgnoreGasPriceFlag,
|
||||||
configFileFlag,
|
configFileFlag,
|
||||||
|
utils.LogDebugFlag,
|
||||||
|
utils.LogBacktraceAtFlag,
|
||||||
}, utils.NetworkFlags, utils.DatabaseFlags)
|
}, utils.NetworkFlags, utils.DatabaseFlags)
|
||||||
|
|
||||||
rpcFlags = []cli.Flag{
|
rpcFlags = []cli.Flag{
|
||||||
@ -215,7 +217,6 @@ func init() {
|
|||||||
importCommand,
|
importCommand,
|
||||||
exportCommand,
|
exportCommand,
|
||||||
importPreimagesCommand,
|
importPreimagesCommand,
|
||||||
exportPreimagesCommand,
|
|
||||||
removedbCommand,
|
removedbCommand,
|
||||||
dumpCommand,
|
dumpCommand,
|
||||||
dumpGenesisCommand,
|
dumpGenesisCommand,
|
||||||
@ -314,7 +315,7 @@ func prepare(ctx *cli.Context) {
|
|||||||
log.Info("Starting Geth on Ethereum mainnet...")
|
log.Info("Starting Geth on Ethereum mainnet...")
|
||||||
}
|
}
|
||||||
// If we're a full node on mainnet without --cache specified, bump default cache allowance
|
// If we're a full node on mainnet without --cache specified, bump default cache allowance
|
||||||
if ctx.String(utils.SyncModeFlag.Name) != "light" && !ctx.IsSet(utils.CacheFlag.Name) && !ctx.IsSet(utils.NetworkIdFlag.Name) {
|
if !ctx.IsSet(utils.CacheFlag.Name) && !ctx.IsSet(utils.NetworkIdFlag.Name) {
|
||||||
// Make sure we're not on any supported preconfigured testnet either
|
// Make sure we're not on any supported preconfigured testnet either
|
||||||
if !ctx.IsSet(utils.HoleskyFlag.Name) &&
|
if !ctx.IsSet(utils.HoleskyFlag.Name) &&
|
||||||
!ctx.IsSet(utils.SepoliaFlag.Name) &&
|
!ctx.IsSet(utils.SepoliaFlag.Name) &&
|
||||||
@ -325,11 +326,6 @@ func prepare(ctx *cli.Context) {
|
|||||||
ctx.Set(utils.CacheFlag.Name, strconv.Itoa(4096))
|
ctx.Set(utils.CacheFlag.Name, strconv.Itoa(4096))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// If we're running a light client on any network, drop the cache to some meaningfully low amount
|
|
||||||
if ctx.String(utils.SyncModeFlag.Name) == "light" && !ctx.IsSet(utils.CacheFlag.Name) {
|
|
||||||
log.Info("Dropping default light client cache", "provided", ctx.Int(utils.CacheFlag.Name), "updated", 128)
|
|
||||||
ctx.Set(utils.CacheFlag.Name, strconv.Itoa(128))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start metrics export if enabled
|
// Start metrics export if enabled
|
||||||
utils.SetupMetrics(ctx)
|
utils.SetupMetrics(ctx)
|
||||||
|
@ -55,6 +55,15 @@ func TestMain(m *testing.M) {
|
|||||||
os.Exit(m.Run())
|
os.Exit(m.Run())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func initGeth(t *testing.T) string {
|
||||||
|
args := []string{"--networkid=42", "init", "./testdata/clique.json"}
|
||||||
|
t.Logf("Initializing geth: %v ", args)
|
||||||
|
g := runGeth(t, args...)
|
||||||
|
datadir := g.Datadir
|
||||||
|
g.WaitExit()
|
||||||
|
return datadir
|
||||||
|
}
|
||||||
|
|
||||||
// spawns geth with the given command line args. If the args don't set --datadir, the
|
// spawns geth with the given command line args. If the args don't set --datadir, the
|
||||||
// child g gets a temporary data directory.
|
// child g gets a temporary data directory.
|
||||||
func runGeth(t *testing.T, args ...string) *testgeth {
|
func runGeth(t *testing.T, args ...string) *testgeth {
|
||||||
|
@ -20,6 +20,7 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@ -147,6 +148,17 @@ as the backend data source, making this command a lot faster.
|
|||||||
|
|
||||||
The argument is interpreted as block number or hash. If none is provided, the latest
|
The argument is interpreted as block number or hash. If none is provided, the latest
|
||||||
block is used.
|
block is used.
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Action: snapshotExportPreimages,
|
||||||
|
Name: "export-preimages",
|
||||||
|
Usage: "Export the preimage in snapshot enumeration order",
|
||||||
|
ArgsUsage: "<dumpfile> [<root>]",
|
||||||
|
Flags: utils.DatabaseFlags,
|
||||||
|
Description: `
|
||||||
|
The export-preimages command exports hash preimages to a flat file, in exactly
|
||||||
|
the expected order for the overlay tree migration.
|
||||||
`,
|
`,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -205,7 +217,7 @@ func verifyState(ctx *cli.Context) error {
|
|||||||
log.Error("Failed to load head block")
|
log.Error("Failed to load head block")
|
||||||
return errors.New("no head block")
|
return errors.New("no head block")
|
||||||
}
|
}
|
||||||
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
|
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
|
||||||
defer triedb.Close()
|
defer triedb.Close()
|
||||||
|
|
||||||
snapConfig := snapshot.Config{
|
snapConfig := snapshot.Config{
|
||||||
@ -260,7 +272,7 @@ func traverseState(ctx *cli.Context) error {
|
|||||||
chaindb := utils.MakeChainDatabase(ctx, stack, true)
|
chaindb := utils.MakeChainDatabase(ctx, stack, true)
|
||||||
defer chaindb.Close()
|
defer chaindb.Close()
|
||||||
|
|
||||||
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
|
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
|
||||||
defer triedb.Close()
|
defer triedb.Close()
|
||||||
|
|
||||||
headBlock := rawdb.ReadHeadBlock(chaindb)
|
headBlock := rawdb.ReadHeadBlock(chaindb)
|
||||||
@ -369,7 +381,7 @@ func traverseRawState(ctx *cli.Context) error {
|
|||||||
chaindb := utils.MakeChainDatabase(ctx, stack, true)
|
chaindb := utils.MakeChainDatabase(ctx, stack, true)
|
||||||
defer chaindb.Close()
|
defer chaindb.Close()
|
||||||
|
|
||||||
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
|
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
|
||||||
defer triedb.Close()
|
defer triedb.Close()
|
||||||
|
|
||||||
headBlock := rawdb.ReadHeadBlock(chaindb)
|
headBlock := rawdb.ReadHeadBlock(chaindb)
|
||||||
@ -533,7 +545,7 @@ func dumpState(ctx *cli.Context) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
triedb := utils.MakeTrieDatabase(ctx, db, false, true)
|
triedb := utils.MakeTrieDatabase(ctx, db, false, true, false)
|
||||||
defer triedb.Close()
|
defer triedb.Close()
|
||||||
|
|
||||||
snapConfig := snapshot.Config{
|
snapConfig := snapshot.Config{
|
||||||
@ -568,11 +580,11 @@ func dumpState(ctx *cli.Context) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
da := &state.DumpAccount{
|
da := &state.DumpAccount{
|
||||||
Balance: account.Balance.String(),
|
Balance: account.Balance.String(),
|
||||||
Nonce: account.Nonce,
|
Nonce: account.Nonce,
|
||||||
Root: account.Root.Bytes(),
|
Root: account.Root.Bytes(),
|
||||||
CodeHash: account.CodeHash,
|
CodeHash: account.CodeHash,
|
||||||
SecureKey: accIt.Hash().Bytes(),
|
AddressHash: accIt.Hash().Bytes(),
|
||||||
}
|
}
|
||||||
if !conf.SkipCode && !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) {
|
if !conf.SkipCode && !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) {
|
||||||
da.Code = rawdb.ReadCode(db, common.BytesToHash(account.CodeHash))
|
da.Code = rawdb.ReadCode(db, common.BytesToHash(account.CodeHash))
|
||||||
@ -604,6 +616,48 @@ func dumpState(ctx *cli.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// snapshotExportPreimages dumps the preimage data to a flat file.
|
||||||
|
func snapshotExportPreimages(ctx *cli.Context) error {
|
||||||
|
if ctx.NArg() < 1 {
|
||||||
|
utils.Fatalf("This command requires an argument.")
|
||||||
|
}
|
||||||
|
stack, _ := makeConfigNode(ctx)
|
||||||
|
defer stack.Close()
|
||||||
|
|
||||||
|
chaindb := utils.MakeChainDatabase(ctx, stack, true)
|
||||||
|
defer chaindb.Close()
|
||||||
|
|
||||||
|
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true, false)
|
||||||
|
defer triedb.Close()
|
||||||
|
|
||||||
|
var root common.Hash
|
||||||
|
if ctx.NArg() > 1 {
|
||||||
|
rootBytes := common.FromHex(ctx.Args().Get(1))
|
||||||
|
if len(rootBytes) != common.HashLength {
|
||||||
|
return fmt.Errorf("invalid hash: %s", ctx.Args().Get(1))
|
||||||
|
}
|
||||||
|
root = common.BytesToHash(rootBytes)
|
||||||
|
} else {
|
||||||
|
headBlock := rawdb.ReadHeadBlock(chaindb)
|
||||||
|
if headBlock == nil {
|
||||||
|
log.Error("Failed to load head block")
|
||||||
|
return errors.New("no head block")
|
||||||
|
}
|
||||||
|
root = headBlock.Root()
|
||||||
|
}
|
||||||
|
snapConfig := snapshot.Config{
|
||||||
|
CacheSize: 256,
|
||||||
|
Recovery: false,
|
||||||
|
NoBuild: true,
|
||||||
|
AsyncBuild: false,
|
||||||
|
}
|
||||||
|
snaptree, err := snapshot.New(snapConfig, chaindb, triedb, root)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return utils.ExportSnapshotPreimages(chaindb, snaptree, ctx.Args().First(), root)
|
||||||
|
}
|
||||||
|
|
||||||
// checkAccount iterates the snap data layers, and looks up the given account
|
// checkAccount iterates the snap data layers, and looks up the given account
|
||||||
// across all layers.
|
// across all layers.
|
||||||
func checkAccount(ctx *cli.Context) error {
|
func checkAccount(ctx *cli.Context) error {
|
||||||
|
101
cmd/geth/testdata/logging/logtest-json.txt
vendored
101
cmd/geth/testdata/logging/logtest-json.txt
vendored
@ -1,49 +1,52 @@
|
|||||||
{"111,222,333,444,555,678,999":"111222333444555678999","lvl":"info","msg":"big.Int","t":"2023-11-09T08:33:19.464383209+01:00"}
|
{"t":"2023-11-22T15:42:00.407963+08:00","lvl":"info","msg":"big.Int","111,222,333,444,555,678,999":"111222333444555678999"}
|
||||||
{"-111,222,333,444,555,678,999":"-111222333444555678999","lvl":"info","msg":"-big.Int","t":"2023-11-09T08:33:19.46455928+01:00"}
|
{"t":"2023-11-22T15:42:00.408084+08:00","lvl":"info","msg":"-big.Int","-111,222,333,444,555,678,999":"-111222333444555678999"}
|
||||||
{"11,122,233,344,455,567,899,900":"11122233344455567899900","lvl":"info","msg":"big.Int","t":"2023-11-09T08:33:19.464582073+01:00"}
|
{"t":"2023-11-22T15:42:00.408092+08:00","lvl":"info","msg":"big.Int","11,122,233,344,455,567,899,900":"11122233344455567899900"}
|
||||||
{"-11,122,233,344,455,567,899,900":"-11122233344455567899900","lvl":"info","msg":"-big.Int","t":"2023-11-09T08:33:19.464594846+01:00"}
|
{"t":"2023-11-22T15:42:00.408097+08:00","lvl":"info","msg":"-big.Int","-11,122,233,344,455,567,899,900":"-11122233344455567899900"}
|
||||||
{"111,222,333,444,555,678,999":"0x607851afc94ca2517","lvl":"info","msg":"uint256","t":"2023-11-09T08:33:19.464607873+01:00"}
|
{"t":"2023-11-22T15:42:00.408127+08:00","lvl":"info","msg":"uint256","111,222,333,444,555,678,999":"111222333444555678999"}
|
||||||
{"11,122,233,344,455,567,899,900":"0x25aeffe8aaa1ef67cfc","lvl":"info","msg":"uint256","t":"2023-11-09T08:33:19.464694639+01:00"}
|
{"t":"2023-11-22T15:42:00.408133+08:00","lvl":"info","msg":"uint256","11,122,233,344,455,567,899,900":"11122233344455567899900"}
|
||||||
{"1,000,000":1000000,"lvl":"info","msg":"int64","t":"2023-11-09T08:33:19.464708835+01:00"}
|
{"t":"2023-11-22T15:42:00.408137+08:00","lvl":"info","msg":"int64","1,000,000":1000000}
|
||||||
{"-1,000,000":-1000000,"lvl":"info","msg":"int64","t":"2023-11-09T08:33:19.464725054+01:00"}
|
{"t":"2023-11-22T15:42:00.408145+08:00","lvl":"info","msg":"int64","-1,000,000":-1000000}
|
||||||
{"9,223,372,036,854,775,807":9223372036854775807,"lvl":"info","msg":"int64","t":"2023-11-09T08:33:19.464735773+01:00"}
|
{"t":"2023-11-22T15:42:00.408149+08:00","lvl":"info","msg":"int64","9,223,372,036,854,775,807":9223372036854775807}
|
||||||
{"-9,223,372,036,854,775,808":-9223372036854775808,"lvl":"info","msg":"int64","t":"2023-11-09T08:33:19.464744532+01:00"}
|
{"t":"2023-11-22T15:42:00.408153+08:00","lvl":"info","msg":"int64","-9,223,372,036,854,775,808":-9223372036854775808}
|
||||||
{"1,000,000":1000000,"lvl":"info","msg":"uint64","t":"2023-11-09T08:33:19.464752807+01:00"}
|
{"t":"2023-11-22T15:42:00.408156+08:00","lvl":"info","msg":"uint64","1,000,000":1000000}
|
||||||
{"18,446,744,073,709,551,615":18446744073709551615,"lvl":"info","msg":"uint64","t":"2023-11-09T08:33:19.464779296+01:00"}
|
{"t":"2023-11-22T15:42:00.40816+08:00","lvl":"info","msg":"uint64","18,446,744,073,709,551,615":18446744073709551615}
|
||||||
{"key":"special \r\n\t chars","lvl":"info","msg":"Special chars in value","t":"2023-11-09T08:33:19.464794181+01:00"}
|
{"t":"2023-11-22T15:42:00.408164+08:00","lvl":"info","msg":"Special chars in value","key":"special \r\n\t chars"}
|
||||||
{"lvl":"info","msg":"Special chars in key","special \n\t chars":"value","t":"2023-11-09T08:33:19.464827197+01:00"}
|
{"t":"2023-11-22T15:42:00.408167+08:00","lvl":"info","msg":"Special chars in key","special \n\t chars":"value"}
|
||||||
{"lvl":"info","msg":"nospace","nospace":"nospace","t":"2023-11-09T08:33:19.464841118+01:00"}
|
{"t":"2023-11-22T15:42:00.408171+08:00","lvl":"info","msg":"nospace","nospace":"nospace"}
|
||||||
{"lvl":"info","msg":"with space","t":"2023-11-09T08:33:19.464862818+01:00","with nospace":"with nospace"}
|
{"t":"2023-11-22T15:42:00.408174+08:00","lvl":"info","msg":"with space","with nospace":"with nospace"}
|
||||||
{"key":"\u001b[1G\u001b[K\u001b[1A","lvl":"info","msg":"Bash escapes in value","t":"2023-11-09T08:33:19.464876802+01:00"}
|
{"t":"2023-11-22T15:42:00.408178+08:00","lvl":"info","msg":"Bash escapes in value","key":"\u001b[1G\u001b[K\u001b[1A"}
|
||||||
{"\u001b[1G\u001b[K\u001b[1A":"value","lvl":"info","msg":"Bash escapes in key","t":"2023-11-09T08:33:19.464885416+01:00"}
|
{"t":"2023-11-22T15:42:00.408182+08:00","lvl":"info","msg":"Bash escapes in key","\u001b[1G\u001b[K\u001b[1A":"value"}
|
||||||
{"key":"value","lvl":"info","msg":"Bash escapes in message \u001b[1G\u001b[K\u001b[1A end","t":"2023-11-09T08:33:19.464906946+01:00"}
|
{"t":"2023-11-22T15:42:00.408186+08:00","lvl":"info","msg":"Bash escapes in message \u001b[1G\u001b[K\u001b[1A end","key":"value"}
|
||||||
{"\u001b[35mColored\u001b[0m[":"\u001b[35mColored\u001b[0m[","lvl":"info","msg":"\u001b[35mColored\u001b[0m[","t":"2023-11-09T08:33:19.464921455+01:00"}
|
{"t":"2023-11-22T15:42:00.408194+08:00","lvl":"info","msg":"\u001b[35mColored\u001b[0m[","\u001b[35mColored\u001b[0m[":"\u001b[35mColored\u001b[0m["}
|
||||||
{"2562047h47m16.854s":"2562047h47m16.854s","lvl":"info","msg":"Custom Stringer value","t":"2023-11-09T08:33:19.464943893+01:00"}
|
{"t":"2023-11-22T15:42:00.408197+08:00","lvl":"info","msg":"an error message with quotes","error":"this is an 'error'"}
|
||||||
{"key":"lazy value","lvl":"info","msg":"Lazy evaluation of value","t":"2023-11-09T08:33:19.465013552+01:00"}
|
{"t":"2023-11-22T15:42:00.408202+08:00","lvl":"info","msg":"Custom Stringer value","2562047h47m16.854s":"2562047h47m16.854s"}
|
||||||
{"lvl":"info","msg":"A message with wonky 💩 characters","t":"2023-11-09T08:33:19.465069437+01:00"}
|
{"t":"2023-11-22T15:42:00.408208+08:00","lvl":"info","msg":"a custom stringer that emits quoted text","output":"output with 'quotes'"}
|
||||||
{"lvl":"info","msg":"A multiline message \nINFO [10-18|14:11:31.106] with wonky characters 💩","t":"2023-11-09T08:33:19.465083053+01:00"}
|
{"t":"2023-11-22T15:42:00.408219+08:00","lvl":"info","msg":"A message with wonky 💩 characters"}
|
||||||
{"lvl":"info","msg":"A multiline message \nLALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above","t":"2023-11-09T08:33:19.465104289+01:00"}
|
{"t":"2023-11-22T15:42:00.408222+08:00","lvl":"info","msg":"A multiline message \nINFO [10-18|14:11:31.106] with wonky characters 💩"}
|
||||||
{"false":"false","lvl":"info","msg":"boolean","t":"2023-11-09T08:33:19.465117185+01:00","true":"true"}
|
{"t":"2023-11-22T15:42:00.408226+08:00","lvl":"info","msg":"A multiline message \nLALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above"}
|
||||||
{"foo":"beta","lvl":"info","msg":"repeated-key 1","t":"2023-11-09T08:33:19.465143425+01:00"}
|
{"t":"2023-11-22T15:42:00.408229+08:00","lvl":"info","msg":"boolean","true":true,"false":false}
|
||||||
{"lvl":"info","msg":"repeated-key 2","t":"2023-11-09T08:33:19.465156323+01:00","xx":"longer"}
|
{"t":"2023-11-22T15:42:00.408234+08:00","lvl":"info","msg":"repeated-key 1","foo":"alpha","foo":"beta"}
|
||||||
{"lvl":"info","msg":"log at level info","t":"2023-11-09T08:33:19.465193158+01:00"}
|
{"t":"2023-11-22T15:42:00.408237+08:00","lvl":"info","msg":"repeated-key 2","xx":"short","xx":"longer"}
|
||||||
{"lvl":"warn","msg":"log at level warn","t":"2023-11-09T08:33:19.465228964+01:00"}
|
{"t":"2023-11-22T15:42:00.408241+08:00","lvl":"info","msg":"log at level info"}
|
||||||
{"lvl":"eror","msg":"log at level error","t":"2023-11-09T08:33:19.465240352+01:00"}
|
{"t":"2023-11-22T15:42:00.408244+08:00","lvl":"warn","msg":"log at level warn"}
|
||||||
{"a":"aligned left","bar":"short","lvl":"info","msg":"test","t":"2023-11-09T08:33:19.465247226+01:00"}
|
{"t":"2023-11-22T15:42:00.408247+08:00","lvl":"eror","msg":"log at level error"}
|
||||||
{"a":1,"bar":"a long message","lvl":"info","msg":"test","t":"2023-11-09T08:33:19.465269028+01:00"}
|
{"t":"2023-11-22T15:42:00.408251+08:00","lvl":"info","msg":"test","bar":"short","a":"aligned left"}
|
||||||
{"a":"aligned right","bar":"short","lvl":"info","msg":"test","t":"2023-11-09T08:33:19.465313611+01:00"}
|
{"t":"2023-11-22T15:42:00.408254+08:00","lvl":"info","msg":"test","bar":"a long message","a":1}
|
||||||
{"lvl":"info","msg":"The following logs should align so that the key-fields make 5 columns","t":"2023-11-09T08:33:19.465328188+01:00"}
|
{"t":"2023-11-22T15:42:00.408258+08:00","lvl":"info","msg":"test","bar":"short","a":"aligned right"}
|
||||||
{"gas":1123123,"hash":"0x0000000000000000000000000000000000000000000000000000000000001234","lvl":"info","msg":"Inserted known block","number":1012,"other":"first","t":"2023-11-09T08:33:19.465350507+01:00","txs":200}
|
{"t":"2023-11-22T15:42:00.408261+08:00","lvl":"info","msg":"The following logs should align so that the key-fields make 5 columns"}
|
||||||
{"gas":1123,"hash":"0x0000000000000000000000000000000000000000000000000000000000001235","lvl":"info","msg":"Inserted new block","number":1,"other":"second","t":"2023-11-09T08:33:19.465387952+01:00","txs":2}
|
{"t":"2023-11-22T15:42:00.408275+08:00","lvl":"info","msg":"Inserted known block","number":1012,"hash":"0x0000000000000000000000000000000000000000000000000000000000001234","txs":200,"gas":1123123,"other":"first"}
|
||||||
{"gas":1,"hash":"0x0000000000000000000000000000000000000000000000000000000000012322","lvl":"info","msg":"Inserted known block","number":99,"other":"third","t":"2023-11-09T08:33:19.465406687+01:00","txs":10}
|
{"t":"2023-11-22T15:42:00.408281+08:00","lvl":"info","msg":"Inserted new block","number":1,"hash":"0x0000000000000000000000000000000000000000000000000000000000001235","txs":2,"gas":1123,"other":"second"}
|
||||||
{"gas":99,"hash":"0x0000000000000000000000000000000000000000000000000000000000001234","lvl":"warn","msg":"Inserted known block","number":1012,"other":"fourth","t":"2023-11-09T08:33:19.465433025+01:00","txs":200}
|
{"t":"2023-11-22T15:42:00.408287+08:00","lvl":"info","msg":"Inserted known block","number":99,"hash":"0x0000000000000000000000000000000000000000000000000000000000012322","txs":10,"gas":1,"other":"third"}
|
||||||
{"\u003cnil\u003e":"\u003cnil\u003e","lvl":"info","msg":"(*big.Int)(nil)","t":"2023-11-09T08:33:19.465450283+01:00"}
|
{"t":"2023-11-22T15:42:00.408296+08:00","lvl":"warn","msg":"Inserted known block","number":1012,"hash":"0x0000000000000000000000000000000000000000000000000000000000001234","txs":200,"gas":99,"other":"fourth"}
|
||||||
{"\u003cnil\u003e":"nil","lvl":"info","msg":"(*uint256.Int)(nil)","t":"2023-11-09T08:33:19.465472953+01:00"}
|
{"t":"2023-11-22T15:42:00.4083+08:00","lvl":"info","msg":"(*big.Int)(nil)","<nil>":"<nil>"}
|
||||||
{"lvl":"info","msg":"(fmt.Stringer)(nil)","res":"\u003cnil\u003e","t":"2023-11-09T08:33:19.465538633+01:00"}
|
{"t":"2023-11-22T15:42:00.408303+08:00","lvl":"info","msg":"(*uint256.Int)(nil)","<nil>":"<nil>"}
|
||||||
{"lvl":"info","msg":"nil-concrete-stringer","res":"nil","t":"2023-11-09T08:33:19.465552355+01:00"}
|
{"t":"2023-11-22T15:42:00.408311+08:00","lvl":"info","msg":"(fmt.Stringer)(nil)","res":null}
|
||||||
{"lvl":"info","msg":"error(nil) ","res":"\u003cnil\u003e","t":"2023-11-09T08:33:19.465601029+01:00"}
|
{"t":"2023-11-22T15:42:00.408318+08:00","lvl":"info","msg":"nil-concrete-stringer","res":"<nil>"}
|
||||||
{"lvl":"info","msg":"nil-concrete-error","res":"","t":"2023-11-09T08:33:19.46561622+01:00"}
|
{"t":"2023-11-22T15:42:00.408322+08:00","lvl":"info","msg":"error(nil) ","res":null}
|
||||||
{"lvl":"info","msg":"nil-custom-struct","res":"\u003cnil\u003e","t":"2023-11-09T08:33:19.465638888+01:00"}
|
{"t":"2023-11-22T15:42:00.408326+08:00","lvl":"info","msg":"nil-concrete-error","res":""}
|
||||||
{"lvl":"info","msg":"raw nil","res":"\u003cnil\u003e","t":"2023-11-09T08:33:19.465673664+01:00"}
|
{"t":"2023-11-22T15:42:00.408334+08:00","lvl":"info","msg":"nil-custom-struct","res":null}
|
||||||
{"lvl":"info","msg":"(*uint64)(nil)","res":"\u003cnil\u003e","t":"2023-11-09T08:33:19.465700264+01:00"}
|
{"t":"2023-11-22T15:42:00.40835+08:00","lvl":"info","msg":"raw nil","res":null}
|
||||||
{"level":"level","lvl":"lvl","msg":"msg","t":"t","time":"time"}
|
{"t":"2023-11-22T15:42:00.408354+08:00","lvl":"info","msg":"(*uint64)(nil)","res":null}
|
||||||
|
{"t":"2023-11-22T15:42:00.408361+08:00","lvl":"info","msg":"Using keys 't', 'lvl', 'time', 'level' and 'msg'","t":"t","time":"time","lvl":"lvl","level":"level","msg":"msg"}
|
||||||
|
{"t":"2023-11-29T15:13:00.195655931+01:00","lvl":"info","msg":"Odd pair (1 attr)","key":null,"LOG_ERROR":"Normalized odd number of arguments by adding nil"}
|
||||||
|
{"t":"2023-11-29T15:13:00.195681832+01:00","lvl":"info","msg":"Odd pair (3 attr)","key":"value","key2":null,"LOG_ERROR":"Normalized odd number of arguments by adding nil"}
|
||||||
|
101
cmd/geth/testdata/logging/logtest-logfmt.txt
vendored
101
cmd/geth/testdata/logging/logtest-logfmt.txt
vendored
@ -1,49 +1,52 @@
|
|||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=big.Int 111,222,333,444,555,678,999=111,222,333,444,555,678,999
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=big.Int 111,222,333,444,555,678,999=111222333444555678999
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=-big.Int -111,222,333,444,555,678,999=-111,222,333,444,555,678,999
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=-big.Int -111,222,333,444,555,678,999=-111222333444555678999
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=big.Int 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=big.Int 11,122,233,344,455,567,899,900=11122233344455567899900
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=-big.Int -11,122,233,344,455,567,899,900=-11,122,233,344,455,567,899,900
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=-big.Int -11,122,233,344,455,567,899,900=-11122233344455567899900
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=uint256 111,222,333,444,555,678,999=111,222,333,444,555,678,999
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=uint256 111,222,333,444,555,678,999=111222333444555678999
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=uint256 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=uint256 11,122,233,344,455,567,899,900=11122233344455567899900
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=int64 1,000,000=1,000,000
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=int64 1,000,000=1000000
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=int64 -1,000,000=-1,000,000
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=int64 -1,000,000=-1000000
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=int64 9,223,372,036,854,775,807=9,223,372,036,854,775,807
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=int64 9,223,372,036,854,775,807=9223372036854775807
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=int64 -9,223,372,036,854,775,808=-9,223,372,036,854,775,808
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=int64 -9,223,372,036,854,775,808=-9223372036854775808
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=uint64 1,000,000=1,000,000
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=uint64 1,000,000=1000000
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=uint64 18,446,744,073,709,551,615=18,446,744,073,709,551,615
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=uint64 18,446,744,073,709,551,615=18446744073709551615
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Special chars in value" key="special \r\n\t chars"
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Special chars in value" key="special \r\n\t chars"
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Special chars in key" "special \n\t chars"=value
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Special chars in key" "special \n\t chars"=value
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=nospace nospace=nospace
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=nospace nospace=nospace
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="with space" "with nospace"="with nospace"
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="with space" "with nospace"="with nospace"
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Bash escapes in value" key="\x1b[1G\x1b[K\x1b[1A"
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Bash escapes in value" key="\x1b[1G\x1b[K\x1b[1A"
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Bash escapes in key" "\x1b[1G\x1b[K\x1b[1A"=value
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Bash escapes in key" "\x1b[1G\x1b[K\x1b[1A"=value
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m["
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m["
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Custom Stringer value" 2562047h47m16.854s=2562047h47m16.854s
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="an error message with quotes" error="this is an 'error'"
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Lazy evaluation of value" key="lazy value"
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Custom Stringer value" 2562047h47m16.854s=2562047h47m16.854s
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="A message with wonky 💩 characters"
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="a custom stringer that emits quoted text" output="output with 'quotes'"
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="A multiline message \nINFO [10-18|14:11:31.106] with wonky characters 💩"
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="A message with wonky 💩 characters"
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="A multiline message \nLALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above"
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="A multiline message \nINFO [10-18|14:11:31.106] with wonky characters 💩"
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=boolean true=true false=false
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="A multiline message \nLALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above"
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="repeated-key 1" foo=alpha foo=beta
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=boolean true=true false=false
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="repeated-key 2" xx=short xx=longer
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="repeated-key 1" foo=alpha foo=beta
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="log at level info"
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="repeated-key 2" xx=short xx=longer
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=warn msg="log at level warn"
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="log at level info"
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=eror msg="log at level error"
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=warn msg="log at level warn"
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=test bar=short a="aligned left"
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=eror msg="log at level error"
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=test bar="a long message" a=1
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=test bar=short a="aligned left"
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=test bar=short a="aligned right"
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=test bar="a long message" a=1
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="The following logs should align so that the key-fields make 5 columns"
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=test bar=short a="aligned right"
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Inserted known block" number=1012 hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=1,123,123 other=first
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="The following logs should align so that the key-fields make 5 columns"
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Inserted new block" number=1 hash=0x0000000000000000000000000000000000000000000000000000000000001235 txs=2 gas=1123 other=second
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Inserted known block" number=1012 hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=1123123 other=first
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Inserted known block" number=99 hash=0x0000000000000000000000000000000000000000000000000000000000012322 txs=10 gas=1 other=third
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Inserted new block" number=1 hash=0x0000000000000000000000000000000000000000000000000000000000001235 txs=2 gas=1123 other=second
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=warn msg="Inserted known block" number=1012 hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=99 other=fourth
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Inserted known block" number=99 hash=0x0000000000000000000000000000000000000000000000000000000000012322 txs=10 gas=1 other=third
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=(*big.Int)(nil) <nil>=<nil>
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=warn msg="Inserted known block" number=1012 hash=0x0000000000000000000000000000000000000000000000000000000000001234 txs=200 gas=99 other=fourth
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=(*uint256.Int)(nil) <nil>=<nil>
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=(*big.Int)(nil) <nil>=<nil>
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=(fmt.Stringer)(nil) res=nil
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=(*uint256.Int)(nil) <nil>=<nil>
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=nil-concrete-stringer res=nil
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=(fmt.Stringer)(nil) res=<nil>
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="error(nil) " res=nil
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=nil-concrete-stringer res=<nil>
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=nil-concrete-error res=
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="error(nil) " res=<nil>
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=nil-custom-struct res=<nil>
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=nil-concrete-error res=""
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="raw nil" res=nil
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=nil-custom-struct res=<nil>
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg=(*uint64)(nil) res=<nil>
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="raw nil" res=<nil>
|
||||||
t=xxxxxxxxxxxxxxxxxxxxxxxx lvl=info msg="Using keys 't', 'lvl', 'time', 'level' and 'msg'" t=t time=time lvl=lvl level=level msg=msg
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg=(*uint64)(nil) res=<nil>
|
||||||
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Using keys 't', 'lvl', 'time', 'level' and 'msg'" t=t time=time lvl=lvl level=level msg=msg
|
||||||
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Odd pair (1 attr)" key=<nil> LOG_ERROR="Normalized odd number of arguments by adding nil"
|
||||||
|
t=xxxx-xx-xxTxx:xx:xx+xxxx lvl=info msg="Odd pair (3 attr)" key=value key2=<nil> LOG_ERROR="Normalized odd number of arguments by adding nil"
|
||||||
|
103
cmd/geth/testdata/logging/logtest-terminal.txt
vendored
103
cmd/geth/testdata/logging/logtest-terminal.txt
vendored
@ -1,50 +1,53 @@
|
|||||||
INFO [XX-XX|XX:XX:XX.XXX] big.Int 111,222,333,444,555,678,999=111,222,333,444,555,678,999
|
INFO [xx-xx|xx:xx:xx.xxx] big.Int 111,222,333,444,555,678,999=111,222,333,444,555,678,999
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] -big.Int -111,222,333,444,555,678,999=-111,222,333,444,555,678,999
|
INFO [xx-xx|xx:xx:xx.xxx] -big.Int -111,222,333,444,555,678,999=-111,222,333,444,555,678,999
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] big.Int 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
|
INFO [xx-xx|xx:xx:xx.xxx] big.Int 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] -big.Int -11,122,233,344,455,567,899,900=-11,122,233,344,455,567,899,900
|
INFO [xx-xx|xx:xx:xx.xxx] -big.Int -11,122,233,344,455,567,899,900=-11,122,233,344,455,567,899,900
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] uint256 111,222,333,444,555,678,999=111,222,333,444,555,678,999
|
INFO [xx-xx|xx:xx:xx.xxx] uint256 111,222,333,444,555,678,999=111,222,333,444,555,678,999
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] uint256 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
|
INFO [xx-xx|xx:xx:xx.xxx] uint256 11,122,233,344,455,567,899,900=11,122,233,344,455,567,899,900
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] int64 1,000,000=1,000,000
|
INFO [xx-xx|xx:xx:xx.xxx] int64 1,000,000=1,000,000
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] int64 -1,000,000=-1,000,000
|
INFO [xx-xx|xx:xx:xx.xxx] int64 -1,000,000=-1,000,000
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] int64 9,223,372,036,854,775,807=9,223,372,036,854,775,807
|
INFO [xx-xx|xx:xx:xx.xxx] int64 9,223,372,036,854,775,807=9,223,372,036,854,775,807
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] int64 -9,223,372,036,854,775,808=-9,223,372,036,854,775,808
|
INFO [xx-xx|xx:xx:xx.xxx] int64 -9,223,372,036,854,775,808=-9,223,372,036,854,775,808
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] uint64 1,000,000=1,000,000
|
INFO [xx-xx|xx:xx:xx.xxx] uint64 1,000,000=1,000,000
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] uint64 18,446,744,073,709,551,615=18,446,744,073,709,551,615
|
INFO [xx-xx|xx:xx:xx.xxx] uint64 18,446,744,073,709,551,615=18,446,744,073,709,551,615
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] Special chars in value key="special \r\n\t chars"
|
INFO [xx-xx|xx:xx:xx.xxx] Special chars in value key="special \r\n\t chars"
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] Special chars in key "special \n\t chars"=value
|
INFO [xx-xx|xx:xx:xx.xxx] Special chars in key "special \n\t chars"=value
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] nospace nospace=nospace
|
INFO [xx-xx|xx:xx:xx.xxx] nospace nospace=nospace
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] with space "with nospace"="with nospace"
|
INFO [xx-xx|xx:xx:xx.xxx] with space "with nospace"="with nospace"
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] Bash escapes in value key="\x1b[1G\x1b[K\x1b[1A"
|
INFO [xx-xx|xx:xx:xx.xxx] Bash escapes in value key="\x1b[1G\x1b[K\x1b[1A"
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] Bash escapes in key "\x1b[1G\x1b[K\x1b[1A"=value
|
INFO [xx-xx|xx:xx:xx.xxx] Bash escapes in key "\x1b[1G\x1b[K\x1b[1A"=value
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] "Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value
|
INFO [xx-xx|xx:xx:xx.xxx] "Bash escapes in message \x1b[1G\x1b[K\x1b[1A end" key=value
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] "\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m["
|
INFO [xx-xx|xx:xx:xx.xxx] "\x1b[35mColored\x1b[0m[" "\x1b[35mColored\x1b[0m["="\x1b[35mColored\x1b[0m["
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] Custom Stringer value 2562047h47m16.854s=2562047h47m16.854s
|
INFO [xx-xx|xx:xx:xx.xxx] an error message with quotes error="this is an 'error'"
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] Lazy evaluation of value key="lazy value"
|
INFO [xx-xx|xx:xx:xx.xxx] Custom Stringer value 2562047h47m16.854s=2562047h47m16.854s
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] "A message with wonky 💩 characters"
|
INFO [xx-xx|xx:xx:xx.xxx] a custom stringer that emits quoted text output="output with 'quotes'"
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] "A multiline message \nINFO [10-18|14:11:31.106] with wonky characters 💩"
|
INFO [xx-xx|xx:xx:xx.xxx] "A message with wonky 💩 characters"
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] A multiline message
|
INFO [xx-xx|xx:xx:xx.xxx] "A multiline message \nINFO [10-18|14:11:31.106] with wonky characters 💩"
|
||||||
LALA [XXZXXZXXZXXZXXZXXX] Actually part of message above
|
INFO [xx-xx|xx:xx:xx.xxx] A multiline message
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] boolean true=true false=false
|
LALA [ZZZZZZZZZZZZZZZZZZ] Actually part of message above
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] repeated-key 1 foo=alpha foo=beta
|
INFO [xx-xx|xx:xx:xx.xxx] boolean true=true false=false
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] repeated-key 2 xx=short xx=longer
|
INFO [xx-xx|xx:xx:xx.xxx] repeated-key 1 foo=alpha foo=beta
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] log at level info
|
INFO [xx-xx|xx:xx:xx.xxx] repeated-key 2 xx=short xx=longer
|
||||||
WARN [XX-XX|XX:XX:XX.XXX] log at level warn
|
INFO [xx-xx|xx:xx:xx.xxx] log at level info
|
||||||
ERROR[XX-XX|XX:XX:XX.XXX] log at level error
|
WARN [xx-xx|xx:xx:xx.xxx] log at level warn
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] test bar=short a="aligned left"
|
ERROR[xx-xx|xx:xx:xx.xxx] log at level error
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] test bar="a long message" a=1
|
INFO [xx-xx|xx:xx:xx.xxx] test bar=short a="aligned left"
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] test bar=short a="aligned right"
|
INFO [xx-xx|xx:xx:xx.xxx] test bar="a long message" a=1
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] The following logs should align so that the key-fields make 5 columns
|
INFO [xx-xx|xx:xx:xx.xxx] test bar=short a="aligned right"
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] Inserted known block number=1012 hash=000000..001234 txs=200 gas=1,123,123 other=first
|
INFO [xx-xx|xx:xx:xx.xxx] The following logs should align so that the key-fields make 5 columns
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] Inserted new block number=1 hash=000000..001235 txs=2 gas=1123 other=second
|
INFO [xx-xx|xx:xx:xx.xxx] Inserted known block number=1012 hash=000000..001234 txs=200 gas=1,123,123 other=first
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] Inserted known block number=99 hash=000000..012322 txs=10 gas=1 other=third
|
INFO [xx-xx|xx:xx:xx.xxx] Inserted new block number=1 hash=000000..001235 txs=2 gas=1123 other=second
|
||||||
WARN [XX-XX|XX:XX:XX.XXX] Inserted known block number=1012 hash=000000..001234 txs=200 gas=99 other=fourth
|
INFO [xx-xx|xx:xx:xx.xxx] Inserted known block number=99 hash=000000..012322 txs=10 gas=1 other=third
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] (*big.Int)(nil) <nil>=<nil>
|
WARN [xx-xx|xx:xx:xx.xxx] Inserted known block number=1012 hash=000000..001234 txs=200 gas=99 other=fourth
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] (*uint256.Int)(nil) <nil>=<nil>
|
INFO [xx-xx|xx:xx:xx.xxx] (*big.Int)(nil) <nil>=<nil>
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] (fmt.Stringer)(nil) res=nil
|
INFO [xx-xx|xx:xx:xx.xxx] (*uint256.Int)(nil) <nil>=<nil>
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] nil-concrete-stringer res=nil
|
INFO [xx-xx|xx:xx:xx.xxx] (fmt.Stringer)(nil) res=<nil>
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] error(nil) res=nil
|
INFO [xx-xx|xx:xx:xx.xxx] nil-concrete-stringer res=<nil>
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] nil-concrete-error res=
|
INFO [xx-xx|xx:xx:xx.xxx] error(nil) res=<nil>
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] nil-custom-struct res=<nil>
|
INFO [xx-xx|xx:xx:xx.xxx] nil-concrete-error res=
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] raw nil res=nil
|
INFO [xx-xx|xx:xx:xx.xxx] nil-custom-struct res=<nil>
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] (*uint64)(nil) res=<nil>
|
INFO [xx-xx|xx:xx:xx.xxx] raw nil res=<nil>
|
||||||
INFO [XX-XX|XX:XX:XX.XXX] Using keys 't', 'lvl', 'time', 'level' and 'msg' t=t time=time lvl=lvl level=level msg=msg
|
INFO [xx-xx|xx:xx:xx.xxx] (*uint64)(nil) res=<nil>
|
||||||
|
INFO [xx-xx|xx:xx:xx.xxx] Using keys 't', 'lvl', 'time', 'level' and 'msg' t=t time=time lvl=lvl level=level msg=msg
|
||||||
|
INFO [xx-xx|xx:xx:xx.xxx] Odd pair (1 attr) key=<nil> LOG_ERROR="Normalized odd number of arguments by adding nil"
|
||||||
|
INFO [xx-xx|xx:xx:xx.xxx] Odd pair (3 attr) key=value key2=<nil> LOG_ERROR="Normalized odd number of arguments by adding nil"
|
||||||
|
@ -84,7 +84,7 @@ func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error
|
|||||||
return fmt.Errorf("could not find child %x in db: %w", childC, err)
|
return fmt.Errorf("could not find child %x in db: %w", childC, err)
|
||||||
}
|
}
|
||||||
// depth is set to 0, the tree isn't rebuilt so it's not a problem
|
// depth is set to 0, the tree isn't rebuilt so it's not a problem
|
||||||
childN, err := verkle.ParseNode(childS, 0, childC[:])
|
childN, err := verkle.ParseNode(childS, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("decode error child %x in db: %w", child.Commitment().Bytes(), err)
|
return fmt.Errorf("decode error child %x in db: %w", child.Commitment().Bytes(), err)
|
||||||
}
|
}
|
||||||
@ -145,7 +145,7 @@ func verifyVerkle(ctx *cli.Context) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
root, err := verkle.ParseNode(serializedRoot, 0, rootC[:])
|
root, err := verkle.ParseNode(serializedRoot, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -195,7 +195,7 @@ func expandVerkle(ctx *cli.Context) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
root, err := verkle.ParseNode(serializedRoot, 0, rootC[:])
|
root, err := verkle.ParseNode(serializedRoot, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -30,14 +30,17 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestVerification(t *testing.T) {
|
func TestVerification(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// Signatures generated with `minisign`. Legacy format, not pre-hashed file.
|
// Signatures generated with `minisign`. Legacy format, not pre-hashed file.
|
||||||
t.Run("minisig-legacy", func(t *testing.T) {
|
t.Run("minisig-legacy", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// For this test, the pubkey is in testdata/vcheck/minisign.pub
|
// For this test, the pubkey is in testdata/vcheck/minisign.pub
|
||||||
// (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' )
|
// (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' )
|
||||||
pub := "RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp"
|
pub := "RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp"
|
||||||
testVerification(t, pub, "./testdata/vcheck/minisig-sigs/")
|
testVerification(t, pub, "./testdata/vcheck/minisig-sigs/")
|
||||||
})
|
})
|
||||||
t.Run("minisig-new", func(t *testing.T) {
|
t.Run("minisig-new", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// For this test, the pubkey is in testdata/vcheck/minisign.pub
|
// For this test, the pubkey is in testdata/vcheck/minisign.pub
|
||||||
// (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' )
|
// (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' )
|
||||||
// `minisign -S -s ./minisign.sec -m data.json -x ./minisig-sigs-new/data.json.minisig`
|
// `minisign -S -s ./minisign.sec -m data.json -x ./minisig-sigs-new/data.json.minisig`
|
||||||
@ -46,6 +49,7 @@ func TestVerification(t *testing.T) {
|
|||||||
})
|
})
|
||||||
// Signatures generated with `signify-openbsd`
|
// Signatures generated with `signify-openbsd`
|
||||||
t.Run("signify-openbsd", func(t *testing.T) {
|
t.Run("signify-openbsd", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
t.Skip("This currently fails, minisign expects 4 lines of data, signify provides only 2")
|
t.Skip("This currently fails, minisign expects 4 lines of data, signify provides only 2")
|
||||||
// For this test, the pubkey is in testdata/vcheck/signifykey.pub
|
// For this test, the pubkey is in testdata/vcheck/signifykey.pub
|
||||||
// (the privkey is `signifykey.sec`, if we want to expand this test. Password 'test' )
|
// (the privkey is `signifykey.sec`, if we want to expand this test. Password 'test' )
|
||||||
@ -97,6 +101,7 @@ func versionUint(v string) int {
|
|||||||
|
|
||||||
// TestMatching can be used to check that the regexps are correct
|
// TestMatching can be used to check that the regexps are correct
|
||||||
func TestMatching(t *testing.T) {
|
func TestMatching(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
data, _ := os.ReadFile("./testdata/vcheck/vulnerabilities.json")
|
data, _ := os.ReadFile("./testdata/vcheck/vulnerabilities.json")
|
||||||
var vulns []vulnJson
|
var vulns []vulnJson
|
||||||
if err := json.Unmarshal(data, &vulns); err != nil {
|
if err := json.Unmarshal(data, &vulns); err != nil {
|
||||||
@ -141,6 +146,7 @@ func TestMatching(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestGethPubKeysParseable(t *testing.T) {
|
func TestGethPubKeysParseable(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
for _, pubkey := range gethPubKeys {
|
for _, pubkey := range gethPubKeys {
|
||||||
_, err := minisign.NewPublicKey(pubkey)
|
_, err := minisign.NewPublicKey(pubkey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -150,6 +156,7 @@ func TestGethPubKeysParseable(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestKeyID(t *testing.T) {
|
func TestKeyID(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
type args struct {
|
type args struct {
|
||||||
id [8]byte
|
id [8]byte
|
||||||
}
|
}
|
||||||
@ -163,7 +170,9 @@ func TestKeyID(t *testing.T) {
|
|||||||
{"third key", args{id: extractKeyId(gethPubKeys[2])}, "FD9813B2D2098484"},
|
{"third key", args{id: extractKeyId(gethPubKeys[2])}, "FD9813B2D2098484"},
|
||||||
}
|
}
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
|
tt := tt
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
if got := keyID(tt.args.id); got != tt.want {
|
if got := keyID(tt.args.id); got != tt.want {
|
||||||
t.Errorf("keyID() = %v, want %v", got, tt.want)
|
t.Errorf("keyID() = %v, want %v", got, tt.want)
|
||||||
}
|
}
|
||||||
|
@ -417,9 +417,7 @@ func rpcNode(ctx *cli.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func rpcSubscribe(client *rpc.Client, out io.Writer, method string, args ...string) error {
|
func rpcSubscribe(client *rpc.Client, out io.Writer, method string, args ...string) error {
|
||||||
parts := strings.SplitN(method, "_", 2)
|
namespace, method, _ := strings.Cut(method, "_")
|
||||||
namespace := parts[0]
|
|
||||||
method = parts[1]
|
|
||||||
ch := make(chan interface{})
|
ch := make(chan interface{})
|
||||||
subArgs := make([]interface{}, len(args)+1)
|
subArgs := make([]interface{}, len(args)+1)
|
||||||
subArgs[0] = method
|
subArgs[0] = method
|
||||||
|
@ -27,6 +27,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestRoundtrip(t *testing.T) {
|
func TestRoundtrip(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
for i, want := range []string{
|
for i, want := range []string{
|
||||||
"0xf880806482520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0a1010000000000000000000000000000000000000000000000000000000000000001801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28",
|
"0xf880806482520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0a1010000000000000000000000000000000000000000000000000000000000000001801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28",
|
||||||
"0xd5c0d3cb84746573742a2a808213378667617a6f6e6b",
|
"0xd5c0d3cb84746573742a2a808213378667617a6f6e6b",
|
||||||
@ -51,6 +52,7 @@ func TestRoundtrip(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestTextToRlp(t *testing.T) {
|
func TestTextToRlp(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
type tc struct {
|
type tc struct {
|
||||||
text string
|
text string
|
||||||
want string
|
want string
|
||||||
|
@ -33,6 +33,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
|
"github.com/ethereum/go-ethereum/core/state/snapshot"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||||
@ -374,6 +375,101 @@ func ExportPreimages(db ethdb.Database, fn string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ExportSnapshotPreimages exports the preimages corresponding to the enumeration of
|
||||||
|
// the snapshot for a given root.
|
||||||
|
func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn string, root common.Hash) error {
|
||||||
|
log.Info("Exporting preimages", "file", fn)
|
||||||
|
|
||||||
|
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer fh.Close()
|
||||||
|
|
||||||
|
// Enable gzip compressing if file name has gz suffix.
|
||||||
|
var writer io.Writer = fh
|
||||||
|
if strings.HasSuffix(fn, ".gz") {
|
||||||
|
gz := gzip.NewWriter(writer)
|
||||||
|
defer gz.Close()
|
||||||
|
writer = gz
|
||||||
|
}
|
||||||
|
buf := bufio.NewWriter(writer)
|
||||||
|
defer buf.Flush()
|
||||||
|
writer = buf
|
||||||
|
|
||||||
|
type hashAndPreimageSize struct {
|
||||||
|
Hash common.Hash
|
||||||
|
Size int
|
||||||
|
}
|
||||||
|
hashCh := make(chan hashAndPreimageSize)
|
||||||
|
|
||||||
|
var (
|
||||||
|
start = time.Now()
|
||||||
|
logged = time.Now()
|
||||||
|
preimages int
|
||||||
|
)
|
||||||
|
go func() {
|
||||||
|
defer close(hashCh)
|
||||||
|
accIt, err := snaptree.AccountIterator(root, common.Hash{})
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Failed to create account iterator", "error", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer accIt.Release()
|
||||||
|
|
||||||
|
for accIt.Next() {
|
||||||
|
acc, err := types.FullAccount(accIt.Account())
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Failed to get full account", "error", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
preimages += 1
|
||||||
|
hashCh <- hashAndPreimageSize{Hash: accIt.Hash(), Size: common.AddressLength}
|
||||||
|
|
||||||
|
if acc.Root != (common.Hash{}) && acc.Root != types.EmptyRootHash {
|
||||||
|
stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{})
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Failed to create storage iterator", "error", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for stIt.Next() {
|
||||||
|
preimages += 1
|
||||||
|
hashCh <- hashAndPreimageSize{Hash: stIt.Hash(), Size: common.HashLength}
|
||||||
|
|
||||||
|
if time.Since(logged) > time.Second*8 {
|
||||||
|
logged = time.Now()
|
||||||
|
log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stIt.Release()
|
||||||
|
}
|
||||||
|
if time.Since(logged) > time.Second*8 {
|
||||||
|
logged = time.Now()
|
||||||
|
log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
for item := range hashCh {
|
||||||
|
preimage := rawdb.ReadPreimage(chaindb, item.Hash)
|
||||||
|
if len(preimage) == 0 {
|
||||||
|
return fmt.Errorf("missing preimage for %v", item.Hash)
|
||||||
|
}
|
||||||
|
if len(preimage) != item.Size {
|
||||||
|
return fmt.Errorf("invalid preimage size, have %d", len(preimage))
|
||||||
|
}
|
||||||
|
rlpenc, err := rlp.EncodeToBytes(preimage)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error encoding preimage: %w", err)
|
||||||
|
}
|
||||||
|
if _, err := writer.Write(rlpenc); err != nil {
|
||||||
|
return fmt.Errorf("failed to write preimage: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.Info("Exported preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)), "file", fn)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// exportHeader is used in the export/import flow. When we do an export,
|
// exportHeader is used in the export/import flow. When we do an export,
|
||||||
// the first element we output is the exportHeader.
|
// the first element we output is the exportHeader.
|
||||||
// Whenever a backwards-incompatible change is made, the Version header
|
// Whenever a backwards-incompatible change is made, the Version header
|
||||||
@ -460,7 +556,7 @@ func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan
|
|||||||
case OpBatchAdd:
|
case OpBatchAdd:
|
||||||
batch.Put(key, val)
|
batch.Put(key, val)
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unknown op %d\n", op)
|
return fmt.Errorf("unknown op %d", op)
|
||||||
}
|
}
|
||||||
if batch.ValueSize() > ethdb.IdealBatchSize {
|
if batch.ValueSize() > ethdb.IdealBatchSize {
|
||||||
if err := batch.Write(); err != nil {
|
if err := batch.Write(); err != nil {
|
||||||
|
@ -170,6 +170,7 @@ func testDeletion(t *testing.T, f string) {
|
|||||||
|
|
||||||
// TestImportFutureFormat tests that we reject unsupported future versions.
|
// TestImportFutureFormat tests that we reject unsupported future versions.
|
||||||
func TestImportFutureFormat(t *testing.T) {
|
func TestImportFutureFormat(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
f := fmt.Sprintf("%v/tempdump-future", os.TempDir())
|
f := fmt.Sprintf("%v/tempdump-future", os.TempDir())
|
||||||
defer func() {
|
defer func() {
|
||||||
os.Remove(f)
|
os.Remove(f)
|
||||||
|
@ -57,7 +57,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/graphql"
|
"github.com/ethereum/go-ethereum/graphql"
|
||||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||||
"github.com/ethereum/go-ethereum/internal/flags"
|
"github.com/ethereum/go-ethereum/internal/flags"
|
||||||
"github.com/ethereum/go-ethereum/les"
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/metrics/exp"
|
"github.com/ethereum/go-ethereum/metrics/exp"
|
||||||
@ -263,7 +262,7 @@ var (
|
|||||||
}
|
}
|
||||||
SyncModeFlag = &flags.TextMarshalerFlag{
|
SyncModeFlag = &flags.TextMarshalerFlag{
|
||||||
Name: "syncmode",
|
Name: "syncmode",
|
||||||
Usage: `Blockchain sync mode ("snap", "full" or "light")`,
|
Usage: `Blockchain sync mode ("snap" or "full")`,
|
||||||
Value: &defaultSyncMode,
|
Value: &defaultSyncMode,
|
||||||
Category: flags.StateCategory,
|
Category: flags.StateCategory,
|
||||||
}
|
}
|
||||||
@ -290,41 +289,6 @@ var (
|
|||||||
Value: ethconfig.Defaults.TransactionHistory,
|
Value: ethconfig.Defaults.TransactionHistory,
|
||||||
Category: flags.StateCategory,
|
Category: flags.StateCategory,
|
||||||
}
|
}
|
||||||
// Light server and client settings
|
|
||||||
LightServeFlag = &cli.IntFlag{
|
|
||||||
Name: "light.serve",
|
|
||||||
Usage: "Maximum percentage of time allowed for serving LES requests (multi-threaded processing allows values over 100)",
|
|
||||||
Value: ethconfig.Defaults.LightServ,
|
|
||||||
Category: flags.LightCategory,
|
|
||||||
}
|
|
||||||
LightIngressFlag = &cli.IntFlag{
|
|
||||||
Name: "light.ingress",
|
|
||||||
Usage: "Incoming bandwidth limit for serving light clients (kilobytes/sec, 0 = unlimited)",
|
|
||||||
Value: ethconfig.Defaults.LightIngress,
|
|
||||||
Category: flags.LightCategory,
|
|
||||||
}
|
|
||||||
LightEgressFlag = &cli.IntFlag{
|
|
||||||
Name: "light.egress",
|
|
||||||
Usage: "Outgoing bandwidth limit for serving light clients (kilobytes/sec, 0 = unlimited)",
|
|
||||||
Value: ethconfig.Defaults.LightEgress,
|
|
||||||
Category: flags.LightCategory,
|
|
||||||
}
|
|
||||||
LightMaxPeersFlag = &cli.IntFlag{
|
|
||||||
Name: "light.maxpeers",
|
|
||||||
Usage: "Maximum number of light clients to serve, or light servers to attach to",
|
|
||||||
Value: ethconfig.Defaults.LightPeers,
|
|
||||||
Category: flags.LightCategory,
|
|
||||||
}
|
|
||||||
LightNoPruneFlag = &cli.BoolFlag{
|
|
||||||
Name: "light.nopruning",
|
|
||||||
Usage: "Disable ancient light chain data pruning",
|
|
||||||
Category: flags.LightCategory,
|
|
||||||
}
|
|
||||||
LightNoSyncServeFlag = &cli.BoolFlag{
|
|
||||||
Name: "light.nosyncserve",
|
|
||||||
Usage: "Enables serving light clients before syncing",
|
|
||||||
Category: flags.LightCategory,
|
|
||||||
}
|
|
||||||
// Transaction pool settings
|
// Transaction pool settings
|
||||||
TxPoolLocalsFlag = &cli.StringFlag{
|
TxPoolLocalsFlag = &cli.StringFlag{
|
||||||
Name: "txpool.locals",
|
Name: "txpool.locals",
|
||||||
@ -1137,8 +1101,10 @@ func SplitAndTrim(input string) (ret []string) {
|
|||||||
// setHTTP creates the HTTP RPC listener interface string from the set
|
// setHTTP creates the HTTP RPC listener interface string from the set
|
||||||
// command line flags, returning empty if the HTTP endpoint is disabled.
|
// command line flags, returning empty if the HTTP endpoint is disabled.
|
||||||
func setHTTP(ctx *cli.Context, cfg *node.Config) {
|
func setHTTP(ctx *cli.Context, cfg *node.Config) {
|
||||||
if ctx.Bool(HTTPEnabledFlag.Name) && cfg.HTTPHost == "" {
|
if ctx.Bool(HTTPEnabledFlag.Name) {
|
||||||
cfg.HTTPHost = "127.0.0.1"
|
if cfg.HTTPHost == "" {
|
||||||
|
cfg.HTTPHost = "127.0.0.1"
|
||||||
|
}
|
||||||
if ctx.IsSet(HTTPListenAddrFlag.Name) {
|
if ctx.IsSet(HTTPListenAddrFlag.Name) {
|
||||||
cfg.HTTPHost = ctx.String(HTTPListenAddrFlag.Name)
|
cfg.HTTPHost = ctx.String(HTTPListenAddrFlag.Name)
|
||||||
}
|
}
|
||||||
@ -1202,8 +1168,10 @@ func setGraphQL(ctx *cli.Context, cfg *node.Config) {
|
|||||||
// setWS creates the WebSocket RPC listener interface string from the set
|
// setWS creates the WebSocket RPC listener interface string from the set
|
||||||
// command line flags, returning empty if the HTTP endpoint is disabled.
|
// command line flags, returning empty if the HTTP endpoint is disabled.
|
||||||
func setWS(ctx *cli.Context, cfg *node.Config) {
|
func setWS(ctx *cli.Context, cfg *node.Config) {
|
||||||
if ctx.Bool(WSEnabledFlag.Name) && cfg.WSHost == "" {
|
if ctx.Bool(WSEnabledFlag.Name) {
|
||||||
cfg.WSHost = "127.0.0.1"
|
if cfg.WSHost == "" {
|
||||||
|
cfg.WSHost = "127.0.0.1"
|
||||||
|
}
|
||||||
if ctx.IsSet(WSListenAddrFlag.Name) {
|
if ctx.IsSet(WSListenAddrFlag.Name) {
|
||||||
cfg.WSHost = ctx.String(WSListenAddrFlag.Name)
|
cfg.WSHost = ctx.String(WSListenAddrFlag.Name)
|
||||||
}
|
}
|
||||||
@ -1237,25 +1205,25 @@ func setIPC(ctx *cli.Context, cfg *node.Config) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// setLes configures the les server and ultra light client settings from the command line flags.
|
// setLes shows the deprecation warnings for LES flags.
|
||||||
func setLes(ctx *cli.Context, cfg *ethconfig.Config) {
|
func setLes(ctx *cli.Context, cfg *ethconfig.Config) {
|
||||||
if ctx.IsSet(LightServeFlag.Name) {
|
if ctx.IsSet(LightServeFlag.Name) {
|
||||||
cfg.LightServ = ctx.Int(LightServeFlag.Name)
|
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightServeFlag.Name)
|
||||||
}
|
}
|
||||||
if ctx.IsSet(LightIngressFlag.Name) {
|
if ctx.IsSet(LightIngressFlag.Name) {
|
||||||
cfg.LightIngress = ctx.Int(LightIngressFlag.Name)
|
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightIngressFlag.Name)
|
||||||
}
|
}
|
||||||
if ctx.IsSet(LightEgressFlag.Name) {
|
if ctx.IsSet(LightEgressFlag.Name) {
|
||||||
cfg.LightEgress = ctx.Int(LightEgressFlag.Name)
|
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightEgressFlag.Name)
|
||||||
}
|
}
|
||||||
if ctx.IsSet(LightMaxPeersFlag.Name) {
|
if ctx.IsSet(LightMaxPeersFlag.Name) {
|
||||||
cfg.LightPeers = ctx.Int(LightMaxPeersFlag.Name)
|
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightMaxPeersFlag.Name)
|
||||||
}
|
}
|
||||||
if ctx.IsSet(LightNoPruneFlag.Name) {
|
if ctx.IsSet(LightNoPruneFlag.Name) {
|
||||||
cfg.LightNoPrune = ctx.Bool(LightNoPruneFlag.Name)
|
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightNoPruneFlag.Name)
|
||||||
}
|
}
|
||||||
if ctx.IsSet(LightNoSyncServeFlag.Name) {
|
if ctx.IsSet(LightNoSyncServeFlag.Name) {
|
||||||
cfg.LightNoSyncServe = ctx.Bool(LightNoSyncServeFlag.Name)
|
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightNoSyncServeFlag.Name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1353,58 +1321,24 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
|
|||||||
setBootstrapNodes(ctx, cfg)
|
setBootstrapNodes(ctx, cfg)
|
||||||
setBootstrapNodesV5(ctx, cfg)
|
setBootstrapNodesV5(ctx, cfg)
|
||||||
|
|
||||||
lightClient := ctx.String(SyncModeFlag.Name) == "light"
|
|
||||||
lightServer := (ctx.Int(LightServeFlag.Name) != 0)
|
|
||||||
|
|
||||||
lightPeers := ctx.Int(LightMaxPeersFlag.Name)
|
|
||||||
if lightClient && !ctx.IsSet(LightMaxPeersFlag.Name) {
|
|
||||||
// dynamic default - for clients we use 1/10th of the default for servers
|
|
||||||
lightPeers /= 10
|
|
||||||
}
|
|
||||||
|
|
||||||
if ctx.IsSet(MaxPeersFlag.Name) {
|
if ctx.IsSet(MaxPeersFlag.Name) {
|
||||||
cfg.MaxPeers = ctx.Int(MaxPeersFlag.Name)
|
cfg.MaxPeers = ctx.Int(MaxPeersFlag.Name)
|
||||||
if lightServer && !ctx.IsSet(LightMaxPeersFlag.Name) {
|
|
||||||
cfg.MaxPeers += lightPeers
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if lightServer {
|
|
||||||
cfg.MaxPeers += lightPeers
|
|
||||||
}
|
|
||||||
if lightClient && ctx.IsSet(LightMaxPeersFlag.Name) && cfg.MaxPeers < lightPeers {
|
|
||||||
cfg.MaxPeers = lightPeers
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if !(lightClient || lightServer) {
|
ethPeers := cfg.MaxPeers
|
||||||
lightPeers = 0
|
log.Info("Maximum peer count", "ETH", ethPeers, "total", cfg.MaxPeers)
|
||||||
}
|
|
||||||
ethPeers := cfg.MaxPeers - lightPeers
|
|
||||||
if lightClient {
|
|
||||||
ethPeers = 0
|
|
||||||
}
|
|
||||||
log.Info("Maximum peer count", "ETH", ethPeers, "LES", lightPeers, "total", cfg.MaxPeers)
|
|
||||||
|
|
||||||
if ctx.IsSet(MaxPendingPeersFlag.Name) {
|
if ctx.IsSet(MaxPendingPeersFlag.Name) {
|
||||||
cfg.MaxPendingPeers = ctx.Int(MaxPendingPeersFlag.Name)
|
cfg.MaxPendingPeers = ctx.Int(MaxPendingPeersFlag.Name)
|
||||||
}
|
}
|
||||||
if ctx.IsSet(NoDiscoverFlag.Name) || lightClient {
|
if ctx.IsSet(NoDiscoverFlag.Name) {
|
||||||
cfg.NoDiscovery = true
|
cfg.NoDiscovery = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// Disallow --nodiscover when used in conjunction with light mode.
|
|
||||||
if (lightClient || lightServer) && ctx.Bool(NoDiscoverFlag.Name) {
|
|
||||||
Fatalf("Cannot use --" + NoDiscoverFlag.Name + " in light client or light server mode")
|
|
||||||
}
|
|
||||||
CheckExclusive(ctx, DiscoveryV4Flag, NoDiscoverFlag)
|
CheckExclusive(ctx, DiscoveryV4Flag, NoDiscoverFlag)
|
||||||
CheckExclusive(ctx, DiscoveryV5Flag, NoDiscoverFlag)
|
CheckExclusive(ctx, DiscoveryV5Flag, NoDiscoverFlag)
|
||||||
cfg.DiscoveryV4 = ctx.Bool(DiscoveryV4Flag.Name)
|
cfg.DiscoveryV4 = ctx.Bool(DiscoveryV4Flag.Name)
|
||||||
cfg.DiscoveryV5 = ctx.Bool(DiscoveryV5Flag.Name)
|
cfg.DiscoveryV5 = ctx.Bool(DiscoveryV5Flag.Name)
|
||||||
|
|
||||||
// If we're running a light client or server, force enable the v5 peer discovery.
|
|
||||||
if lightClient || lightServer {
|
|
||||||
cfg.DiscoveryV5 = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if netrestrict := ctx.String(NetrestrictFlag.Name); netrestrict != "" {
|
if netrestrict := ctx.String(NetrestrictFlag.Name); netrestrict != "" {
|
||||||
list, err := netutil.ParseNetlist(netrestrict)
|
list, err := netutil.ParseNetlist(netrestrict)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -1472,6 +1406,13 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
|
|||||||
log.Info(fmt.Sprintf("Using %s as db engine", dbEngine))
|
log.Info(fmt.Sprintf("Using %s as db engine", dbEngine))
|
||||||
cfg.DBEngine = dbEngine
|
cfg.DBEngine = dbEngine
|
||||||
}
|
}
|
||||||
|
// deprecation notice for log debug flags (TODO: find a more appropriate place to put these?)
|
||||||
|
if ctx.IsSet(LogBacktraceAtFlag.Name) {
|
||||||
|
log.Warn("log.backtrace flag is deprecated")
|
||||||
|
}
|
||||||
|
if ctx.IsSet(LogDebugFlag.Name) {
|
||||||
|
log.Warn("log.debug flag is deprecated")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func setSmartCard(ctx *cli.Context, cfg *node.Config) {
|
func setSmartCard(ctx *cli.Context, cfg *node.Config) {
|
||||||
@ -1515,12 +1456,7 @@ func SetDataDir(ctx *cli.Context, cfg *node.Config) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func setGPO(ctx *cli.Context, cfg *gasprice.Config, light bool) {
|
func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
|
||||||
// If we are running the light client, apply another group
|
|
||||||
// settings for gas oracle.
|
|
||||||
if light {
|
|
||||||
*cfg = ethconfig.LightClientGPO
|
|
||||||
}
|
|
||||||
if ctx.IsSet(GpoBlocksFlag.Name) {
|
if ctx.IsSet(GpoBlocksFlag.Name) {
|
||||||
cfg.Blocks = ctx.Int(GpoBlocksFlag.Name)
|
cfg.Blocks = ctx.Int(GpoBlocksFlag.Name)
|
||||||
}
|
}
|
||||||
@ -1669,12 +1605,11 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
|
|||||||
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
||||||
// Avoid conflicting network flags
|
// Avoid conflicting network flags
|
||||||
CheckExclusive(ctx, MainnetFlag, DeveloperFlag, GoerliFlag, SepoliaFlag, HoleskyFlag)
|
CheckExclusive(ctx, MainnetFlag, DeveloperFlag, GoerliFlag, SepoliaFlag, HoleskyFlag)
|
||||||
CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
|
|
||||||
CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
|
CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
|
||||||
|
|
||||||
// Set configurations from CLI flags
|
// Set configurations from CLI flags
|
||||||
setEtherbase(ctx, cfg)
|
setEtherbase(ctx, cfg)
|
||||||
setGPO(ctx, &cfg.GPO, ctx.String(SyncModeFlag.Name) == "light")
|
setGPO(ctx, &cfg.GPO)
|
||||||
setTxPool(ctx, &cfg.TxPool)
|
setTxPool(ctx, &cfg.TxPool)
|
||||||
setMiner(ctx, &cfg.Miner)
|
setMiner(ctx, &cfg.Miner)
|
||||||
setRequiredBlocks(ctx, cfg)
|
setRequiredBlocks(ctx, cfg)
|
||||||
@ -1767,9 +1702,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
|||||||
cfg.TransactionHistory = 0
|
cfg.TransactionHistory = 0
|
||||||
log.Warn("Disabled transaction unindexing for archive node")
|
log.Warn("Disabled transaction unindexing for archive node")
|
||||||
}
|
}
|
||||||
if ctx.IsSet(LightServeFlag.Name) && cfg.TransactionHistory != 0 {
|
|
||||||
log.Warn("LES server cannot serve old transaction status and cannot connect below les/4 protocol version if transaction lookup index is limited")
|
|
||||||
}
|
|
||||||
if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheTrieFlag.Name) {
|
if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheTrieFlag.Name) {
|
||||||
cfg.TrieCleanCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheTrieFlag.Name) / 100
|
cfg.TrieCleanCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheTrieFlag.Name) / 100
|
||||||
}
|
}
|
||||||
@ -1782,10 +1714,16 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
|||||||
if ctx.IsSet(CacheLogSizeFlag.Name) {
|
if ctx.IsSet(CacheLogSizeFlag.Name) {
|
||||||
cfg.FilterLogCacheSize = ctx.Int(CacheLogSizeFlag.Name)
|
cfg.FilterLogCacheSize = ctx.Int(CacheLogSizeFlag.Name)
|
||||||
}
|
}
|
||||||
if !ctx.Bool(SnapshotFlag.Name) {
|
if !ctx.Bool(SnapshotFlag.Name) || cfg.SnapshotCache == 0 {
|
||||||
// If snap-sync is requested, this flag is also required
|
// If snap-sync is requested, this flag is also required
|
||||||
if cfg.SyncMode == downloader.SnapSync {
|
if cfg.SyncMode == downloader.SnapSync {
|
||||||
log.Info("Snap sync requested, enabling --snapshot")
|
if !ctx.Bool(SnapshotFlag.Name) {
|
||||||
|
log.Warn("Snap sync requested, enabling --snapshot")
|
||||||
|
}
|
||||||
|
if cfg.SnapshotCache == 0 {
|
||||||
|
log.Warn("Snap sync requested, resetting --cache.snapshot")
|
||||||
|
cfg.SnapshotCache = ctx.Int(CacheFlag.Name) * CacheSnapshotFlag.Value / 100
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
cfg.TrieCleanCache += cfg.SnapshotCache
|
cfg.TrieCleanCache += cfg.SnapshotCache
|
||||||
cfg.SnapshotCache = 0 // Disabled
|
cfg.SnapshotCache = 0 // Disabled
|
||||||
@ -1898,11 +1836,26 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
|||||||
log.Info("Using developer account", "address", developer.Address)
|
log.Info("Using developer account", "address", developer.Address)
|
||||||
|
|
||||||
// Create a new developer genesis block or reuse existing one
|
// Create a new developer genesis block or reuse existing one
|
||||||
cfg.Genesis = core.DeveloperGenesisBlock(ctx.Uint64(DeveloperGasLimitFlag.Name), developer.Address)
|
cfg.Genesis = core.DeveloperGenesisBlock(ctx.Uint64(DeveloperGasLimitFlag.Name), &developer.Address)
|
||||||
if ctx.IsSet(DataDirFlag.Name) {
|
if ctx.IsSet(DataDirFlag.Name) {
|
||||||
chaindb := tryMakeReadOnlyDatabase(ctx, stack)
|
chaindb := tryMakeReadOnlyDatabase(ctx, stack)
|
||||||
if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
|
if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
|
||||||
cfg.Genesis = nil // fallback to db content
|
cfg.Genesis = nil // fallback to db content
|
||||||
|
|
||||||
|
//validate genesis has PoS enabled in block 0
|
||||||
|
genesis, err := core.ReadGenesis(chaindb)
|
||||||
|
if err != nil {
|
||||||
|
Fatalf("Could not read genesis from database: %v", err)
|
||||||
|
}
|
||||||
|
if !genesis.Config.TerminalTotalDifficultyPassed {
|
||||||
|
Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficultyPassed must be true in developer mode")
|
||||||
|
}
|
||||||
|
if genesis.Config.TerminalTotalDifficulty == nil {
|
||||||
|
Fatalf("Bad developer-mode genesis configuration: terminalTotalDifficulty must be specified.")
|
||||||
|
}
|
||||||
|
if genesis.Difficulty.Cmp(genesis.Config.TerminalTotalDifficulty) != 1 {
|
||||||
|
Fatalf("Bad developer-mode genesis configuration: genesis block difficulty must be > terminalTotalDifficulty")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
chaindb.Close()
|
chaindb.Close()
|
||||||
}
|
}
|
||||||
@ -1941,9 +1894,6 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
|
|||||||
return // already set through flags/config
|
return // already set through flags/config
|
||||||
}
|
}
|
||||||
protocol := "all"
|
protocol := "all"
|
||||||
if cfg.SyncMode == downloader.LightSync {
|
|
||||||
protocol = "les"
|
|
||||||
}
|
|
||||||
if url := params.KnownDNSNetwork(genesis, protocol); url != "" {
|
if url := params.KnownDNSNetwork(genesis, protocol); url != "" {
|
||||||
cfg.EthDiscoveryURLs = []string{url}
|
cfg.EthDiscoveryURLs = []string{url}
|
||||||
cfg.SnapDiscoveryURLs = cfg.EthDiscoveryURLs
|
cfg.SnapDiscoveryURLs = cfg.EthDiscoveryURLs
|
||||||
@ -1951,27 +1901,12 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// RegisterEthService adds an Ethereum client to the stack.
|
// RegisterEthService adds an Ethereum client to the stack.
|
||||||
// The second return value is the full node instance, which may be nil if the
|
// The second return value is the full node instance.
|
||||||
// node is running as a light client.
|
|
||||||
func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
|
func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
|
||||||
if cfg.SyncMode == downloader.LightSync {
|
|
||||||
backend, err := les.New(stack, cfg)
|
|
||||||
if err != nil {
|
|
||||||
Fatalf("Failed to register the Ethereum service: %v", err)
|
|
||||||
}
|
|
||||||
stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
|
|
||||||
return backend.ApiBackend, nil
|
|
||||||
}
|
|
||||||
backend, err := eth.New(stack, cfg)
|
backend, err := eth.New(stack, cfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Fatalf("Failed to register the Ethereum service: %v", err)
|
Fatalf("Failed to register the Ethereum service: %v", err)
|
||||||
}
|
}
|
||||||
if cfg.LightServ > 0 {
|
|
||||||
_, err := les.NewLesServer(stack, backend, cfg)
|
|
||||||
if err != nil {
|
|
||||||
Fatalf("Failed to create the LES server: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stack.RegisterAPIs(tracers.APIs(backend.APIBackend))
|
stack.RegisterAPIs(tracers.APIs(backend.APIBackend))
|
||||||
return backend.APIBackend, backend
|
return backend.APIBackend, backend
|
||||||
}
|
}
|
||||||
@ -1993,13 +1928,12 @@ func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, filterSyst
|
|||||||
|
|
||||||
// RegisterFilterAPI adds the eth log filtering RPC API to the node.
|
// RegisterFilterAPI adds the eth log filtering RPC API to the node.
|
||||||
func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconfig.Config) *filters.FilterSystem {
|
func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconfig.Config) *filters.FilterSystem {
|
||||||
isLightClient := ethcfg.SyncMode == downloader.LightSync
|
|
||||||
filterSystem := filters.NewFilterSystem(backend, filters.Config{
|
filterSystem := filters.NewFilterSystem(backend, filters.Config{
|
||||||
LogCacheSize: ethcfg.FilterLogCacheSize,
|
LogCacheSize: ethcfg.FilterLogCacheSize,
|
||||||
})
|
})
|
||||||
stack.RegisterAPIs([]rpc.API{{
|
stack.RegisterAPIs([]rpc.API{{
|
||||||
Namespace: "eth",
|
Namespace: "eth",
|
||||||
Service: filters.NewFilterAPI(filterSystem, isLightClient),
|
Service: filters.NewFilterAPI(filterSystem, false),
|
||||||
}})
|
}})
|
||||||
return filterSystem
|
return filterSystem
|
||||||
}
|
}
|
||||||
@ -2260,9 +2194,10 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// MakeTrieDatabase constructs a trie database based on the configured scheme.
|
// MakeTrieDatabase constructs a trie database based on the configured scheme.
|
||||||
func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool) *trie.Database {
|
func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool, isVerkle bool) *trie.Database {
|
||||||
config := &trie.Config{
|
config := &trie.Config{
|
||||||
Preimages: preimage,
|
Preimages: preimage,
|
||||||
|
IsVerkle: isVerkle,
|
||||||
}
|
}
|
||||||
scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), disk)
|
scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), disk)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -39,6 +39,14 @@ var DeprecatedFlags = []cli.Flag{
|
|||||||
CacheTrieRejournalFlag,
|
CacheTrieRejournalFlag,
|
||||||
LegacyDiscoveryV5Flag,
|
LegacyDiscoveryV5Flag,
|
||||||
TxLookupLimitFlag,
|
TxLookupLimitFlag,
|
||||||
|
LightServeFlag,
|
||||||
|
LightIngressFlag,
|
||||||
|
LightEgressFlag,
|
||||||
|
LightMaxPeersFlag,
|
||||||
|
LightNoPruneFlag,
|
||||||
|
LightNoSyncServeFlag,
|
||||||
|
LogBacktraceAtFlag,
|
||||||
|
LogDebugFlag,
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -77,6 +85,53 @@ var (
|
|||||||
Value: ethconfig.Defaults.TransactionHistory,
|
Value: ethconfig.Defaults.TransactionHistory,
|
||||||
Category: flags.DeprecatedCategory,
|
Category: flags.DeprecatedCategory,
|
||||||
}
|
}
|
||||||
|
// Light server and client settings, Deprecated November 2023
|
||||||
|
LightServeFlag = &cli.IntFlag{
|
||||||
|
Name: "light.serve",
|
||||||
|
Usage: "Maximum percentage of time allowed for serving LES requests (deprecated)",
|
||||||
|
Value: ethconfig.Defaults.LightServ,
|
||||||
|
Category: flags.LightCategory,
|
||||||
|
}
|
||||||
|
LightIngressFlag = &cli.IntFlag{
|
||||||
|
Name: "light.ingress",
|
||||||
|
Usage: "Incoming bandwidth limit for serving light clients (deprecated)",
|
||||||
|
Value: ethconfig.Defaults.LightIngress,
|
||||||
|
Category: flags.LightCategory,
|
||||||
|
}
|
||||||
|
LightEgressFlag = &cli.IntFlag{
|
||||||
|
Name: "light.egress",
|
||||||
|
Usage: "Outgoing bandwidth limit for serving light clients (deprecated)",
|
||||||
|
Value: ethconfig.Defaults.LightEgress,
|
||||||
|
Category: flags.LightCategory,
|
||||||
|
}
|
||||||
|
LightMaxPeersFlag = &cli.IntFlag{
|
||||||
|
Name: "light.maxpeers",
|
||||||
|
Usage: "Maximum number of light clients to serve, or light servers to attach to (deprecated)",
|
||||||
|
Value: ethconfig.Defaults.LightPeers,
|
||||||
|
Category: flags.LightCategory,
|
||||||
|
}
|
||||||
|
LightNoPruneFlag = &cli.BoolFlag{
|
||||||
|
Name: "light.nopruning",
|
||||||
|
Usage: "Disable ancient light chain data pruning (deprecated)",
|
||||||
|
Category: flags.LightCategory,
|
||||||
|
}
|
||||||
|
LightNoSyncServeFlag = &cli.BoolFlag{
|
||||||
|
Name: "light.nosyncserve",
|
||||||
|
Usage: "Enables serving light clients before syncing (deprecated)",
|
||||||
|
Category: flags.LightCategory,
|
||||||
|
}
|
||||||
|
// Deprecated November 2023
|
||||||
|
LogBacktraceAtFlag = &cli.StringFlag{
|
||||||
|
Name: "log.backtrace",
|
||||||
|
Usage: "Request a stack trace at a specific logging statement (deprecated)",
|
||||||
|
Value: "",
|
||||||
|
Category: flags.DeprecatedCategory,
|
||||||
|
}
|
||||||
|
LogDebugFlag = &cli.BoolFlag{
|
||||||
|
Name: "log.debug",
|
||||||
|
Usage: "Prepends log messages with call-site location (deprecated)",
|
||||||
|
Category: flags.DeprecatedCategory,
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
// showDeprecated displays deprecated flags that will be soon removed from the codebase.
|
// showDeprecated displays deprecated flags that will be soon removed from the codebase.
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user