(VDB-309) rpc method to get statediff at specific block #16
.github/ISSUE_TEMPLATE.md
@@ -1,6 +1,6 @@
 Hi there,
 
-please note that this is an issue tracker reserved for bug reports and feature requests.
+Please note that this is an issue tracker reserved for bug reports and feature requests.
 
 For general questions please use the gitter channel or the Ethereum stack exchange at https://ethereum.stackexchange.com.
.travis.yml
@@ -7,7 +7,7 @@ jobs:
   - stage: lint
     os: linux
     dist: xenial
-    go: 1.12.x
+    go: 1.13.x
     env:
       - lint
     git:
@@ -18,15 +18,15 @@ jobs:
   - stage: build
     os: linux
     dist: xenial
-    go: 1.10.x
+    go: 1.11.x
     script:
-      - go run build/ci.go install
-      - go run build/ci.go test -coverage $TEST_PACKAGES
+      - go run build/ci.go install
+      - go run build/ci.go test -coverage $TEST_PACKAGES
 
   - stage: build
     os: linux
     dist: xenial
-    go: 1.11.x
+    go: 1.12.x
     script:
       - go run build/ci.go install
       - go run build/ci.go test -coverage $TEST_PACKAGES
@@ -35,14 +35,14 @@ jobs:
   - stage: build
     os: linux
     dist: xenial
-    go: 1.12.x
+    go: 1.13.x
     script:
       - go run build/ci.go install
       - go run build/ci.go test -coverage $TEST_PACKAGES
 
   - stage: build
     os: osx
-    go: 1.12.x
+    go: 1.13.x
     script:
       - echo "Increase the maximum number of open file descriptors on macOS"
       - NOFILE=20480
@@ -61,7 +61,7 @@ jobs:
     if: type = push
     os: linux
     dist: xenial
-    go: 1.12.x
+    go: 1.13.x
     env:
       - ubuntu-ppa
     git:
@@ -75,9 +75,12 @@ jobs:
           - fakeroot
          - python-bzrlib
          - python-paramiko
+    cache:
+      directories:
+        - $HOME/.gobundle
     script:
       - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
-      - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"
+      - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>" -goversion 1.13.4 -gohash 95dbeab442ee2746b9acf0934c8e2fc26414a0565c008631b04addb8c02e7624 -gobundle $HOME/.gobundle/go.tar.gz
 
   # This builder does the Linux Azure uploads
   - stage: build
@@ -85,7 +88,7 @@ jobs:
     os: linux
     dist: xenial
     sudo: required
-    go: 1.12.x
+    go: 1.13.x
     env:
       - azure-linux
     git:
@@ -121,7 +124,7 @@ jobs:
     dist: xenial
     services:
       - docker
-    go: 1.12.x
+    go: 1.13.x
     env:
       - azure-linux-mips
     git:
@@ -167,7 +170,7 @@ jobs:
     git:
       submodules: false # avoid cloning ethereum/tests
     before_install:
-      - curl https://dl.google.com/go/go1.12.linux-amd64.tar.gz | tar -xz
+      - curl https://dl.google.com/go/go1.13.linux-amd64.tar.gz | tar -xz
       - export PATH=`pwd`/go/bin:$PATH
       - export GOROOT=`pwd`/go
       - export GOPATH=$HOME/go
@@ -185,7 +188,7 @@ jobs:
   - stage: build
     if: type = push
     os: osx
-    go: 1.12.x
+    go: 1.13.x
     env:
       - azure-osx
       - azure-ios
@@ -216,7 +219,7 @@ jobs:
     if: type = cron
     os: linux
     dist: xenial
-    go: 1.12.x
+    go: 1.13.x
     env:
       - azure-purge
     git:
@@ -1,5 +1,5 @@
 # Build Geth in a stock Go builder container
-FROM golang:1.12-alpine as builder
+FROM golang:1.13-alpine as builder
 
 RUN apk add --no-cache make gcc musl-dev linux-headers git

@@ -1,5 +1,5 @@
 # Build Geth in a stock Go builder container
-FROM golang:1.12-alpine as builder
+FROM golang:1.13-alpine as builder
 
 RUN apk add --no-cache make gcc musl-dev linux-headers git
Makefile
@@ -8,7 +8,7 @@
 .PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
 .PHONY: geth-windows geth-windows-386 geth-windows-amd64
 
-GOBIN = $(shell pwd)/build/bin
+GOBIN = ./build/bin
 GO ?= latest
 
 geth:
@@ -233,8 +233,8 @@ aware of and agree upon. This consists of a small JSON file (e.g. call it `genes
 
 The above fields should be fine for most purposes, although we'd recommend changing
 the `nonce` to some random value so you prevent unknown remote nodes from being able
-to connect to you. If you'd like to pre-fund some accounts for easier testing, you can
-populate the `alloc` field with account configs:
+to connect to you. If you'd like to pre-fund some accounts for easier testing, create
+the accounts and populate the `alloc` field with their addresses.
 
 ```json
 "alloc": {
@@ -303,7 +303,7 @@ ones either). To start a `geth` instance for mining, run it with all your usual
 by:
 
 ```shell
-$ geth <usual-flags> --mine --minerthreads=1 --etherbase=0x0000000000000000000000000000000000000000
+$ geth <usual-flags> --mine --miner.threads=1 --etherbase=0x0000000000000000000000000000000000000000
 ```
 
 Which will start mining blocks and transactions on a single CPU thread, crediting all
@@ -75,9 +75,6 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
 
 // Unpack output in v according to the abi specification
 func (abi ABI) Unpack(v interface{}, name string, data []byte) (err error) {
-	if len(data) == 0 {
-		return fmt.Errorf("abi: unmarshalling empty output")
-	}
 	// since there can't be naming collisions with contracts and events,
 	// we need to decide whether we're calling a method or an event
 	if method, ok := abi.Methods[name]; ok {
@@ -94,9 +91,6 @@ func (abi ABI) Unpack(v interface{}, name string, data []byte) (err error) {
 
 // UnpackIntoMap unpacks a log into the provided map[string]interface{}
 func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte) (err error) {
-	if len(data) == 0 {
-		return fmt.Errorf("abi: unmarshalling empty output")
-	}
 	// since there can't be naming collisions with contracts and events,
 	// we need to decide whether we're calling a method or an event
 	if method, ok := abi.Methods[name]; ok {
@@ -57,7 +57,7 @@ const jsondata2 = `
 ]`
 
 func TestReader(t *testing.T) {
-	Uint256, _ := NewType("uint256", nil)
+	Uint256, _ := NewType("uint256", "", nil)
 	exp := ABI{
 		Methods: map[string]Method{
 			"balance": {
@@ -172,7 +172,7 @@ func TestTestSlice(t *testing.T) {
 }
 
 func TestMethodSignature(t *testing.T) {
-	String, _ := NewType("string", nil)
+	String, _ := NewType("string", "", nil)
 	m := Method{"foo", "foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil}
 	exp := "foo(string,string)"
 	if m.Sig() != exp {
@@ -184,7 +184,7 @@ func TestMethodSignature(t *testing.T) {
 		t.Errorf("expected ids to match %x != %x", m.ID(), idexp)
 	}
 
-	uintt, _ := NewType("uint256", nil)
+	uintt, _ := NewType("uint256", "", nil)
 	m = Method{"foo", "foo", false, []Argument{{"bar", uintt, false}}, nil}
 	exp = "foo(uint256)"
 	if m.Sig() != exp {
@@ -192,7 +192,7 @@ func TestMethodSignature(t *testing.T) {
 	}
 
 	// Method with tuple arguments
-	s, _ := NewType("tuple", []ArgumentMarshaling{
+	s, _ := NewType("tuple", "", []ArgumentMarshaling{
 		{Name: "a", Type: "int256"},
 		{Name: "b", Type: "int256[]"},
 		{Name: "c", Type: "tuple[]", Components: []ArgumentMarshaling{
@@ -602,9 +602,9 @@ func TestBareEvents(t *testing.T) {
 		{ "type" : "event", "name" : "tuple", "inputs" : [{ "indexed":false, "name":"t", "type":"tuple", "components":[{"name":"a", "type":"uint256"}] }, { "indexed":true, "name":"arg1", "type":"address" }] }
 	]`
 
-	arg0, _ := NewType("uint256", nil)
-	arg1, _ := NewType("address", nil)
-	tuple, _ := NewType("tuple", []ArgumentMarshaling{{Name: "a", Type: "uint256"}})
+	arg0, _ := NewType("uint256", "", nil)
+	arg1, _ := NewType("address", "", nil)
+	tuple, _ := NewType("tuple", "", []ArgumentMarshaling{{Name: "a", Type: "uint256"}})
 
 	expectedEvents := map[string]struct {
 		Anonymous bool
@@ -34,10 +34,11 @@ type Argument struct {
 type Arguments []Argument
 
 type ArgumentMarshaling struct {
-	Name       string
-	Type       string
-	Components []ArgumentMarshaling
-	Indexed    bool
+	Name         string
+	Type         string
+	InternalType string
+	Components   []ArgumentMarshaling
+	Indexed      bool
 }
 
 // UnmarshalJSON implements json.Unmarshaler interface
@@ -48,7 +49,7 @@ func (argument *Argument) UnmarshalJSON(data []byte) error {
 		return fmt.Errorf("argument json err: %v", err)
 	}
 
-	argument.Type, err = NewType(arg.Type, arg.Components)
+	argument.Type, err = NewType(arg.Type, arg.InternalType, arg.Components)
 	if err != nil {
 		return err
 	}
@@ -88,6 +89,13 @@ func (arguments Arguments) isTuple() bool {
 
 // Unpack performs the operation hexdata -> Go format
 func (arguments Arguments) Unpack(v interface{}, data []byte) error {
+	if len(data) == 0 {
+		if len(arguments) != 0 {
+			return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
+		} else {
+			return nil // Nothing to unmarshal, return
+		}
+	}
 	// make sure the passed value is arguments pointer
 	if reflect.Ptr != reflect.ValueOf(v).Kind() {
 		return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
@@ -104,11 +112,17 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
 
 // UnpackIntoMap performs the operation hexdata -> mapping of argument name to argument value
 func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error {
+	if len(data) == 0 {
+		if len(arguments) != 0 {
+			return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
+		} else {
+			return nil // Nothing to unmarshal, return
+		}
+	}
 	marshalledValues, err := arguments.UnpackValues(data)
 	if err != nil {
 		return err
 	}
 
 	return arguments.unpackIntoMap(v, marshalledValues)
 }
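With the length check relocated from `ABI.Unpack` into `Arguments.Unpack`, the edge cases can be pinned down from this hunk alone. A minimal sketch using only APIs shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// No outputs expected and no data returned: now a clean no-op.
	var noOutputs abi.Arguments
	fmt.Println(noOutputs.Unpack(new(interface{}), nil)) // <nil>

	// Outputs expected but empty return data: still an explicit error.
	uint256, _ := abi.NewType("uint256", "", nil)
	withOutputs := abi.Arguments{{Name: "x", Type: uint256}}
	var x interface{}
	fmt.Println(withOutputs.Unpack(&x, nil)) // abi: attempting to unmarshall an empty string while arguments are expected
}
```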
@@ -218,7 +218,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
 		}
 	}
 	// If the contract surely has code (or code is not needed), estimate the transaction
-	msg := ethereum.CallMsg{From: opts.From, To: contract, Value: value, Data: input}
+	msg := ethereum.CallMsg{From: opts.From, To: contract, GasPrice: gasPrice, Value: value, Data: input}
 	gasLimit, err = c.transactor.EstimateGas(ensureContext(opts.Context), msg)
 	if err != nil {
 		return nil, fmt.Errorf("failed to estimate gas needed: %v", err)
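Forwarding the gas price matters for contracts whose code paths depend on `tx.gasprice`: estimation now sees the same price the final transaction will carry. A minimal sketch of the fixed call shape (the backend and addresses are hypothetical; `ethereum.GasEstimator` is the standard interface):

```go
package main

import (
	"context"
	"math/big"

	ethereum "github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
)

// estimateWithPrice mirrors the fixed transact path: the CallMsg used for
// estimation carries the same GasPrice the transaction will be sent with.
func estimateWithPrice(backend ethereum.GasEstimator, contract common.Address) (uint64, error) {
	msg := ethereum.CallMsg{
		From:     common.Address{},       // hypothetical sender
		To:       &contract,              // hypothetical contract address
		GasPrice: big.NewInt(1000000000), // 1 gwei, now forwarded to EstimateGas
		Value:    big.NewInt(0),
		Data:     nil,
	}
	return backend.EstimateGas(context.Background(), msg)
}

func main() {}
```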
@@ -86,7 +86,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
 				if input.Name == "" {
 					normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
 				}
-				if _, exist := structs[input.Type.String()]; input.Type.T == abi.TupleTy && !exist {
+				if hasStruct(input.Type) {
 					bindStructType[lang](input.Type, structs)
 				}
 			}
@@ -96,7 +96,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
 				if output.Name != "" {
 					normalized.Outputs[j].Name = capitalise(output.Name)
 				}
-				if _, exist := structs[output.Type.String()]; output.Type.T == abi.TupleTy && !exist {
+				if hasStruct(output.Type) {
 					bindStructType[lang](output.Type, structs)
 				}
 			}
@@ -119,14 +119,11 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
 			normalized.Inputs = make([]abi.Argument, len(original.Inputs))
 			copy(normalized.Inputs, original.Inputs)
 			for j, input := range normalized.Inputs {
-				// Indexed fields are input, non-indexed ones are outputs
-				if input.Indexed {
-					if input.Name == "" {
-						normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
-					}
-					if _, exist := structs[input.Type.String()]; input.Type.T == abi.TupleTy && !exist {
-						bindStructType[lang](input.Type, structs)
-					}
+				if input.Name == "" {
+					normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
+				}
+				if hasStruct(input.Type) {
+					bindStructType[lang](input.Type, structs)
 				}
 			}
 			// Append the event to the accumulator list
@@ -244,7 +241,7 @@ func bindBasicTypeGo(kind abi.Type) string {
 func bindTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 	switch kind.T {
 	case abi.TupleTy:
-		return structs[kind.String()].Name
+		return structs[kind.TupleRawName+kind.String()].Name
 	case abi.ArrayTy:
 		return fmt.Sprintf("[%d]", kind.Size) + bindTypeGo(*kind.Elem, structs)
 	case abi.SliceTy:
@@ -321,7 +318,7 @@ func pluralizeJavaType(typ string) string {
 func bindTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
 	switch kind.T {
 	case abi.TupleTy:
-		return structs[kind.String()].Name
+		return structs[kind.TupleRawName+kind.String()].Name
 	case abi.ArrayTy, abi.SliceTy:
 		return pluralizeJavaType(bindTypeJava(*kind.Elem, structs))
 	default:
@@ -340,6 +337,13 @@ var bindTopicType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct)
 // funcionality as for simple types, but dynamic types get converted to hashes.
 func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 	bound := bindTypeGo(kind, structs)
+
+	// todo(rjl493456442) according solidity documentation, indexed event
+	// parameters that are not value types i.e. arrays and structs are not
+	// stored directly but instead a keccak256-hash of an encoding is stored.
+	//
+	// We only convert stringS and bytes to hash, still need to deal with
+	// array(both fixed-size and dynamic-size) and struct.
 	if bound == "string" || bound == "[]byte" {
 		bound = "common.Hash"
 	}
@@ -350,6 +354,13 @@ func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 // funcionality as for simple types, but dynamic types get converted to hashes.
 func bindTopicTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
 	bound := bindTypeJava(kind, structs)
+
+	// todo(rjl493456442) according solidity documentation, indexed event
+	// parameters that are not value types i.e. arrays and structs are not
+	// stored directly but instead a keccak256-hash of an encoding is stored.
+	//
+	// We only convert stringS and bytes to hash, still need to deal with
+	// array(both fixed-size and dynamic-size) and struct.
 	if bound == "String" || bound == "byte[]" {
 		bound = "Hash"
 	}
@@ -369,7 +380,14 @@ var bindStructType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct
 func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 	switch kind.T {
 	case abi.TupleTy:
-		if s, exist := structs[kind.String()]; exist {
+		// We compose raw struct name and canonical parameter expression
+		// together here. The reason is before solidity v0.5.11, kind.TupleRawName
+		// is empty, so we use canonical parameter expression to distinguish
+		// different struct definition. From the consideration of backward
+		// compatibility, we concat these two together so that if kind.TupleRawName
+		// is not empty, it can have unique id.
+		id := kind.TupleRawName + kind.String()
+		if s, exist := structs[id]; exist {
 			return s.Name
 		}
 		var fields []*tmplField
@@ -377,8 +395,11 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 			field := bindStructTypeGo(*elem, structs)
 			fields = append(fields, &tmplField{Type: field, Name: capitalise(kind.TupleRawNames[i]), SolKind: *elem})
 		}
-		name := fmt.Sprintf("Struct%d", len(structs))
-		structs[kind.String()] = &tmplStruct{
+		name := kind.TupleRawName
+		if name == "" {
+			name = fmt.Sprintf("Struct%d", len(structs))
+		}
+		structs[id] = &tmplStruct{
 			Name:   name,
 			Fields: fields,
 		}
@@ -398,7 +419,14 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
 func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
 	switch kind.T {
 	case abi.TupleTy:
-		if s, exist := structs[kind.String()]; exist {
+		// We compose raw struct name and canonical parameter expression
+		// together here. The reason is before solidity v0.5.11, kind.TupleRawName
+		// is empty, so we use canonical parameter expression to distinguish
+		// different struct definition. From the consideration of backward
+		// compatibility, we concat these two together so that if kind.TupleRawName
+		// is not empty, it can have unique id.
+		id := kind.TupleRawName + kind.String()
+		if s, exist := structs[id]; exist {
 			return s.Name
 		}
 		var fields []*tmplField
@@ -406,8 +434,11 @@ func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
 			field := bindStructTypeJava(*elem, structs)
 			fields = append(fields, &tmplField{Type: field, Name: decapitalise(kind.TupleRawNames[i]), SolKind: *elem})
 		}
-		name := fmt.Sprintf("Class%d", len(structs))
-		structs[kind.String()] = &tmplStruct{
+		name := kind.TupleRawName
+		if name == "" {
+			name = fmt.Sprintf("Class%d", len(structs))
+		}
+		structs[id] = &tmplStruct{
 			Name:   name,
 			Fields: fields,
 		}
@@ -497,6 +528,21 @@ func structured(args abi.Arguments) bool {
 	return true
 }
 
+// hasStruct returns an indicator whether the given type is struct, struct slice
+// or struct array.
+func hasStruct(t abi.Type) bool {
+	switch t.T {
+	case abi.SliceTy:
+		return hasStruct(*t.Elem)
+	case abi.ArrayTy:
+		return hasStruct(*t.Elem)
+	case abi.TupleTy:
+		return true
+	default:
+		return false
+	}
+}
+
 // resolveArgName converts a raw argument representation into a user friendly format.
 func resolveArgName(arg abi.Argument, structs map[string]*tmplStruct) string {
 	var (
@@ -512,7 +558,7 @@ loop:
 		case abi.ArrayTy:
 			prefix += fmt.Sprintf("[%d]", typ.Size)
 		default:
-			embedded = typ.String()
+			embedded = typ.TupleRawName + typ.String()
 			break loop
 		}
 		typ = typ.Elem
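Why the composed key matters: two identically-shaped tuples from different source structs no longer collide in the binder's struct map, while pre-0.5.11 ABIs (empty `TupleRawName`) keep the old canonical-expression key. A small sketch (relies on the `NewType` change shown further down; the `A.Point`/`B.Point` struct names are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	components := []abi.ArgumentMarshaling{
		{Name: "x", Type: "uint256"},
		{Name: "y", Type: "uint256"},
	}
	// Same layout, different source names: distinct ids.
	a, _ := abi.NewType("tuple", "struct A.Point", components)
	b, _ := abi.NewType("tuple", "struct B.Point", components)
	fmt.Println(a.TupleRawName + a.String()) // APoint(uint256,uint256)
	fmt.Println(b.TupleRawName + b.String()) // BPoint(uint256,uint256)

	// Pre-0.5.11 ABI with no internalType: key degrades to the
	// canonical expression alone, preserving the old behaviour.
	old, _ := abi.NewType("tuple", "", components)
	fmt.Println(old.TupleRawName + old.String()) // (uint256,uint256)
}
```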
File diff suppressed because one or more lines are too long
@@ -65,7 +65,7 @@ type tmplField struct {
 // tmplStruct is a wrapper around an abi.tuple contains a auto-generated
 // struct name.
 type tmplStruct struct {
-	Name   string       // Auto-generated struct name(We can't obtain the raw struct name through abi)
+	Name   string       // Auto-generated struct name(before solidity v0.5.11) or raw name.
 	Fields []*tmplField // Struct fields definition depends on the binding language.
 }
 
@@ -483,7 +483,7 @@ var (
 
 	// Parse{{.Normalized.Name}} is a log parse operation binding the contract event 0x{{printf "%x" .Original.ID}}.
 	//
-	// Solidity: {{.Original.String}}
+	// Solidity: {{formatevent .Original $structs}}
 	func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Parse{{.Normalized.Name}}(log types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) {
 		event := new({{$contract.Type}}{{.Normalized.Name}})
 		if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
|
||||
copy(topic[:], hash[:])
|
||||
|
||||
default:
|
||||
// todo(rjl493456442) according solidity documentation, indexed event
|
||||
// parameters that are not value types i.e. arrays and structs are not
|
||||
// stored directly but instead a keccak256-hash of an encoding is stored.
|
||||
//
|
||||
// We only convert stringS and bytes to hash, still need to deal with
|
||||
// array(both fixed-size and dynamic-size) and struct.
|
||||
|
||||
// Attempt to generate the topic from funky types
|
||||
val := reflect.ValueOf(rule)
|
||||
|
||||
switch {
|
||||
|
||||
// static byte array
|
||||
case val.Kind() == reflect.Array && reflect.TypeOf(rule).Elem().Kind() == reflect.Uint8:
|
||||
reflect.Copy(reflect.ValueOf(topic[:val.Len()]), val)
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported indexed type: %T", rule)
|
||||
}
|
||||
@ -162,6 +166,7 @@ func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) er
|
||||
|
||||
default:
|
||||
// Ran out of plain primitive types, try custom types
|
||||
|
||||
switch field.Type() {
|
||||
case reflectHash: // Also covers all dynamic types
|
||||
field.Set(reflect.ValueOf(topics[0]))
|
||||
@ -178,11 +183,9 @@ func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) er
|
||||
default:
|
||||
// Ran out of custom types, try the crazies
|
||||
switch {
|
||||
|
||||
// static byte array
|
||||
case arg.Type.T == abi.FixedBytesTy:
|
||||
reflect.Copy(field, reflect.ValueOf(topics[0][:arg.Type.Size]))
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported indexed type: %v", arg.Type)
|
||||
}
|
||||
|
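The new static byte array case boils down to a `reflect.Copy` into the topic buffer, left-aligning the value and leaving the remaining bytes zero. A runnable sketch of just that conversion:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Mirrors the new case in makeTopics: a [5]byte rule is copied into
	// the front of the 32-byte topic, the rest stays zero.
	rule := [5]byte{1, 2, 3, 4, 5}
	var topic common.Hash
	val := reflect.ValueOf(rule)
	reflect.Copy(reflect.ValueOf(topic[:val.Len()]), val)
	fmt.Println(topic.Hex()) // 0x0102030405000000...0000 (27 trailing zero bytes)
}
```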
@@ -59,7 +59,7 @@ func TestParseTopics(t *testing.T) {
 	type bytesStruct struct {
 		StaticBytes [5]byte
 	}
-	bytesType, _ := abi.NewType("bytes5", nil)
+	bytesType, _ := abi.NewType("bytes5", "", nil)
 	type args struct {
 		createObj func() interface{}
 		resultObj func() interface{}
@@ -613,7 +613,7 @@ func TestPack(t *testing.T) {
 			"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].A[1]
 		},
 	} {
-		typ, err := NewType(test.typ, test.components)
+		typ, err := NewType(test.typ, "", test.components)
 		if err != nil {
 			t.Fatalf("%v failed. Unexpected parse error: %v", i, err)
 		}
@@ -53,6 +53,7 @@ type Type struct {
 	stringKind string // holds the unparsed string for deriving signatures
 
 	// Tuple relative fields
+	TupleRawName  string   // Raw struct name defined in source code, may be empty.
 	TupleElems    []*Type  // Type information of all tuple fields
 	TupleRawNames []string // Raw field name of all tuple fields
 }
@@ -63,7 +64,7 @@ var (
 )
 
 // NewType creates a new reflection type of abi type given in t.
-func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
+func NewType(t string, internalType string, components []ArgumentMarshaling) (typ Type, err error) {
 	// check that array brackets are equal if they exist
 	if strings.Count(t, "[") != strings.Count(t, "]") {
 		return Type{}, fmt.Errorf("invalid arg type in abi")
@@ -73,9 +74,14 @@ func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
 	// if there are brackets, get ready to go into slice/array mode and
 	// recursively create the type
 	if strings.Count(t, "[") != 0 {
-		i := strings.LastIndex(t, "[")
+		// Note internalType can be empty here.
+		subInternal := internalType
+		if i := strings.LastIndex(internalType, "["); i != -1 {
+			subInternal = subInternal[:i]
+		}
 		// recursively embed the type
-		embeddedType, err := NewType(t[:i], components)
+		i := strings.LastIndex(t, "[")
+		embeddedType, err := NewType(t[:i], subInternal, components)
 		if err != nil {
 			return Type{}, err
 		}
@@ -173,7 +179,7 @@ func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
 		)
 		expression += "("
 		for idx, c := range components {
-			cType, err := NewType(c.Type, c.Components)
+			cType, err := NewType(c.Type, c.InternalType, c.Components)
 			if err != nil {
 				return Type{}, err
 			}
@@ -199,6 +205,17 @@ func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
 		typ.TupleRawNames = names
 		typ.T = TupleTy
 		typ.stringKind = expression
+
+		const structPrefix = "struct "
+		// After solidity 0.5.10, a new field of abi "internalType"
+		// is introduced. From that we can obtain the struct name
+		// user defined in the source code.
+		if internalType != "" && strings.HasPrefix(internalType, structPrefix) {
+			// Foo.Bar type definition is not allowed in golang,
+			// convert the format to FooBar
+			typ.TupleRawName = strings.Replace(internalType[len(structPrefix):], ".", "", -1)
+		}
+
 	case "function":
 		typ.Kind = reflect.Array
 		typ.T = FunctionTy
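A quick sketch of what the new parameter yields (the `Foo.Bar` struct name is hypothetical): `solc` >= 0.5.11 emits `"internalType": "struct Foo.Bar"`, and `NewType` strips the `struct ` prefix and the dot so the binder gets a Go-friendly name.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	components := []abi.ArgumentMarshaling{
		{Name: "a", Type: "uint256"},
		{Name: "b", Type: "address"},
	}
	typ, err := abi.NewType("tuple", "struct Foo.Bar", components)
	if err != nil {
		panic(err)
	}
	fmt.Println(typ.TupleRawName) // FooBar ("struct " stripped, "." removed)
}
```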
@@ -106,7 +106,7 @@ func TestTypeRegexp(t *testing.T) {
 	}
 
 	for _, tt := range tests {
-		typ, err := NewType(tt.blob, tt.components)
+		typ, err := NewType(tt.blob, "", tt.components)
 		if err != nil {
 			t.Errorf("type %q: failed to parse type string: %v", tt.blob, err)
 		}
@@ -281,7 +281,7 @@ func TestTypeCheck(t *testing.T) {
 			B *big.Int
 		}{{big.NewInt(0), big.NewInt(0)}, {big.NewInt(0), big.NewInt(0)}}, ""},
 	} {
-		typ, err := NewType(test.typ, test.components)
+		typ, err := NewType(test.typ, "", test.components)
 		if err != nil && len(test.err) == 0 {
 			t.Fatal("unexpected parse error:", err)
 		} else if err != nil && len(test.err) != 0 {
@@ -51,6 +51,7 @@ func (test unpackTest) checkError(err error) error {
 }
 
 var unpackTests = []unpackTest{
+	// Bools
 	{
 		def:  `[{ "type": "bool" }]`,
 		enc:  "0000000000000000000000000000000000000000000000000000000000000001",
@@ -73,6 +74,7 @@ var unpackTests = []unpackTest{
 		want: false,
 		err:  "abi: improperly encoded boolean value",
 	},
+	// Integers
 	{
 		def:  `[{"type": "uint32"}]`,
 		enc:  "0000000000000000000000000000000000000000000000000000000000000001",
@@ -122,11 +124,13 @@ var unpackTests = []unpackTest{
 		enc:  "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
 		want: big.NewInt(-1),
 	},
+	// Address
 	{
 		def:  `[{"type": "address"}]`,
 		enc:  "0000000000000000000000000100000000000000000000000000000000000000",
 		want: common.Address{1},
 	},
+	// Bytes
 	{
 		def:  `[{"type": "bytes32"}]`,
 		enc:  "0100000000000000000000000000000000000000000000000000000000000000",
@@ -154,23 +158,39 @@ var unpackTests = []unpackTest{
 		enc:  "0100000000000000000000000000000000000000000000000000000000000000",
 		want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
 	},
+	// Functions
 	{
 		def:  `[{"type": "function"}]`,
 		enc:  "0100000000000000000000000000000000000000000000000000000000000000",
 		want: [24]byte{1},
 	},
-	// slices
+	// Slice and Array
 	{
 		def:  `[{"type": "uint8[]"}]`,
 		enc:  "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
 		want: []uint8{1, 2},
 	},
+	{
+		def:  `[{"type": "uint8[]"}]`,
+		enc:  "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
+		want: []uint8{},
+	},
+	{
+		def:  `[{"type": "uint256[]"}]`,
+		enc:  "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
+		want: []*big.Int{},
+	},
 	{
 		def:  `[{"type": "uint8[2]"}]`,
 		enc:  "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
 		want: [2]uint8{1, 2},
 	},
 	// multi dimensional, if these pass, all types that don't require length prefix should pass
+	{
+		def:  `[{"type": "uint8[][]"}]`,
+		enc:  "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
+		want: [][]uint8{},
+	},
 	{
 		def:  `[{"type": "uint8[][]"}]`,
 		enc:  "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@@ -186,11 +206,21 @@ var unpackTests = []unpackTest{
 		enc:  "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
 		want: [2][2]uint8{{1, 2}, {1, 2}},
 	},
+	{
+		def:  `[{"type": "uint8[][2]"}]`,
+		enc:  "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+		want: [2][]uint8{{}, {}},
+	},
 	{
 		def:  `[{"type": "uint8[][2]"}]`,
 		enc:  "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
 		want: [2][]uint8{{1}, {1}},
 	},
+	{
+		def:  `[{"type": "uint8[2][]"}]`,
+		enc:  "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000",
+		want: [][2]uint8{},
+	},
 	{
 		def:  `[{"type": "uint8[2][]"}]`,
 		enc:  "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@@ -420,7 +450,7 @@ func TestUnpack(t *testing.T) {
 	}
 	encb, err := hex.DecodeString(test.enc)
 	if err != nil {
-		t.Fatalf("invalid hex: %s" + test.enc)
+		t.Fatalf("invalid hex %s: %v", test.enc, err)
 	}
 	outptr := reflect.New(reflect.TypeOf(test.want))
 	err = abi.Unpack(outptr.Interface(), "method", encb)
@@ -23,8 +23,8 @@ environment:
 install:
   - git submodule update --init
   - rmdir C:\go /s /q
-  - appveyor DownloadFile https://dl.google.com/go/go1.12.9.windows-%GETH_ARCH%.zip
-  - 7z x go1.12.9.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+  - appveyor DownloadFile https://dl.google.com/go/go1.13.4.windows-%GETH_ARCH%.zip
+  - 7z x go1.13.4.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
   - go version
   - gcc --version
@@ -22,19 +22,18 @@ variables `PPA_SIGNING_KEY` and `PPA_SSH_KEY` on Travis.
 
 We want to build go-ethereum with the most recent version of Go, irrespective of the Go
 version that is available in the main Ubuntu repository. In order to make this possible,
-our PPA depends on the ~gophers/ubuntu/archive PPA. Our source package build-depends on
-golang-1.11, which is co-installable alongside the regular golang package. PPA dependencies
-can be edited at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies
+we bundle the entire Go sources into our own source archive and start the built job by
+compiling Go and then using that to build go-ethereum. On Trusty we have a special case
+requiring the `~gophers/ubuntu/archive` PPA since Trusty can't even build Go itself. PPA
+deps are set at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies
 
 ## Building Packages Locally (for testing)
 
 You need to run Ubuntu to do test packaging.
 
-Add the gophers PPA and install Go 1.11 and Debian packaging tools:
+Install any version of Go and Debian packaging tools:
 
-    $ sudo apt-add-repository ppa:gophers/ubuntu/archive
-    $ sudo apt-get update
-    $ sudo apt-get install build-essential golang-1.11 devscripts debhelper python-bzrlib python-paramiko
+    $ sudo apt-get install build-essential golang-go devscripts debhelper python-bzrlib python-paramiko
 
 Create the source packages:
 
@@ -42,10 +41,10 @@ Create the source packages:
 
 Then go into the source package directory for your running distribution and build the package:
 
-    $ cd dist/ethereum-unstable-1.6.0+xenial
+    $ cd dist/ethereum-unstable-1.9.6+bionic
     $ dpkg-buildpackage
 
 Built packages are placed in the dist/ directory.
 
     $ cd ..
-    $ dpkg-deb -c geth-unstable_1.6.0+xenial_amd64.deb
+    $ dpkg-deb -c geth-unstable_1.9.6+bionic_amd64.deb
build/ci.go
@@ -58,6 +58,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/internal/build"
 	"github.com/ethereum/go-ethereum/params"
 )
@@ -138,7 +139,18 @@ var (
 	// Note: zesty is unsupported because it was officially deprecated on Launchpad.
 	// Note: artful is unsupported because it was officially deprecated on Launchpad.
 	// Note: cosmic is unsupported because it was officially deprecated on Launchpad.
-	debDistros = []string{"trusty", "xenial", "bionic", "disco", "eoan"}
+	debDistroGoBoots = map[string]string{
+		"trusty": "golang-1.11",
+		"xenial": "golang-go",
+		"bionic": "golang-go",
+		"disco":  "golang-go",
+		"eoan":   "golang-go",
+	}
+
+	debGoBootPaths = map[string]string{
+		"golang-1.11": "/usr/lib/go-1.11",
+		"golang-go":   "/usr/lib/go",
+	}
 )
 
 var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@@ -214,7 +226,6 @@ func doInstall(cmdline []string) {
 	if flag.NArg() > 0 {
 		packages = flag.Args()
 	}
-	packages = build.ExpandPackagesNoVendor(packages)
 
 	if *arch == "" || *arch == runtime.GOARCH {
 		goinstall := goTool("install", buildFlags(env)...)
@@ -311,13 +322,12 @@ func doTest(cmdline []string) {
 	if len(flag.CommandLine.Args()) > 0 {
 		packages = flag.CommandLine.Args()
 	}
-	packages = build.ExpandPackagesNoVendor(packages)
 
 	// Run the actual tests.
 	// Test a single package at a time. CI builders are slow
 	// and some tests run into timeouts under load.
 	gotest := goTool("test", buildFlags(env)...)
-	gotest.Args = append(gotest.Args, "-p", "1", "-timeout", "5m")
+	gotest.Args = append(gotest.Args, "-p", "1", "-timeout", "5m", "--short")
 	if *coverage {
 		gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
 	}
@@ -461,11 +471,14 @@ func maybeSkipArchive(env build.Environment) {
 // Debian Packaging
 func doDebianSource(cmdline []string) {
 	var (
-		signer  = flag.String("signer", "", `Signing key name, also used as package author`)
-		upload  = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`)
-		sshUser = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`)
-		workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`)
-		now     = time.Now()
+		goversion = flag.String("goversion", "", `Go version to build with (will be included in the source package)`)
+		gobundle  = flag.String("gobundle", "/tmp/go.tar.gz", `Filesystem path to cache the downloaded Go bundles at`)
+		gohash    = flag.String("gohash", "", `SHA256 checksum of the Go sources requested to build with`)
+		signer    = flag.String("signer", "", `Signing key name, also used as package author`)
+		upload    = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`)
+		sshUser   = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`)
+		workdir   = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`)
+		now       = time.Now()
 	)
 	flag.CommandLine.Parse(cmdline)
 	*workdir = makeWorkdir(*workdir)
@@ -478,12 +491,25 @@ func doDebianSource(cmdline []string) {
 		gpg.Stdin = bytes.NewReader(key)
 		build.MustRun(gpg)
 	}
+	// Download and verify the Go source package
+	if err := build.EnsureGoSources(*goversion, hexutil.MustDecode("0x"+*gohash), *gobundle); err != nil {
+		log.Fatalf("Failed to ensure Go source package: %v", err)
+	}
 	// Create Debian packages and upload them
 	for _, pkg := range debPackages {
-		for _, distro := range debDistros {
-			meta := newDebMetadata(distro, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
+		for distro, goboot := range debDistroGoBoots {
+			// Prepare the debian package with the go-ethereum sources
+			meta := newDebMetadata(distro, goboot, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
 			pkgdir := stageDebianSource(*workdir, meta)
 
+			// Ship the Go sources along so we have a proper thing to build with
+			if err := build.ExtractTarballArchive(*gobundle, pkgdir); err != nil {
+				log.Fatalf("Failed to extract Go sources: %v", err)
+			}
+			if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".go")); err != nil {
+				log.Fatalf("Failed to rename Go source folder: %v", err)
+			}
 			// Run the packaging and upload to the PPA
 			debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc", "-d", "-Zxz")
 			debuild.Dir = pkgdir
 			build.MustRun(debuild)
@@ -563,7 +589,9 @@ type debPackage struct {
 }
 
 type debMetadata struct {
-	Env build.Environment
+	Env           build.Environment
+	GoBootPackage string
+	GoBootPath    string
 
 	PackageName string
 
@@ -592,19 +620,21 @@ func (d debExecutable) Package() string {
 	return d.BinaryName
 }
 
-func newDebMetadata(distro, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata {
+func newDebMetadata(distro, goboot, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata {
 	if author == "" {
 		// No signing key, use default author.
 		author = "Ethereum Builds <fjl@ethereum.org>"
 	}
 	return debMetadata{
-		PackageName: name,
-		Env:         env,
-		Author:      author,
-		Distro:      distro,
-		Version:     version,
-		Time:        t.Format(time.RFC1123Z),
-		Executables: exes,
+		GoBootPackage: goboot,
+		GoBootPath:    debGoBootPaths[goboot],
+		PackageName:   name,
+		Env:           env,
+		Author:        author,
+		Distro:        distro,
+		Version:       version,
+		Time:          t.Format(time.RFC1123Z),
+		Executables:   exes,
	}
 }
 
@@ -669,7 +699,6 @@ func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) {
 	if err := os.Mkdir(pkgdir, 0755); err != nil {
 		log.Fatal(err)
 	}
-
 	// Copy the source code.
 	build.MustRunCommand("git", "checkout-index", "-a", "--prefix", pkgdir+string(filepath.Separator))
 
@@ -687,7 +716,6 @@ func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) {
 		build.Render("build/deb/"+meta.PackageName+"/deb.install", install, 0644, exe)
 		build.Render("build/deb/"+meta.PackageName+"/deb.docs", docs, 0644, exe)
 	}
-
 	return pkgdir
 }
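`build.EnsureGoSources` is an internal helper added alongside this change; its gist is download-once-then-verify. A generic sketch of that checksum gate (the function name and paths here are hypothetical stand-ins, not the internal/build API):

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io/ioutil"
)

// verifyBundle is a hypothetical stand-in for the verification step inside
// build.EnsureGoSources: refuse any cached bundle whose SHA256 doesn't match
// the checksum pinned in CI (the -gohash flag above).
func verifyBundle(path string, wantHash []byte) error {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	if have := sha256.Sum256(data); !bytes.Equal(have[:], wantHash) {
		return fmt.Errorf("checksum mismatch: have %x, want %x", have, wantHash)
	}
	return nil
}

func main() {
	// With an all-zero expected hash this will report a mismatch.
	fmt.Println(verifyBundle("/tmp/go.tar.gz", make([]byte, 32)))
}
```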
@@ -2,7 +2,7 @@ Source: {{.Name}}
 Section: science
 Priority: extra
 Maintainer: {{.Author}}
-Build-Depends: debhelper (>= 8.0.0), golang-1.11
+Build-Depends: debhelper (>= 8.0.0), {{.GoBootPackage}}
 Standards-Version: 3.9.5
 Homepage: https://ethereum.org
 Vcs-Git: git://github.com/ethereum/go-ethereum.git
@@ -6,9 +6,11 @@
 
 # Launchpad rejects Go's access to $HOME/.cache, use custom folder
 export GOCACHE=/tmp/go-build
+export GOROOT_BOOTSTRAP={{.GoBootPath}}
 
 override_dh_auto_build:
-	build/env.sh /usr/lib/go-1.11/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
+	(cd .go/src && ./make.bash)
+	build/env.sh .go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
 
 override_dh_auto_test:
@@ -70,7 +70,9 @@ func main() {
 		if err = crypto.SaveECDSA(*genKey, nodeKey); err != nil {
 			utils.Fatalf("%v", err)
 		}
-		return
+		if !*writeAddr {
+			return
+		}
 	case *nodeKeyFile == "" && *nodeKeyHex == "":
 		utils.Fatalf("Use -nodekey or -nodekeyhex to specify a private key")
 	case *nodeKeyFile != "" && *nodeKeyHex != "":
@@ -404,6 +404,27 @@ func initialize(c *cli.Context) error {
 	return nil
 }
 
+// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into
+// account the set data folders as well as the designated platform we're currently
+// running on.
+func ipcEndpoint(ipcPath, datadir string) string {
+	// On windows we can only use plain top-level pipes
+	if runtime.GOOS == "windows" {
+		if strings.HasPrefix(ipcPath, `\\.\pipe\`) {
+			return ipcPath
+		}
+		return `\\.\pipe\` + ipcPath
+	}
+	// Resolve names into the data directory full paths otherwise
+	if filepath.Base(ipcPath) == ipcPath {
+		if datadir == "" {
+			return filepath.Join(os.TempDir(), ipcPath)
+		}
+		return filepath.Join(datadir, ipcPath)
+	}
+	return ipcPath
+}
+
 func signer(c *cli.Context) error {
 	// If we have some unrecognized command, bail out
 	if args := c.Args(); len(args) > 0 {
@@ -532,12 +553,8 @@ func signer(c *cli.Context) error {
 		}()
 	}
 	if !c.GlobalBool(utils.IPCDisabledFlag.Name) {
-		if c.IsSet(utils.IPCPathFlag.Name) {
-			ipcapiURL = c.GlobalString(utils.IPCPathFlag.Name)
-		} else {
-			ipcapiURL = filepath.Join(configDir, "clef.ipc")
-		}
-
+		givenPath := c.GlobalString(utils.IPCPathFlag.Name)
+		ipcapiURL = ipcEndpoint(filepath.Join(givenPath, "clef.ipc"), configDir)
 		listener, _, err := rpc.StartIPCEndpoint(ipcapiURL, rpcAPI)
 		if err != nil {
 			utils.Fatalf("Could not start IPC api: %v", err)
@@ -547,7 +564,6 @@ func signer(c *cli.Context) error {
 			listener.Close()
 			log.Info("IPC endpoint closed", "url", ipcapiURL)
 		}()
-
 	}
 
 	if c.GlobalBool(testFlag.Name) {
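To make the resolution rules concrete, here is a standalone copy of `ipcEndpoint` from the hunk above with a few illustrative calls (outputs noted for a unix host; the example paths are hypothetical):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"
)

// Copied from the hunk above so the resolution rules can be exercised standalone.
func ipcEndpoint(ipcPath, datadir string) string {
	if runtime.GOOS == "windows" {
		if strings.HasPrefix(ipcPath, `\\.\pipe\`) {
			return ipcPath
		}
		return `\\.\pipe\` + ipcPath
	}
	if filepath.Base(ipcPath) == ipcPath {
		if datadir == "" {
			return filepath.Join(os.TempDir(), ipcPath)
		}
		return filepath.Join(datadir, ipcPath)
	}
	return ipcPath
}

func main() {
	fmt.Println(ipcEndpoint("clef.ipc", "/home/user/.clef")) // /home/user/.clef/clef.ipc (bare names join the datadir)
	fmt.Println(ipcEndpoint("clef.ipc", ""))                 // <tempdir>/clef.ipc (no datadir configured)
	fmt.Println(ipcEndpoint("/var/run/clef.ipc", "ignored")) // /var/run/clef.ipc (explicit paths pass through)
	// On Windows all of the above resolve under \\.\pipe\ instead.
}
```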
cmd/devp2p/crawl.go (new file)
@@ -0,0 +1,152 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

type crawler struct {
	input     nodeSet
	output    nodeSet
	disc      *discover.UDPv4
	iters     []enode.Iterator
	inputIter enode.Iterator
	ch        chan *enode.Node
	closed    chan struct{}

	// settings
	revalidateInterval time.Duration
}

func newCrawler(input nodeSet, disc *discover.UDPv4, iters ...enode.Iterator) *crawler {
	c := &crawler{
		input:     input,
		output:    make(nodeSet, len(input)),
		disc:      disc,
		iters:     iters,
		inputIter: enode.IterNodes(input.nodes()),
		ch:        make(chan *enode.Node),
		closed:    make(chan struct{}),
	}
	c.iters = append(c.iters, c.inputIter)
	// Copy input to output initially. Any nodes that fail validation
	// will be dropped from output during the run.
	for id, n := range input {
		c.output[id] = n
	}
	return c
}

func (c *crawler) run(timeout time.Duration) nodeSet {
	var (
		timeoutTimer = time.NewTimer(timeout)
		timeoutCh    <-chan time.Time
		doneCh       = make(chan enode.Iterator, len(c.iters))
		liveIters    = len(c.iters)
	)
	for _, it := range c.iters {
		go c.runIterator(doneCh, it)
	}

loop:
	for {
		select {
		case n := <-c.ch:
			c.updateNode(n)
		case it := <-doneCh:
			if it == c.inputIter {
				// Enable timeout when we're done revalidating the input nodes.
				log.Info("Revalidation of input set is done", "len", len(c.input))
				if timeout > 0 {
					timeoutCh = timeoutTimer.C
				}
			}
			if liveIters--; liveIters == 0 {
				break loop
			}
		case <-timeoutCh:
			break loop
		}
	}

	close(c.closed)
	for _, it := range c.iters {
		it.Close()
	}
	for ; liveIters > 0; liveIters-- {
		<-doneCh
	}
	return c.output
}

func (c *crawler) runIterator(done chan<- enode.Iterator, it enode.Iterator) {
	defer func() { done <- it }()
	for it.Next() {
		select {
		case c.ch <- it.Node():
		case <-c.closed:
			return
		}
	}
}

func (c *crawler) updateNode(n *enode.Node) {
	node, ok := c.output[n.ID()]

	// Skip validation of recently-seen nodes.
	if ok && time.Since(node.LastCheck) < c.revalidateInterval {
		return
	}

	// Request the node record.
	nn, err := c.disc.RequestENR(n)
	node.LastCheck = truncNow()
	if err != nil {
		if node.Score == 0 {
			// Node doesn't implement EIP-868.
			log.Debug("Skipping node", "id", n.ID())
			return
		}
		node.Score /= 2
	} else {
		node.N = nn
		node.Seq = nn.Seq()
		node.Score++
		if node.FirstResponse.IsZero() {
			node.FirstResponse = node.LastCheck
		}
		node.LastResponse = node.LastCheck
	}

	// Store/update node in output set.
	if node.Score <= 0 {
		log.Info("Removing node", "id", n.ID())
		delete(c.output, n.ID())
	} else {
		log.Info("Updating node", "id", n.ID(), "seq", n.Seq(), "score", node.Score)
		c.output[n.ID()] = node
	}
}

func truncNow() time.Time {
	return time.Now().UTC().Truncate(1 * time.Second)
}
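The `run` loop above is a fan-in over several iterators with a live counter and an optional timeout. A stripped-down, runnable sketch of just that concurrency pattern (toy producers standing in for `enode.Iterator`):

```go
package main

import (
	"fmt"
	"time"
)

// Several producers feed one channel; each signals completion on doneCh,
// and the loop exits once all producers are done or the timeout fires,
// mirroring crawler.run.
func main() {
	var (
		ch      = make(chan int)
		doneCh  = make(chan struct{}, 3)
		live    = 3
		timeout = time.After(time.Second)
	)
	for i := 0; i < 3; i++ {
		go func(i int) {
			defer func() { doneCh <- struct{}{} }()
			ch <- i // a real iterator would loop over it.Next()
		}(i)
	}
loop:
	for {
		select {
		case n := <-ch:
			fmt.Println("got", n)
		case <-doneCh:
			if live--; live == 0 {
				break loop
			}
		case <-timeout:
			break loop
		}
	}
}
```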
@ -19,10 +19,10 @@ package main
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
@ -38,36 +38,59 @@ var (
|
||||
discv4PingCommand,
|
||||
discv4RequestRecordCommand,
|
||||
discv4ResolveCommand,
|
||||
discv4ResolveJSONCommand,
|
||||
discv4CrawlCommand,
|
||||
},
|
||||
}
|
||||
discv4PingCommand = cli.Command{
|
||||
Name: "ping",
|
||||
Usage: "Sends ping to a node",
|
||||
Action: discv4Ping,
|
||||
Name: "ping",
|
||||
Usage: "Sends ping to a node",
|
||||
Action: discv4Ping,
|
||||
ArgsUsage: "<node>",
|
||||
}
|
||||
discv4RequestRecordCommand = cli.Command{
|
||||
Name: "requestenr",
|
||||
Usage: "Requests a node record using EIP-868 enrRequest",
|
||||
Action: discv4RequestRecord,
|
||||
Name: "requestenr",
|
||||
Usage: "Requests a node record using EIP-868 enrRequest",
|
||||
Action: discv4RequestRecord,
|
||||
ArgsUsage: "<node>",
|
||||
}
|
||||
discv4ResolveCommand = cli.Command{
|
||||
Name: "resolve",
|
||||
Usage: "Finds a node in the DHT",
|
||||
Action: discv4Resolve,
|
||||
Flags: []cli.Flag{bootnodesFlag},
|
||||
Name: "resolve",
|
||||
Usage: "Finds a node in the DHT",
|
||||
Action: discv4Resolve,
|
||||
ArgsUsage: "<node>",
|
||||
Flags: []cli.Flag{bootnodesFlag},
|
||||
}
|
||||
discv4ResolveJSONCommand = cli.Command{
|
||||
Name: "resolve-json",
|
||||
Usage: "Re-resolves nodes in a nodes.json file",
|
||||
Action: discv4ResolveJSON,
|
||||
Flags: []cli.Flag{bootnodesFlag},
|
||||
ArgsUsage: "<nodes.json file>",
|
||||
}
|
||||
discv4CrawlCommand = cli.Command{
|
||||
Name: "crawl",
|
||||
Usage: "Updates a nodes.json file with random nodes found in the DHT",
|
||||
Action: discv4Crawl,
|
||||
Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag},
|
||||
}
|
||||
)
|
||||
|
||||
var bootnodesFlag = cli.StringFlag{
|
||||
Name: "bootnodes",
|
||||
Usage: "Comma separated nodes used for bootstrapping",
|
||||
}
|
||||
var (
|
||||
bootnodesFlag = cli.StringFlag{
|
||||
Name: "bootnodes",
|
||||
Usage: "Comma separated nodes used for bootstrapping",
|
||||
}
|
||||
crawlTimeoutFlag = cli.DurationFlag{
|
||||
Name: "timeout",
|
||||
Usage: "Time limit for the crawl.",
|
||||
Value: 30 * time.Minute,
|
||||
}
|
||||
)
|
||||
|
||||
func discv4Ping(ctx *cli.Context) error {
|
||||
n, disc, err := getNodeArgAndStartV4(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n := getNodeArg(ctx)
|
||||
disc := startV4(ctx)
|
||||
defer disc.Close()
|
||||
|
||||
start := time.Now()
|
||||
@ -79,10 +102,8 @@ func discv4Ping(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
func discv4RequestRecord(ctx *cli.Context) error {
|
||||
n, disc, err := getNodeArgAndStartV4(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n := getNodeArg(ctx)
|
||||
disc := startV4(ctx)
|
||||
defer disc.Close()
|
||||
|
||||
respN, err := disc.RequestENR(n)
|
||||
@ -94,33 +115,61 @@ func discv4RequestRecord(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
func discv4Resolve(ctx *cli.Context) error {
	n, disc, err := getNodeArgAndStartV4(ctx)
	if err != nil {
		return err
	}
	n := getNodeArg(ctx)
	disc := startV4(ctx)
	defer disc.Close()

	fmt.Println(disc.Resolve(n).String())
	return nil
}

func getNodeArgAndStartV4(ctx *cli.Context) (*enode.Node, *discover.UDPv4, error) {
	if ctx.NArg() != 1 {
		return nil, nil, fmt.Errorf("missing node as command-line argument")
func discv4ResolveJSON(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return fmt.Errorf("need nodes file as argument")
	}
	n, err := parseNode(ctx.Args()[0])
	if err != nil {
		return nil, nil, err
	nodesFile := ctx.Args().Get(0)
	inputSet := make(nodeSet)
	if common.FileExist(nodesFile) {
		inputSet = loadNodesJSON(nodesFile)
	}
	var bootnodes []*enode.Node
	if commandHasFlag(ctx, bootnodesFlag) {
		bootnodes, err = parseBootnodes(ctx)

	// Add extra nodes from command line arguments.
	var nodeargs []*enode.Node
	for i := 1; i < ctx.NArg(); i++ {
		n, err := parseNode(ctx.Args().Get(i))
		if err != nil {
			return nil, nil, err
			exit(err)
		}
		nodeargs = append(nodeargs, n)
	}
	disc, err := startV4(bootnodes)
	return n, disc, err

	// Run the crawler.
	disc := startV4(ctx)
	defer disc.Close()
	c := newCrawler(inputSet, disc, enode.IterNodes(nodeargs))
	c.revalidateInterval = 0
	output := c.run(0)
	writeNodesJSON(nodesFile, output)
	return nil
}

func discv4Crawl(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return fmt.Errorf("need nodes file as argument")
	}
	nodesFile := ctx.Args().First()
	var inputSet nodeSet
	if common.FileExist(nodesFile) {
		inputSet = loadNodesJSON(nodesFile)
	}

	disc := startV4(ctx)
	defer disc.Close()
	c := newCrawler(inputSet, disc, disc.RandomNodes())
	c.revalidateInterval = 10 * time.Minute
	output := c.run(ctx.Duration(crawlTimeoutFlag.Name))
	writeNodesJSON(nodesFile, output)
	return nil
}

func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) {
@ -139,28 +188,39 @@ func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) {
	return nodes, nil
}

// commandHasFlag returns true if the current command supports the given flag.
func commandHasFlag(ctx *cli.Context, flag cli.Flag) bool {
	flags := ctx.FlagNames()
	sort.Strings(flags)
	i := sort.SearchStrings(flags, flag.GetName())
	return i != len(flags) && flags[i] == flag.GetName()
// startV4 starts an ephemeral discovery V4 node.
func startV4(ctx *cli.Context) *discover.UDPv4 {
	socket, ln, cfg, err := listen()
	if err != nil {
		exit(err)
	}
	if commandHasFlag(ctx, bootnodesFlag) {
		bn, err := parseBootnodes(ctx)
		if err != nil {
			exit(err)
		}
		cfg.Bootnodes = bn
	}
	disc, err := discover.ListenV4(socket, ln, cfg)
	if err != nil {
		exit(err)
	}
	return disc
}

// startV4 starts an ephemeral discovery V4 node.
func startV4(bootnodes []*enode.Node) (*discover.UDPv4, error) {
func listen() (*net.UDPConn, *enode.LocalNode, discover.Config, error) {
	var cfg discover.Config
	cfg.Bootnodes = bootnodes
	cfg.PrivateKey, _ = crypto.GenerateKey()
	db, _ := enode.OpenDB("")
	ln := enode.NewLocalNode(db, cfg.PrivateKey)

	socket, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IP{0, 0, 0, 0}})
	if err != nil {
		return nil, err
		db.Close()
		return nil, nil, cfg, err
	}
	addr := socket.LocalAddr().(*net.UDPAddr)
	ln.SetFallbackIP(net.IP{127, 0, 0, 1})
	ln.SetFallbackUDP(addr.Port)
	return discover.ListenUDP(socket, ln, cfg)
	return socket, ln, cfg, nil
}
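[Editor's note] The refactor above splits socket/identity setup (listen) out of protocol startup (startV4). A minimal sketch of the same composition, using only the go-ethereum packages this file already imports (placeholder error handling; not the PR's code):

// newEphemeralV4 brings up a throwaway discv4 node, mirroring listen + startV4.
func newEphemeralV4() *discover.UDPv4 {
	var cfg discover.Config
	cfg.PrivateKey, _ = crypto.GenerateKey() // ephemeral identity
	db, _ := enode.OpenDB("")                // in-memory node database
	ln := enode.NewLocalNode(db, cfg.PrivateKey)
	socket, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IP{0, 0, 0, 0}})
	if err != nil {
		exit(err)
	}
	ln.SetFallbackIP(net.IP{127, 0, 0, 1})
	ln.SetFallbackUDP(socket.LocalAddr().(*net.UDPAddr).Port)
	disc, err := discover.ListenV4(socket, ln, cfg)
	if err != nil {
		exit(err)
	}
	return disc // caller must disc.Close()
}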
163
cmd/devp2p/dns_cloudflare.go
Normal file
@ -0,0 +1,163 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"fmt"
	"strings"

	"github.com/cloudflare/cloudflare-go"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/dnsdisc"
	"gopkg.in/urfave/cli.v1"
)

var (
	cloudflareTokenFlag = cli.StringFlag{
		Name:   "token",
		Usage:  "CloudFlare API token",
		EnvVar: "CLOUDFLARE_API_TOKEN",
	}
	cloudflareZoneIDFlag = cli.StringFlag{
		Name:  "zoneid",
		Usage: "CloudFlare Zone ID (optional)",
	}
)

type cloudflareClient struct {
	*cloudflare.API
	zoneID string
}

// newCloudflareClient sets up a CloudFlare API client from command line flags.
func newCloudflareClient(ctx *cli.Context) *cloudflareClient {
	token := ctx.String(cloudflareTokenFlag.Name)
	if token == "" {
		exit(fmt.Errorf("need cloudflare API token to proceed"))
	}
	api, err := cloudflare.NewWithAPIToken(token)
	if err != nil {
		exit(fmt.Errorf("can't create Cloudflare client: %v", err))
	}
	return &cloudflareClient{
		API:    api,
		zoneID: ctx.String(cloudflareZoneIDFlag.Name),
	}
}

// deploy uploads the given tree to CloudFlare DNS.
func (c *cloudflareClient) deploy(name string, t *dnsdisc.Tree) error {
	if err := c.checkZone(name); err != nil {
		return err
	}
	records := t.ToTXT(name)
	return c.uploadRecords(name, records)
}

// checkZone verifies permissions on the CloudFlare DNS Zone for name.
func (c *cloudflareClient) checkZone(name string) error {
	if c.zoneID == "" {
		log.Info(fmt.Sprintf("Finding CloudFlare zone ID for %s", name))
		id, err := c.ZoneIDByName(name)
		if err != nil {
			return err
		}
		c.zoneID = id
	}
	log.Info(fmt.Sprintf("Checking Permissions on zone %s", c.zoneID))
	zone, err := c.ZoneDetails(c.zoneID)
	if err != nil {
		return err
	}
	if !strings.HasSuffix(name, "."+zone.Name) {
		return fmt.Errorf("CloudFlare zone name %q does not match name %q to be deployed", zone.Name, name)
	}
	needPerms := map[string]bool{"#zone:edit": false, "#zone:read": false}
	for _, perm := range zone.Permissions {
		if _, ok := needPerms[perm]; ok {
			needPerms[perm] = true
		}
	}
	for _, ok := range needPerms {
		if !ok {
			return fmt.Errorf("wrong permissions on zone %s: %v", c.zoneID, needPerms)
		}
	}
	return nil
}

// uploadRecords updates the TXT records at a particular subdomain. All non-root records
// will have a TTL of "infinity" and all existing records not in the new map will be
// nuked!
func (c *cloudflareClient) uploadRecords(name string, records map[string]string) error {
	// Convert all names to lowercase.
	lrecords := make(map[string]string, len(records))
	for name, r := range records {
		lrecords[strings.ToLower(name)] = r
	}
	records = lrecords

	log.Info(fmt.Sprintf("Retrieving existing TXT records on %s", name))
	entries, err := c.DNSRecords(c.zoneID, cloudflare.DNSRecord{Type: "TXT"})
	if err != nil {
		return err
	}
	existing := make(map[string]cloudflare.DNSRecord)
	for _, entry := range entries {
		if !strings.HasSuffix(entry.Name, name) {
			continue
		}
		existing[strings.ToLower(entry.Name)] = entry
	}

	// Iterate over the new records and inject anything missing.
	for path, val := range records {
		old, exists := existing[path]
		if !exists {
			// Entry is unknown, push a new one to Cloudflare.
			log.Info(fmt.Sprintf("Creating %s = %q", path, val))
			ttl := 1
			if path != name {
				ttl = 2147483647 // Max TTL permitted by Cloudflare
			}
			_, err = c.CreateDNSRecord(c.zoneID, cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl})
		} else if old.Content != val {
			// Entry already exists, only change its content.
			log.Info(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val))
			old.Content = val
			err = c.UpdateDNSRecord(c.zoneID, old.ID, old)
		} else {
			log.Info(fmt.Sprintf("Skipping %s = %q", path, val))
		}
		if err != nil {
			return fmt.Errorf("failed to publish %s: %v", path, err)
		}
	}

	// Iterate over the old records and delete anything stale.
	for path, entry := range existing {
		if _, ok := records[path]; ok {
			continue
		}
		// Stale entry, nuke it.
		log.Info(fmt.Sprintf("Deleting %s = %q", path, entry.Content))
		if err := c.DeleteDNSRecord(c.zoneID, entry.ID); err != nil {
			return fmt.Errorf("failed to delete %s: %v", path, err)
		}
	}
	return nil
}
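[Editor's note] uploadRecords is a three-way reconcile: create what is missing, update what changed, delete what is stale. The decision rule in isolation, as a hypothetical helper (not part of this file):

// reconcile classifies one desired record against the existing set.
// Anything present in existing but absent from the desired map is
// deleted in a separate pass, as in uploadRecords above.
func reconcile(existing map[string]string, path, val string) string {
	old, ok := existing[path]
	switch {
	case !ok:
		return "create"
	case old != val:
		return "update"
	default:
		return "skip"
	}
}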
361
cmd/devp2p/dnscmd.go
Normal file
@ -0,0 +1,361 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"crypto/ecdsa"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"time"

	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/console"
	"github.com/ethereum/go-ethereum/p2p/dnsdisc"
	"github.com/ethereum/go-ethereum/p2p/enode"
	cli "gopkg.in/urfave/cli.v1"
)

var (
	dnsCommand = cli.Command{
		Name:  "dns",
		Usage: "DNS Discovery Commands",
		Subcommands: []cli.Command{
			dnsSyncCommand,
			dnsSignCommand,
			dnsTXTCommand,
			dnsCloudflareCommand,
		},
	}
	dnsSyncCommand = cli.Command{
		Name:      "sync",
		Usage:     "Download a DNS discovery tree",
		ArgsUsage: "<url> [ <directory> ]",
		Action:    dnsSync,
		Flags:     []cli.Flag{dnsTimeoutFlag},
	}
	dnsSignCommand = cli.Command{
		Name:      "sign",
		Usage:     "Sign a DNS discovery tree",
		ArgsUsage: "<tree-directory> <key-file>",
		Action:    dnsSign,
		Flags:     []cli.Flag{dnsDomainFlag, dnsSeqFlag},
	}
	dnsTXTCommand = cli.Command{
		Name:      "to-txt",
		Usage:     "Creates DNS TXT records for a discovery tree",
		ArgsUsage: "<tree-directory> <output-file>",
		Action:    dnsToTXT,
	}
	dnsCloudflareCommand = cli.Command{
		Name:      "to-cloudflare",
		Usage:     "Deploy DNS TXT records to cloudflare",
		ArgsUsage: "<tree-directory>",
		Action:    dnsToCloudflare,
		Flags:     []cli.Flag{cloudflareTokenFlag, cloudflareZoneIDFlag},
	}
)

var (
	dnsTimeoutFlag = cli.DurationFlag{
		Name:  "timeout",
		Usage: "Timeout for DNS lookups",
	}
	dnsDomainFlag = cli.StringFlag{
		Name:  "domain",
		Usage: "Domain name of the tree",
	}
	dnsSeqFlag = cli.UintFlag{
		Name:  "seq",
		Usage: "New sequence number of the tree",
	}
)

// dnsSync performs dnsSyncCommand.
func dnsSync(ctx *cli.Context) error {
	var (
		c      = dnsClient(ctx)
		url    = ctx.Args().Get(0)
		outdir = ctx.Args().Get(1)
	)
	domain, _, err := dnsdisc.ParseURL(url)
	if err != nil {
		return err
	}
	if outdir == "" {
		outdir = domain
	}

	t, err := c.SyncTree(url)
	if err != nil {
		return err
	}
	def := treeToDefinition(url, t)
	def.Meta.LastModified = time.Now()
	writeTreeMetadata(outdir, def)
	writeTreeNodes(outdir, def)
	return nil
}

func dnsSign(ctx *cli.Context) error {
	if ctx.NArg() < 2 {
		return fmt.Errorf("need tree definition directory and key file as arguments")
	}
	var (
		defdir  = ctx.Args().Get(0)
		keyfile = ctx.Args().Get(1)
		def     = loadTreeDefinition(defdir)
		domain  = directoryName(defdir)
	)
	if def.Meta.URL != "" {
		d, _, err := dnsdisc.ParseURL(def.Meta.URL)
		if err != nil {
			return fmt.Errorf("invalid 'url' field: %v", err)
		}
		domain = d
	}
	if ctx.IsSet(dnsDomainFlag.Name) {
		domain = ctx.String(dnsDomainFlag.Name)
	}
	if ctx.IsSet(dnsSeqFlag.Name) {
		def.Meta.Seq = ctx.Uint(dnsSeqFlag.Name)
	} else {
		def.Meta.Seq++ // Auto-bump sequence number if not supplied via flag.
	}
	t, err := dnsdisc.MakeTree(def.Meta.Seq, def.Nodes, def.Meta.Links)
	if err != nil {
		return err
	}

	key := loadSigningKey(keyfile)
	url, err := t.Sign(key, domain)
	if err != nil {
		return fmt.Errorf("can't sign: %v", err)
	}

	def = treeToDefinition(url, t)
	def.Meta.LastModified = time.Now()
	writeTreeMetadata(defdir, def)
	return nil
}

func directoryName(dir string) string {
	abs, err := filepath.Abs(dir)
	if err != nil {
		exit(err)
	}
	return filepath.Base(abs)
}

// dnsToTXT performs dnsTXTCommand.
func dnsToTXT(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return fmt.Errorf("need tree definition directory as argument")
	}
	output := ctx.Args().Get(1)
	if output == "" {
		output = "-" // default to stdout
	}
	domain, t, err := loadTreeDefinitionForExport(ctx.Args().Get(0))
	if err != nil {
		return err
	}
	writeTXTJSON(output, t.ToTXT(domain))
	return nil
}

// dnsToCloudflare performs dnsCloudflareCommand.
func dnsToCloudflare(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return fmt.Errorf("need tree definition directory as argument")
	}
	domain, t, err := loadTreeDefinitionForExport(ctx.Args().Get(0))
	if err != nil {
		return err
	}
	client := newCloudflareClient(ctx)
	return client.deploy(domain, t)
}

// loadSigningKey loads a private key in Ethereum keystore format.
func loadSigningKey(keyfile string) *ecdsa.PrivateKey {
	keyjson, err := ioutil.ReadFile(keyfile)
	if err != nil {
		exit(fmt.Errorf("failed to read the keyfile at '%s': %v", keyfile, err))
	}
	password, _ := console.Stdin.PromptPassword("Please enter the password for '" + keyfile + "': ")
	key, err := keystore.DecryptKey(keyjson, password)
	if err != nil {
		exit(fmt.Errorf("error decrypting key: %v", err))
	}
	return key.PrivateKey
}

// dnsClient configures the DNS discovery client from command line flags.
func dnsClient(ctx *cli.Context) *dnsdisc.Client {
	var cfg dnsdisc.Config
	if commandHasFlag(ctx, dnsTimeoutFlag) {
		cfg.Timeout = ctx.Duration(dnsTimeoutFlag.Name)
	}
	c, _ := dnsdisc.NewClient(cfg) // cannot fail because no URLs given
	return c
}

// There are two file formats for DNS node trees on disk:
//
// The 'TXT' format is a single JSON file containing DNS TXT records
// as a JSON object where the keys are names and the values are objects
// containing the value of the record.
//
// The 'definition' format is a directory containing two files:
//
//      enrtree-info.json  -- contains sequence number & links to other trees
//      nodes.json         -- contains the nodes as a JSON array.
//
// This format exists because it's convenient to edit. nodes.json can be generated
// in multiple ways: it may be written by a DHT crawler or compiled by a human.
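[Editor's note] To make the 'definition' format concrete, here is a hypothetical enrtree-info.json (all values are placeholders, not real keys or signatures; field names follow the dnsMetaJSON type defined next):

{
  "url": "enrtree://<base32-public-key>@nodes.example.org",
  "seq": 3,
  "signature": "<base64-signature>",
  "links": ["enrtree://<base32-public-key>@morenodes.example.org"],
  "lastModified": "2019-10-18T12:00:00Z"
}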
type dnsDefinition struct {
	Meta  dnsMetaJSON
	Nodes []*enode.Node
}

type dnsMetaJSON struct {
	URL          string    `json:"url,omitempty"`
	Seq          uint      `json:"seq"`
	Sig          string    `json:"signature,omitempty"`
	Links        []string  `json:"links"`
	LastModified time.Time `json:"lastModified"`
}

func treeToDefinition(url string, t *dnsdisc.Tree) *dnsDefinition {
	meta := dnsMetaJSON{
		URL:   url,
		Seq:   t.Seq(),
		Sig:   t.Signature(),
		Links: t.Links(),
	}
	if meta.Links == nil {
		meta.Links = []string{}
	}
	return &dnsDefinition{Meta: meta, Nodes: t.Nodes()}
}

// loadTreeDefinition loads a directory in 'definition' format.
func loadTreeDefinition(directory string) *dnsDefinition {
	metaFile, nodesFile := treeDefinitionFiles(directory)
	var def dnsDefinition
	err := common.LoadJSON(metaFile, &def.Meta)
	if err != nil && !os.IsNotExist(err) {
		exit(err)
	}
	if def.Meta.Links == nil {
		def.Meta.Links = []string{}
	}
	// Check link syntax.
	for _, link := range def.Meta.Links {
		if _, _, err := dnsdisc.ParseURL(link); err != nil {
			exit(fmt.Errorf("invalid link %q: %v", link, err))
		}
	}
	// Check/convert nodes.
	nodes := loadNodesJSON(nodesFile)
	if err := nodes.verify(); err != nil {
		exit(err)
	}
	def.Nodes = nodes.nodes()
	return &def
}

// loadTreeDefinitionForExport loads a DNS tree and ensures it is signed.
func loadTreeDefinitionForExport(dir string) (domain string, t *dnsdisc.Tree, err error) {
	metaFile, _ := treeDefinitionFiles(dir)
	def := loadTreeDefinition(dir)
	if def.Meta.URL == "" {
		return "", nil, fmt.Errorf("missing 'url' field in %v", metaFile)
	}
	domain, pubkey, err := dnsdisc.ParseURL(def.Meta.URL)
	if err != nil {
		return "", nil, fmt.Errorf("invalid 'url' field in %v: %v", metaFile, err)
	}
	if t, err = dnsdisc.MakeTree(def.Meta.Seq, def.Nodes, def.Meta.Links); err != nil {
		return "", nil, err
	}
	if err := ensureValidTreeSignature(t, pubkey, def.Meta.Sig); err != nil {
		return "", nil, err
	}
	return domain, t, nil
}

// ensureValidTreeSignature checks that sig is valid for tree and assigns it as the
// tree's signature if valid.
func ensureValidTreeSignature(t *dnsdisc.Tree, pubkey *ecdsa.PublicKey, sig string) error {
	if sig == "" {
		return fmt.Errorf("missing signature, run 'devp2p dns sign' first")
	}
	if err := t.SetSignature(pubkey, sig); err != nil {
		return fmt.Errorf("invalid signature on tree, run 'devp2p dns sign' to update it")
	}
	return nil
}

// writeTreeMetadata writes a DNS node tree metadata file to the given directory.
func writeTreeMetadata(directory string, def *dnsDefinition) {
	metaJSON, err := json.MarshalIndent(&def.Meta, "", jsonIndent)
	if err != nil {
		exit(err)
	}
	if err := os.Mkdir(directory, 0744); err != nil && !os.IsExist(err) {
		exit(err)
	}
	metaFile, _ := treeDefinitionFiles(directory)
	if err := ioutil.WriteFile(metaFile, metaJSON, 0644); err != nil {
		exit(err)
	}
}

func writeTreeNodes(directory string, def *dnsDefinition) {
	ns := make(nodeSet, len(def.Nodes))
	ns.add(def.Nodes...)
	_, nodesFile := treeDefinitionFiles(directory)
	writeNodesJSON(nodesFile, ns)
}

func treeDefinitionFiles(directory string) (string, string) {
	meta := filepath.Join(directory, "enrtree-info.json")
	nodes := filepath.Join(directory, "nodes.json")
	return meta, nodes
}

// writeTXTJSON writes TXT records in JSON format.
func writeTXTJSON(file string, txt map[string]string) {
	txtJSON, err := json.MarshalIndent(txt, "", jsonIndent)
	if err != nil {
		exit(err)
	}
	if file == "-" {
		os.Stdout.Write(txtJSON)
		fmt.Println()
		return
	}
	if err := ioutil.WriteFile(file, txtJSON, 0644); err != nil {
		exit(err)
	}
}
@ -20,8 +20,10 @@ import (
	"fmt"
	"os"
	"path/filepath"
	"sort"

	"github.com/ethereum/go-ethereum/internal/debug"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/params"
	"gopkg.in/urfave/cli.v1"
)
@ -57,12 +59,39 @@ func init() {
	app.Commands = []cli.Command{
		enrdumpCommand,
		discv4Command,
		dnsCommand,
		nodesetCommand,
	}
}

func main() {
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	exit(app.Run(os.Args))
}

// commandHasFlag returns true if the current command supports the given flag.
func commandHasFlag(ctx *cli.Context, flag cli.Flag) bool {
	flags := ctx.FlagNames()
	sort.Strings(flags)
	i := sort.SearchStrings(flags, flag.GetName())
	return i != len(flags) && flags[i] == flag.GetName()
}

// getNodeArg handles the common case of a single node descriptor argument.
func getNodeArg(ctx *cli.Context) *enode.Node {
	if ctx.NArg() != 1 {
		exit("missing node as command-line argument")
	}
	n, err := parseNode(ctx.Args()[0])
	if err != nil {
		exit(err)
	}
	return n
}

func exit(err interface{}) {
	if err == nil {
		os.Exit(0)
	}
	fmt.Fprintln(os.Stderr, err)
	os.Exit(1)
}
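[Editor's note] commandHasFlag relies on the standard sorted-membership idiom: sort.SearchStrings returns the insertion index, so a hit requires both an in-range index and an exact match. The same idiom in isolation:

names := []string{"timeout", "bootnodes", "verbosity"}
sort.Strings(names)                       // ["bootnodes", "timeout", "verbosity"]
i := sort.SearchStrings(names, "timeout") // i == 1
found := i != len(names) && names[i] == "timeout" // true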
102
cmd/devp2p/nodeset.go
Normal file
@ -0,0 +1,102 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"sort"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

const jsonIndent = "    "

// nodeSet is the nodes.json file format. It holds a set of node records
// as a JSON object.
type nodeSet map[enode.ID]nodeJSON

type nodeJSON struct {
	Seq uint64      `json:"seq"`
	N   *enode.Node `json:"record"`

	// The score tracks how many liveness checks were performed. It is incremented by one
	// every time the node passes a check, and halved every time it doesn't.
	Score int `json:"score,omitempty"`
	// These two track the time of last successful contact.
	FirstResponse time.Time `json:"firstResponse,omitempty"`
	LastResponse  time.Time `json:"lastResponse,omitempty"`
	// This one tracks the time of our last attempt to contact the node.
	LastCheck time.Time `json:"lastCheck,omitempty"`
}

func loadNodesJSON(file string) nodeSet {
	var nodes nodeSet
	if err := common.LoadJSON(file, &nodes); err != nil {
		exit(err)
	}
	return nodes
}

func writeNodesJSON(file string, nodes nodeSet) {
	nodesJSON, err := json.MarshalIndent(nodes, "", jsonIndent)
	if err != nil {
		exit(err)
	}
	if file == "-" {
		os.Stdout.Write(nodesJSON)
		return
	}
	if err := ioutil.WriteFile(file, nodesJSON, 0644); err != nil {
		exit(err)
	}
}

func (ns nodeSet) nodes() []*enode.Node {
	result := make([]*enode.Node, 0, len(ns))
	for _, n := range ns {
		result = append(result, n.N)
	}
	// Sort by ID.
	sort.Slice(result, func(i, j int) bool {
		return bytes.Compare(result[i].ID().Bytes(), result[j].ID().Bytes()) < 0
	})
	return result
}

func (ns nodeSet) add(nodes ...*enode.Node) {
	for _, n := range nodes {
		ns[n.ID()] = nodeJSON{Seq: n.Seq(), N: n}
	}
}

func (ns nodeSet) verify() error {
	for id, n := range ns {
		if n.N.ID() != id {
			return fmt.Errorf("invalid node %v: ID does not match ID %v in record", id, n.N.ID())
		}
		if n.N.Seq() != n.Seq {
			return fmt.Errorf("invalid node %v: 'seq' does not match seq %d from record", id, n.N.Seq())
		}
	}
	return nil
}
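[Editor's note] The score rule documented on nodeJSON (increment on a passed liveness check, halve on a failure) is applied by the crawler; expressed as a pure function for clarity (an illustration of the stated rule, not code from this file):

func nextScore(score int, passed bool) int {
	if passed {
		return score + 1
	}
	return score / 2
}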
193
cmd/devp2p/nodesetcmd.go
Normal file
@ -0,0 +1,193 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"fmt"
	"net"
	"time"

	"github.com/ethereum/go-ethereum/core/forkid"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"gopkg.in/urfave/cli.v1"
)

var (
	nodesetCommand = cli.Command{
		Name:  "nodeset",
		Usage: "Node set tools",
		Subcommands: []cli.Command{
			nodesetInfoCommand,
			nodesetFilterCommand,
		},
	}
	nodesetInfoCommand = cli.Command{
		Name:      "info",
		Usage:     "Shows statistics about a node set",
		Action:    nodesetInfo,
		ArgsUsage: "<nodes.json>",
	}
	nodesetFilterCommand = cli.Command{
		Name:      "filter",
		Usage:     "Filters a node set",
		Action:    nodesetFilter,
		ArgsUsage: "<nodes.json> filters..",

		SkipFlagParsing: true,
	}
)

func nodesetInfo(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return fmt.Errorf("need nodes file as argument")
	}

	ns := loadNodesJSON(ctx.Args().First())
	fmt.Printf("Set contains %d nodes.\n", len(ns))
	return nil
}

func nodesetFilter(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return fmt.Errorf("need nodes file as argument")
	}
	ns := loadNodesJSON(ctx.Args().First())
	filter, err := andFilter(ctx.Args().Tail())
	if err != nil {
		return err
	}

	result := make(nodeSet)
	for id, n := range ns {
		if filter(n) {
			result[id] = n
		}
	}
	writeNodesJSON("-", result)
	return nil
}

type nodeFilter func(nodeJSON) bool

type nodeFilterC struct {
	narg int
	fn   func([]string) (nodeFilter, error)
}

var filterFlags = map[string]nodeFilterC{
	"-ip":          {1, ipFilter},
	"-min-age":     {1, minAgeFilter},
	"-eth-network": {1, ethFilter},
	"-les-server":  {0, lesFilter},
}

func parseFilters(args []string) ([]nodeFilter, error) {
	var filters []nodeFilter
	for len(args) > 0 {
		fc, ok := filterFlags[args[0]]
		if !ok {
			return nil, fmt.Errorf("invalid filter %q", args[0])
		}
		if len(args) < fc.narg {
			return nil, fmt.Errorf("filter %q wants %d arguments, have %d", args[0], fc.narg, len(args))
		}
		filter, err := fc.fn(args[1:])
		if err != nil {
			return nil, fmt.Errorf("%s: %v", args[0], err)
		}
		filters = append(filters, filter)
		args = args[fc.narg+1:]
	}
	return filters, nil
}

func andFilter(args []string) (nodeFilter, error) {
	checks, err := parseFilters(args)
	if err != nil {
		return nil, err
	}
	f := func(n nodeJSON) bool {
		for _, filter := range checks {
			if !filter(n) {
				return false
			}
		}
		return true
	}
	return f, nil
}

func ipFilter(args []string) (nodeFilter, error) {
	_, cidr, err := net.ParseCIDR(args[0])
	if err != nil {
		return nil, err
	}
	f := func(n nodeJSON) bool { return cidr.Contains(n.N.IP()) }
	return f, nil
}

func minAgeFilter(args []string) (nodeFilter, error) {
	minage, err := time.ParseDuration(args[0])
	if err != nil {
		return nil, err
	}
	f := func(n nodeJSON) bool {
		age := n.LastResponse.Sub(n.FirstResponse)
		return age >= minage
	}
	return f, nil
}

func ethFilter(args []string) (nodeFilter, error) {
	var filter forkid.Filter
	switch args[0] {
	case "mainnet":
		filter = forkid.NewStaticFilter(params.MainnetChainConfig, params.MainnetGenesisHash)
	case "rinkeby":
		filter = forkid.NewStaticFilter(params.RinkebyChainConfig, params.RinkebyGenesisHash)
	case "goerli":
		filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash)
	case "ropsten":
		filter = forkid.NewStaticFilter(params.TestnetChainConfig, params.TestnetGenesisHash)
	default:
		return nil, fmt.Errorf("unknown network %q", args[0])
	}

	f := func(n nodeJSON) bool {
		var eth struct {
			ForkID forkid.ID
			_      []rlp.RawValue `rlp:"tail"`
		}
		if n.N.Load(enr.WithEntry("eth", &eth)) != nil {
			return false
		}
		return filter(eth.ForkID) == nil
	}
	return f, nil
}

func lesFilter(args []string) (nodeFilter, error) {
	f := func(n nodeJSON) bool {
		var les struct {
			_ []rlp.RawValue `rlp:"tail"`
		}
		return n.N.Load(enr.WithEntry("les", &les)) == nil
	}
	return f, nil
}
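[Editor's note] Filters compose as a conjunction. A made-up invocation such as `devp2p nodeset filter nodes.json -eth-network mainnet -ip 10.0.0.0/8 -min-age 24h` is equivalent to the following in-process composition (argument values are illustrative):

filter, err := andFilter([]string{"-eth-network", "mainnet", "-ip", "10.0.0.0/8", "-min-age", "24h"})
// filter(n) is true only for mainnet-forkid nodes inside 10.0.0.0/8
// whose responses span at least 24 hours.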
@ -17,6 +17,7 @@
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
@ -145,6 +146,7 @@ func runCmd(ctx *cli.Context) error {
	} else {
		hexcode = []byte(codeFlag)
	}
	hexcode = bytes.TrimSpace(hexcode)
	if len(hexcode)%2 != 0 {
		fmt.Printf("Invalid input length for hex data (%d)\n", len(hexcode))
		os.Exit(1)
@ -198,6 +200,8 @@ func runCmd(ctx *cli.Context) error {

	if chainConfig != nil {
		runtimeConfig.ChainConfig = chainConfig
	} else {
		runtimeConfig.ChainConfig = params.AllEthashProtocolChanges
	}
	tstart := time.Now()
	var leftOverGas uint64
@ -156,6 +156,11 @@ func makeFullNode(ctx *cli.Context) *node.Node {
	}
	utils.RegisterEthService(stack, &cfg.Eth)

	if ctx.GlobalBool(utils.StateDiffFlag.Name) {
		cfg.Eth.StateDiff = true
		utils.RegisterStateDiffService(stack, ctx)
	}

	if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
		utils.RegisterDashboardService(stack, &cfg.Dashboard, gitCommit)
	}
@ -182,6 +187,7 @@ func makeFullNode(ctx *cli.Context) *node.Node {
	if cfg.Ethstats.URL != "" {
		utils.RegisterEthStatsService(stack, cfg.Ethstats.URL)
	}

	return stack
}
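[Editor's note] This hunk is the wiring for the PR's state diff service: with --statediff set, the service is registered on the node stack next to eth. The helper's body is not shown in this diff; a plausible shape, using the node stack's standard registration pattern (the statediff constructor name below is a guess for illustration, not the PR's actual code):

func RegisterStateDiffService(stack *node.Node, ctx *cli.Context) {
	if err := stack.Register(func(sctx *node.ServiceContext) (node.Service, error) {
		return statediff.New(sctx, ctx) // hypothetical constructor
	}); err != nil {
		Fatalf("Failed to register the statediff service: %v", err)
	}
}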
@ -87,7 +87,7 @@ func TestIPCAttachWelcome(t *testing.T) {
		"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
		"--etherbase", coinbase, "--shh", "--ipcpath", ipc)

	time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
	waitForEndpoint(t, ipc, 3*time.Second)
	testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs)

	geth.Interrupt()
@ -101,8 +101,9 @@ func TestHTTPAttachWelcome(t *testing.T) {
		"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
		"--etherbase", coinbase, "--rpc", "--rpcport", port)

	time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
	testAttachWelcome(t, geth, "http://localhost:"+port, httpAPIs)
	endpoint := "http://127.0.0.1:" + port
	waitForEndpoint(t, endpoint, 3*time.Second)
	testAttachWelcome(t, geth, endpoint, httpAPIs)

	geth.Interrupt()
	geth.ExpectExit()
@ -116,8 +117,9 @@ func TestWSAttachWelcome(t *testing.T) {
		"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
		"--etherbase", coinbase, "--ws", "--wsport", port)

	time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open
	testAttachWelcome(t, geth, "ws://localhost:"+port, httpAPIs)
	endpoint := "ws://127.0.0.1:" + port
	waitForEndpoint(t, endpoint, 3*time.Second)
	testAttachWelcome(t, geth, endpoint, httpAPIs)

	geth.Interrupt()
	geth.ExpectExit()
@ -148,6 +148,11 @@ var (
		utils.GpoPercentileFlag,
		utils.EWASMInterpreterFlag,
		utils.EVMInterpreterFlag,
		utils.StateDiffFlag,
		utils.StateDiffPathsAndProofs,
		utils.StateDiffIntermediateNodes,
		utils.StateDiffStreamBlock,
		utils.StateDiffWatchedAddresses,
		configFileFlag,
	}
@ -508,7 +508,7 @@ func (api *RetestethAPI) mineBlock() error {
			statedb.Prepare(tx.Hash(), common.Hash{}, txCount)
			snap := statedb.Snapshot()

			receipt, _, err := core.ApplyTransaction(
			receipt, err := core.ApplyTransaction(
				api.chainConfig,
				api.blockchain,
				&api.author,
@ -17,13 +17,16 @@
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"testing"
	"time"

	"github.com/docker/docker/pkg/reexec"
	"github.com/ethereum/go-ethereum/internal/cmdtest"
	"github.com/ethereum/go-ethereum/rpc"
)

func tmpdir(t *testing.T) string {
@ -96,3 +99,28 @@ func runGeth(t *testing.T, args ...string) *testgeth {

	return tt
}

// waitForEndpoint attempts to connect to an RPC endpoint until it succeeds.
func waitForEndpoint(t *testing.T, endpoint string, timeout time.Duration) {
	probe := func() bool {
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		defer cancel()
		c, err := rpc.DialContext(ctx, endpoint)
		if c != nil {
			_, err = c.SupportedModules()
			c.Close()
		}
		return err == nil
	}

	start := time.Now()
	for {
		if probe() {
			return
		}
		if time.Since(start) > timeout {
			t.Fatal("endpoint", endpoint, "did not open within", timeout)
		}
		time.Sleep(200 * time.Millisecond)
	}
}
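[Editor's note] waitForEndpoint only reports ready once rpc_modules answers, not merely once the socket accepts; the test hunks above use it like this:

endpoint := "http://127.0.0.1:" + port
waitForEndpoint(t, endpoint, 3*time.Second)
testAttachWelcome(t, geth, endpoint, httpAPIs)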
@ -262,6 +262,16 @@ var AppHelpFlagGroups = []flagGroup{
			utils.MinerLegacyExtraDataFlag,
		},
	},
	{
		Name: "STATE DIFF",
		Flags: []cli.Flag{
			utils.StateDiffFlag,
			utils.StateDiffPathsAndProofs,
			utils.StateDiffIntermediateNodes,
			utils.StateDiffStreamBlock,
			utils.StateDiffWatchedAddresses,
		},
	},
	{
		Name: "MISC",
	},
@ -36,25 +36,27 @@ import (
type alethGenesisSpec struct {
	SealEngine string `json:"sealEngine"`
	Params     struct {
		AccountStartNonce       math2.HexOrDecimal64   `json:"accountStartNonce"`
		MaximumExtraDataSize    hexutil.Uint64         `json:"maximumExtraDataSize"`
		HomesteadForkBlock      hexutil.Uint64         `json:"homesteadForkBlock"`
		DaoHardforkBlock        math2.HexOrDecimal64   `json:"daoHardforkBlock"`
		EIP150ForkBlock         hexutil.Uint64         `json:"EIP150ForkBlock"`
		EIP158ForkBlock         hexutil.Uint64         `json:"EIP158ForkBlock"`
		ByzantiumForkBlock      hexutil.Uint64         `json:"byzantiumForkBlock"`
		ConstantinopleForkBlock hexutil.Uint64         `json:"constantinopleForkBlock"`
		MinGasLimit             hexutil.Uint64         `json:"minGasLimit"`
		MaxGasLimit             hexutil.Uint64         `json:"maxGasLimit"`
		TieBreakingGas          bool                   `json:"tieBreakingGas"`
		GasLimitBoundDivisor    math2.HexOrDecimal64   `json:"gasLimitBoundDivisor"`
		MinimumDifficulty       *hexutil.Big           `json:"minimumDifficulty"`
		DifficultyBoundDivisor  *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"`
		DurationLimit           *math2.HexOrDecimal256 `json:"durationLimit"`
		BlockReward             *hexutil.Big           `json:"blockReward"`
		NetworkID               hexutil.Uint64         `json:"networkID"`
		ChainID                 hexutil.Uint64         `json:"chainID"`
		AllowFutureBlocks       bool                   `json:"allowFutureBlocks"`
		AccountStartNonce          math2.HexOrDecimal64   `json:"accountStartNonce"`
		MaximumExtraDataSize       hexutil.Uint64         `json:"maximumExtraDataSize"`
		HomesteadForkBlock         *hexutil.Big           `json:"homesteadForkBlock,omitempty"`
		DaoHardforkBlock           math2.HexOrDecimal64   `json:"daoHardforkBlock"`
		EIP150ForkBlock            *hexutil.Big           `json:"EIP150ForkBlock,omitempty"`
		EIP158ForkBlock            *hexutil.Big           `json:"EIP158ForkBlock,omitempty"`
		ByzantiumForkBlock         *hexutil.Big           `json:"byzantiumForkBlock,omitempty"`
		ConstantinopleForkBlock    *hexutil.Big           `json:"constantinopleForkBlock,omitempty"`
		ConstantinopleFixForkBlock *hexutil.Big           `json:"constantinopleFixForkBlock,omitempty"`
		IstanbulForkBlock          *hexutil.Big           `json:"istanbulForkBlock,omitempty"`
		MinGasLimit                hexutil.Uint64         `json:"minGasLimit"`
		MaxGasLimit                hexutil.Uint64         `json:"maxGasLimit"`
		TieBreakingGas             bool                   `json:"tieBreakingGas"`
		GasLimitBoundDivisor       math2.HexOrDecimal64   `json:"gasLimitBoundDivisor"`
		MinimumDifficulty          *hexutil.Big           `json:"minimumDifficulty"`
		DifficultyBoundDivisor     *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"`
		DurationLimit              *math2.HexOrDecimal256 `json:"durationLimit"`
		BlockReward                *hexutil.Big           `json:"blockReward"`
		NetworkID                  hexutil.Uint64         `json:"networkID"`
		ChainID                    hexutil.Uint64         `json:"chainID"`
		AllowFutureBlocks          bool                   `json:"allowFutureBlocks"`
	} `json:"params"`

	Genesis struct {
@ -74,7 +76,7 @@ type alethGenesisSpec struct {
// alethGenesisSpecAccount is the prefunded genesis account and/or precompiled
// contract definition.
type alethGenesisSpecAccount struct {
	Balance     *math2.HexOrDecimal256   `json:"balance"`
	Balance     *math2.HexOrDecimal256   `json:"balance,omitempty"`
	Nonce       uint64                   `json:"nonce,omitempty"`
	Precompiled *alethGenesisSpecBuiltin `json:"precompiled,omitempty"`
}
@ -82,7 +84,7 @@ type alethGenesisSpecAccount struct {
// alethGenesisSpecBuiltin is the precompiled contract definition.
type alethGenesisSpecBuiltin struct {
	Name          string                         `json:"name,omitempty"`
	StartingBlock hexutil.Uint64                 `json:"startingBlock,omitempty"`
	StartingBlock *hexutil.Big                   `json:"startingBlock,omitempty"`
	Linear        *alethGenesisSpecLinearPricing `json:"linear,omitempty"`
}

@ -106,21 +108,33 @@ func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSp
	spec.Params.AccountStartNonce = 0
	spec.Params.TieBreakingGas = false
	spec.Params.AllowFutureBlocks = false

	// Dao hardfork block is a special one. The fork block is listed as 0 in the
	// config but aleth will sync with ETC clients up until the actual dao hard
	// fork block.
	spec.Params.DaoHardforkBlock = 0

	spec.Params.HomesteadForkBlock = (hexutil.Uint64)(genesis.Config.HomesteadBlock.Uint64())
	spec.Params.EIP150ForkBlock = (hexutil.Uint64)(genesis.Config.EIP150Block.Uint64())
	spec.Params.EIP158ForkBlock = (hexutil.Uint64)(genesis.Config.EIP158Block.Uint64())

	// Byzantium
	if num := genesis.Config.HomesteadBlock; num != nil {
		spec.Params.HomesteadForkBlock = (*hexutil.Big)(num)
	}
	if num := genesis.Config.EIP150Block; num != nil {
		spec.Params.EIP150ForkBlock = (*hexutil.Big)(num)
	}
	if num := genesis.Config.EIP158Block; num != nil {
		spec.Params.EIP158ForkBlock = (*hexutil.Big)(num)
	}
	if num := genesis.Config.ByzantiumBlock; num != nil {
		spec.setByzantium(num)
		spec.Params.ByzantiumForkBlock = (*hexutil.Big)(num)
	}
	// Constantinople
	if num := genesis.Config.ConstantinopleBlock; num != nil {
		spec.setConstantinople(num)
		spec.Params.ConstantinopleForkBlock = (*hexutil.Big)(num)
	}
	if num := genesis.Config.PetersburgBlock; num != nil {
		spec.Params.ConstantinopleFixForkBlock = (*hexutil.Big)(num)
	}
	if num := genesis.Config.IstanbulBlock; num != nil {
		spec.Params.IstanbulForkBlock = (*hexutil.Big)(num)
	}

	spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
	spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
	spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
@ -157,15 +171,32 @@ func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSp
		Linear: &alethGenesisSpecLinearPricing{Base: 15, Word: 3}})
	if genesis.Config.ByzantiumBlock != nil {
		spec.setPrecompile(5, &alethGenesisSpecBuiltin{Name: "modexp",
			StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
			StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock)})
		spec.setPrecompile(6, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_add",
			StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
			StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
			Linear: &alethGenesisSpecLinearPricing{Base: 500}})
		spec.setPrecompile(7, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_mul",
			StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
			StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
			Linear: &alethGenesisSpecLinearPricing{Base: 40000}})
		spec.setPrecompile(8, &alethGenesisSpecBuiltin{Name: "alt_bn128_pairing_product",
			StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
			StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock)})
	}
	if genesis.Config.IstanbulBlock != nil {
		if genesis.Config.ByzantiumBlock == nil {
			return nil, errors.New("invalid genesis, istanbul fork is enabled while byzantium is not")
		}
		spec.setPrecompile(6, &alethGenesisSpecBuiltin{
			Name:          "alt_bn128_G1_add",
			StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
		}) // Aleth hardcoded the gas policy
		spec.setPrecompile(7, &alethGenesisSpecBuiltin{
			Name:          "alt_bn128_G1_mul",
			StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock),
		}) // Aleth hardcoded the gas policy
		spec.setPrecompile(9, &alethGenesisSpecBuiltin{
			Name:          "blake2_compression",
			StartingBlock: (*hexutil.Big)(genesis.Config.IstanbulBlock),
		})
	}
	return spec, nil
}
@ -196,14 +227,6 @@ func (spec *alethGenesisSpec) setAccount(address common.Address, account core.Ge

}

func (spec *alethGenesisSpec) setByzantium(num *big.Int) {
	spec.Params.ByzantiumForkBlock = hexutil.Uint64(num.Uint64())
}

func (spec *alethGenesisSpec) setConstantinople(num *big.Int) {
	spec.Params.ConstantinopleForkBlock = hexutil.Uint64(num.Uint64())
}

// parityChainSpec is the chain specification format used by Parity.
type parityChainSpec struct {
	Name string `json:"name"`
@ -223,29 +246,33 @@ type parityChainSpec struct {
	} `json:"engine"`

	Params struct {
		AccountStartNonce        hexutil.Uint64       `json:"accountStartNonce"`
		MaximumExtraDataSize     hexutil.Uint64       `json:"maximumExtraDataSize"`
		MinGasLimit              hexutil.Uint64       `json:"minGasLimit"`
		GasLimitBoundDivisor     math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
		NetworkID                hexutil.Uint64       `json:"networkID"`
		ChainID                  hexutil.Uint64       `json:"chainID"`
		MaxCodeSize              hexutil.Uint64       `json:"maxCodeSize"`
		MaxCodeSizeTransition    hexutil.Uint64       `json:"maxCodeSizeTransition"`
		EIP98Transition          hexutil.Uint64       `json:"eip98Transition"`
		EIP150Transition         hexutil.Uint64       `json:"eip150Transition"`
		EIP160Transition         hexutil.Uint64       `json:"eip160Transition"`
		EIP161abcTransition      hexutil.Uint64       `json:"eip161abcTransition"`
		EIP161dTransition        hexutil.Uint64       `json:"eip161dTransition"`
		EIP155Transition         hexutil.Uint64       `json:"eip155Transition"`
		EIP140Transition         hexutil.Uint64       `json:"eip140Transition"`
		EIP211Transition         hexutil.Uint64       `json:"eip211Transition"`
		EIP214Transition         hexutil.Uint64       `json:"eip214Transition"`
		EIP658Transition         hexutil.Uint64       `json:"eip658Transition"`
		EIP145Transition         hexutil.Uint64       `json:"eip145Transition"`
		EIP1014Transition        hexutil.Uint64       `json:"eip1014Transition"`
		EIP1052Transition        hexutil.Uint64       `json:"eip1052Transition"`
		EIP1283Transition        hexutil.Uint64       `json:"eip1283Transition"`
		EIP1283DisableTransition hexutil.Uint64       `json:"eip1283DisableTransition"`
		AccountStartNonce         hexutil.Uint64       `json:"accountStartNonce"`
		MaximumExtraDataSize      hexutil.Uint64       `json:"maximumExtraDataSize"`
		MinGasLimit               hexutil.Uint64       `json:"minGasLimit"`
		GasLimitBoundDivisor      math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
		NetworkID                 hexutil.Uint64       `json:"networkID"`
		ChainID                   hexutil.Uint64       `json:"chainID"`
		MaxCodeSize               hexutil.Uint64       `json:"maxCodeSize"`
		MaxCodeSizeTransition     hexutil.Uint64       `json:"maxCodeSizeTransition"`
		EIP98Transition           hexutil.Uint64       `json:"eip98Transition"`
		EIP150Transition          hexutil.Uint64       `json:"eip150Transition"`
		EIP160Transition          hexutil.Uint64       `json:"eip160Transition"`
		EIP161abcTransition       hexutil.Uint64       `json:"eip161abcTransition"`
		EIP161dTransition         hexutil.Uint64       `json:"eip161dTransition"`
		EIP155Transition          hexutil.Uint64       `json:"eip155Transition"`
		EIP140Transition          hexutil.Uint64       `json:"eip140Transition"`
		EIP211Transition          hexutil.Uint64       `json:"eip211Transition"`
		EIP214Transition          hexutil.Uint64       `json:"eip214Transition"`
		EIP658Transition          hexutil.Uint64       `json:"eip658Transition"`
		EIP145Transition          hexutil.Uint64       `json:"eip145Transition"`
		EIP1014Transition         hexutil.Uint64       `json:"eip1014Transition"`
		EIP1052Transition         hexutil.Uint64       `json:"eip1052Transition"`
		EIP1283Transition         hexutil.Uint64       `json:"eip1283Transition"`
		EIP1283DisableTransition  hexutil.Uint64       `json:"eip1283DisableTransition"`
		EIP1283ReenableTransition hexutil.Uint64       `json:"eip1283ReenableTransition"`
		EIP1344Transition         hexutil.Uint64       `json:"eip1344Transition"`
		EIP1884Transition         hexutil.Uint64       `json:"eip1884Transition"`
		EIP2028Transition         hexutil.Uint64       `json:"eip2028Transition"`
	} `json:"params"`

	Genesis struct {
@ -278,17 +305,22 @@ type parityChainSpecAccount struct {

// parityChainSpecBuiltin is the precompiled contract definition.
type parityChainSpecBuiltin struct {
	Name       string                  `json:"name,omitempty"`
	ActivateAt math2.HexOrDecimal64    `json:"activate_at,omitempty"`
	Pricing    *parityChainSpecPricing `json:"pricing,omitempty"`
	Name              string                  `json:"name"`                         // Each builtin should have its own name
	Pricing           *parityChainSpecPricing `json:"pricing"`                      // Each builtin should have its own price strategy
	ActivateAt        *hexutil.Big            `json:"activate_at,omitempty"`        // ActivateAt can't be omitted if empty, default means no fork
	EIP1108Transition *hexutil.Big            `json:"eip1108_transition,omitempty"` // EIP1108Transition can't be omitted if empty, default means no fork
}

// parityChainSpecPricing represents the different pricing models that builtin
// contracts might advertise using.
type parityChainSpecPricing struct {
	Linear       *parityChainSpecLinearPricing       `json:"linear,omitempty"`
	ModExp       *parityChainSpecModExpPricing       `json:"modexp,omitempty"`
	AltBnPairing *parityChainSpecAltBnPairingPricing `json:"alt_bn128_pairing,omitempty"`
	Linear              *parityChainSpecLinearPricing              `json:"linear,omitempty"`
	ModExp              *parityChainSpecModExpPricing              `json:"modexp,omitempty"`
	AltBnPairing        *parityChainSpecAltBnPairingPricing        `json:"alt_bn128_pairing,omitempty"`
	AltBnConstOperation *parityChainSpecAltBnConstOperationPricing `json:"alt_bn128_const_operations,omitempty"`

	// Blake2F is the price per round of Blake2 compression
	Blake2F *parityChainSpecBlakePricing `json:"blake2_f,omitempty"`
}

type parityChainSpecLinearPricing struct {
@ -300,9 +332,20 @@ type parityChainSpecModExpPricing struct {
	Divisor uint64 `json:"divisor"`
}

type parityChainSpecAltBnConstOperationPricing struct {
	Price                  uint64 `json:"price"`
	EIP1108TransitionPrice uint64 `json:"eip1108_transition_price,omitempty"` // Before Istanbul fork, this field is nil
}

type parityChainSpecAltBnPairingPricing struct {
	Base uint64 `json:"base"`
	Pair uint64 `json:"pair"`
	Base                  uint64 `json:"base"`
	Pair                  uint64 `json:"pair"`
	EIP1108TransitionBase uint64 `json:"eip1108_transition_base,omitempty"` // Before Istanbul fork, this field is nil
	EIP1108TransitionPair uint64 `json:"eip1108_transition_pair,omitempty"` // Before Istanbul fork, this field is nil
}

type parityChainSpecBlakePricing struct {
	GasPerRound uint64 `json:"gas_per_round"`
}

// newParityChainSpec converts a go-ethereum genesis block into a Parity specific
@ -352,7 +395,10 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
	if num := genesis.Config.PetersburgBlock; num != nil {
		spec.setConstantinopleFix(num)
	}
	// Istanbul
	if num := genesis.Config.IstanbulBlock; num != nil {
		spec.setIstanbul(num)
	}
	spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
	spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
	spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
@ -398,18 +444,34 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
		Name: "identity", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 15, Word: 3}},
	})
	if genesis.Config.ByzantiumBlock != nil {
		blnum := math2.HexOrDecimal64(genesis.Config.ByzantiumBlock.Uint64())
		spec.setPrecompile(5, &parityChainSpecBuiltin{
			Name: "modexp", ActivateAt: blnum, Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
			Name: "modexp", ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock), Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
		})
		spec.setPrecompile(6, &parityChainSpecBuiltin{
			Name: "alt_bn128_add", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
			Name: "alt_bn128_add", ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock), Pricing: &parityChainSpecPricing{AltBnConstOperation: &parityChainSpecAltBnConstOperationPricing{Price: 500}},
		})
		spec.setPrecompile(7, &parityChainSpecBuiltin{
			Name: "alt_bn128_mul", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
			Name: "alt_bn128_mul", ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock), Pricing: &parityChainSpecPricing{AltBnConstOperation: &parityChainSpecAltBnConstOperationPricing{Price: 40000}},
		})
		spec.setPrecompile(8, &parityChainSpecBuiltin{
			Name: "alt_bn128_pairing", ActivateAt: blnum, Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
			Name: "alt_bn128_pairing", ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock), Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
		})
	}
	if genesis.Config.IstanbulBlock != nil {
		if genesis.Config.ByzantiumBlock == nil {
			return nil, errors.New("invalid genesis, istanbul fork is enabled while byzantium is not")
		}
		spec.setPrecompile(6, &parityChainSpecBuiltin{
			Name: "alt_bn128_add", ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock), EIP1108Transition: (*hexutil.Big)(genesis.Config.IstanbulBlock), Pricing: &parityChainSpecPricing{AltBnConstOperation: &parityChainSpecAltBnConstOperationPricing{Price: 500, EIP1108TransitionPrice: 150}},
		})
		spec.setPrecompile(7, &parityChainSpecBuiltin{
			Name: "alt_bn128_mul", ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock), EIP1108Transition: (*hexutil.Big)(genesis.Config.IstanbulBlock), Pricing: &parityChainSpecPricing{AltBnConstOperation: &parityChainSpecAltBnConstOperationPricing{Price: 40000, EIP1108TransitionPrice: 6000}},
		})
		spec.setPrecompile(8, &parityChainSpecBuiltin{
			Name: "alt_bn128_pairing", ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock), EIP1108Transition: (*hexutil.Big)(genesis.Config.IstanbulBlock), Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000, EIP1108TransitionBase: 45000, EIP1108TransitionPair: 34000}},
		})
		spec.setPrecompile(9, &parityChainSpecBuiltin{
			Name: "blake2_f", ActivateAt: (*hexutil.Big)(genesis.Config.IstanbulBlock), Pricing: &parityChainSpecPricing{Blake2F: &parityChainSpecBlakePricing{GasPerRound: 1}},
		})
	}
	return spec, nil
@ -451,6 +513,15 @@ func (spec *parityChainSpec) setConstantinopleFix(num *big.Int) {
	spec.Params.EIP1283DisableTransition = hexutil.Uint64(num.Uint64())
}

func (spec *parityChainSpec) setIstanbul(num *big.Int) {
	// spec.Params.EIP152Transition = hexutil.Uint64(num.Uint64())
	// spec.Params.EIP1108Transition = hexutil.Uint64(num.Uint64())
	spec.Params.EIP1344Transition = hexutil.Uint64(num.Uint64())
	spec.Params.EIP1884Transition = hexutil.Uint64(num.Uint64())
	spec.Params.EIP2028Transition = hexutil.Uint64(num.Uint64())
	spec.Params.EIP1283ReenableTransition = hexutil.Uint64(num.Uint64())
}

// pyEthereumGenesisSpec represents the genesis specification format used by the
// Python Ethereum implementation.
type pyEthereumGenesisSpec struct {
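[Editor's note] The recurring type change in these hunks (hexutil.Uint64 to *hexutil.Big with omitempty) is what keeps unscheduled forks out of the emitted JSON: a nil pointer is dropped by omitempty, while a zero-valued integer would wrongly serialize as block 0x0. In isolation (made-up struct for illustration):

type forks struct {
	Istanbul  *hexutil.Big   `json:"istanbulForkBlock,omitempty"` // nil -> omitted
	Byzantium hexutil.Uint64 `json:"byzantiumForkBlock"`          // zero -> "0x0"
}
out, _ := json.Marshal(forks{})
// string(out) == `{"byzantiumForkBlock":"0x0"}`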
@ -76,7 +76,7 @@ func TestParitySturebyConverter(t *testing.T) {
	if err := json.Unmarshal(blob, &genesis); err != nil {
		t.Fatalf("failed parsing genesis: %v", err)
	}
	spec, err := newParityChainSpec("Stureby", &genesis, []string{})
	spec, err := newParityChainSpec("stureby", &genesis, []string{})
	if err != nil {
		t.Fatalf("failed creating chainspec: %v", err)
	}
177
cmd/puppeth/testdata/stureby_aleth.json
vendored
@ -1,112 +1,113 @@
{
	"sealEngine":"Ethash",
	"params":{
		"accountStartNonce":"0x00",
		"maximumExtraDataSize":"0x20",
		"homesteadForkBlock":"0x2710",
		"daoHardforkBlock":"0x00",
		"EIP150ForkBlock":"0x3a98",
		"EIP158ForkBlock":"0x59d8",
		"byzantiumForkBlock":"0x7530",
		"constantinopleForkBlock":"0x9c40",
		"minGasLimit":"0x1388",
		"maxGasLimit":"0x7fffffffffffffff",
		"tieBreakingGas":false,
		"gasLimitBoundDivisor":"0x0400",
		"minimumDifficulty":"0x20000",
		"difficultyBoundDivisor":"0x0800",
		"durationLimit":"0x0d",
		"blockReward":"0x4563918244F40000",
		"networkID":"0x4cb2e",
		"chainID":"0x4cb2e",
		"allowFutureBlocks":false
	"sealEngine": "Ethash",
	"params": {
		"accountStartNonce": "0x0",
		"maximumExtraDataSize": "0x20",
		"homesteadForkBlock": "0x2710",
		"daoHardforkBlock": "0x0",
		"EIP150ForkBlock": "0x3a98",
		"EIP158ForkBlock": "0x59d8",
		"byzantiumForkBlock": "0x7530",
		"constantinopleForkBlock": "0x9c40",
		"constantinopleFixForkBlock": "0x9c40",
		"istanbulForkBlock": "0xc350",
		"minGasLimit": "0x1388",
		"maxGasLimit": "0x7fffffffffffffff",
		"tieBreakingGas": false,
		"gasLimitBoundDivisor": "0x400",
		"minimumDifficulty": "0x20000",
		"difficultyBoundDivisor": "0x800",
		"durationLimit": "0xd",
		"blockReward": "0x4563918244f40000",
		"networkID": "0x4cb2e",
		"chainID": "0x4cb2e",
		"allowFutureBlocks": false
	},
	"genesis":{
		"nonce":"0x0000000000000000",
		"difficulty":"0x20000",
		"mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
		"author":"0x0000000000000000000000000000000000000000",
		"timestamp":"0x59a4e76d",
		"parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
		"extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
		"gasLimit":"0x47b760"
	"genesis": {
		"nonce": "0x0000000000000000",
		"difficulty": "0x20000",
		"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
		"author": "0x0000000000000000000000000000000000000000",
		"timestamp": "0x59a4e76d",
		"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
		"extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
		"gasLimit": "0x47b760"
	},
	"accounts":{
		"0000000000000000000000000000000000000001":{
			"balance":"1",
			"precompiled":{
				"name":"ecrecover",
				"linear":{
					"base":3000,
					"word":0
	"accounts": {
		"0000000000000000000000000000000000000001": {
			"balance": "0x1",
			"precompiled": {
				"name": "ecrecover",
				"linear": {
					"base": 3000,
					"word": 0
				}
			}
		},
		"0000000000000000000000000000000000000002":{
			"balance":"1",
			"precompiled":{
				"name":"sha256",
				"linear":{
					"base":60,
					"word":12
		"0000000000000000000000000000000000000002": {
			"balance": "0x1",
			"precompiled": {
				"name": "sha256",
				"linear": {
					"base": 60,
					"word": 12
				}
			}
		},
		"0000000000000000000000000000000000000003":{
			"balance":"1",
			"precompiled":{
				"name":"ripemd160",
				"linear":{
					"base":600,
					"word":120
		"0000000000000000000000000000000000000003": {
			"balance": "0x1",
			"precompiled": {
				"name": "ripemd160",
				"linear": {
					"base": 600,
					"word": 120
				}
			}
		},
		"0000000000000000000000000000000000000004":{
			"balance":"1",
			"precompiled":{
				"name":"identity",
				"linear":{
					"base":15,
					"word":3
		"0000000000000000000000000000000000000004": {
			"balance": "0x1",
			"precompiled": {
				"name": "identity",
				"linear": {
					"base": 15,
					"word": 3
				}
			}
		},
		"0000000000000000000000000000000000000005":{
			"balance":"1",
			"precompiled":{
				"name":"modexp",
				"startingBlock":"0x7530"
		"0000000000000000000000000000000000000005": {
			"balance": "0x1",
			"precompiled": {
				"name": "modexp",
				"startingBlock": "0x7530"
			}
		},
		"0000000000000000000000000000000000000006":{
			"balance":"1",
			"precompiled":{
				"name":"alt_bn128_G1_add",
				"startingBlock":"0x7530",
				"linear":{
					"base":500,
					"word":0
				}
		"0000000000000000000000000000000000000006": {
			"balance": "0x1",
			"precompiled": {
				"name": "alt_bn128_G1_add",
				"startingBlock": "0x7530"
			}
		},
		"0000000000000000000000000000000000000007":{
			"balance":"1",
			"precompiled":{
				"name":"alt_bn128_G1_mul",
				"startingBlock":"0x7530",
				"linear":{
					"base":40000,
					"word":0
				}
		"0000000000000000000000000000000000000007": {
			"balance": "0x1",
			"precompiled": {
|
||||
"name": "alt_bn128_G1_mul",
|
||||
"startingBlock": "0x7530"
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000008":{
|
||||
"balance":"1",
|
||||
"precompiled":{
|
||||
"name":"alt_bn128_pairing_product",
|
||||
"startingBlock":"0x7530"
|
||||
"0000000000000000000000000000000000000008": {
|
||||
"balance": "0x1",
|
||||
"precompiled": {
|
||||
"name": "alt_bn128_pairing_product",
|
||||
"startingBlock": "0x7530"
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000009": {
|
||||
"balance": "0x1",
|
||||
"precompiled": {
|
||||
"name": "blake2_compression",
|
||||
"startingBlock": "0xc350"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|

cmd/puppeth/testdata/stureby_geth.json
@ -1,6 +1,5 @@
{
"config": {
"ethash":{},
"chainId": 314158,
"homesteadBlock": 10000,
"eip150Block": 15000,
@ -8,11 +7,13 @@
"eip155Block": 23000,
"eip158Block": 23000,
"byzantiumBlock": 30000,
"constantinopleBlock": 40000
"constantinopleBlock": 40000,
"petersburgBlock": 40000,
"istanbulBlock": 50000,
"ethash": {}
},
"nonce": "0x0",
"timestamp": "0x59a4e76d",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit": "0x47b760",
"difficulty": "0x20000",
@ -20,28 +21,34 @@
"coinbase": "0x0000000000000000000000000000000000000000",
"alloc": {
"0000000000000000000000000000000000000001": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000002": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000003": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000004": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000005": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000006": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000007": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000008": {
"balance": "0x01"
"balance": "0x1"
},
"0000000000000000000000000000000000000009": {
"balance": "0x1"
}
}
}
},
"number": "0x0",
"gasUsed": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}

cmd/puppeth/testdata/stureby_parity.json
@ -1,181 +1,186 @@
{
"name":"Stureby",
"dataDir":"stureby",
"engine":{
"Ethash":{
"params":{
"minimumDifficulty":"0x20000",
"difficultyBoundDivisor":"0x800",
"durationLimit":"0xd",
"blockReward":{
"0x0":"0x4563918244f40000",
"0x7530":"0x29a2241af62c0000",
"0x9c40":"0x1bc16d674ec80000"
"name": "stureby",
"dataDir": "stureby",
"engine": {
"Ethash": {
"params": {
"minimumDifficulty": "0x20000",
"difficultyBoundDivisor": "0x800",
"durationLimit": "0xd",
"blockReward": {
"0x0": "0x4563918244f40000",
"0x7530": "0x29a2241af62c0000",
"0x9c40": "0x1bc16d674ec80000"
},
"homesteadTransition":"0x2710",
"eip100bTransition":"0x7530",
"difficultyBombDelays":{
"0x7530":"0x2dc6c0",
"0x9c40":"0x1e8480"
}
"difficultyBombDelays": {
"0x7530": "0x2dc6c0",
"0x9c40": "0x1e8480"
},
"homesteadTransition": "0x2710",
"eip100bTransition": "0x7530"
}
}
},
"params":{
"accountStartNonce":"0x0",
"maximumExtraDataSize":"0x20",
"gasLimitBoundDivisor":"0x400",
"minGasLimit":"0x1388",
"networkID":"0x4cb2e",
"chainID":"0x4cb2e",
"maxCodeSize":"0x6000",
"maxCodeSizeTransition":"0x0",
"params": {
"accountStartNonce": "0x0",
"maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388",
"gasLimitBoundDivisor": "0x400",
"networkID": "0x4cb2e",
"chainID": "0x4cb2e",
"maxCodeSize": "0x6000",
"maxCodeSizeTransition": "0x0",
"eip98Transition": "0x7fffffffffffffff",
"eip150Transition":"0x3a98",
"eip160Transition":"0x59d8",
"eip161abcTransition":"0x59d8",
"eip161dTransition":"0x59d8",
"eip155Transition":"0x59d8",
"eip140Transition":"0x7530",
"eip211Transition":"0x7530",
"eip214Transition":"0x7530",
"eip658Transition":"0x7530",
"eip145Transition":"0x9c40",
"eip1014Transition":"0x9c40",
"eip1052Transition":"0x9c40",
"eip1283Transition":"0x9c40"
"eip150Transition": "0x3a98",
"eip160Transition": "0x59d8",
"eip161abcTransition": "0x59d8",
"eip161dTransition": "0x59d8",
"eip155Transition": "0x59d8",
"eip140Transition": "0x7530",
"eip211Transition": "0x7530",
"eip214Transition": "0x7530",
"eip658Transition": "0x7530",
"eip145Transition": "0x9c40",
"eip1014Transition": "0x9c40",
"eip1052Transition": "0x9c40",
"eip1283Transition": "0x9c40",
"eip1283DisableTransition": "0x9c40",
"eip1283ReenableTransition": "0xc350",
"eip1344Transition": "0xc350",
"eip1884Transition": "0xc350",
"eip2028Transition": "0xc350"
},
"genesis":{
"seal":{
"ethereum":{
"nonce":"0x0000000000000000",
"mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000"
"genesis": {
"seal": {
"ethereum": {
"nonce": "0x0000000000000000",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
},
"difficulty":"0x20000",
"author":"0x0000000000000000000000000000000000000000",
"timestamp":"0x59a4e76d",
"parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit":"0x47b760"
"difficulty": "0x20000",
"author": "0x0000000000000000000000000000000000000000",
"timestamp": "0x59a4e76d",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit": "0x47b760"
},
"nodes":[
"enode://dfa7aca3f5b635fbfe7d0b20575f25e40d9e27b4bfbb3cf74364a42023ad9f25c1a4383bcc8cced86ee511a7d03415345a4df05be37f1dff040e4c780699f1c0@168.61.153.255:31303",
"enode://ef441b20dd70aeabf0eac35c3b8a2854e5ce04db0e30be9152ea9fd129359dcbb3f803993303ff5781c755dfd7223f3fe43505f583cccb740949407677412ba9@40.74.91.252:31303",
"enode://953b5ea1c8987cf46008232a0160324fd00d41320ecf00e23af86ec8f5396b19eb57ddab37c78141be56f62e9077de4f4dfa0747fa768ed8c8531bbfb1046237@40.70.214.166:31303",
"enode://276e613dd4b277a66591e565711e6c8bb107f0905248a9f8f8228c1a87992e156e5114bb9937c02824a9d9d25f76340442cf86e2028bf5293cae19904fb2b98e@35.178.251.52:30303",
"enode://064c820d41e52ed7d426ac64b60506c2998235bedc7e67cb497c6faf7bb4fc54fe56fc82d0add3180b747c0c4f40a1108a6f84d7d0629ed606d504528e61cc57@3.8.5.3:30303",
"enode://90069fdabcc5e684fa5d59430bebbb12755d9362dfe5006a1485b13d71a78a3812d36e74dd7d88e50b51add01e097ea80f16263aeaa4f0230db6c79e2a97e7ca@217.29.191.142:30303",
"enode://0aac74b7fd28726275e466acb5e03bc88a95927e9951eb66b5efb239b2f798ada0690853b2f2823fe4efa408f0f3d4dd258430bc952a5ff70677b8625b3e3b14@40.115.33.57:40404",
"enode://0b96415a10f835106d83e090a0528eed5e7887e5c802a6d084e9f1993a9d0fc713781e6e4101f6365e9b91259712f291acc0a9e6e667e22023050d602c36fbe2@40.115.33.57:40414"
],
"accounts":{
"0000000000000000000000000000000000000001":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"ecrecover",
"pricing":{
"linear":{
"base":3000,
"word":0
"nodes": [],
"accounts": {
"0000000000000000000000000000000000000001": {
"balance": "0x1",
"builtin": {
"name": "ecrecover",
"pricing": {
"linear": {
"base": 3000,
"word": 0
}
}
}
},
"0000000000000000000000000000000000000002":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"sha256",
"pricing":{
"linear":{
"base":60,
"word":12
"0000000000000000000000000000000000000002": {
"balance": "0x1",
"builtin": {
"name": "sha256",
"pricing": {
"linear": {
"base": 60,
"word": 12
}
}
}
},
"0000000000000000000000000000000000000003":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"ripemd160",
"pricing":{
"linear":{
"base":600,
"word":120
"0000000000000000000000000000000000000003": {
"balance": "0x1",
"builtin": {
"name": "ripemd160",
"pricing": {
"linear": {
"base": 600,
"word": 120
}
}
}
},
"0000000000000000000000000000000000000004":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"identity",
"pricing":{
"linear":{
"base":15,
"word":3
"0000000000000000000000000000000000000004": {
"balance": "0x1",
"builtin": {
"name": "identity",
"pricing": {
"linear": {
"base": 15,
"word": 3
}
}
}
},
"0000000000000000000000000000000000000005":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"modexp",
"activate_at":"0x7530",
"pricing":{
"modexp":{
"divisor":20
"0000000000000000000000000000000000000005": {
"balance": "0x1",
"builtin": {
"name": "modexp",
"pricing": {
"modexp": {
"divisor": 20
}
}
},
"activate_at": "0x7530"
}
},
"0000000000000000000000000000000000000006":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"alt_bn128_add",
"activate_at":"0x7530",
"pricing":{
"linear":{
"base":500,
"word":0
"0000000000000000000000000000000000000006": {
"balance": "0x1",
"builtin": {
"name": "alt_bn128_add",
"pricing": {
"alt_bn128_const_operations": {
"price": 500,
"eip1108_transition_price": 150
}
}
},
"activate_at": "0x7530",
"eip1108_transition": "0xc350"
}
},
"0000000000000000000000000000000000000007":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"alt_bn128_mul",
"activate_at":"0x7530",
"pricing":{
"linear":{
"base":40000,
"word":0
"0000000000000000000000000000000000000007": {
"balance": "0x1",
"builtin": {
"name": "alt_bn128_mul",
"pricing": {
"alt_bn128_const_operations": {
"price": 40000,
"eip1108_transition_price": 6000
}
}
},
"activate_at": "0x7530",
"eip1108_transition": "0xc350"
}
},
"0000000000000000000000000000000000000008":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"alt_bn128_pairing",
"activate_at":"0x7530",
"pricing":{
"alt_bn128_pairing":{
"base":100000,
"pair":80000
"0000000000000000000000000000000000000008": {
"balance": "0x1",
"builtin": {
"name": "alt_bn128_pairing",
"pricing": {
"alt_bn128_pairing": {
"base": 100000,
"pair": 80000,
"eip1108_transition_base": 45000,
"eip1108_transition_pair": 34000
}
}
},
"activate_at": "0x7530",
"eip1108_transition": "0xc350"
}
},
"0000000000000000000000000000000000000009": {
"balance": "0x1",
"builtin": {
"name": "blake2_f",
"pricing": {
"blake2_f": {
"gas_per_round": 1
}
},
"activate_at": "0xc350"
}
}
}
}
@ -51,6 +51,7 @@ func (w *wizard) makeGenesis() {
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(0),
},
}
// Figure out which consensus engine to choose
@ -230,6 +231,10 @@ func (w *wizard) manageGenesis() {
fmt.Printf("Which block should Petersburg come into effect? (default = %v)\n", w.conf.Genesis.Config.PetersburgBlock)
w.conf.Genesis.Config.PetersburgBlock = w.readDefaultBigInt(w.conf.Genesis.Config.PetersburgBlock)

fmt.Println()
fmt.Printf("Which block should Istanbul come into effect? (default = %v)\n", w.conf.Genesis.Config.IstanbulBlock)
w.conf.Genesis.Config.IstanbulBlock = w.readDefaultBigInt(w.conf.Genesis.Config.IstanbulBlock)

out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", "  ")
fmt.Printf("Chain configuration updated:\n\n%s\n", out)

@ -268,7 +273,7 @@ func (w *wizard) manageGenesis() {
} else {
saveGenesis(folder, w.network, "parity", spec)
}
// Export the genesis spec used by Harmony (formerly EthereumJ
// Export the genesis spec used by Harmony (formerly EthereumJ)
saveGenesis(folder, w.network, "harmony", w.conf.Genesis)

case "3":
@ -291,7 +296,7 @@ func (w *wizard) manageGenesis() {
func saveGenesis(folder, network, client string, spec interface{}) {
path := filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client))

out, _ := json.Marshal(spec)
out, _ := json.MarshalIndent(spec, "", "  ")
if err := ioutil.WriteFile(path, out, 0644); err != nil {
log.Error("Failed to save genesis file", "client", client, "err", err)
return
@ -32,6 +32,8 @@ import (
"text/template"
"time"

cli "gopkg.in/urfave/cli.v1"

"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
@ -62,9 +64,10 @@ import (
"github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"

pcsclite "github.com/gballet/go-libpcsclite"
cli "gopkg.in/urfave/cli.v1"
)

var (
@ -755,6 +758,27 @@ var (
Usage: "External EVM configuration (default = built-in interpreter)",
Value: "",
}

StateDiffFlag = cli.BoolFlag{
Name: "statediff",
Usage: "Enables the processing of state diffs between each block",
}
StateDiffPathsAndProofs = cli.BoolFlag{
Name: "statediff.pathsandproofs",
Usage: "Set to true to generate paths and proof sets for diffed state and storage trie leaf nodes",
}
StateDiffIntermediateNodes = cli.BoolFlag{
Name: "statediff.intermediatenodes",
Usage: "Set to include intermediate (branch and extension) nodes; default (false) processes leaf nodes only",
}
StateDiffStreamBlock = cli.BoolFlag{
Name: "statediff.streamblock",
Usage: "Set to true to stream the block data alongside state diff data in the same subscription payload",
}
StateDiffWatchedAddresses = cli.StringSliceFlag{
Name: "statediff.watchedaddresses",
Usage: "If provided, state diffing process is restricted to these addresses",
}
)
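
These flag definitions only take effect once they are appended to geth's command-line app. The snippet below is a minimal sketch of that wiring; the exact registration site in cmd/geth is not shown in this diff, so treat the placement as an assumption:

    // Sketch: appending the new statediff flags to the geth CLI app.
    // `app` is the cli.App defined in cmd/geth; whether this PR registers
    // the flags there or somewhere else is not visible in this hunk.
    app.Flags = append(app.Flags,
        utils.StateDiffFlag,
        utils.StateDiffPathsAndProofs,
        utils.StateDiffIntermediateNodes,
        utils.StateDiffStreamBlock,
        utils.StateDiffWatchedAddresses,
    )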

// MakeDataDir retrieves the currently requested data directory, terminating
@ -964,6 +988,9 @@ func setWS(ctx *cli.Context, cfg *node.Config) {
if ctx.GlobalIsSet(WSApiFlag.Name) {
cfg.WSModules = splitAndTrim(ctx.GlobalString(WSApiFlag.Name))
}
if ctx.GlobalBool(StateDiffFlag.Name) {
cfg.WSModules = append(cfg.WSModules, "statediff")
}
}

// setIPC creates an IPC path configuration from the set command line flags,
@ -1453,9 +1480,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
}
cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive"
cfg.NoPrefetch = ctx.GlobalBool(CacheNoPrefetchFlag.Name)

if ctx.GlobalIsSet(GCModeFlag.Name) {
cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive"
}
if ctx.GlobalIsSet(CacheNoPrefetchFlag.Name) {
cfg.NoPrefetch = ctx.GlobalBool(CacheNoPrefetchFlag.Name)
}
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
cfg.TrieCleanCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
}
@ -1610,6 +1640,25 @@ func RegisterGraphQLService(stack *node.Node, endpoint string, cors, vhosts []st
}
}

// RegisterStateDiffService configures and registers a service to stream state diff data over RPC
func RegisterStateDiffService(stack *node.Node, ctx *cli.Context) {
config := statediff.Config{
PathsAndProofs: ctx.GlobalBool(StateDiffPathsAndProofs.Name),
IntermediateNodes: ctx.GlobalBool(StateDiffIntermediateNodes.Name),
StreamBlock: ctx.GlobalBool(StateDiffStreamBlock.Name),
WatchedAddresses: ctx.GlobalStringSlice(StateDiffWatchedAddresses.Name),
}
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
var ethServ *eth.Ethereum
ctx.Service(&ethServ)
chainDb := ethServ.ChainDb()
blockChain := ethServ.BlockChain()
return statediff.NewStateDiffService(chainDb, blockChain, config)
}); err != nil {
Fatalf("Failed to register State Diff Service: %v", err)
}
}
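
Once the service is registered and setWS has appended "statediff" to the websocket modules, a client can consume the stream over the node's ws endpoint. A minimal subscription sketch, assuming the service exposes a subscription method named "stream" in the "statediff" namespace; the method name and payload shape are assumptions based on this PR, not a documented API:

    package main

    import (
        "context"
        "encoding/json"
        "log"

        "github.com/ethereum/go-ethereum/rpc"
    )

    func main() {
        // Connect to a node started with --ws --statediff.
        client, err := rpc.Dial("ws://127.0.0.1:8546")
        if err != nil {
            log.Fatal(err)
        }
        // Receive raw JSON payloads; the concrete payload type lives in the
        // new statediff package and its exact shape is assumed here.
        payloads := make(chan json.RawMessage)
        sub, err := client.Subscribe(context.Background(), "statediff", payloads, "stream")
        if err != nil {
            log.Fatal(err)
        }
        for {
            select {
            case payload := <-payloads:
                log.Printf("state diff payload: %s", payload)
            case err := <-sub.Err():
                log.Fatal(err)
            }
        }
    }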

func SetupMetrics(ctx *cli.Context) {
if metrics.Enabled {
log.Info("Enabling metrics collection")

@ -134,3 +134,14 @@ func LeftPadBytes(slice []byte, l int) []byte {

return padded
}

// TrimLeftZeroes returns a subslice of s without leading zeroes
func TrimLeftZeroes(s []byte) []byte {
idx := 0
for ; idx < len(s); idx++ {
if s[idx] != 0 {
break
}
}
return s[idx:]
}
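
A quick usage sketch for the new helper, assuming it lands in the common package alongside LeftPadBytes as the hunk suggests. Note it does not copy: it returns a subslice starting at the first non-zero byte:

    // In a test or example:
    fmt.Printf("%x\n", common.TrimLeftZeroes([]byte{0x00, 0x00, 0x13, 0x37})) // 1337
    fmt.Printf("%x\n", common.TrimLeftZeroes([]byte{0x00, 0x00}))             // empty: every byte was zero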

@ -36,47 +36,39 @@ func (t AbsTime) Add(d time.Duration) AbsTime {
return t + AbsTime(d)
}

// Clock interface makes it possible to replace the monotonic system clock with
// The Clock interface makes it possible to replace the monotonic system clock with
// a simulated clock.
type Clock interface {
Now() AbsTime
Sleep(time.Duration)
After(time.Duration) <-chan time.Time
AfterFunc(d time.Duration, f func()) Event
AfterFunc(d time.Duration, f func()) Timer
}

// Event represents a cancellable event returned by AfterFunc
type Event interface {
Cancel() bool
// Timer represents a cancellable event returned by AfterFunc
type Timer interface {
Stop() bool
}

// System implements Clock using the system clock.
type System struct{}

// Now implements Clock.
// Now returns the current monotonic time.
func (System) Now() AbsTime {
return AbsTime(monotime.Now())
}

// Sleep implements Clock.
// Sleep blocks for the given duration.
func (System) Sleep(d time.Duration) {
time.Sleep(d)
}

// After implements Clock.
// After returns a channel which receives the current time after d has elapsed.
func (System) After(d time.Duration) <-chan time.Time {
return time.After(d)
}

// AfterFunc implements Clock.
func (System) AfterFunc(d time.Duration, f func()) Event {
return (*SystemEvent)(time.AfterFunc(d, f))
}

// SystemEvent implements Event using time.Timer.
type SystemEvent time.Timer

// Cancel implements Event.
func (e *SystemEvent) Cancel() bool {
return (*time.Timer)(e).Stop()
// AfterFunc runs f on a new goroutine after the duration has elapsed.
func (System) AfterFunc(d time.Duration, f func()) Timer {
return time.AfterFunc(d, f)
}
@ -32,22 +32,17 @@ import (
// the timeout using a channel or semaphore.
type Simulated struct {
now AbsTime
scheduled []event
scheduled []*simTimer
mu sync.RWMutex
cond *sync.Cond
lastId uint64
}

type event struct {
// simTimer implements Timer on the virtual clock.
type simTimer struct {
do func()
at AbsTime
id uint64
}

// SimulatedEvent implements Event for a virtual clock.
type SimulatedEvent struct {
at AbsTime
id uint64
s *Simulated
}

@ -75,6 +70,7 @@ func (s *Simulated) Run(d time.Duration) {
}
}

// ActiveTimers returns the number of timers that haven't fired.
func (s *Simulated) ActiveTimers() int {
s.mu.RLock()
defer s.mu.RUnlock()
@ -82,6 +78,7 @@ func (s *Simulated) ActiveTimers() int {
return len(s.scheduled)
}

// WaitForTimers waits until the clock has at least n scheduled timers.
func (s *Simulated) WaitForTimers(n int) {
s.mu.Lock()
defer s.mu.Unlock()
@ -92,7 +89,7 @@ func (s *Simulated) WaitForTimers(n int) {
}
}

// Now implements Clock.
// Now returns the current virtual time.
func (s *Simulated) Now() AbsTime {
s.mu.RLock()
defer s.mu.RUnlock()
@ -100,12 +97,13 @@ func (s *Simulated) Now() AbsTime {
return s.now
}

// Sleep implements Clock.
// Sleep blocks until the clock has advanced by d.
func (s *Simulated) Sleep(d time.Duration) {
<-s.After(d)
}

// After implements Clock.
// After returns a channel which receives the current time after the clock
// has advanced by d.
func (s *Simulated) After(d time.Duration) <-chan time.Time {
after := make(chan time.Time, 1)
s.AfterFunc(d, func() {
@ -114,8 +112,9 @@ func (s *Simulated) After(d time.Duration) <-chan time.Time {
return after
}

// AfterFunc implements Clock.
func (s *Simulated) AfterFunc(d time.Duration, do func()) Event {
// AfterFunc runs fn after the clock has advanced by d. Unlike with the system
// clock, fn runs on the goroutine that calls Run.
func (s *Simulated) AfterFunc(d time.Duration, fn func()) Timer {
s.mu.Lock()
defer s.mu.Unlock()
s.init()
@ -133,12 +132,27 @@ func (s *Simulated) AfterFunc(d time.Duration, do func()) Event {
l = m + 1
}
}
s.scheduled = append(s.scheduled, event{})
ev := &simTimer{do: fn, at: at, s: s}
s.scheduled = append(s.scheduled, nil)
copy(s.scheduled[l+1:], s.scheduled[l:ll])
e := event{do: do, at: at, id: id}
s.scheduled[l] = e
s.scheduled[l] = ev
s.cond.Broadcast()
return &SimulatedEvent{at: at, id: id, s: s}
return ev
}

func (ev *simTimer) Stop() bool {
s := ev.s
s.mu.Lock()
defer s.mu.Unlock()

for i := 0; i < len(s.scheduled); i++ {
if s.scheduled[i] == ev {
s.scheduled = append(s.scheduled[:i], s.scheduled[i+1:]...)
s.cond.Broadcast()
return true
}
}
return false
}

func (s *Simulated) init() {
@ -146,31 +160,3 @@ func (s *Simulated) init() {
s.cond = sync.NewCond(&s.mu)
}
}

// Cancel implements Event.
func (e *SimulatedEvent) Cancel() bool {
s := e.s
s.mu.Lock()
defer s.mu.Unlock()

l, h := 0, len(s.scheduled)
ll := h
for l != h {
m := (l + h) / 2
if e.id == s.scheduled[m].id {
l = m
break
}
if (e.at < s.scheduled[m].at) || ((e.at == s.scheduled[m].at) && (e.id < s.scheduled[m].id)) {
h = m
} else {
l = m + 1
}
}
if l >= ll || s.scheduled[l].id != e.id {
return false
}
copy(s.scheduled[l:ll-1], s.scheduled[l+1:])
s.scheduled = s.scheduled[:ll-1]
return true
}

common/mclock/simclock_test.go (new file)
@ -0,0 +1,115 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mclock

import (
"testing"
"time"
)

var _ Clock = System{}
var _ Clock = new(Simulated)

func TestSimulatedAfter(t *testing.T) {
const timeout = 30 * time.Minute
const adv = time.Minute

var (
c Simulated
end = c.Now().Add(timeout)
ch = c.After(timeout)
)
for c.Now() < end.Add(-adv) {
c.Run(adv)
select {
case <-ch:
t.Fatal("Timer fired early")
default:
}
}

c.Run(adv)
select {
case stamp := <-ch:
want := time.Time{}.Add(timeout)
if !stamp.Equal(want) {
t.Errorf("Wrong time sent on timer channel: got %v, want %v", stamp, want)
}
default:
t.Fatal("Timer didn't fire")
}
}

func TestSimulatedAfterFunc(t *testing.T) {
var c Simulated

called1 := false
timer1 := c.AfterFunc(100*time.Millisecond, func() { called1 = true })
if c.ActiveTimers() != 1 {
t.Fatalf("%d active timers, want one", c.ActiveTimers())
}
if fired := timer1.Stop(); !fired {
t.Fatal("Stop returned false even though timer didn't fire")
}
if c.ActiveTimers() != 0 {
t.Fatalf("%d active timers, want zero", c.ActiveTimers())
}
if called1 {
t.Fatal("timer 1 called")
}
if fired := timer1.Stop(); fired {
t.Fatal("Stop returned true after timer was already stopped")
}

called2 := false
timer2 := c.AfterFunc(100*time.Millisecond, func() { called2 = true })
c.Run(50 * time.Millisecond)
if called2 {
t.Fatal("timer 2 called")
}
c.Run(51 * time.Millisecond)
if !called2 {
t.Fatal("timer 2 not called")
}
if fired := timer2.Stop(); fired {
t.Fatal("Stop returned true after timer has fired")
}
}

func TestSimulatedSleep(t *testing.T) {
var (
c Simulated
timeout = 1 * time.Hour
done = make(chan AbsTime)
)
go func() {
c.Sleep(timeout)
done <- c.Now()
}()

c.WaitForTimers(1)
c.Run(2 * timeout)
select {
case stamp := <-done:
want := AbsTime(2 * timeout)
if stamp != want {
t.Errorf("Wrong time after sleep: got %v, want %v", stamp, want)
}
case <-time.After(5 * time.Second):
t.Fatal("Sleep didn't return in time")
}
}
@ -149,7 +149,7 @@ func (h *Hash) UnmarshalGraphQL(input interface{}) error {
var err error
switch input := input.(type) {
case string:
*h = HexToHash(input)
err = h.UnmarshalText([]byte(input))
default:
err = fmt.Errorf("Unexpected type for Bytes32: %v", input)
}
@ -288,7 +288,7 @@ func (a *Address) UnmarshalGraphQL(input interface{}) error {
var err error
switch input := input.(type) {
case string:
*a = HexToAddress(input)
err = a.UnmarshalText([]byte(input))
default:
err = fmt.Errorf("Unexpected type for Address: %v", input)
}
@ -311,7 +311,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type
if number == 0 {
return nil
}
// Ensure that the block's timestamp isn't too close to it's parent
// Ensure that the block's timestamp isn't too close to its parent
var parent *types.Header
if len(parents) > 0 {
parent = parents[len(parents)-1]
@ -522,7 +522,7 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro
// Set the correct difficulty
header.Difficulty = CalcDifficulty(snap, c.signer)

// Ensure the extra data has all it's components
// Ensure the extra data has all its components
if len(header.Extra) < extraVanity {
header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, extraVanity-len(header.Extra))...)
}

@ -31,7 +31,7 @@ var (
// to the current node.
ErrFutureBlock = errors.New("block in the future")

// ErrInvalidNumber is returned if a block's number doesn't equal it's parent's
// ErrInvalidNumber is returned if a block's number doesn't equal its parent's
// plus one.
ErrInvalidNumber = errors.New("invalid block number")
)

@ -86,7 +86,7 @@ func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.He
if ethash.config.PowMode == ModeFullFake {
return nil
}
// Short circuit if the header is known, or it's parent not
// Short circuit if the header is known, or its parent not
number := header.Number.Uint64()
if chain.GetHeader(header.Hash(), number) != nil {
return nil
@ -252,7 +252,7 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
if header.Time <= parent.Time {
return errZeroBlockTime
}
// Verify the block's difficulty based in it's timestamp and parent's difficulty
// Verify the block's difficulty based in its timestamp and parent's difficulty
expected := ethash.CalcDifficulty(chain, header.Time, parent)

if expected.Cmp(header.Difficulty) != 0 {
@ -57,6 +57,7 @@ func NewCompiler(debug bool) *Compiler {
// second stage to push labels and determine the right
// position.
func (c *Compiler) Feed(ch <-chan token) {
var prev token
for i := range ch {
switch i.typ {
case number:
@ -73,10 +74,14 @@ func (c *Compiler) Feed(ch <-chan token) {
c.labels[i.text] = c.pc
c.pc++
case label:
c.pc += 5
c.pc += 4
if prev.typ == element && isJump(prev.text) {
c.pc++
}
}

c.tokens = append(c.tokens, i)
prev = i
}
if c.debug {
fmt.Fprintln(os.Stderr, "found", len(c.labels), "labels")
@ -181,6 +186,8 @@ func (c *Compiler) compileElement(element token) error {
pos := big.NewInt(int64(c.labels[rvalue.text])).Bytes()
pos = append(make([]byte, 4-len(pos)), pos...)
c.pushBin(pos)
case lineEnd:
c.pos--
default:
return compileErr(rvalue, rvalue.text, "number, string or label")
}
@ -201,8 +208,8 @@ func (c *Compiler) compileElement(element token) error {
case stringValue:
value = []byte(rvalue.text[1 : len(rvalue.text)-1])
case label:
value = make([]byte, 4)
copy(value, big.NewInt(int64(c.labels[rvalue.text])).Bytes())
value = big.NewInt(int64(c.labels[rvalue.text])).Bytes()
value = append(make([]byte, 4-len(value)), value...)
default:
return compileErr(rvalue, rvalue.text, "number, string or label")
}

core/asm/compiler_test.go (new file)
@ -0,0 +1,71 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package asm

import (
"testing"
)

func TestCompiler(t *testing.T) {
tests := []struct {
input, output string
}{
{
input: `
GAS
label:
PUSH @label
`,
output: "5a5b6300000001",
},
{
input: `
PUSH @label
label:
`,
output: "63000000055b",
},
{
input: `
PUSH @label
JUMP
label:
`,
output: "6300000006565b",
},
{
input: `
JUMP @label
label:
`,
output: "6300000006565b",
},
}
for _, test := range tests {
ch := Lex([]byte(test.input), false)
c := NewCompiler(false)
c.Feed(ch)
output, err := c.Compile()
if len(err) != 0 {
t.Errorf("compile error: %v\ninput: %s", err, test.input)
continue
}
if output != test.output {
t.Errorf("incorrect output\ninput: %sgot: %s\nwant: %s\n", test.input, output, test.output)
}
}
}
@ -109,11 +109,12 @@ const (
// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
TrieCleanNoPrefetch bool // Whether to disable heuristic state prefetching for followup blocks
TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk
TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node)
TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
TrieCleanNoPrefetch bool // Whether to disable heuristic state prefetching for followup blocks
TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk
TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node)
TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
ProcessingStateDiffs bool // Whether statediff processing should be taken into account before a trie is pruned
}

// BlockChain represents the canonical chain given a database with a genesis
@ -176,6 +177,8 @@ type BlockChain struct {
badBlocks *lru.Cache // Bad block cache
shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block.
terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.

stateDiffsProcessed map[common.Hash]int
}

// NewBlockChain returns a fully initialised block chain using information
@ -196,24 +199,25 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
txLookupCache, _ := lru.New(txLookupCacheLimit)
futureBlocks, _ := lru.New(maxFutureBlocks)
badBlocks, _ := lru.New(badBlockLimit)

stateDiffsProcessed := make(map[common.Hash]int)
bc := &BlockChain{
chainConfig: chainConfig,
cacheConfig: cacheConfig,
db: db,
triegc: prque.New(nil),
stateCache: state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
quit: make(chan struct{}),
shouldPreserve: shouldPreserve,
bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache,
receiptsCache: receiptsCache,
blockCache: blockCache,
txLookupCache: txLookupCache,
futureBlocks: futureBlocks,
engine: engine,
vmConfig: vmConfig,
badBlocks: badBlocks,
chainConfig: chainConfig,
cacheConfig: cacheConfig,
db: db,
triegc: prque.New(nil),
stateCache: state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
quit: make(chan struct{}),
shouldPreserve: shouldPreserve,
bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache,
receiptsCache: receiptsCache,
blockCache: blockCache,
txLookupCache: txLookupCache,
futureBlocks: futureBlocks,
engine: engine,
vmConfig: vmConfig,
badBlocks: badBlocks,
stateDiffsProcessed: stateDiffsProcessed,
}
bc.validator = NewBlockValidator(chainConfig, bc, engine)
bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
@ -228,10 +232,16 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
if bc.genesisBlock == nil {
return nil, ErrNoGenesis
}

var nilBlock *types.Block
bc.currentBlock.Store(nilBlock)
bc.currentFastBlock.Store(nilBlock)

// Initialize the chain with ancient data if it isn't empty.
if bc.empty() {
rawdb.InitDatabaseFromFreezer(bc.db)
}

if err := bc.loadLastState(); err != nil {
return nil, err
}
@ -1251,6 +1261,11 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
return nil
}

func (bc *BlockChain) AddToStateDiffProcessedCollection(hash common.Hash) {
count := bc.stateDiffsProcessed[hash]
bc.stateDiffsProcessed[hash] = count + 1
}

// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
bc.chainmu.Lock()
@ -1335,6 +1350,19 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
bc.triegc.Push(root, number)
break
}
if bc.cacheConfig.ProcessingStateDiffs {
if !bc.rootAllowedToBeDereferenced(root.(common.Hash)) {
bc.triegc.Push(root, number)
break
} else {
log.Debug("Current root found in stateDiffsProcessed collection with a count of 2, okay to dereference",
"root", root.(common.Hash).Hex(),
"blockNumber", uint64(-number),
"size of stateDiffsProcessed", len(bc.stateDiffsProcessed))
delete(bc.stateDiffsProcessed, root.(common.Hash))
}
}
log.Debug("Dereferencing", "root", root.(common.Hash).Hex())
triedb.Dereference(root.(common.Hash))
}
}
@ -1389,6 +1417,15 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
return status, nil
}

// Since we need the state tries of both the current block and its parent
// in memory in order to process statediffs, we avoid dereferencing a root
// until its own statediff and its child's statediff have been processed.
func (bc *BlockChain) rootAllowedToBeDereferenced(root common.Hash) bool {
diffProcessedForSelfAndChildCount := 2
count := bc.stateDiffsProcessed[root]
return count >= diffProcessedForSelfAndChildCount
}

// addFutureBlock checks if the block is within the max allowed window to get
// accepted for future processing, and returns an error if the block is too far
// ahead and was not added.
@ -1549,6 +1586,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []

// Some other error occurred, abort
case err != nil:
bc.futureBlocks.Remove(block.Hash())
stats.ignored += len(it.chain)
bc.reportBlock(block, nil, err)
return it.index, events, coalescedLogs, err
@ -2139,6 +2177,11 @@ func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
return bc.hc.HasHeader(hash, number)
}

// GetCanonicalHash returns the canonical hash for a given block number
func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash {
return bc.hc.GetCanonicalHash(number)
}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
@ -2151,9 +2194,6 @@ func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []com
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
bc.chainmu.RLock()
defer bc.chainmu.RUnlock()

return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}

@ -1318,7 +1318,7 @@ func TestEIP155Transition(t *testing.T) {
funds = big.NewInt(1000000000)
deleteAddr = common.Address{1}
gspec = &Genesis{
Config: &params.ChainConfig{ChainID: big.NewInt(1), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)},
Config: &params.ChainConfig{ChainID: big.NewInt(1), EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)},
Alloc: GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
}
genesis = gspec.MustCommit(db)
@ -1389,7 +1389,7 @@ func TestEIP155Transition(t *testing.T) {
}

// generate an invalid chain id transaction
config := &params.ChainConfig{ChainID: big.NewInt(2), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)}
config := &params.ChainConfig{ChainID: big.NewInt(2), EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)}
blocks, _ = GenerateChain(config, blocks[len(blocks)-1], ethash.NewFaker(), db, 4, func(i int, block *BlockGen) {
var (
tx *types.Transaction
@ -1425,6 +1425,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
ChainID: big.NewInt(1),
HomesteadBlock: new(big.Int),
EIP155Block: new(big.Int),
EIP150Block: new(big.Int),
EIP158Block: big.NewInt(2),
},
Alloc: GenesisAlloc{address: {Balance: funds}},
@ -2287,3 +2288,159 @@ func TestSideImportPrunedBlocks(t *testing.T) {
t.Errorf("Got error, %v", err)
}
}

// TestDeleteCreateRevert tests a weird state transition corner case that we hit
// while changing the internals of statedb. The workflow is that a contract is
// self destructed, then in a followup transaction (but same block) it's created
// again and the transaction reverted.
//
// The original statedb implementation flushed dirty objects to the tries after
// each transaction, so this works ok. The rework accumulated writes in memory
// first, but the journal wiped the entire state object on create-revert.
func TestDeleteCreateRevert(t *testing.T) {
var (
aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
// Generate a canonical chain to act as the main dataset
engine = ethash.NewFaker()
db = rawdb.NewMemoryDatabase()

// A sender who makes transactions, has some funds
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
address = crypto.PubkeyToAddress(key.PublicKey)
funds = big.NewInt(1000000000)
gspec = &Genesis{
Config: params.TestChainConfig,
Alloc: GenesisAlloc{
address: {Balance: funds},
// The address 0xAAAAA selfdestructs if called
aa: {
// Code needs to just selfdestruct
Code: []byte{byte(vm.PC), 0xFF},
Nonce: 1,
Balance: big.NewInt(0),
},
// The address 0xBBBB send 1 wei to 0xAAAA, then reverts
bb: {
Code: []byte{
byte(vm.PC), // [0]
byte(vm.DUP1), // [0,0]
byte(vm.DUP1), // [0,0,0]
byte(vm.DUP1), // [0,0,0,0]
byte(vm.PUSH1), 0x01, // [0,0,0,0,1] (value)
byte(vm.PUSH2), 0xaa, 0xaa, // [0,0,0,0,1, 0xaaaa]
byte(vm.GAS),
byte(vm.CALL),
byte(vm.REVERT),
},
Balance: big.NewInt(1),
},
},
}
genesis = gspec.MustCommit(db)
)

blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 1, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{1})
// One transaction to AAAA
tx, _ := types.SignTx(types.NewTransaction(0, aa,
big.NewInt(0), 50000, big.NewInt(1), nil), types.HomesteadSigner{}, key)
b.AddTx(tx)
// One transaction to BBBB
tx, _ = types.SignTx(types.NewTransaction(1, bb,
big.NewInt(0), 100000, big.NewInt(1), nil), types.HomesteadSigner{}, key)
b.AddTx(tx)
})
// Import the canonical chain
diskdb := rawdb.NewMemoryDatabase()
gspec.MustCommit(diskdb)

chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
if n, err := chain.InsertChain(blocks); err != nil {
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
}
}

func TestProcessingStateDiffs(t *testing.T) {
defaultTrieCleanCache := 256
defaultTrieDirtyCache := 256
defaultTrieTimeout := 60 * time.Minute
cacheConfig := &CacheConfig{
TrieDirtyDisabled: false,
TrieCleanLimit: defaultTrieCleanCache,
TrieDirtyLimit: defaultTrieDirtyCache,
TrieTimeLimit: defaultTrieTimeout,
ProcessingStateDiffs: true,
}
db := rawdb.NewMemoryDatabase()
genesis := new(Genesis).MustCommit(db)
numberOfBlocks := TriesInMemory
engine := ethash.NewFaker()
blockchain, _ := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil)
blocks := makeBlockChain(genesis, numberOfBlocks+1, engine, db, canonicalSeed)
_, err := blockchain.InsertChain(blocks)
if err != nil {
t.Fatalf("failed to create pristine chain: %v", err)
}
defer blockchain.Stop()

//when adding a root hash to the collection, it will increment the count
firstStateRoot := blocks[0].Root()
blockchain.AddToStateDiffProcessedCollection(firstStateRoot)
value, ok := blockchain.stateDiffsProcessed[firstStateRoot]
if !ok {
t.Error("state root not found in collection")
}
if value != 1 {
t.Error("state root count not correct", "want", 1, "got", value)
}

blockchain.AddToStateDiffProcessedCollection(firstStateRoot)
value, ok = blockchain.stateDiffsProcessed[firstStateRoot]
if !ok {
t.Error("state root not found in collection")
}
if value != 2 {
t.Error("state root count not correct", "want", 2, "got", value)
}

moreBlocks := makeBlockChain(blocks[len(blocks)-1], 1, engine, db, canonicalSeed)
_, err = blockchain.InsertChain(moreBlocks)

//a root hash can be dereferenced when its state diff and its child's state diff have been processed
//(i.e. it has a count of 2 in stateDiffsProcessed)
nodes := blockchain.stateCache.TrieDB().Nodes()
if containsRootHash(nodes, firstStateRoot) {
t.Errorf("stateRoot %s in nodes, want: %t, got: %t", firstStateRoot.Hex(), false, true)
}

//a root hash should still be in the in-mem db if its child's state diff hasn't yet been processed
//(i.e. it has a count of 1 in stateDiffsProcessed)
secondStateRoot := blocks[1].Root()
blockchain.AddToStateDiffProcessedCollection(secondStateRoot)
if !containsRootHash(nodes, secondStateRoot) {
t.Errorf("stateRoot %s in nodes, want: %t, got: %t", secondStateRoot.Hex(), true, false)
}

//the stateDiffsProcessed collection is cleaned up once a hash has been dereferenced
_, ok = blockchain.stateDiffsProcessed[firstStateRoot]
if ok {
t.Errorf("stateRoot %s in stateDiffsProcessed collection, want: %t, got: %t",
firstStateRoot.Hex(),
false,
ok,
)
}
}

func containsRootHash(collection []common.Hash, hash common.Hash) bool {
for _, n := range collection {
if n == hash {
return true
}
}
return false
}

@ -103,7 +103,7 @@ func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) {
b.SetCoinbase(common.Address{})
}
b.statedb.Prepare(tx.Hash(), common.Hash{}, len(b.txs))
receipt, _, err := ApplyTransaction(b.config, bc, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vm.Config{})
receipt, err := ApplyTransaction(b.config, bc, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vm.Config{})
if err != nil {
panic(err)
}

@ -50,6 +50,9 @@ type ID struct {
	Next uint64 // Block number of the next upcoming fork, or 0 if no forks are known
}

// Filter is a fork id filter to validate a remotely advertised ID.
type Filter func(id ID) error

// NewID calculates the Ethereum fork ID from the chain config and head.
func NewID(chain *core.BlockChain) ID {
	return newID(
@ -80,9 +83,9 @@ func newID(config *params.ChainConfig, genesis common.Hash, head uint64) ID {
	return ID{Hash: checksumToBytes(hash), Next: next}
}

// NewFilter creates an filter that returns if a fork ID should be rejected or not
// NewFilter creates a filter that returns if a fork ID should be rejected or not
// based on the local chain's status.
func NewFilter(chain *core.BlockChain) func(id ID) error {
func NewFilter(chain *core.BlockChain) Filter {
	return newFilter(
		chain.Config(),
		chain.Genesis().Hash(),
@ -92,10 +95,16 @@ func NewFilter(chain *core.BlockChain) func(id ID) error {
	)
}

// NewStaticFilter creates a filter at block zero.
func NewStaticFilter(config *params.ChainConfig, genesis common.Hash) Filter {
	head := func() uint64 { return 0 }
	return newFilter(config, genesis, head)
}

// newFilter is the internal version of NewFilter, taking closures as its arguments
// instead of a chain. The reason is to allow testing it without having to simulate
// an entire blockchain.
func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) func(id ID) error {
func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) Filter {
	// Calculate all the valid fork hash and fork next combos
	var (
		forks = gatherForks(config)
@ -114,10 +123,13 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
	// Create a validator that will filter out incompatible chains
	return func(id ID) error {
		// Run the fork checksum validation ruleset:
		//   1. If local and remote FORK_CSUM matches, connect.
		//   1. If local and remote FORK_CSUM matches, compare local head to FORK_NEXT.
		//      The two nodes are in the same fork state currently. They might know
		//      of differing future forks, but that's not relevant until the fork
		//      triggers (might be postponed, nodes might be updated to match).
		//  1a. A remotely announced but remotely not passed block is already passed
		//      locally, disconnect, since the chains are incompatible.
		//  1b. No remotely announced fork; or not yet passed locally, connect.
		//   2. If the remote FORK_CSUM is a subset of the local past forks and the
		//      remote FORK_NEXT matches with the locally following fork block number,
		//      connect.
@ -139,7 +151,12 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
		// Found the first unpassed fork block, check if our current state matches
		// the remote checksum (rule #1).
		if sums[i] == id.Hash {
			// Yay, fork checksum matched, ignore any upcoming fork
			// Fork checksum matched, check if a remote future fork block already passed
			// locally without the local node being aware of it (rule #1a).
			if id.Next > 0 && head >= id.Next {
				return ErrLocalIncompatibleOrStale
			}
			// Haven't passed locally a remote-only fork, accept the connection (rule #1b).
			return nil
		}
		// The local and remote nodes are in different forks currently, check if the
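For orientation, a hedged sketch of how the exported Filter type and the new NewStaticFilter constructor might be driven from the networking layer; the surrounding peer plumbing is assumed, and only NewFilter, NewStaticFilter, ID and Filter come from the diff above:

	package eth

	import (
		"github.com/ethereum/go-ethereum/core"
		"github.com/ethereum/go-ethereum/core/forkid"
		"github.com/ethereum/go-ethereum/params"
	)

	// validateFork checks a remotely advertised fork ID against the local chain.
	// A nil return means the peer is compatible; ErrLocalIncompatibleOrStale and
	// friends mean it should be dropped.
	func validateFork(chain *core.BlockChain, remote forkid.ID) error {
		filter := forkid.NewFilter(chain) // forkid.Filter, i.e. func(forkid.ID) error
		return filter(remote)
	}

	// validateForkStateless does the same without a synced chain, pinning the
	// local head to block zero via NewStaticFilter.
	func validateForkStateless(remote forkid.ID) error {
		filter := forkid.NewStaticFilter(params.MainnetChainConfig, params.MainnetGenesisHash)
		return filter(remote)
	}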
@ -55,8 +55,10 @@ func TestCreation(t *testing.T) {
			{4369999, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // Last Spurious block
			{4370000, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // First Byzantium block
			{7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block
			{7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 0}},       // First and last Constantinople, first Petersburg block
			{7987396, ID{Hash: checksumToBytes(0x668db0af), Next: 0}},       // Today Petersburg block
			{7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // First and last Constantinople, first Petersburg block
			{9068999, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // Last Petersburg block
			{9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 0}},       // Today Istanbul block
			{10000000, ID{Hash: checksumToBytes(0x879d6e30), Next: 0}},      // Future Istanbul block
		},
	},
	// Ropsten test cases
@ -72,8 +74,10 @@ func TestCreation(t *testing.T) {
			{4229999, ID{Hash: checksumToBytes(0x3ea159c7), Next: 4230000}}, // Last Byzantium block
			{4230000, ID{Hash: checksumToBytes(0x97b544f3), Next: 4939394}}, // First Constantinople block
			{4939393, ID{Hash: checksumToBytes(0x97b544f3), Next: 4939394}}, // Last Constantinople block
			{4939394, ID{Hash: checksumToBytes(0xd6e2149b), Next: 0}},       // First Petersburg block
			{5822692, ID{Hash: checksumToBytes(0xd6e2149b), Next: 0}},       // Today Petersburg block
			{4939394, ID{Hash: checksumToBytes(0xd6e2149b), Next: 6485846}}, // First Petersburg block
			{6485845, ID{Hash: checksumToBytes(0xd6e2149b), Next: 6485846}}, // Last Petersburg block
			{6485846, ID{Hash: checksumToBytes(0x4bc66396), Next: 0}},       // First Istanbul block
			{7500000, ID{Hash: checksumToBytes(0x4bc66396), Next: 0}},       // Future Istanbul block
		},
	},
	// Rinkeby test cases
@ -90,8 +94,10 @@ func TestCreation(t *testing.T) {
			{3660662, ID{Hash: checksumToBytes(0x8d748b57), Next: 3660663}}, // Last Byzantium block
			{3660663, ID{Hash: checksumToBytes(0xe49cab14), Next: 4321234}}, // First Constantinople block
			{4321233, ID{Hash: checksumToBytes(0xe49cab14), Next: 4321234}}, // Last Constantinople block
			{4321234, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}},       // First Petersburg block
			{4586649, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}},       // Today Petersburg block
			{4321234, ID{Hash: checksumToBytes(0xafec6b27), Next: 5435345}}, // First Petersburg block
			{5435344, ID{Hash: checksumToBytes(0xafec6b27), Next: 5435345}}, // Last Petersburg block
			{5435345, ID{Hash: checksumToBytes(0xcbdb8838), Next: 0}},       // First Istanbul block
			{6000000, ID{Hash: checksumToBytes(0xcbdb8838), Next: 0}},       // Future Istanbul block
		},
	},
	// Goerli test cases
@ -99,8 +105,10 @@ func TestCreation(t *testing.T) {
		params.GoerliChainConfig,
		params.GoerliGenesisHash,
		[]testcase{
			{0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 0}},             // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople and first Petersburg block
			{795329, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 0}},        // Today Petersburg block
			{0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}},       // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople and first Petersburg block
			{1561650, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Last Petersburg block
			{1561651, ID{Hash: checksumToBytes(0xc25efa5c), Next: 0}},       // First Istanbul block
			{2000000, ID{Hash: checksumToBytes(0xc25efa5c), Next: 0}},       // Future Istanbul block
		},
	},
}
@ -145,7 +153,7 @@ func TestValidation(t *testing.T) {

		// Local is mainnet Petersburg, remote announces Byzantium + knowledge about Petersburg. Remote
		// is simply out of sync, accept.
		{7987396, ID{Hash: checksumToBytes(0x668db0af), Next: 7280000}, nil},
		{7987396, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},

		// Local is mainnet Petersburg, remote announces Spurious + knowledge about Byzantium. Remote
		// is definitely out of sync. It may or may not need the Petersburg update, we don't know yet.
@ -172,6 +180,16 @@ func TestValidation(t *testing.T) {

		// Local is mainnet Petersburg, remote is Rinkeby Petersburg.
		{7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},

		// Local is mainnet Istanbul, far in the future. Remote announces Gopherium (non existing fork)
		// at some future block 88888888, for itself, but past block for local. Local is incompatible.
		//
		// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
		{88888888, ID{Hash: checksumToBytes(0x879d6e30), Next: 88888888}, ErrLocalIncompatibleOrStale},

		// Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
		// fork) at block 7279999, before Petersburg. Local is incompatible.
		{7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale},
	}
	for i, tt := range tests {
		filter := newFilter(params.MainnetChainConfig, params.MainnetGenesisHash, func() uint64 { return tt.head })
@ -207,6 +207,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
		if overrideIstanbul != nil {
			newcfg.IstanbulBlock = overrideIstanbul
		}
		if err := newcfg.CheckConfigForkOrder(); err != nil {
			return newcfg, common.Hash{}, err
		}
		storedcfg := rawdb.ReadChainConfig(db, stored)
		if storedcfg == nil {
			log.Warn("Found genesis block without chain config")
@ -295,6 +298,13 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
	if block.Number().Sign() != 0 {
		return nil, fmt.Errorf("can't commit genesis block with number > 0")
	}
	config := g.Config
	if config == nil {
		config = params.AllEthashProtocolChanges
	}
	if err := config.CheckConfigForkOrder(); err != nil {
		return nil, err
	}
	rawdb.WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty)
	rawdb.WriteBlock(db, block)
	rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
@ -302,11 +312,6 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
	rawdb.WriteHeadBlockHash(db, block.Hash())
	rawdb.WriteHeadFastBlockHash(db, block.Hash())
	rawdb.WriteHeadHeaderHash(db, block.Hash())

	config := g.Config
	if config == nil {
		config = params.AllEthashProtocolChanges
	}
	rawdb.WriteChainConfig(db, block.Hash(), config)
	return block, nil
}
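A hedged sketch of the override hook guarded by the new CheckConfigForkOrder call: a non-nil overrideIstanbul replaces the Istanbul activation block before the fork schedule is validated, so a mis-ordered schedule is now rejected up front instead of being written out (db and genesis are assumed to be in scope; the height is the mainnet value from the fork ID tests above):

	istanbul := big.NewInt(9069000) // mainnet Istanbul height
	config, hash, err := core.SetupGenesisBlockWithOverride(db, genesis, istanbul)
	if err != nil {
		log.Crit("invalid genesis/override combination", "err", err)
	}
	_, _ = config, hash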
@ -349,8 +349,11 @@ func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, ma
	}
	for ancestor != 0 {
		if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
			number -= ancestor
			return rawdb.ReadCanonicalHash(hc.chainDb, number), number
		ancestorHash := rawdb.ReadCanonicalHash(hc.chainDb, number-ancestor)
		if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
			number -= ancestor
			return ancestorHash, number
		}
	}
	if *maxNonCanonical == 0 {
		return common.Hash{}, 0
@ -445,6 +448,10 @@ func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
	return hc.GetHeader(hash, number)
}

func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
	return rawdb.ReadCanonicalHash(hc.chainDb, number)
}

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (hc *HeaderChain) CurrentHeader() *types.Header {
@ -80,9 +80,9 @@ type freezer struct {
func newFreezer(datadir string, namespace string) (*freezer, error) {
	// Create the initial freezer object
	var (
		readMeter   = metrics.NewRegisteredMeter(namespace+"ancient/read", nil)
		writeMeter  = metrics.NewRegisteredMeter(namespace+"ancient/write", nil)
		sizeCounter = metrics.NewRegisteredCounter(namespace+"ancient/size", nil)
		readMeter  = metrics.NewRegisteredMeter(namespace+"ancient/read", nil)
		writeMeter = metrics.NewRegisteredMeter(namespace+"ancient/write", nil)
		sizeGauge  = metrics.NewRegisteredGauge(namespace+"ancient/size", nil)
	)
	// Ensure the datadir is not a symbolic link if it exists.
	if info, err := os.Lstat(datadir); !os.IsNotExist(err) {
@ -103,7 +103,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
		instanceLock: lock,
	}
	for name, disableSnappy := range freezerNoSnappy {
		table, err := newTable(datadir, name, readMeter, writeMeter, sizeCounter, disableSnappy)
		table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, disableSnappy)
		if err != nil {
			for _, table := range freezer.tables {
				table.Close()
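The switch from a counter to a gauge is not just a rename: the freezer's combined table size grows on Append and shrinks on truncate, and a gauge is the metric type meant to move in both directions, whereas a counter is conceptually a monotonic sum. The Inc/Dec calls visible in freezer_table.go below behave as sketched here, with an illustrative metric name:

	sizeGauge := metrics.NewRegisteredGauge("ancient/size", nil)
	sizeGauge.Inc(4096) // Append: blob plus index entry written
	sizeGauge.Dec(1024) // truncate: oldSize - newSize reclaimed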
@ -94,18 +94,18 @@ type freezerTable struct {
	// to count how many historic items have gone missing.
	itemOffset uint32 // Offset (number of discarded items)

	headBytes   uint32          // Number of bytes written to the head file
	readMeter   metrics.Meter   // Meter for measuring the effective amount of data read
	writeMeter  metrics.Meter   // Meter for measuring the effective amount of data written
	sizeCounter metrics.Counter // Counter for tracking the combined size of all freezer tables
	headBytes  uint32        // Number of bytes written to the head file
	readMeter  metrics.Meter // Meter for measuring the effective amount of data read
	writeMeter metrics.Meter // Meter for measuring the effective amount of data written
	sizeGauge  metrics.Gauge // Gauge for tracking the combined size of all freezer tables

	logger log.Logger   // Logger with database path and table name embedded
	lock   sync.RWMutex // Mutex protecting the data file descriptors
}

// newTable opens a freezer table with default settings - 2G files
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeCounter metrics.Counter, disableSnappy bool) (*freezerTable, error) {
	return newCustomTable(path, name, readMeter, writeMeter, sizeCounter, 2*1000*1000*1000, disableSnappy)
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, disableSnappy bool) (*freezerTable, error) {
	return newCustomTable(path, name, readMeter, writeMeter, sizeGauge, 2*1000*1000*1000, disableSnappy)
}

// openFreezerFileForAppend opens a freezer table file and seeks to the end
@ -149,7 +149,7 @@ func truncateFreezerFile(file *os.File, size int64) error {
// newCustomTable opens a freezer table, creating the data and index files if they are
// non-existent. Both files are truncated to the shortest common length to ensure
// they don't go out of sync.
func newCustomTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeCounter metrics.Counter, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
func newCustomTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
	// Ensure the containing directory exists and open the indexEntry file
	if err := os.MkdirAll(path, 0755); err != nil {
		return nil, err
@ -172,7 +172,7 @@ func newCustomTable(path string, name string, readMeter metrics.Meter, writeMete
		files:       make(map[uint32]*os.File),
		readMeter:   readMeter,
		writeMeter:  writeMeter,
		sizeCounter: sizeCounter,
		sizeGauge:   sizeGauge,
		name:        name,
		path:        path,
		logger:      log.New("database", path, "table", name),
@ -189,7 +189,7 @@ func newCustomTable(path string, name string, readMeter metrics.Meter, writeMete
		tab.Close()
		return nil, err
	}
	tab.sizeCounter.Inc(int64(size))
	tab.sizeGauge.Inc(int64(size))

	return tab, nil
}
@ -378,7 +378,7 @@ func (t *freezerTable) truncate(items uint64) error {
	if err != nil {
		return err
	}
	t.sizeCounter.Dec(int64(oldSize - newSize))
	t.sizeGauge.Dec(int64(oldSize - newSize))

	return nil
}
@ -510,7 +510,7 @@ func (t *freezerTable) Append(item uint64, blob []byte) error {
	t.index.Write(idx.marshallBinary())

	t.writeMeter.Mark(int64(bLen + indexEntrySize))
	t.sizeCounter.Inc(int64(bLen + indexEntrySize))
	t.sizeGauge.Inc(int64(bLen + indexEntrySize))

	atomic.AddUint64(&t.items, 1)
	return nil
@ -56,7 +56,7 @@ func TestFreezerBasics(t *testing.T) {
	// set cutoff at 50 bytes
	f, err := newCustomTable(os.TempDir(),
		fmt.Sprintf("unittest-%d", rand.Uint64()),
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter(), 50, true)
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true)
	if err != nil {
		t.Fatal(err)
	}
@ -99,11 +99,11 @@ func TestFreezerBasicsClosing(t *testing.T) {
	// set cutoff at 50 bytes
	var (
		fname      = fmt.Sprintf("basics-close-%d", rand.Uint64())
		rm, wm, sc = metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
		rm, wm, sg = metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
		f          *freezerTable
		err        error
	)
	f, err = newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
	f, err = newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
	if err != nil {
		t.Fatal(err)
	}
@ -112,7 +112,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
		data := getChunk(15, x)
		f.Append(uint64(x), data)
		f.Close()
		f, err = newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err = newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -129,7 +129,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
		}
		f.Close()
		f, err = newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err = newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -139,11 +139,11 @@ func TestFreezerBasicsClosing(t *testing.T) {
// TestFreezerRepairDanglingHead tests that we can recover if index entries are removed
func TestFreezerRepairDanglingHead(t *testing.T) {
	t.Parallel()
	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

	{ // Fill table
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -172,7 +172,7 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
	idxFile.Close()
	// Now open it again
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -190,11 +190,11 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
// TestFreezerRepairDanglingHeadLarge tests that we can recover if very many index entries are removed
func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
	t.Parallel()
	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

	{ // Fill a table and close it
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -222,7 +222,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
	idxFile.Close()
	// Now open it again
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -243,7 +243,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
	}
	// And if we open it, we should now be able to read all of them (new values)
	{
		f, _ := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, _ := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		for y := 1; y < 255; y++ {
			exp := getChunk(15, ^y)
			got, err := f.Retrieve(uint64(y))
@ -260,11 +260,11 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
// TestSnappyDetection tests that we fail to open a snappy database and vice versa
func TestSnappyDetection(t *testing.T) {
	t.Parallel()
	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("snappytest-%d", rand.Uint64())
	// Open with snappy
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -277,7 +277,7 @@ func TestSnappyDetection(t *testing.T) {
	}
	// Open without snappy
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, false)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, false)
		if err != nil {
			t.Fatal(err)
		}
@ -289,7 +289,7 @@ func TestSnappyDetection(t *testing.T) {

	// Open with snappy
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -317,11 +317,11 @@ func assertFileSize(f string, size int64) error {
// the index is repaired
func TestFreezerRepairDanglingIndex(t *testing.T) {
	t.Parallel()
	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_indextest-%d", rand.Uint64())

	{ // Fill a table and close it
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -357,7 +357,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
	// 45, 45, 15
	// with 3+3+1 items
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -374,11 +374,11 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
func TestFreezerTruncate(t *testing.T) {

	t.Parallel()
	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncation-%d", rand.Uint64())

	{ // Fill table
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -395,7 +395,7 @@ func TestFreezerTruncate(t *testing.T) {
	}
	// Reopen, truncate
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -417,10 +417,10 @@ func TestFreezerTruncate(t *testing.T) {
// That will rewind the index, and _should_ truncate the head file
func TestFreezerRepairFirstFile(t *testing.T) {
	t.Parallel()
	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncationfirst-%d", rand.Uint64())
	{ // Fill table
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -448,7 +448,7 @@ func TestFreezerRepairFirstFile(t *testing.T) {
	}
	// Reopen
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -473,10 +473,10 @@ func TestFreezerRepairFirstFile(t *testing.T) {
// - check that we did not keep the rdonly file descriptors
func TestFreezerReadAndTruncate(t *testing.T) {
	t.Parallel()
	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("read_truncate-%d", rand.Uint64())
	{ // Fill table
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -493,7 +493,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
	}
	// Reopen and read all files
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
@ -519,10 +519,10 @@ func TestFreezerReadAndTruncate(t *testing.T) {

func TestOffset(t *testing.T) {
	t.Parallel()
	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("offset-%d", rand.Uint64())
	{ // Fill table
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 40, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 40, true)
		if err != nil {
			t.Fatal(err)
		}
@ -578,7 +578,7 @@ func TestOffset(t *testing.T) {
	}
	// Now open again
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 40, true)
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 40, true)
		if err != nil {
			t.Fatal(err)
		}
@ -79,9 +79,10 @@ type stateObject struct {
	trie Trie // storage trie, which becomes non-nil on first access
	code Code // contract bytecode, which gets set when code is loaded

	originStorage Storage // Storage cache of original entries to dedup rewrites
	dirtyStorage  Storage // Storage entries that need to be flushed to disk
	fakeStorage   Storage // Fake storage which is constructed by the caller for debugging purposes.
	originStorage  Storage // Storage cache of original entries to dedup rewrites, reset for every transaction
	pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block
	dirtyStorage   Storage // Storage entries that have been modified in the current transaction execution
	fakeStorage    Storage // Fake storage which is constructed by the caller for debugging purposes.

	// Cache flags.
	// When an object is marked suicided it will be deleted from the trie
@ -113,13 +114,17 @@ func newObject(db *StateDB, address common.Address, data Account) *stateObject {
	if data.CodeHash == nil {
		data.CodeHash = emptyCodeHash
	}
	if data.Root == (common.Hash{}) {
		data.Root = emptyRoot
	}
	return &stateObject{
		db:            db,
		address:       address,
		addrHash:      crypto.Keccak256Hash(address[:]),
		data:          data,
		originStorage: make(Storage),
		dirtyStorage:  make(Storage),
		db:             db,
		address:        address,
		addrHash:       crypto.Keccak256Hash(address[:]),
		data:           data,
		originStorage:  make(Storage),
		pendingStorage: make(Storage),
		dirtyStorage:   make(Storage),
	}
}

@ -183,9 +188,11 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
	if s.fakeStorage != nil {
		return s.fakeStorage[key]
	}
	// If we have the original value cached, return that
	value, cached := s.originStorage[key]
	if cached {
	// If we have a pending write or clean cached, return that
	if value, pending := s.pendingStorage[key]; pending {
		return value
	}
	if value, cached := s.originStorage[key]; cached {
		return value
	}
	// Track the amount of time wasted on reading the storage trie
@ -198,6 +205,7 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
		s.setError(err)
		return common.Hash{}
	}
	var value common.Hash
	if len(enc) > 0 {
		_, content, _, err := rlp.Split(enc)
		if err != nil {
@ -252,17 +260,29 @@ func (s *stateObject) setState(key, value common.Hash) {
	s.dirtyStorage[key] = value
}

// finalise moves all dirty storage slots into the pending area to be hashed or
// committed later. It is invoked at the end of every transaction.
func (s *stateObject) finalise() {
	for key, value := range s.dirtyStorage {
		s.pendingStorage[key] = value
	}
	if len(s.dirtyStorage) > 0 {
		s.dirtyStorage = make(Storage)
	}
}

// updateTrie writes cached storage modifications into the object's storage trie.
func (s *stateObject) updateTrie(db Database) Trie {
	// Make sure all dirty slots are finalized into the pending storage area
	s.finalise()

	// Track the amount of time wasted on updating the storage trie
	if metrics.EnabledExpensive {
		defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now())
	}
	// Update all the dirty slots in the trie
	// Insert all the pending updates into the trie
	tr := s.getTrie(db)
	for key, value := range s.dirtyStorage {
		delete(s.dirtyStorage, key)

	for key, value := range s.pendingStorage {
		// Skip noop changes, persist actual changes
		if value == s.originStorage[key] {
			continue
@ -274,9 +294,12 @@ func (s *stateObject) updateTrie(db Database) Trie {
			continue
		}
		// Encoding []byte cannot fail, ok to ignore the error.
		v, _ := rlp.EncodeToBytes(bytes.TrimLeft(value[:], "\x00"))
		v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
		s.setError(tr.TryUpdate(key[:], v))
	}
	if len(s.pendingStorage) > 0 {
		s.pendingStorage = make(Storage)
	}
	return tr
}

@ -356,6 +379,7 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject {
	stateObject.code = s.code
	stateObject.dirtyStorage = s.dirtyStorage.Copy()
	stateObject.originStorage = s.originStorage.Copy()
	stateObject.pendingStorage = s.pendingStorage.Copy()
	stateObject.suicided = s.suicided
	stateObject.dirtyCode = s.dirtyCode
	stateObject.deleted = s.deleted
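The three storage maps now form a pipeline whose stages the comments above spell out: writes land in dirtyStorage during a transaction, finalise moves them to pendingStorage at the transaction boundary, and updateTrie flushes pending entries (skipping no-ops against originStorage) at the block boundary. A toy reduction with plain string maps, just to make the flow concrete (not the actual types):

	package main

	import "fmt"

	func main() {
		origin := map[string]string{"slot": "old"} // committed values, dedups rewrites
		pending := map[string]string{}             // awaiting the end-of-block flush
		dirty := map[string]string{"slot": "new"}  // written by the current transaction

		// Transaction boundary: what finalise does.
		for key, value := range dirty {
			pending[key] = value
		}
		dirty = make(map[string]string) // next transaction writes into a fresh map
		_ = dirty

		// Block boundary: what updateTrie does.
		for key, value := range pending {
			if value == origin[key] {
				continue // skip noop change, nothing to persist
			}
			fmt.Printf("trie update: %s = %s\n", key, value) // real code RLP-encodes and calls tr.TryUpdate
		}
	}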
70
core/state/state_object_test.go
Normal file
@ -0,0 +1,70 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package state

import (
	"bytes"
	"fmt"
	"math/rand"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
)

func BenchmarkCutOriginal(b *testing.B) {
	value := common.HexToHash("0x01")
	for i := 0; i < b.N; i++ {
		bytes.TrimLeft(value[:], "\x00")
	}
}

func BenchmarkCutsetterFn(b *testing.B) {
	value := common.HexToHash("0x01")
	cutSetFn := func(r rune) bool {
		return int32(r) == int32(0)
	}
	for i := 0; i < b.N; i++ {
		bytes.TrimLeftFunc(value[:], cutSetFn)
	}
}

func BenchmarkCutCustomTrim(b *testing.B) {
	value := common.HexToHash("0x01")
	for i := 0; i < b.N; i++ {
		common.TrimLeftZeroes(value[:])
	}
}

func xTestFuzzCutter(t *testing.T) {
	rand.Seed(time.Now().Unix())
	for {
		v := make([]byte, 20)
		zeroes := rand.Intn(21)
		rand.Read(v[zeroes:])
		exp := bytes.TrimLeft(v[:], "\x00")
		got := common.TrimLeftZeroes(v)
		if !bytes.Equal(exp, got) {
			fmt.Printf("Input %x\n", v)
			fmt.Printf("Exp %x\n", exp)
			fmt.Printf("Got %x\n", got)
			t.Fatalf("Error")
		}
		//break
	}
}
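The benchmarks above compare three ways of stripping leading zero bytes before RLP-encoding a storage value. common.TrimLeftZeroes itself is not shown in this diff; judging from the fuzz check against bytes.TrimLeft, a plausible implementation (an assumption, not a copy of the real one) is a plain byte scan that allocates nothing and does no rune handling:

	// TrimLeftZeroes returns a subslice of s with all leading zero bytes removed.
	func TrimLeftZeroes(s []byte) []byte {
		idx := 0
		for ; idx < len(s) && s[idx] == 0; idx++ {
		}
		return s[idx:]
	}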
@ -67,8 +67,9 @@ type StateDB struct {
	trie Trie

	// This map holds 'live' objects, which will get modified while processing a state transition.
	stateObjects      map[common.Address]*stateObject
	stateObjectsDirty map[common.Address]struct{}
	stateObjects        map[common.Address]*stateObject
	stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
	stateObjectsDirty   map[common.Address]struct{} // State objects modified in the current execution

	// DB error.
	// State objects are used by the consensus core and VM which are
@ -111,13 +112,14 @@ func New(root common.Hash, db Database) (*StateDB, error) {
		return nil, err
	}
	return &StateDB{
		db:                db,
		trie:              tr,
		stateObjects:      make(map[common.Address]*stateObject),
		stateObjectsDirty: make(map[common.Address]struct{}),
		logs:              make(map[common.Hash][]*types.Log),
		preimages:         make(map[common.Hash][]byte),
		journal:           newJournal(),
		db:                  db,
		trie:                tr,
		stateObjects:        make(map[common.Address]*stateObject),
		stateObjectsPending: make(map[common.Address]struct{}),
		stateObjectsDirty:   make(map[common.Address]struct{}),
		logs:                make(map[common.Hash][]*types.Log),
		preimages:           make(map[common.Hash][]byte),
		journal:             newJournal(),
	}, nil
}

@ -141,6 +143,7 @@ func (self *StateDB) Reset(root common.Hash) error {
	}
	self.trie = tr
	self.stateObjects = make(map[common.Address]*stateObject)
	self.stateObjectsPending = make(map[common.Address]struct{})
	self.stateObjectsDirty = make(map[common.Address]struct{})
	self.thash = common.Hash{}
	self.bhash = common.Hash{}
@ -421,15 +424,15 @@ func (self *StateDB) Suicide(addr common.Address) bool {
//

// updateStateObject writes the given object to the trie.
func (s *StateDB) updateStateObject(stateObject *stateObject) {
func (s *StateDB) updateStateObject(obj *stateObject) {
	// Track the amount of time wasted on updating the account from the trie
	if metrics.EnabledExpensive {
		defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
	}
	// Encode the account and update the account trie
	addr := stateObject.Address()
	addr := obj.Address()

	data, err := rlp.EncodeToBytes(stateObject)
	data, err := rlp.EncodeToBytes(obj)
	if err != nil {
		panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err))
	}
@ -437,25 +440,33 @@ func (s *StateDB) updateStateObject(stateObject *stateObject) {
}

// deleteStateObject removes the given object from the state trie.
func (s *StateDB) deleteStateObject(stateObject *stateObject) {
func (s *StateDB) deleteStateObject(obj *stateObject) {
	// Track the amount of time wasted on deleting the account from the trie
	if metrics.EnabledExpensive {
		defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
	}
	// Delete the account from the trie
	stateObject.deleted = true

	addr := stateObject.Address()
	addr := obj.Address()
	s.setError(s.trie.TryDelete(addr[:]))
}

// Retrieve a state object given by the address. Returns nil if not found.
func (s *StateDB) getStateObject(addr common.Address) (stateObject *stateObject) {
	// Prefer live objects
// getStateObject retrieves a state object given by the address, returning nil if
// the object is not found or was deleted in this execution context. If you need
// to differentiate between non-existent/just-deleted, use getDeletedStateObject.
func (s *StateDB) getStateObject(addr common.Address) *stateObject {
	if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted {
		return obj
	}
	return nil
}

// getDeletedStateObject is similar to getStateObject, but instead of returning
// nil for a deleted state object, it returns the actual object with the deleted
// flag set. This is needed by the state journal to revert to the correct self-
// destructed object instead of wiping all knowledge about the state object.
func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
	// Prefer live objects if any is available
	if obj := s.stateObjects[addr]; obj != nil {
		if obj.deleted {
			return nil
		}
		return obj
	}
	// Track the amount of time wasted on loading the object from the database
@ -486,7 +497,7 @@ func (self *StateDB) setStateObject(object *stateObject) {
// Retrieve a state object or create a new state object if nil.
func (self *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
	stateObject := self.getStateObject(addr)
	if stateObject == nil || stateObject.deleted {
	if stateObject == nil {
		stateObject, _ = self.createObject(addr)
	}
	return stateObject
@ -495,7 +506,8 @@ func (self *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
// createObject creates a new state object. If there is an existing account with
// the given address, it is overwritten and returned as the second return value.
func (self *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
	prev = self.getStateObject(addr)
	prev = self.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!

	newobj = newObject(self, addr, Account{})
	newobj.setNonce(0) // sets the object to dirty
	if prev == nil {
@ -558,15 +570,16 @@ func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common
func (self *StateDB) Copy() *StateDB {
	// Copy all the basic fields, initialize the memory ones
	state := &StateDB{
		db:                self.db,
		trie:              self.db.CopyTrie(self.trie),
		stateObjects:      make(map[common.Address]*stateObject, len(self.journal.dirties)),
		stateObjectsDirty: make(map[common.Address]struct{}, len(self.journal.dirties)),
		refund:            self.refund,
		logs:              make(map[common.Hash][]*types.Log, len(self.logs)),
		logSize:           self.logSize,
		preimages:         make(map[common.Hash][]byte, len(self.preimages)),
		journal:           newJournal(),
		db:                  self.db,
		trie:                self.db.CopyTrie(self.trie),
		stateObjects:        make(map[common.Address]*stateObject, len(self.journal.dirties)),
		stateObjectsPending: make(map[common.Address]struct{}, len(self.stateObjectsPending)),
		stateObjectsDirty:   make(map[common.Address]struct{}, len(self.journal.dirties)),
		refund:              self.refund,
		logs:                make(map[common.Hash][]*types.Log, len(self.logs)),
		logSize:             self.logSize,
		preimages:           make(map[common.Hash][]byte, len(self.preimages)),
		journal:             newJournal(),
	}
	// Copy the dirty states, logs, and preimages
	for addr := range self.journal.dirties {
@ -575,18 +588,29 @@ func (self *StateDB) Copy() *StateDB {
		// in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for
		// nil
		if object, exist := self.stateObjects[addr]; exist {
			// Even though the original object is dirty, we are not copying the journal,
			// so we need to make sure that any side effect the journal would have caused
			// during a commit (or similar op) is already applied to the copy.
			state.stateObjects[addr] = object.deepCopy(state)
			state.stateObjectsDirty[addr] = struct{}{}

			state.stateObjectsDirty[addr] = struct{}{}   // Mark the copy dirty to force internal (code/state) commits
			state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits
		}
	}
	// Above, we don't copy the actual journal. This means that if the copy is copied, the
	// loop above will be a no-op, since the copy's journal is empty.
	// Thus, here we iterate over stateObjects, to enable copies of copies
	for addr := range self.stateObjectsPending {
		if _, exist := state.stateObjects[addr]; !exist {
			state.stateObjects[addr] = self.stateObjects[addr].deepCopy(state)
		}
		state.stateObjectsPending[addr] = struct{}{}
	}
	for addr := range self.stateObjectsDirty {
		if _, exist := state.stateObjects[addr]; !exist {
			state.stateObjects[addr] = self.stateObjects[addr].deepCopy(state)
			state.stateObjectsDirty[addr] = struct{}{}
		}
		state.stateObjectsDirty[addr] = struct{}{}
	}
	for hash, logs := range self.logs {
		cpy := make([]*types.Log, len(logs))
@ -631,11 +655,12 @@ func (self *StateDB) GetRefund() uint64 {
	return self.refund
}

// Finalise finalises the state by removing the self destructed objects
// and clears the journal as well as the refunds.
// Finalise finalises the state by removing the self destructed objects and clears
// the journal as well as the refunds. Finalise, however, will not push any updates
// into the tries just yet. Only IntermediateRoot or Commit will do that.
func (s *StateDB) Finalise(deleteEmptyObjects bool) {
	for addr := range s.journal.dirties {
		stateObject, exist := s.stateObjects[addr]
		obj, exist := s.stateObjects[addr]
		if !exist {
			// ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2
			// That tx goes out of gas, and although the notion of 'touched' does not exist there, the
@ -645,13 +670,12 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
			// Thus, we can safely ignore it here
			continue
		}

		if stateObject.suicided || (deleteEmptyObjects && stateObject.empty()) {
			s.deleteStateObject(stateObject)
		if obj.suicided || (deleteEmptyObjects && obj.empty()) {
			obj.deleted = true
		} else {
			stateObject.updateRoot(s.db)
			s.updateStateObject(stateObject)
			obj.finalise()
		}
		s.stateObjectsPending[addr] = struct{}{}
		s.stateObjectsDirty[addr] = struct{}{}
	}
	// Invalidate journal because reverting across transactions is not allowed.
@ -662,8 +686,21 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
// It is called in between transactions to get the root hash that
// goes into transaction receipts.
func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
	// Finalise all the dirty storage states and write them into the tries
	s.Finalise(deleteEmptyObjects)

	for addr := range s.stateObjectsPending {
		obj := s.stateObjects[addr]
		if obj.deleted {
			s.deleteStateObject(obj)
		} else {
			obj.updateRoot(s.db)
			s.updateStateObject(obj)
		}
	}
	if len(s.stateObjectsPending) > 0 {
		s.stateObjectsPending = make(map[common.Address]struct{})
	}
	// Track the amount of time wasted on hashing the account trie
	if metrics.EnabledExpensive {
		defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
@ -680,46 +717,40 @@ func (self *StateDB) Prepare(thash, bhash common.Hash, ti int) {
}

func (s *StateDB) clearJournalAndRefund() {
	s.journal = newJournal()
	s.validRevisions = s.validRevisions[:0]
	s.refund = 0
	if len(s.journal.entries) > 0 {
		s.journal = newJournal()
		s.refund = 0
	}
	s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries
}

// Commit writes the state to the underlying in-memory trie database.
func (s *StateDB) Commit(deleteEmptyObjects bool) (root common.Hash, err error) {
	defer s.clearJournalAndRefund()
func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
	// Finalize any pending changes and merge everything into the tries
	s.IntermediateRoot(deleteEmptyObjects)

	for addr := range s.journal.dirties {
		s.stateObjectsDirty[addr] = struct{}{}
	}
	// Commit objects to the trie, measuring the elapsed time
	for addr, stateObject := range s.stateObjects {
		_, isDirty := s.stateObjectsDirty[addr]
		switch {
		case stateObject.suicided || (isDirty && deleteEmptyObjects && stateObject.empty()):
			// If the object has been removed, don't bother syncing it
			// and just mark it for deletion in the trie.
			s.deleteStateObject(stateObject)
		case isDirty:
	for addr := range s.stateObjectsDirty {
		if obj := s.stateObjects[addr]; !obj.deleted {
			// Write any contract code associated with the state object
			if stateObject.code != nil && stateObject.dirtyCode {
				s.db.TrieDB().InsertBlob(common.BytesToHash(stateObject.CodeHash()), stateObject.code)
				stateObject.dirtyCode = false
			if obj.code != nil && obj.dirtyCode {
				s.db.TrieDB().InsertBlob(common.BytesToHash(obj.CodeHash()), obj.code)
				obj.dirtyCode = false
			}
			// Write any storage changes in the state object to its storage trie.
			if err := stateObject.CommitTrie(s.db); err != nil {
			// Write any storage changes in the state object to its storage trie
			if err := obj.CommitTrie(s.db); err != nil {
				return common.Hash{}, err
			}
			// Update the object in the main account trie.
			s.updateStateObject(stateObject)
		}
		delete(s.stateObjectsDirty, addr)
	}
	if len(s.stateObjectsDirty) > 0 {
		s.stateObjectsDirty = make(map[common.Address]struct{})
	}
	// Write the account trie changes, measuring the amount of wasted time
	if metrics.EnabledExpensive {
		defer func(start time.Time) { s.AccountCommits += time.Since(start) }(time.Now())
	}
	root, err = s.trie.Commit(func(leaf []byte, parent common.Hash) error {
	return s.trie.Commit(func(leaf []byte, parent common.Hash) error {
		var account Account
		if err := rlp.DecodeBytes(leaf, &account); err != nil {
			return nil
@ -733,5 +764,4 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (root common.Hash, err error)
		}
		return nil
	})
	return root, err
}
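Taken together, the Finalise, IntermediateRoot and Commit changes define a strict three-phase pipeline in which each phase only ever feeds the next. A hedged sketch of how a block processor might drive it (statedb and deleteEmptyObjects are assumed to be in scope; only the three methods and their division of labour come from the diff above):

	// After each transaction: journal -> pending sets, no trie writes yet.
	statedb.Finalise(deleteEmptyObjects)

	// Whenever a receipt needs a root: pending -> account/storage tries, then hash.
	root := statedb.IntermediateRoot(deleteEmptyObjects)
	_ = root

	// At the end of the block: tries -> the in-memory trie database.
	// Commit now invokes IntermediateRoot itself, so pending work is never lost.
	finalRoot, err := statedb.Commit(deleteEmptyObjects)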
@ -25,10 +25,11 @@ import (
	"math/rand"
	"reflect"
	"strings"
	"sync"
	"testing"
	"testing/quick"

	check "gopkg.in/check.v1"
	"gopkg.in/check.v1"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
@ -53,8 +54,13 @@ func TestUpdateLeaks(t *testing.T) {
		if i%3 == 0 {
			state.SetCode(addr, []byte{i, i, i, i, i})
		}
		state.IntermediateRoot(false)
	}

	root := state.IntermediateRoot(false)
	if err := state.Database().TrieDB().Commit(root, false); err != nil {
		t.Errorf("can not commit trie %v to persistent database", root.Hex())
	}

	// Ensure that no data was leaked into the database
	it := db.NewIterator()
	for it.Next() {
@ -98,27 +104,45 @@ func TestIntermediateLeaks(t *testing.T) {
	}

	// Commit and cross check the databases.
	if _, err := transState.Commit(false); err != nil {
	transRoot, err := transState.Commit(false)
	if err != nil {
		t.Fatalf("failed to commit transition state: %v", err)
	}
	if _, err := finalState.Commit(false); err != nil {
	if err = transState.Database().TrieDB().Commit(transRoot, false); err != nil {
		t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
	}

	finalRoot, err := finalState.Commit(false)
	if err != nil {
		t.Fatalf("failed to commit final state: %v", err)
	}
	if err = finalState.Database().TrieDB().Commit(finalRoot, false); err != nil {
		t.Errorf("can not commit trie %v to persistent database", finalRoot.Hex())
	}

	it := finalDb.NewIterator()
	for it.Next() {
		key := it.Key()
		if _, err := transDb.Get(key); err != nil {
			t.Errorf("entry missing from the transition database: %x -> %x", key, it.Value())
		key, fvalue := it.Key(), it.Value()
		tvalue, err := transDb.Get(key)
		if err != nil {
			t.Errorf("entry missing from the transition database: %x -> %x", key, fvalue)
		}
		if !bytes.Equal(fvalue, tvalue) {
			t.Errorf("value mismatch for key %x: %x in transition database, %x in final database", key, tvalue, fvalue)
		}
	}
	it.Release()

	it = transDb.NewIterator()
	for it.Next() {
		key := it.Key()
		if _, err := finalDb.Get(key); err != nil {
		key, tvalue := it.Key(), it.Value()
		fvalue, err := finalDb.Get(key)
		if err != nil {
			t.Errorf("extra entry in the transition database: %x -> %x", key, it.Value())
		}
		if !bytes.Equal(fvalue, tvalue) {
			t.Errorf("value mismatch for key %x: %x in transition database, %x in final database", key, tvalue, fvalue)
		}
	}
}

@ -136,32 +160,45 @@ func TestCopy(t *testing.T) {
	}
	orig.Finalise(false)

	// Copy the state, modify both in-memory
	// Copy the state
	copy := orig.Copy()

	// Copy the copy state
	ccopy := copy.Copy()

	// modify all in memory
	for i := byte(0); i < 255; i++ {
		origObj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		copyObj := copy.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		ccopyObj := ccopy.GetOrNewStateObject(common.BytesToAddress([]byte{i}))

		origObj.AddBalance(big.NewInt(2 * int64(i)))
		copyObj.AddBalance(big.NewInt(3 * int64(i)))
		ccopyObj.AddBalance(big.NewInt(4 * int64(i)))

		orig.updateStateObject(origObj)
		copy.updateStateObject(copyObj)
		ccopy.updateStateObject(copyObj)
	}
	// Finalise the changes on both concurrently
	done := make(chan struct{})
	go func() {
		orig.Finalise(true)
		close(done)
	}()
	copy.Finalise(true)
	<-done

	// Verify that the two states have been updated independently
	// Finalise the changes on all concurrently
	finalise := func(wg *sync.WaitGroup, db *StateDB) {
		defer wg.Done()
		db.Finalise(true)
	}

	var wg sync.WaitGroup
	wg.Add(3)
	go finalise(&wg, orig)
	go finalise(&wg, copy)
	go finalise(&wg, ccopy)
	wg.Wait()

	// Verify that the three states have been updated independently
	for i := byte(0); i < 255; i++ {
		origObj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		copyObj := copy.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
		ccopyObj := ccopy.GetOrNewStateObject(common.BytesToAddress([]byte{i}))

		if want := big.NewInt(3 * int64(i)); origObj.Balance().Cmp(want) != 0 {
			t.Errorf("orig obj %d: balance mismatch: have %v, want %v", i, origObj.Balance(), want)
@ -169,6 +206,9 @@ func TestCopy(t *testing.T) {
		if want := big.NewInt(4 * int64(i)); copyObj.Balance().Cmp(want) != 0 {
			t.Errorf("copy obj %d: balance mismatch: have %v, want %v", i, copyObj.Balance(), want)
		}
		if want := big.NewInt(5 * int64(i)); ccopyObj.Balance().Cmp(want) != 0 {
			t.Errorf("copy obj %d: balance mismatch: have %v, want %v", i, ccopyObj.Balance(), want)
		}
	}
}

@ -438,14 +478,206 @@ func (s *StateSuite) TestTouchDelete(c *check.C) {
// TestCopyOfCopy tests that modified objects are carried over to the copy, and the copy of the copy.
// See https://github.com/ethereum/go-ethereum/pull/15225#issuecomment-380191512
func TestCopyOfCopy(t *testing.T) {
	sdb, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
	addr := common.HexToAddress("aaaa")
	sdb.SetBalance(addr, big.NewInt(42))
	state.SetBalance(addr, big.NewInt(42))

	if got := sdb.Copy().GetBalance(addr).Uint64(); got != 42 {
	if got := state.Copy().GetBalance(addr).Uint64(); got != 42 {
		t.Fatalf("1st copy fail, expected 42, got %v", got)
	}
	if got := sdb.Copy().Copy().GetBalance(addr).Uint64(); got != 42 {
	if got := state.Copy().Copy().GetBalance(addr).Uint64(); got != 42 {
		t.Fatalf("2nd copy fail, expected 42, got %v", got)
	}
}

// Tests a regression where committing a copy lost some internal meta information,
// leading to corrupted subsequent copies.
//
// See https://github.com/ethereum/go-ethereum/issues/20106.
func TestCopyCommitCopy(t *testing.T) {
	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))

	// Create an account and check if the retrieved balance is correct
	addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
	skey := common.HexToHash("aaa")
	sval := common.HexToHash("bbb")

	state.SetBalance(addr, big.NewInt(42)) // Change the account trie
	state.SetCode(addr, []byte("hello"))   // Change an external metadata
	state.SetState(addr, skey, sval)       // Change the storage trie

	if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
		t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
	}
	if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
		t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello"))
	}
	if val := state.GetState(addr, skey); val != sval {
		t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval)
	}
	if val := state.GetCommittedState(addr, skey); val != (common.Hash{}) {
		t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
	}
	// Copy the non-committed state database and check pre/post commit balance
	copyOne := state.Copy()
	if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
		t.Fatalf("first copy pre-commit balance mismatch: have %v, want %v", balance, 42)
	}
	if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
		t.Fatalf("first copy pre-commit code mismatch: have %x, want %x", code, []byte("hello"))
	}
	if val := copyOne.GetState(addr, skey); val != sval {
		t.Fatalf("first copy pre-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
	}
	if val := copyOne.GetCommittedState(addr, skey); val != (common.Hash{}) {
		t.Fatalf("first copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
	}

	copyOne.Commit(false)
	if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
		t.Fatalf("first copy post-commit balance mismatch: have %v, want %v", balance, 42)
	}
	if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
		t.Fatalf("first copy post-commit code mismatch: have %x, want %x", code, []byte("hello"))
	}
	if val := copyOne.GetState(addr, skey); val != sval {
		t.Fatalf("first copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
	}
	if val := copyOne.GetCommittedState(addr, skey); val != sval {
		t.Fatalf("first copy post-commit committed storage slot mismatch: have %x, want %x", val, sval)
	}
	// Copy the copy and check the balance once more
	copyTwo := copyOne.Copy()
	if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
		t.Fatalf("second copy balance mismatch: have %v, want %v", balance, 42)
	}
	if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
		t.Fatalf("second copy code mismatch: have %x, want %x", code, []byte("hello"))
	}
	if val := copyTwo.GetState(addr, skey); val != sval {
		t.Fatalf("second copy non-committed storage slot mismatch: have %x, want %x", val, sval)
	}
	if val := copyTwo.GetCommittedState(addr, skey); val != sval {
		t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval)
	}
}

// Tests a regression where committing a copy lost some internal meta information,
// leading to corrupted subsequent copies.
//
// See https://github.com/ethereum/go-ethereum/issues/20106.
func TestCopyCopyCommitCopy(t *testing.T) {
	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))

	// Create an account and check if the retrieved balance is correct
	addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
	skey := common.HexToHash("aaa")
	sval := common.HexToHash("bbb")

	state.SetBalance(addr, big.NewInt(42)) // Change the account trie
	state.SetCode(addr, []byte("hello"))   // Change an external metadata
	state.SetState(addr, skey, sval)       // Change the storage trie

	if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
		t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
	}
	if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
		t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello"))
	}
	if val := state.GetState(addr, skey); val != sval {
		t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval)
	}
	if val := state.GetCommittedState(addr, skey); val != (common.Hash{}) {
		t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
	}
	// Copy the non-committed state database and check pre/post commit balance
	copyOne := state.Copy()
	if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
		t.Fatalf("first copy balance mismatch: have %v, want %v", balance, 42)
	}
	if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
		t.Fatalf("first copy code mismatch: have %x, want %x", code, []byte("hello"))
	}
	if val := copyOne.GetState(addr, skey); val != sval {
		t.Fatalf("first copy non-committed storage slot mismatch: have %x, want %x", val, sval)
	}
||||
if val := copyOne.GetCommittedState(addr, skey); val != (common.Hash{}) {
|
||||
t.Fatalf("first copy committed storage slot mismatch: have %x, want %x", val, common.Hash{})
|
||||
}
|
||||
// Copy the copy and check the balance once more
|
||||
copyTwo := copyOne.Copy()
|
||||
if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
|
||||
t.Fatalf("second copy pre-commit balance mismatch: have %v, want %v", balance, 42)
|
||||
}
|
||||
if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
|
||||
t.Fatalf("second copy pre-commit code mismatch: have %x, want %x", code, []byte("hello"))
|
||||
}
|
||||
if val := copyTwo.GetState(addr, skey); val != sval {
|
||||
t.Fatalf("second copy pre-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
|
||||
}
|
||||
if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) {
|
||||
t.Fatalf("second copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
|
||||
}
|
||||
copyTwo.Commit(false)
|
||||
if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
|
||||
t.Fatalf("second copy post-commit balance mismatch: have %v, want %v", balance, 42)
|
||||
}
|
||||
if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
|
||||
t.Fatalf("second copy post-commit code mismatch: have %x, want %x", code, []byte("hello"))
|
||||
}
|
||||
if val := copyTwo.GetState(addr, skey); val != sval {
|
||||
t.Fatalf("second copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
|
||||
}
|
||||
if val := copyTwo.GetCommittedState(addr, skey); val != sval {
|
||||
t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval)
|
||||
}
|
||||
// Copy the copy-copy and check the balance once more
|
||||
copyThree := copyTwo.Copy()
|
||||
if balance := copyThree.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
|
||||
t.Fatalf("third copy balance mismatch: have %v, want %v", balance, 42)
|
||||
}
|
||||
if code := copyThree.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
|
||||
t.Fatalf("third copy code mismatch: have %x, want %x", code, []byte("hello"))
|
||||
}
|
||||
if val := copyThree.GetState(addr, skey); val != sval {
|
||||
t.Fatalf("third copy non-committed storage slot mismatch: have %x, want %x", val, sval)
|
||||
}
|
||||
if val := copyThree.GetCommittedState(addr, skey); val != sval {
|
||||
t.Fatalf("third copy committed storage slot mismatch: have %x, want %x", val, sval)
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeleteCreateRevert tests a weird state transition corner case that we hit
|
||||
// while changing the internals of statedb. The workflow is that a contract is
|
||||
// self destructed, then in a followup transaction (but same block) it's created
|
||||
// again and the transaction reverted.
|
||||
//
|
||||
// The original statedb implementation flushed dirty objects to the tries after
|
||||
// each transaction, so this works ok. The rework accumulated writes in memory
|
||||
// first, but the journal wiped the entire state object on create-revert.
|
||||
func TestDeleteCreateRevert(t *testing.T) {
|
||||
// Create an initial state with a single contract
|
||||
state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
|
||||
|
||||
addr := toAddr([]byte("so"))
|
||||
state.SetBalance(addr, big.NewInt(1))
|
||||
|
||||
root, _ := state.Commit(false)
|
||||
state.Reset(root)
|
||||
|
||||
// Simulate self-destructing in one transaction, then create-reverting in another
|
||||
state.Suicide(addr)
|
||||
state.Finalise(true)
|
||||
|
||||
id := state.Snapshot()
|
||||
state.SetBalance(addr, big.NewInt(2))
|
||||
state.RevertToSnapshot(id)
|
||||
|
||||
// Commit the entire state and make sure we don't crash and have the correct state
|
||||
root, _ = state.Commit(true)
|
||||
state.Reset(root)
|
||||
|
||||
if state.getStateObject(addr) != nil {
|
||||
t.Fatalf("self-destructed contract came alive")
|
||||
}
|
||||
}
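The comment above pins the regression on journal handling during create-revert. As a hedged, self-contained sketch of the mechanism (illustrative names, not the real statedb types), the journal records one undo action per mutation and a revert unwinds them newest-first back to the snapshot point:

package main

import "fmt"

// journalEntry undoes a single mutation. The real statedb uses typed
// entries (createObjectChange, balanceChange, ...); a closure per change
// is enough to show the mechanism.
type journalEntry func()

type journal struct{ entries []journalEntry }

func (j *journal) append(e journalEntry) { j.entries = append(j.entries, e) }

// snapshot returns the current journal length; revert undoes everything
// recorded after that point, newest first.
func (j *journal) snapshot() int { return len(j.entries) }
func (j *journal) revert(snap int) {
	for i := len(j.entries) - 1; i >= snap; i-- {
		j.entries[i]()
	}
	j.entries = j.entries[:snap]
}

func main() {
	balances := map[string]int{"so": 1}
	j := &journal{}

	snap := j.snapshot()
	prev := balances["so"]
	balances["so"] = 2 // the mutation made inside the reverted transaction
	j.append(func() { balances["so"] = prev })

	j.revert(snap)              // undo only the changes made after the snapshot
	fmt.Println(balances["so"]) // 1
}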

@ -136,7 +136,7 @@ func TestEmptyStateSync(t *testing.T) {
func TestIterativeStateSyncIndividual(t *testing.T) { testIterativeStateSync(t, 1) }
func TestIterativeStateSyncBatched(t *testing.T) { testIterativeStateSync(t, 100) }

func testIterativeStateSync(t *testing.T, batch int) {
func testIterativeStateSync(t *testing.T, count int) {
// Create a random state to copy
srcDb, srcRoot, srcAccounts := makeTestState()

@ -144,7 +144,7 @@ func testIterativeStateSync(t *testing.T, batch int) {
dstDb := rawdb.NewMemoryDatabase()
sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

queue := append([]common.Hash{}, sched.Missing(batch)...)
queue := append([]common.Hash{}, sched.Missing(count)...)
for len(queue) > 0 {
results := make([]trie.SyncResult, len(queue))
for i, hash := range queue {
@ -157,10 +157,12 @@ func testIterativeStateSync(t *testing.T, batch int) {
if _, index, err := sched.Process(results); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
if index, err := sched.Commit(dstDb); err != nil {
t.Fatalf("failed to commit data #%d: %v", index, err)
batch := dstDb.NewBatch()
if err := sched.Commit(batch); err != nil {
t.Fatalf("failed to commit data: %v", err)
}
queue = append(queue[:0], sched.Missing(batch)...)
batch.Write()
queue = append(queue[:0], sched.Missing(count)...)
}
// Cross check that the two states are in sync
checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
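This and the later sync tests switch from committing straight to the database to staging writes in a batch obtained from dstDb.NewBatch(), committing the scheduler into it, and flushing once per round with batch.Write(). A minimal stand-in for that buffer-then-flush pattern (hypothetical types, not the real ethdb interfaces):

package main

import "fmt"

// Batch buffers writes and flushes them in one go, mirroring the
// dstDb.NewBatch() / sched.Commit(batch) / batch.Write() sequence above.
type Batch struct {
	pending map[string][]byte
	db      map[string][]byte
}

func (b *Batch) Put(key string, val []byte) { b.pending[key] = val }

func (b *Batch) Write() {
	for k, v := range b.pending { // flush everything staged this round
		b.db[k] = v
	}
	b.pending = map[string][]byte{}
}

func main() {
	db := map[string][]byte{}
	batch := &Batch{pending: map[string][]byte{}, db: db}

	// Stage a round of sync results, then flush once, as the test loop does.
	batch.Put("node-a", []byte{0x01})
	batch.Put("node-b", []byte{0x02})
	batch.Write()

	fmt.Println(len(db)) // 2
}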
@ -190,9 +192,11 @@ func TestIterativeDelayedStateSync(t *testing.T) {
if _, index, err := sched.Process(results); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
if index, err := sched.Commit(dstDb); err != nil {
t.Fatalf("failed to commit data #%d: %v", index, err)
batch := dstDb.NewBatch()
if err := sched.Commit(batch); err != nil {
t.Fatalf("failed to commit data: %v", err)
}
batch.Write()
queue = append(queue[len(results):], sched.Missing(0)...)
}
// Cross check that the two states are in sync
@ -205,7 +209,7 @@ func TestIterativeDelayedStateSync(t *testing.T) {
func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) }
func TestIterativeRandomStateSyncBatched(t *testing.T) { testIterativeRandomStateSync(t, 100) }

func testIterativeRandomStateSync(t *testing.T, batch int) {
func testIterativeRandomStateSync(t *testing.T, count int) {
// Create a random state to copy
srcDb, srcRoot, srcAccounts := makeTestState()

@ -214,7 +218,7 @@ func testIterativeRandomStateSync(t *testing.T, batch int) {
sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))

queue := make(map[common.Hash]struct{})
for _, hash := range sched.Missing(batch) {
for _, hash := range sched.Missing(count) {
queue[hash] = struct{}{}
}
for len(queue) > 0 {
@ -231,11 +235,13 @@ func testIterativeRandomStateSync(t *testing.T, batch int) {
if _, index, err := sched.Process(results); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
if index, err := sched.Commit(dstDb); err != nil {
t.Fatalf("failed to commit data #%d: %v", index, err)
batch := dstDb.NewBatch()
if err := sched.Commit(batch); err != nil {
t.Fatalf("failed to commit data: %v", err)
}
batch.Write()
queue = make(map[common.Hash]struct{})
for _, hash := range sched.Missing(batch) {
for _, hash := range sched.Missing(count) {
queue[hash] = struct{}{}
}
}
@ -277,9 +283,11 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
if _, index, err := sched.Process(results); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
if index, err := sched.Commit(dstDb); err != nil {
t.Fatalf("failed to commit data #%d: %v", index, err)
batch := dstDb.NewBatch()
if err := sched.Commit(batch); err != nil {
t.Fatalf("failed to commit data: %v", err)
}
batch.Write()
for _, hash := range sched.Missing(0) {
queue[hash] = struct{}{}
}
@ -316,9 +324,11 @@ func TestIncompleteStateSync(t *testing.T) {
if _, index, err := sched.Process(results); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
if index, err := sched.Commit(dstDb); err != nil {
t.Fatalf("failed to commit data #%d: %v", index, err)
batch := dstDb.NewBatch()
if err := sched.Commit(batch); err != nil {
t.Fatalf("failed to commit data: %v", err)
}
batch.Write()
for _, result := range results {
added = append(added, result.Hash)
}

@ -68,7 +68,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
// Iterate over and process the individual transactions
for i, tx := range block.Transactions() {
statedb.Prepare(tx.Hash(), block.Hash(), i)
receipt, _, err := ApplyTransaction(p.config, p.bc, nil, gp, statedb, header, tx, usedGas, cfg)
receipt, err := ApplyTransaction(p.config, p.bc, nil, gp, statedb, header, tx, usedGas, cfg)
if err != nil {
return nil, nil, 0, err
}
@ -85,10 +85,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
// and uses the input parameters for its environment. It returns the receipt
// for the transaction, gas used and an error if the transaction failed,
// indicating the block was invalid.
func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, uint64, error) {
func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, error) {
msg, err := tx.AsMessage(types.MakeSigner(config, header.Number))
if err != nil {
return nil, 0, err
return nil, err
}
// Create a new context to be used in the EVM environment
context := NewEVMContext(msg, header, bc, author)
@ -98,7 +98,7 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
// Apply the transaction to the current state (included in the env)
_, gas, failed, err := ApplyMessage(vmenv, msg, gp)
if err != nil {
return nil, 0, err
return nil, err
}
// Update the state with pending changes
var root []byte
@ -125,5 +125,5 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
receipt.BlockNumber = header.Number
receipt.TransactionIndex = uint(statedb.TxIndex())

return receipt, gas, err
return receipt, err
}
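Since the receipt already records the gas consumed, the separate uint64 return was redundant; callers now read the gas off the receipt. A small self-contained analogue of the new two-value shape (names are illustrative, not the geth API):

package main

import (
	"errors"
	"fmt"
)

// Receipt carries its own GasUsed, which is why the two-value signature
// above suffices (analogue of types.Receipt).
type Receipt struct{ GasUsed uint64 }

// applyTransaction mimics the reduced (receipt, error) return.
func applyTransaction(gas uint64, fail bool) (*Receipt, error) {
	if fail {
		return nil, errors.New("invalid transaction")
	}
	return &Receipt{GasUsed: gas}, nil
}

func main() {
	receipt, err := applyTransaction(21000, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(receipt.GasUsed) // callers take the gas from the receipt now
}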

core/tx_pool.go
@ -97,13 +97,14 @@ var (
queuedNofundsMeter = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds

// General tx metrics
validMeter = metrics.NewRegisteredMeter("txpool/valid", nil)
knownTxMeter = metrics.NewRegisteredMeter("txpool/known", nil)
validTxMeter = metrics.NewRegisteredMeter("txpool/valid", nil)
invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil)
underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)

pendingCounter = metrics.NewRegisteredCounter("txpool/pending", nil)
queuedCounter = metrics.NewRegisteredCounter("txpool/queued", nil)
localCounter = metrics.NewRegisteredCounter("txpool/local", nil)
pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil)
localGauge = metrics.NewRegisteredGauge("txpool/local", nil)
)
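The pending, queued and local counts move from counters to gauges because these values shrink as transactions are mined or dropped; a counter is monotonic, a gauge is not. A tiny stand-in for the Inc/Dec usage that follows (not the real metrics package):

package main

import (
	"fmt"
	"sync/atomic"
)

// Gauge is a minimal analogue of the metrics.Gauge used above: unlike a
// monotonically increasing counter, it can be decremented as items leave.
type Gauge struct{ v int64 }

func (g *Gauge) Inc(n int64)  { atomic.AddInt64(&g.v, n) }
func (g *Gauge) Dec(n int64)  { atomic.AddInt64(&g.v, -n) }
func (g *Gauge) Value() int64 { return atomic.LoadInt64(&g.v) }

func main() {
	pendingGauge := &Gauge{}
	pendingGauge.Inc(3)               // three txs promoted to pending
	pendingGauge.Dec(1)               // one tx mined or dropped
	fmt.Println(pendingGauge.Value()) // 2
}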

// TxStatus is the current status of a transaction as seen by the pool.
@ -564,16 +565,15 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
hash := tx.Hash()
if pool.all.Get(hash) != nil {
log.Trace("Discarding already known transaction", "hash", hash)
knownTxMeter.Mark(1)
return false, fmt.Errorf("known transaction: %x", hash)
}

// If the transaction fails basic validation, discard it
if err := pool.validateTx(tx, local); err != nil {
log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
invalidTxMeter.Mark(1)
return false, err
}

// If the transaction pool is full, discard underpriced transactions
if uint64(pool.all.Count()) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
// If the new transaction is underpriced, don't accept it
@ -590,7 +590,6 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
pool.removeTx(tx.Hash(), false)
}
}

// Try to replace an existing transaction in the pending pool
from, _ := types.Sender(pool.signer, tx) // already validated
if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
@ -613,13 +612,11 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
return old != nil, nil
}

// New transaction isn't replacing a pending one, push into queue
replaced, err = pool.enqueueTx(hash, tx)
if err != nil {
return false, err
}

// Mark local addresses and journal local transactions
if local {
if !pool.locals.contains(from) {
@ -628,7 +625,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
}
}
if local || pool.locals.contains(from) {
localCounter.Inc(1)
localGauge.Inc(1)
}
pool.journalTx(from, tx)

@ -658,7 +655,7 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, er
queuedReplaceMeter.Mark(1)
} else {
// Nothing was replaced, bump the queued counter
queuedCounter.Inc(1)
queuedGauge.Inc(1)
}
if pool.all.Get(hash) == nil {
pool.all.Add(tx)
@ -707,7 +704,7 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
pendingReplaceMeter.Mark(1)
} else {
// Nothing was replaced, bump the pending counter
pendingCounter.Inc(1)
pendingGauge.Inc(1)
}
// Failsafe to work around direct pending inserts (tests)
if pool.all.Get(hash) == nil {
@ -768,15 +765,41 @@ func (pool *TxPool) AddRemote(tx *types.Transaction) error {

// addTxs attempts to queue a batch of transactions if they are valid.
func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
// Filter out known ones without obtaining the pool lock or recovering signatures
var (
errs = make([]error, len(txs))
news = make([]*types.Transaction, 0, len(txs))
)
for i, tx := range txs {
// If the transaction is known, pre-set the error slot
if pool.all.Get(tx.Hash()) != nil {
errs[i] = fmt.Errorf("known transaction: %x", tx.Hash())
knownTxMeter.Mark(1)
continue
}
// Accumulate all unknown transactions for deeper processing
news = append(news, tx)
}
if len(news) == 0 {
return errs
}
// Cache senders in transactions before obtaining lock (pool.signer is immutable)
for _, tx := range txs {
for _, tx := range news {
types.Sender(pool.signer, tx)
}

// Process all the new transactions and merge any errors into the original slice
pool.mu.Lock()
errs, dirtyAddrs := pool.addTxsLocked(txs, local)
newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
pool.mu.Unlock()

var nilSlot = 0
for _, err := range newErrs {
for errs[nilSlot] != nil {
nilSlot++
}
errs[nilSlot] = err
}
// Reorg the pool internals if needed and return
done := pool.requestPromoteExecutables(dirtyAddrs)
if sync {
<-done
@ -796,26 +819,29 @@ func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error,
dirty.addTx(tx)
}
}
validMeter.Mark(int64(len(dirty.accounts)))
validTxMeter.Mark(int64(len(dirty.accounts)))
return errs, dirty
}
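addTxs pre-fills error slots for known transactions, hands only the unknown ones to addTxsLocked, then merges the returned errors back into their original positions. A runnable sketch of that nil-slot merge; note that this sketch advances the slot after every merged result so that nil results also consume a slot, an assumption made here for correctness rather than taken verbatim from the diff:

package main

import (
	"errors"
	"fmt"
)

func main() {
	// errs was pre-sized to the full batch; indices 0 and 2 were pre-set
	// for already-known transactions, exactly as addTxs does above.
	errs := []error{
		errors.New("known transaction: 0xaa"),
		nil,
		errors.New("known transaction: 0xbb"),
		nil,
	}
	// newErrs come back from addTxsLocked in order for the unknown txs.
	newErrs := []error{nil, errors.New("nonce too low")}

	nilSlot := 0
	for _, err := range newErrs {
		for errs[nilSlot] != nil { // skip slots pre-filled for known txs
			nilSlot++
		}
		errs[nilSlot] = err
		nilSlot++ // advance even on nil so each result consumes one slot
	}
	for i, err := range errs {
		fmt.Println(i, err)
	}
}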

// Status returns the status (unknown/pending/queued) of a batch of transactions
// identified by their hashes.
func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
pool.mu.RLock()
defer pool.mu.RUnlock()

status := make([]TxStatus, len(hashes))
for i, hash := range hashes {
if tx := pool.all.Get(hash); tx != nil {
from, _ := types.Sender(pool.signer, tx) // already validated
if pool.pending[from] != nil && pool.pending[from].txs.items[tx.Nonce()] != nil {
status[i] = TxStatusPending
} else {
status[i] = TxStatusQueued
}
tx := pool.Get(hash)
if tx == nil {
continue
}
from, _ := types.Sender(pool.signer, tx) // already validated
pool.mu.RLock()
if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
status[i] = TxStatusPending
} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
status[i] = TxStatusQueued
}
// implicit else: the tx may have been included in a block between
// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct.
pool.mu.RUnlock()
}
return status
}
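The reworked Status consults the lock-free lookup first and holds the read lock only for the per-hash pending/queued classification, accepting that a transaction mined in between correctly reads as unknown. A self-contained analogue of that narrowed locking (stand-in types, not the real pool):

package main

import (
	"fmt"
	"sync"
)

// pool mirrors the reworked Status above: the read lock is held only for
// the short classification of each individual hash, not across the batch.
type pool struct {
	mu      sync.RWMutex
	pending map[string]bool
	queued  map[string]bool
}

func (p *pool) statusOf(hashes []string) []string {
	status := make([]string, len(hashes))
	for i, h := range hashes {
		status[i] = "unknown"
		p.mu.RLock() // lock per lookup, not across the whole batch
		if p.pending[h] {
			status[i] = "pending"
		} else if p.queued[h] {
			status[i] = "queued"
		}
		p.mu.RUnlock()
	}
	return status
}

func main() {
	p := &pool{pending: map[string]bool{"0xaa": true}, queued: map[string]bool{"0xbb": true}}
	fmt.Println(p.statusOf([]string{"0xaa", "0xbb", "0xcc"})) // [pending queued unknown]
}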
@ -841,7 +867,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
pool.priced.Removed(1)
}
if pool.locals.contains(addr) {
localCounter.Dec(1)
localGauge.Dec(1)
}
// Remove the transaction from the pending lists and reset the account nonce
if pending := pool.pending[addr]; pending != nil {
@ -858,7 +884,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
// Update the account nonce if needed
pool.pendingNonces.setIfLower(addr, tx.Nonce())
// Reduce the pending counter
pendingCounter.Dec(int64(1 + len(invalids)))
pendingGauge.Dec(int64(1 + len(invalids)))
return
}
}
@ -866,7 +892,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
if future := pool.queue[addr]; future != nil {
if removed, _ := future.Remove(tx); removed {
// Reduce the queued counter
queuedCounter.Dec(1)
queuedGauge.Dec(1)
}
if future.Empty() {
delete(pool.queue, addr)
@ -1164,7 +1190,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans
promoted = append(promoted, tx)
}
}
queuedCounter.Dec(int64(len(readies)))
queuedGauge.Dec(int64(len(readies)))

// Drop all transactions over the allowed limit
var caps types.Transactions
@ -1179,9 +1205,9 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans
}
// Mark all the items dropped as removed
pool.priced.Removed(len(forwards) + len(drops) + len(caps))
queuedCounter.Dec(int64(len(forwards) + len(drops) + len(caps)))
queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
if pool.locals.contains(addr) {
localCounter.Dec(int64(len(forwards) + len(drops) + len(caps)))
localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
}
// Delete the entire queue entry if it became empty.
if list.Empty() {
@ -1240,9 +1266,9 @@ func (pool *TxPool) truncatePending() {
log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
}
pool.priced.Removed(len(caps))
pendingCounter.Dec(int64(len(caps)))
pendingGauge.Dec(int64(len(caps)))
if pool.locals.contains(offenders[i]) {
localCounter.Dec(int64(len(caps)))
localGauge.Dec(int64(len(caps)))
}
pending--
}
@ -1267,9 +1293,9 @@ func (pool *TxPool) truncatePending() {
log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
}
pool.priced.Removed(len(caps))
pendingCounter.Dec(int64(len(caps)))
pendingGauge.Dec(int64(len(caps)))
if pool.locals.contains(addr) {
localCounter.Dec(int64(len(caps)))
localGauge.Dec(int64(len(caps)))
}
pending--
}
@ -1353,9 +1379,9 @@ func (pool *TxPool) demoteUnexecutables() {
log.Trace("Demoting pending transaction", "hash", hash)
pool.enqueueTx(hash, tx)
}
pendingCounter.Dec(int64(len(olds) + len(drops) + len(invalids)))
pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
if pool.locals.contains(addr) {
localCounter.Dec(int64(len(olds) + len(drops) + len(invalids)))
localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
}
// If there's a gap in front, alert (should never happen) and postpone all transactions
if list.Len() > 0 && list.txs.Get(nonce) == nil {
@ -1365,7 +1391,7 @@ func (pool *TxPool) demoteUnexecutables() {
log.Error("Demoting invalidated transaction", "hash", hash)
pool.enqueueTx(hash, tx)
}
pendingCounter.Dec(int64(len(gapped)))
pendingGauge.Dec(int64(len(gapped)))
}
// Delete the entire queue entry if it became empty.
if list.Empty() {

@ -1438,6 +1438,71 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) {
}
}

// Tests that the pool rejects duplicate transactions.
func TestTransactionDeduplication(t *testing.T) {
t.Parallel()

// Create the pool to test the pricing enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}

pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
defer pool.Stop()

// Create a test account to add transactions with
key, _ := crypto.GenerateKey()
pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000))

// Create a batch of transactions and add a few of them
txs := make([]*types.Transaction, 16)
for i := 0; i < len(txs); i++ {
txs[i] = pricedTransaction(uint64(i), 100000, big.NewInt(1), key)
}
var firsts []*types.Transaction
for i := 0; i < len(txs); i += 2 {
firsts = append(firsts, txs[i])
}
errs := pool.AddRemotesSync(firsts)
if len(errs) != len(firsts) {
t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), len(firsts))
}
for i, err := range errs {
if err != nil {
t.Errorf("add %d failed: %v", i, err)
}
}
pending, queued := pool.Stats()
if pending != 1 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1)
}
if queued != len(txs)/2-1 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, len(txs)/2-1)
}
// Try to add all of them now and ensure previous ones error out as knowns
errs = pool.AddRemotesSync(txs)
if len(errs) != len(txs) {
t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), len(txs))
}
for i, err := range errs {
if i%2 == 0 && err == nil {
t.Errorf("add %d succeeded, should have failed as known", i)
}
if i%2 == 1 && err != nil {
t.Errorf("add %d failed: %v", i, err)
}
}
pending, queued = pool.Stats()
if pending != len(txs) {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, len(txs))
}
if queued != 0 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
}
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
}

// Tests that the pool rejects replacement transactions that don't meet the minimum
// price bump required.
func TestTransactionReplacement(t *testing.T) {

@ -106,8 +106,13 @@ func (c *ecrecover) Run(input []byte) ([]byte, error) {
if !allZero(input[32:63]) || !crypto.ValidateSignatureValues(v, r, s, false) {
return nil, nil
}
// We must make sure not to modify the 'input', so placing the 'v' along with
// the signature needs to be done on a new allocation
sig := make([]byte, 65)
copy(sig, input[64:128])
sig[64] = v
// v needs to be at the end for libsecp256k1
pubKey, err := crypto.Ecrecover(input[:32], append(input[64:128], v))
pubKey, err := crypto.Ecrecover(input[:32], sig)
// make sure the public key is a valid one
if err != nil {
return nil, nil
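The new allocation above avoids a subtle aliasing bug: append may write through to the caller's backing array whenever spare capacity exists. A self-contained demonstration of the hazard (illustrative values, not the precompile input layout):

package main

import "fmt"

func main() {
	buf := make([]byte, 4, 5) // spare capacity, like a sub-slice of a larger input
	in := buf[:4]

	// append reuses in's backing array because the capacity allows it, so
	// the "new" slice writes into the caller's buffer - the bug fixed above.
	out := append(in, 0xFF)

	fmt.Println(buf[:cap(buf)][4]) // 255: the caller's storage was modified
	_ = out
}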

@ -17,6 +17,7 @@
package vm

import (
"bytes"
"fmt"
"math/big"
"reflect"
@ -409,6 +410,11 @@ func testPrecompiled(addr string, test precompiledTest, t *testing.T) {
} else if common.Bytes2Hex(res) != test.expected {
t.Errorf("Expected %v, got %v", test.expected, common.Bytes2Hex(res))
}
// Verify that the precompile did not touch the input buffer
exp := common.Hex2Bytes(test.input)
if !bytes.Equal(in, exp) {
t.Errorf("Precompiled %v modified input data", addr)
}
})
}

@ -423,6 +429,11 @@ func testPrecompiledFailure(addr string, test precompiledFailureTest, t *testing
if !reflect.DeepEqual(err, test.expectedError) {
t.Errorf("Expected error [%v], got [%v]", test.expectedError, err)
}
// Verify that the precompile did not touch the input buffer
exp := common.Hex2Bytes(test.input)
if !bytes.Equal(in, exp) {
t.Errorf("Precompiled %v modified input data", addr)
}
})
}

@ -574,3 +585,55 @@ func TestPrecompileBlake2FMalformedInput(t *testing.T) {
testPrecompiledFailure("09", test, t)
}
}

// EcRecover test vectors
var ecRecoverTests = []precompiledTest{
{
input: "a8b53bdf3306a35a7103ab5504a0c9b492295564b6202b1942a84ef300107281" +
"000000000000000000000000000000000000000000000000000000000000001b" +
"3078356531653033663533636531386237373263636230303933666637316633" +
"6635336635633735623734646362333161383561613862383839326234653862" +
"1122334455667788991011121314151617181920212223242526272829303132",
expected: "",
name: "CallEcrecoverUnrecoverableKey",
},
{
input: "18c547e4f7b0f325ad1e56f57e26c745b09a3e503d86e00e5255ff7f715d3d1c" +
"000000000000000000000000000000000000000000000000000000000000001c" +
"73b1693892219d736caba55bdb67216e485557ea6b6af75f37096c9aa6a5a75f" +
"eeb940b1d03b21e36b0e47e79769f095fe2ab855bd91e3a38756b7d75a9c4549",
expected: "000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b",
name: "ValidKey",
},
{
input: "18c547e4f7b0f325ad1e56f57e26c745b09a3e503d86e00e5255ff7f715d3d1c" +
"100000000000000000000000000000000000000000000000000000000000001c" +
"73b1693892219d736caba55bdb67216e485557ea6b6af75f37096c9aa6a5a75f" +
"eeb940b1d03b21e36b0e47e79769f095fe2ab855bd91e3a38756b7d75a9c4549",
expected: "",
name: "InvalidHighV-bits-1",
},
{
input: "18c547e4f7b0f325ad1e56f57e26c745b09a3e503d86e00e5255ff7f715d3d1c" +
"000000000000000000000000000000000000001000000000000000000000001c" +
"73b1693892219d736caba55bdb67216e485557ea6b6af75f37096c9aa6a5a75f" +
"eeb940b1d03b21e36b0e47e79769f095fe2ab855bd91e3a38756b7d75a9c4549",
expected: "",
name: "InvalidHighV-bits-2",
},
{
input: "18c547e4f7b0f325ad1e56f57e26c745b09a3e503d86e00e5255ff7f715d3d1c" +
"000000000000000000000000000000000000001000000000000000000000011c" +
"73b1693892219d736caba55bdb67216e485557ea6b6af75f37096c9aa6a5a75f" +
"eeb940b1d03b21e36b0e47e79769f095fe2ab855bd91e3a38756b7d75a9c4549",
expected: "",
name: "InvalidHighV-bits-3",
},
}

func TestPrecompiledEcrecover(t *testing.T) {
for _, test := range ecRecoverTests {
testPrecompiled("01", test, t)
}

}

@ -384,7 +384,7 @@ func opSAR(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *

func opSha3(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
offset, size := stack.pop(), stack.pop()
data := memory.Get(offset.Int64(), size.Int64())
data := memory.GetPtr(offset.Int64(), size.Int64())

if interpreter.hasher == nil {
interpreter.hasher = sha3.NewLegacyKeccak256().(keccakState)
@ -602,11 +602,9 @@ func opPop(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *
}

func opMload(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
offset := stack.pop()
val := interpreter.intPool.get().SetBytes(memory.Get(offset.Int64(), 32))
stack.push(val)

interpreter.intPool.put(offset)
v := stack.peek()
offset := v.Int64()
v.SetBytes(memory.GetPtr(offset, 32))
return nil, nil
}

@ -691,7 +689,7 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memor
var (
value = stack.pop()
offset, size = stack.pop(), stack.pop()
input = memory.Get(offset.Int64(), size.Int64())
input = memory.GetCopy(offset.Int64(), size.Int64())
gas = contract.Gas
)
if interpreter.evm.chainRules.IsEIP150 {
@ -725,7 +723,7 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memo
endowment = stack.pop()
offset, size = stack.pop(), stack.pop()
salt = stack.pop()
input = memory.Get(offset.Int64(), size.Int64())
input = memory.GetCopy(offset.Int64(), size.Int64())
gas = contract.Gas
)

@ -757,7 +755,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory
toAddr := common.BigToAddress(addr)
value = math.U256(value)
// Get the arguments from the memory.
args := memory.Get(inOffset.Int64(), inSize.Int64())
args := memory.GetPtr(inOffset.Int64(), inSize.Int64())

if value.Sign() != 0 {
gas += params.CallStipend
@ -786,7 +784,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, contract *Contract, mem
toAddr := common.BigToAddress(addr)
value = math.U256(value)
// Get arguments from the memory.
args := memory.Get(inOffset.Int64(), inSize.Int64())
args := memory.GetPtr(inOffset.Int64(), inSize.Int64())

if value.Sign() != 0 {
gas += params.CallStipend
@ -814,7 +812,7 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, contract *Contract,
addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.BigToAddress(addr)
// Get arguments from the memory.
args := memory.Get(inOffset.Int64(), inSize.Int64())
args := memory.GetPtr(inOffset.Int64(), inSize.Int64())

ret, returnGas, err := interpreter.evm.DelegateCall(contract, toAddr, args, gas)
if err != nil {
@ -839,7 +837,7 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, contract *Contract, m
addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.BigToAddress(addr)
// Get arguments from the memory.
args := memory.Get(inOffset.Int64(), inSize.Int64())
args := memory.GetPtr(inOffset.Int64(), inSize.Int64())

ret, returnGas, err := interpreter.evm.StaticCall(contract, toAddr, args, gas)
if err != nil {
@ -895,7 +893,7 @@ func makeLog(size int) executionFunc {
topics[i] = common.BigToHash(stack.pop())
}

d := memory.Get(mStart.Int64(), mSize.Int64())
d := memory.GetCopy(mStart.Int64(), mSize.Int64())
interpreter.evm.StateDB.AddLog(&types.Log{
Address: contract.Address(),
Topics: topics,

@ -509,12 +509,12 @@ func TestOpMstore(t *testing.T) {
v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700"
stack.pushN(new(big.Int).SetBytes(common.Hex2Bytes(v)), big.NewInt(0))
opMstore(&pc, evmInterpreter, nil, mem, stack)
if got := common.Bytes2Hex(mem.Get(0, 32)); got != v {
if got := common.Bytes2Hex(mem.GetCopy(0, 32)); got != v {
t.Fatalf("Mstore fail, got %v, expected %v", got, v)
}
stack.pushN(big.NewInt(0x1), big.NewInt(0))
opMstore(&pc, evmInterpreter, nil, mem, stack)
if common.Bytes2Hex(mem.Get(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" {
if common.Bytes2Hex(mem.GetCopy(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" {
t.Fatalf("Mstore failed to overwrite previous value")
}
poolOfIntPools.put(evmInterpreter.intPool)

@ -70,7 +70,7 @@ func (m *Memory) Resize(size uint64) {
}

// Get returns offset + size as a new slice
func (m *Memory) Get(offset, size int64) (cpy []byte) {
func (m *Memory) GetCopy(offset, size int64) (cpy []byte) {
if size == 0 {
return nil
}
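Splitting Get into GetCopy and GetPtr makes the aliasing explicit: GetPtr hands out a view into interpreter memory for transient reads, while GetCopy snapshots data that must outlive the call (CREATE input, log payloads). A minimal analogue with a stand-in type rather than the real vm.Memory:

package main

import "fmt"

type memory struct{ store []byte }

// getPtr returns a slice aliasing the underlying store: cheap, but later
// memory writes are visible through it.
func (m *memory) getPtr(offset, size int64) []byte {
	return m.store[offset : offset+size]
}

// getCopy returns an independent snapshot, safe to retain.
func (m *memory) getCopy(offset, size int64) []byte {
	cpy := make([]byte, size)
	copy(cpy, m.store[offset:offset+size])
	return cpy
}

func main() {
	m := &memory{store: []byte{1, 2, 3, 4}}
	ptr := m.getPtr(0, 2)
	cpy := m.getCopy(0, 2)

	m.store[0] = 9              // a later memory write
	fmt.Println(ptr[0], cpy[0]) // 9 1: the pointer view changed, the copy did not
}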

@ -35,7 +35,6 @@ import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"flag"
"fmt"
"math/big"
"testing"
@ -43,14 +42,6 @@ import (
"github.com/ethereum/go-ethereum/crypto"
)

var dumpEnc bool

func init() {
flDump := flag.Bool("dump", false, "write encrypted test message to file")
flag.Parse()
dumpEnc = *flDump
}

// Ensure the KDF generates appropriately sized keys.
func TestKDF(t *testing.T) {
msg := []byte("Hello, world")

@ -48,8 +48,8 @@ For more IDE support install the `linter-eslint` package too, which finds the `.
[ESLint]: https://eslint.org/
[Airbnb]: https://github.com/airbnb/javascript/tree/master/react
[Webpack]: https://webpack.github.io/
[WA]: http://webpack.github.io/analyse/
[WV]: http://chrisbateman.github.io/webpack-visualizer/
[WA]: https://webpack.github.io/analyse/
[WV]: https://chrisbateman.github.io/webpack-visualizer/
[Node.js]: https://nodejs.org/en/
[Flow]: https://flow.org/
[Atom]: https://atom.io/

@ -125,7 +125,7 @@ func (db *Dashboard) APIs() []rpc.API { return nil }
// Start starts the data collection thread and the listening server of the dashboard.
// Implements the node.Service interface.
func (db *Dashboard) Start(server *p2p.Server) error {
log.Info("Starting dashboard")
log.Info("Starting dashboard", "url", fmt.Sprintf("http://%s:%d", db.config.Host, db.config.Port))

db.wg.Add(3)
go db.collectSystemData()

@ -72,6 +72,23 @@ func (b *EthAPIBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumb
return b.eth.blockchain.GetHeaderByNumber(uint64(number)), nil
}

func (b *EthAPIBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) {
if blockNr, ok := blockNrOrHash.Number(); ok {
return b.HeaderByNumber(ctx, blockNr)
}
if hash, ok := blockNrOrHash.Hash(); ok {
header := b.eth.blockchain.GetHeaderByHash(hash)
if header == nil {
return nil, errors.New("header for hash not found")
}
if blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(header.Number.Uint64()) != hash {
return nil, errors.New("hash is not currently canonical")
}
return header, nil
}
return nil, errors.New("invalid arguments; neither block nor hash specified")
}

func (b *EthAPIBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
return b.eth.blockchain.GetHeaderByHash(hash), nil
}
@ -93,6 +110,27 @@ func (b *EthAPIBackend) BlockByHash(ctx context.Context, hash common.Hash) (*typ
return b.eth.blockchain.GetBlockByHash(hash), nil
}

func (b *EthAPIBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) {
if blockNr, ok := blockNrOrHash.Number(); ok {
return b.BlockByNumber(ctx, blockNr)
}
if hash, ok := blockNrOrHash.Hash(); ok {
header := b.eth.blockchain.GetHeaderByHash(hash)
if header == nil {
return nil, errors.New("header for hash not found")
}
if blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(header.Number.Uint64()) != hash {
return nil, errors.New("hash is not currently canonical")
}
block := b.eth.blockchain.GetBlock(hash, header.Number.Uint64())
if block == nil {
return nil, errors.New("header found, but block body is missing")
}
return block, nil
}
return nil, errors.New("invalid arguments; neither block nor hash specified")
}

func (b *EthAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) {
// Pending state is only known by the miner
if number == rpc.PendingBlockNumber {
@ -111,6 +149,27 @@ func (b *EthAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.B
return stateDb, header, err
}

func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) {
if blockNr, ok := blockNrOrHash.Number(); ok {
return b.StateAndHeaderByNumber(ctx, blockNr)
}
if hash, ok := blockNrOrHash.Hash(); ok {
header, err := b.HeaderByHash(ctx, hash)
if err != nil {
return nil, nil, err
}
if header == nil {
return nil, nil, errors.New("header for hash not found")
}
if blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(header.Number.Uint64()) != hash {
return nil, nil, errors.New("hash is not currently canonical")
}
stateDb, err := b.eth.BlockChain().StateAt(header.Root)
return stateDb, header, err
}
return nil, nil, errors.New("invalid arguments; neither block nor hash specified")
}
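These three ...OrHash backends are what let an RPC caller pin a request to an exact block hash, optionally requiring it to be canonical, instead of a number. A hedged usage sketch; it assumes the rpc.BlockNumberOrHashWithHash and rpc.BlockNumberOrHashWithNumber helpers shipped with go-ethereum of this period:

package sketch

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
)

// headerBackend is trimmed to the one method relevant here.
type headerBackend interface {
	HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error)
}

// lookupExamples resolves a header once by hash and once by symbolic number.
func lookupExamples(b headerBackend, hash common.Hash) (*types.Header, *types.Header, error) {
	// Resolve by hash; the second argument maps to the RequireCanonical
	// check performed above.
	byHash := rpc.BlockNumberOrHashWithHash(hash, true)
	h1, err := b.HeaderByNumberOrHash(context.Background(), byHash)
	if err != nil {
		return nil, nil, err
	}
	// Or resolve symbolically by number ("latest").
	byNumber := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
	h2, err := b.HeaderByNumberOrHash(context.Background(), byNumber)
	return h1, h2, err
}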

func (b *EthAPIBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
return b.eth.blockchain.GetReceiptsByHash(hash), nil
}

@ -69,8 +69,6 @@ type Ethereum struct {
// Channel for shutting down the service
shutdownChan chan bool

server *p2p.Server

// Handlers
txPool *core.TxPool
blockchain *core.BlockChain
@ -179,11 +177,12 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
EVMInterpreter: config.EVMInterpreter,
}
cacheConfig = &core.CacheConfig{
TrieCleanLimit: config.TrieCleanCache,
TrieCleanNoPrefetch: config.NoPrefetch,
TrieDirtyLimit: config.TrieDirtyCache,
TrieDirtyDisabled: config.NoPruning,
TrieTimeLimit: config.TrieTimeout,
TrieCleanLimit: config.TrieCleanCache,
TrieCleanNoPrefetch: config.NoPrefetch,
TrieDirtyLimit: config.TrieDirtyCache,
TrieDirtyDisabled: config.NoPruning,
TrieTimeLimit: config.TrieTimeout,
ProcessingStateDiffs: config.StateDiff,
}
)
eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve)

@ -61,6 +61,8 @@ var DefaultConfig = Config{
Blocks: 20,
Percentile: 60,
},

StateDiff: false,
}

func init() {
@ -157,4 +159,6 @@ type Config struct {

// Istanbul block override (TODO: remove after the fork)
OverrideIstanbul *big.Int

StateDiff bool
}

@ -1574,13 +1574,14 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error {
func (d *Downloader) processFastSyncContent(latest *types.Header) error {
// Start syncing state of the reported head block. This should get us most of
// the state of the pivot block.
stateSync := d.syncState(latest.Root)
defer stateSync.Cancel()
go func() {
if err := stateSync.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled {
sync := d.syncState(latest.Root)
defer sync.Cancel()
closeOnErr := func(s *stateSync) {
if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled {
d.queue.Close() // wake up Results
}
}()
}
go closeOnErr(sync)
// Figure out the ideal pivot block. Note that this goalpost may move if the
// sync takes long enough for the chain head to move significantly.
pivot := uint64(0)
@ -1600,12 +1601,12 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
if len(results) == 0 {
// If pivot sync is done, stop
if oldPivot == nil {
return stateSync.Cancel()
return sync.Cancel()
}
// If sync failed, stop
select {
case <-d.cancelCh:
stateSync.Cancel()
sync.Cancel()
return errCanceled
default:
}
@ -1625,28 +1626,24 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
}
}
P, beforeP, afterP := splitAroundPivot(pivot, results)
if err := d.commitFastSyncData(beforeP, stateSync); err != nil {
if err := d.commitFastSyncData(beforeP, sync); err != nil {
return err
}
if P != nil {
// If new pivot block found, cancel old state retrieval and restart
if oldPivot != P {
stateSync.Cancel()
sync.Cancel()

stateSync = d.syncState(P.Header.Root)
defer stateSync.Cancel()
go func() {
if err := stateSync.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled {
d.queue.Close() // wake up Results
}
}()
sync = d.syncState(P.Header.Root)
defer sync.Cancel()
go closeOnErr(sync)
oldPivot = P
}
// Wait for completion, occasionally checking for pivot staleness
select {
case <-stateSync.done:
if stateSync.err != nil {
return stateSync.err
case <-sync.done:
if sync.err != nil {
return sync.err
}
if err := d.commitPivotBlock(P); err != nil {
return err

@ -347,7 +347,7 @@ func (s *stateSync) commit(force bool) error {
}
start := time.Now()
b := s.d.stateDB.NewBatch()
if written, err := s.sched.Commit(b); written == 0 || err != nil {
if err := s.sched.Commit(b); err != nil {
return err
}
if err := b.Write(); err != nil {

@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/fetcher"
@ -63,7 +64,8 @@ func errResp(code errCode, format string, v ...interface{}) error {
}

type ProtocolManager struct {
networkID uint64
networkID uint64
forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node

fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)
@ -103,6 +105,7 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh
// Create the protocol manager with the base fields
manager := &ProtocolManager{
networkID: networkID,
forkFilter: forkid.NewFilter(blockchain),
eventMux: mux,
txpool: txpool,
blockchain: blockchain,
@ -304,7 +307,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
number = head.Number.Uint64()
td = pm.blockchain.GetTd(hash, number)
)
if err := p.Handshake(pm.networkID, td, hash, genesis.Hash()); err != nil {
if err := p.Handshake(pm.networkID, td, hash, genesis.Hash(), forkid.NewID(pm.blockchain), pm.forkFilter); err != nil {
p.Log().Debug("Ethereum handshake failed", "err", err)
return err
}

@ -39,8 +39,8 @@ import (
)

// Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeaders62(t *testing.T) { testGetBlockHeaders(t, 62) }
func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) }
func TestGetBlockHeaders64(t *testing.T) { testGetBlockHeaders(t, 64) }

func testGetBlockHeaders(t *testing.T, protocol int) {
pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, downloader.MaxHashFetch+15, nil, nil)
@ -198,8 +198,8 @@ func testGetBlockHeaders(t *testing.T, protocol int) {
}

// Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodies62(t *testing.T) { testGetBlockBodies(t, 62) }
func TestGetBlockBodies63(t *testing.T) { testGetBlockBodies(t, 63) }
func TestGetBlockBodies64(t *testing.T) { testGetBlockBodies(t, 64) }

func testGetBlockBodies(t *testing.T, protocol int) {
pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, downloader.MaxBlockFetch+15, nil, nil)
@ -271,6 +271,7 @@ func testGetBlockBodies(t *testing.T, protocol int) {

// Tests that the node state database can be retrieved based on hashes.
func TestGetNodeData63(t *testing.T) { testGetNodeData(t, 63) }
func TestGetNodeData64(t *testing.T) { testGetNodeData(t, 64) }

func testGetNodeData(t *testing.T, protocol int) {
// Define three accounts to simulate transactions with
@ -367,6 +368,7 @@ func testGetNodeData(t *testing.T, protocol int) {

// Tests that the transaction receipts can be retrieved based on hashes.
func TestGetReceipt63(t *testing.T) { testGetReceipt(t, 63) }
func TestGetReceipt64(t *testing.T) { testGetReceipt(t, 64) }

func testGetReceipt(t *testing.T, protocol int) {
// Define three accounts to simulate transactions with

@ -22,6 +22,7 @@ package eth
import (
"crypto/ecdsa"
"crypto/rand"
"fmt"
"math/big"
"sort"
"sync"
@ -30,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@ -171,20 +173,35 @@ func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*te
head = pm.blockchain.CurrentHeader()
td = pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
)
tp.handshake(nil, td, head.Hash(), genesis.Hash())
tp.handshake(nil, td, head.Hash(), genesis.Hash(), forkid.NewID(pm.blockchain), forkid.NewFilter(pm.blockchain))
}
return tp, errc
}

// handshake simulates a trivial handshake that expects the same state from the
// remote side as we are simulating locally.
func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, genesis common.Hash) {
msg := &statusData{
ProtocolVersion: uint32(p.version),
NetworkId: DefaultConfig.NetworkId,
TD: td,
CurrentBlock: head,
GenesisBlock: genesis,
func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) {
var msg interface{}
switch {
case p.version == eth63:
msg = &statusData63{
ProtocolVersion: uint32(p.version),
NetworkId: DefaultConfig.NetworkId,
TD: td,
CurrentBlock: head,
GenesisBlock: genesis,
}
case p.version == eth64:
msg = &statusData{
ProtocolVersion: uint32(p.version),
NetworkID: DefaultConfig.NetworkId,
TD: td,
Head: head,
Genesis: genesis,
ForkID: forkID,
}
default:
panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
}
if err := p2p.ExpectMsg(p.app, StatusMsg, msg); err != nil {
t.Fatalf("status recv: %v", err)

eth/peer.go
@ -25,6 +25,7 @@ import (

mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp"
@ -353,22 +354,46 @@ func (p *peer) RequestReceipts(hashes []common.Hash) error {

// Handshake executes the eth protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash) error {
func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) error {
// Send out own handshake in a new thread
errc := make(chan error, 2)
var status statusData // safe to read after two values have been received from errc

var (
status63 statusData63 // safe to read after two values have been received from errc
status statusData // safe to read after two values have been received from errc
)
go func() {
errc <- p2p.Send(p.rw, StatusMsg, &statusData{
ProtocolVersion: uint32(p.version),
NetworkId: network,
TD: td,
CurrentBlock: head,
GenesisBlock: genesis,
})
switch {
case p.version == eth63:
errc <- p2p.Send(p.rw, StatusMsg, &statusData63{
ProtocolVersion: uint32(p.version),
NetworkId: network,
TD: td,
CurrentBlock: head,
GenesisBlock: genesis,
})
case p.version == eth64:
errc <- p2p.Send(p.rw, StatusMsg, &statusData{
ProtocolVersion: uint32(p.version),
NetworkID: network,
TD: td,
Head: head,
Genesis: genesis,
ForkID: forkID,
})
default:
panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
}
}()
go func() {
errc <- p.readStatus(network, &status, genesis)
switch {
case p.version == eth63:
errc <- p.readStatusLegacy(network, &status63, genesis)
case p.version == eth64:
errc <- p.readStatus(network, &status, genesis, forkFilter)
default:
panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
}
}()
timeout := time.NewTimer(handshakeTimeout)
defer timeout.Stop()
@ -382,11 +407,18 @@ func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis
return p2p.DiscReadTimeout
}
}
p.td, p.head = status.TD, status.CurrentBlock
switch {
case p.version == eth63:
p.td, p.head = status63.TD, status63.CurrentBlock
case p.version == eth64:
p.td, p.head = status.TD, status.Head
default:
panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
}
return nil
}

func (p *peer) readStatus(network uint64, status *statusData, genesis common.Hash) (err error) {
func (p *peer) readStatusLegacy(network uint64, status *statusData63, genesis common.Hash) error {
msg, err := p.rw.ReadMsg()
if err != nil {
return err
@ -402,10 +434,10 @@ func (p *peer) readStatus(network uint64, status *statusData, genesis common.Has
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
if status.GenesisBlock != genesis {
return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", status.GenesisBlock[:8], genesis[:8])
return errResp(ErrGenesisMismatch, "%x (!= %x)", status.GenesisBlock[:8], genesis[:8])
}
if status.NetworkId != network {
return errResp(ErrNetworkIdMismatch, "%d (!= %d)", status.NetworkId, network)
return errResp(ErrNetworkIDMismatch, "%d (!= %d)", status.NetworkId, network)
}
if int(status.ProtocolVersion) != p.version {
return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version)
@ -413,6 +445,36 @@ func (p *peer) readStatus(network uint64, status *statusData, genesis common.Has
return nil
}

func (p *peer) readStatus(network uint64, status *statusData, genesis common.Hash, forkFilter forkid.Filter) error {
msg, err := p.rw.ReadMsg()
if err != nil {
return err
}
if msg.Code != StatusMsg {
return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
}
if msg.Size > protocolMaxMsgSize {
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize)
}
// Decode the handshake and make sure everything matches
if err := msg.Decode(&status); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
if status.NetworkID != network {
return errResp(ErrNetworkIDMismatch, "%d (!= %d)", status.NetworkID, network)
}
if int(status.ProtocolVersion) != p.version {
return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version)
}
if status.Genesis != genesis {
return errResp(ErrGenesisMismatch, "%x (!= %x)", status.Genesis, genesis)
}
if err := forkFilter(status.ForkID); err != nil {
return errResp(ErrForkIDRejected, "%v", err)
}
return nil
}
|
||||
|
||||
// String implements fmt.Stringer.
|
||||
func (p *peer) String() string {
|
||||
return fmt.Sprintf("Peer %s [%s]", p.id,
|
||||
|
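For orientation, the new eth/64 path in readStatus hands the peer's advertised fork ID to a forkid.Filter built from the local chain. A minimal, hedged sketch of that check in isolation (the wrapper function and the chain value are illustrative, not part of this diff):

package sketch

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/forkid"
)

// checkRemoteForkID mirrors what readStatus does after decoding the status
// packet: run the remote fork ID through a filter derived from our own chain.
func checkRemoteForkID(chain *core.BlockChain, remote forkid.ID) error {
    filter := forkid.NewFilter(chain) // forkid.Filter is a func(forkid.ID) error
    if err := filter(remote); err != nil {
        // The handshake surfaces this as errResp(ErrForkIDRejected, "%v", err).
        return fmt.Errorf("fork ID rejected: %v", err)
    }
    return nil
}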
@ -23,6 +23,7 @@ import (

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/forkid"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/rlp"
@ -30,24 +31,23 @@ import (

// Constants to match up protocol versions and messages
const (
    eth62 = 62
    eth63 = 63
    eth64 = 64
)

// protocolName is the official short name of the protocol used during capability negotiation.
const protocolName = "eth"

// ProtocolVersions are the supported versions of the eth protocol (first is primary).
var ProtocolVersions = []uint{eth63}
var ProtocolVersions = []uint{eth64, eth63}

// protocolLengths are the number of implemented messages corresponding to different protocol versions.
var protocolLengths = map[uint]uint64{eth63: 17, eth62: 8}
var protocolLengths = map[uint]uint64{eth64: 17, eth63: 17}

const protocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message

// eth protocol message codes
const (
    // Protocol messages belonging to eth/62
    StatusMsg         = 0x00
    NewBlockHashesMsg = 0x01
    TxMsg             = 0x02
@ -56,12 +56,10 @@ const (
    GetBlockBodiesMsg = 0x05
    BlockBodiesMsg    = 0x06
    NewBlockMsg       = 0x07

    // Protocol messages belonging to eth/63
    GetNodeDataMsg = 0x0d
    NodeDataMsg    = 0x0e
    GetReceiptsMsg = 0x0f
    ReceiptsMsg    = 0x10
    GetNodeDataMsg    = 0x0d
    NodeDataMsg       = 0x0e
    GetReceiptsMsg    = 0x0f
    ReceiptsMsg       = 0x10
)

type errCode int
@ -71,11 +69,11 @@ const (
    ErrDecode
    ErrInvalidMsgCode
    ErrProtocolVersionMismatch
    ErrNetworkIdMismatch
    ErrGenesisBlockMismatch
    ErrNetworkIDMismatch
    ErrGenesisMismatch
    ErrForkIDRejected
    ErrNoStatusMsg
    ErrExtraStatusMsg
    ErrSuspendedPeer
)

func (e errCode) String() string {
@ -88,11 +86,11 @@ var errorToString = map[int]string{
    ErrDecode:                  "Invalid message",
    ErrInvalidMsgCode:          "Invalid message code",
    ErrProtocolVersionMismatch: "Protocol version mismatch",
    ErrNetworkIdMismatch:       "NetworkId mismatch",
    ErrGenesisBlockMismatch:    "Genesis block mismatch",
    ErrNetworkIDMismatch:       "Network ID mismatch",
    ErrGenesisMismatch:         "Genesis mismatch",
    ErrForkIDRejected:          "Fork ID rejected",
    ErrNoStatusMsg:             "No status message",
    ErrExtraStatusMsg:          "Extra status message",
    ErrSuspendedPeer:           "Suspended peer",
}

type txPool interface {
@ -108,8 +106,8 @@ type txPool interface {
    SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
}

// statusData is the network packet for the status message.
type statusData struct {
// statusData63 is the network packet for the status message for eth/63.
type statusData63 struct {
    ProtocolVersion uint32
    NetworkId       uint64
    TD              *big.Int
@ -117,6 +115,16 @@ type statusData struct {
    GenesisBlock common.Hash
}

// statusData is the network packet for the status message for eth/64 and later.
type statusData struct {
    ProtocolVersion uint32
    NetworkID       uint64
    TD              *big.Int
    Head            common.Hash
    Genesis         common.Hash
    ForkID          forkid.ID
}

// newBlockHashesData is the network packet for the block announcements.
type newBlockHashesData []struct {
    Hash common.Hash // Hash of one particular block being announced
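Both packets are plain RLP structs, so the wire-level difference in eth/64 is just the Head/Genesis renaming plus the trailing ForkID. A standalone sketch of encoding the new packet (all values are dummies; the local statusData mirror only keeps the example self-contained):

package main

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/forkid"
    "github.com/ethereum/go-ethereum/rlp"
)

// statusData mirrors the eth/64 packet defined above, for illustration only.
type statusData struct {
    ProtocolVersion uint32
    NetworkID       uint64
    TD              *big.Int
    Head            common.Hash
    Genesis         common.Hash
    ForkID          forkid.ID
}

func main() {
    enc, err := rlp.EncodeToBytes(&statusData{
        ProtocolVersion: 64,
        NetworkID:       1,
        TD:              big.NewInt(1),
        Head:            common.Hash{},
        Genesis:         common.Hash{},
        // EIP-2124's published checksum for mainnet Frontier, used as a sample.
        ForkID: forkid.ID{Hash: [4]byte{0xfc, 0x64, 0xec, 0x04}},
    })
    if err != nil {
        panic(err)
    }
    fmt.Printf("status payload: %d bytes\n", len(enc))
}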
@ -18,15 +18,24 @@ package eth

import (
    "fmt"
    "math/big"
    "sync"
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/consensus/ethash"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/forkid"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/core/vm"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/eth/downloader"
    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/rlp"
)

@ -37,10 +46,7 @@ func init() {
var testAccount, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")

// Tests that handshake failures are detected and reported correctly.
func TestStatusMsgErrors62(t *testing.T) { testStatusMsgErrors(t, 62) }
func TestStatusMsgErrors63(t *testing.T) { testStatusMsgErrors(t, 63) }

func testStatusMsgErrors(t *testing.T, protocol int) {
func TestStatusMsgErrors63(t *testing.T) {
    pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
    var (
        genesis = pm.blockchain.Genesis()
@ -59,21 +65,20 @@ func testStatusMsgErrors(t *testing.T, protocol int) {
            wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"),
        },
        {
            code: StatusMsg, data: statusData{10, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash()},
            wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", protocol),
            code: StatusMsg, data: statusData63{10, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash()},
            wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", 63),
        },
        {
            code: StatusMsg, data: statusData{uint32(protocol), 999, td, head.Hash(), genesis.Hash()},
            wantError: errResp(ErrNetworkIdMismatch, "999 (!= %d)", DefaultConfig.NetworkId),
            code: StatusMsg, data: statusData63{63, 999, td, head.Hash(), genesis.Hash()},
            wantError: errResp(ErrNetworkIDMismatch, "999 (!= %d)", DefaultConfig.NetworkId),
        },
        {
            code: StatusMsg, data: statusData{uint32(protocol), DefaultConfig.NetworkId, td, head.Hash(), common.Hash{3}},
            wantError: errResp(ErrGenesisBlockMismatch, "0300000000000000 (!= %x)", genesis.Hash().Bytes()[:8]),
            code: StatusMsg, data: statusData63{63, DefaultConfig.NetworkId, td, head.Hash(), common.Hash{3}},
            wantError: errResp(ErrGenesisMismatch, "0300000000000000 (!= %x)", genesis.Hash().Bytes()[:8]),
        },
    }

    for i, test := range tests {
        p, errc := newTestPeer("peer", protocol, pm, false)
        p, errc := newTestPeer("peer", 63, pm, false)
        // The send call might hang until reset because
        // the protocol might not read the payload.
        go p2p.Send(p.app, test.code, test.data)
@ -92,9 +97,155 @@ func testStatusMsgErrors(t *testing.T, protocol int) {
    }
}

func TestStatusMsgErrors64(t *testing.T) {
    pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
    var (
        genesis = pm.blockchain.Genesis()
        head    = pm.blockchain.CurrentHeader()
        td      = pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
        forkID  = forkid.NewID(pm.blockchain)
    )
    defer pm.Stop()

    tests := []struct {
        code      uint64
        data      interface{}
        wantError error
    }{
        {
            code: TxMsg, data: []interface{}{},
            wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"),
        },
        {
            code: StatusMsg, data: statusData{10, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash(), forkID},
            wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", 64),
        },
        {
            code: StatusMsg, data: statusData{64, 999, td, head.Hash(), genesis.Hash(), forkID},
            wantError: errResp(ErrNetworkIDMismatch, "999 (!= %d)", DefaultConfig.NetworkId),
        },
        {
            code: StatusMsg, data: statusData{64, DefaultConfig.NetworkId, td, head.Hash(), common.Hash{3}, forkID},
            wantError: errResp(ErrGenesisMismatch, "0300000000000000000000000000000000000000000000000000000000000000 (!= %x)", genesis.Hash()),
        },
        {
            code: StatusMsg, data: statusData{64, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash(), forkid.ID{Hash: [4]byte{0x00, 0x01, 0x02, 0x03}}},
            wantError: errResp(ErrForkIDRejected, forkid.ErrLocalIncompatibleOrStale.Error()),
        },
    }
    for i, test := range tests {
        p, errc := newTestPeer("peer", 64, pm, false)
        // The send call might hang until reset because
        // the protocol might not read the payload.
        go p2p.Send(p.app, test.code, test.data)

        select {
        case err := <-errc:
            if err == nil {
                t.Errorf("test %d: protocol returned nil error, want %q", i, test.wantError)
            } else if err.Error() != test.wantError.Error() {
                t.Errorf("test %d: wrong error: got %q, want %q", i, err, test.wantError)
            }
        case <-time.After(2 * time.Second):
            t.Errorf("protocol did not shut down within 2 seconds")
        }
        p.close()
    }
}

func TestForkIDSplit(t *testing.T) {
    var (
        engine = ethash.NewFaker()

        configNoFork  = &params.ChainConfig{HomesteadBlock: big.NewInt(1)}
        configProFork = &params.ChainConfig{
            HomesteadBlock: big.NewInt(1),
            EIP150Block:    big.NewInt(2),
            EIP155Block:    big.NewInt(2),
            EIP158Block:    big.NewInt(2),
            ByzantiumBlock: big.NewInt(3),
        }
        dbNoFork  = rawdb.NewMemoryDatabase()
        dbProFork = rawdb.NewMemoryDatabase()

        gspecNoFork  = &core.Genesis{Config: configNoFork}
        gspecProFork = &core.Genesis{Config: configProFork}

        genesisNoFork  = gspecNoFork.MustCommit(dbNoFork)
        genesisProFork = gspecProFork.MustCommit(dbProFork)

        chainNoFork, _  = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{}, nil)
        chainProFork, _ = core.NewBlockChain(dbProFork, nil, configProFork, engine, vm.Config{}, nil)

        blocksNoFork, _  = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil)
        blocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, dbProFork, 2, nil)

        ethNoFork, _  = NewProtocolManager(configNoFork, nil, downloader.FullSync, 1, new(event.TypeMux), new(testTxPool), engine, chainNoFork, dbNoFork, 1, nil)
        ethProFork, _ = NewProtocolManager(configProFork, nil, downloader.FullSync, 1, new(event.TypeMux), new(testTxPool), engine, chainProFork, dbProFork, 1, nil)
    )
    ethNoFork.Start(1000)
    ethProFork.Start(1000)

    // Both nodes should allow the other to connect (same genesis, next fork is the same)
    p2pNoFork, p2pProFork := p2p.MsgPipe()
    peerNoFork := newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork)
    peerProFork := newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork)

    errc := make(chan error, 2)
    go func() { errc <- ethNoFork.handle(peerProFork) }()
    go func() { errc <- ethProFork.handle(peerNoFork) }()

    select {
    case err := <-errc:
        t.Fatalf("frontier nofork <-> profork failed: %v", err)
    case <-time.After(250 * time.Millisecond):
        p2pNoFork.Close()
        p2pProFork.Close()
    }
    // Progress into Homestead. Forks match, so we don't care what the future holds
    chainNoFork.InsertChain(blocksNoFork[:1])
    chainProFork.InsertChain(blocksProFork[:1])

    p2pNoFork, p2pProFork = p2p.MsgPipe()
    peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork)
    peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork)

    errc = make(chan error, 2)
    go func() { errc <- ethNoFork.handle(peerProFork) }()
    go func() { errc <- ethProFork.handle(peerNoFork) }()

    select {
    case err := <-errc:
        t.Fatalf("homestead nofork <-> profork failed: %v", err)
    case <-time.After(250 * time.Millisecond):
        p2pNoFork.Close()
        p2pProFork.Close()
    }
    // Progress into Spurious. Forks mismatch, signalling differing chains, reject
    chainNoFork.InsertChain(blocksNoFork[1:2])
    chainProFork.InsertChain(blocksProFork[1:2])

    p2pNoFork, p2pProFork = p2p.MsgPipe()
    peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork)
    peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork)

    errc = make(chan error, 2)
    go func() { errc <- ethNoFork.handle(peerProFork) }()
    go func() { errc <- ethProFork.handle(peerNoFork) }()

    select {
    case err := <-errc:
        if want := errResp(ErrForkIDRejected, forkid.ErrLocalIncompatibleOrStale.Error()); err.Error() != want.Error() {
            t.Fatalf("fork ID rejection error mismatch: have %v, want %v", err, want)
        }
    case <-time.After(250 * time.Millisecond):
        t.Fatalf("split peers not rejected")
    }
}

// This test checks that received transactions are added to the local pool.
func TestRecvTransactions62(t *testing.T) { testRecvTransactions(t, 62) }
func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) }
func TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) }

func testRecvTransactions(t *testing.T, protocol int) {
    txAdded := make(chan []*types.Transaction)
@ -121,8 +272,8 @@ func testRecvTransactions(t *testing.T, protocol int) {
}

// This test checks that pending transactions are sent.
func TestSendTransactions62(t *testing.T) { testSendTransactions(t, 62) }
func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) }
func TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) }

func testSendTransactions(t *testing.T, protocol int) {
    pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
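All of these tests drive the protocol over p2p.MsgPipe instead of a real network. A minimal sketch of that pattern on its own (message code and payload are arbitrary):

package main

import "github.com/ethereum/go-ethereum/p2p"

func main() {
    // MsgPipe returns two connected in-memory message streams; a Send on one
    // end blocks until the other end reads, hence the goroutine.
    app, net := p2p.MsgPipe()
    defer app.Close()
    defer net.Close()

    go p2p.Send(app, 0x00, []uint{1, 2, 3}) // write on one end...
    // ...and assert the exact code and payload on the other.
    if err := p2p.ExpectMsg(net, 0x00, []uint{1, 2, 3}); err != nil {
        panic(err)
    }
}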
@ -99,7 +99,7 @@ func (mw *memoryWrapper) slice(begin, end int64) []byte {
        log.Warn("Tracer accessed out of bound memory", "available", mw.memory.Len(), "offset", begin, "size", end-begin)
        return nil
    }
    return mw.memory.Get(begin, end-begin)
    return mw.memory.GetCopy(begin, end-begin)
}

// getUint returns the 32 bytes at the specified address interpreted as a uint.
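The one-line tracer change above swaps Get for GetCopy. The distinction matters because the JS tracer may hold the returned slice across further EVM steps; a self-contained illustration in plain Go (not the EVM memory type itself):

package main

import "fmt"

// get returns a slice that aliases the backing memory.
func get(mem []byte, off, size int) []byte { return mem[off : off+size] }

// getCopy freezes a snapshot that later writes cannot disturb.
func getCopy(mem []byte, off, size int) []byte {
    out := make([]byte, size)
    copy(out, mem[off:off+size])
    return out
}

func main() {
    mem := []byte{1, 2, 3, 4}
    aliased, snapshot := get(mem, 0, 2), getCopy(mem, 0, 2)
    mem[0] = 9                           // the VM keeps executing and overwrites memory
    fmt.Println(aliased[0], snapshot[0]) // prints: 9 1
}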
@ -62,14 +62,18 @@ type Database struct {
    fn string      // filename for reporting
    db *leveldb.DB // LevelDB instance

    compTimeMeter    metrics.Meter // Meter for measuring the total time spent in database compaction
    compReadMeter    metrics.Meter // Meter for measuring the data read during compaction
    compWriteMeter   metrics.Meter // Meter for measuring the data written during compaction
    writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
    writeDelayMeter  metrics.Meter // Meter for measuring the write delay duration due to database compaction
    diskSizeGauge    metrics.Gauge // Gauge for tracking the size of all the levels in the database
    diskReadMeter    metrics.Meter // Meter for measuring the effective amount of data read
    diskWriteMeter   metrics.Meter // Meter for measuring the effective amount of data written
    compTimeMeter      metrics.Meter // Meter for measuring the total time spent in database compaction
    compReadMeter      metrics.Meter // Meter for measuring the data read during compaction
    compWriteMeter     metrics.Meter // Meter for measuring the data written during compaction
    writeDelayNMeter   metrics.Meter // Meter for measuring the write delay number due to database compaction
    writeDelayMeter    metrics.Meter // Meter for measuring the write delay duration due to database compaction
    diskSizeGauge      metrics.Gauge // Gauge for tracking the size of all the levels in the database
    diskReadMeter      metrics.Meter // Meter for measuring the effective amount of data read
    diskWriteMeter     metrics.Meter // Meter for measuring the effective amount of data written
    memCompGauge       metrics.Gauge // Gauge for tracking the number of memory compaction
    level0CompGauge    metrics.Gauge // Gauge for tracking the number of table compaction in level0
    nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in non0 level
    seekCompGauge      metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt

    quitLock sync.Mutex      // Mutex protecting the quit channel access
    quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
@ -96,6 +100,7 @@ func New(file string, cache int, handles int, namespace string) (*Database, erro
        BlockCacheCapacity:     cache / 2 * opt.MiB,
        WriteBuffer:            cache / 4 * opt.MiB, // Two of these are used internally
        Filter:                 filter.NewBloomFilter(10),
        DisableSeeksCompaction: true,
    })
    if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
        db, err = leveldb.RecoverFile(file, nil)
@ -118,6 +123,10 @@ func New(file string, cache int, handles int, namespace string) (*Database, erro
    ldb.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
    ldb.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
    ldb.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)
    ldb.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil)
    ldb.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil)
    ldb.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil)
    ldb.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil)

    // Start up the metrics gathering and return
    go ldb.meter(metricsGatheringInterval)
@ -375,6 +384,29 @@ func (db *Database) meter(refresh time.Duration) {
        }
        iostats[0], iostats[1] = nRead, nWrite

        compCount, err := db.db.GetProperty("leveldb.compcount")
        if err != nil {
            db.log.Error("Failed to read database iostats", "err", err)
            merr = err
            continue
        }

        var (
            memComp       uint32
            level0Comp    uint32
            nonLevel0Comp uint32
            seekComp      uint32
        )
        if n, err := fmt.Sscanf(compCount, "MemComp:%d Level0Comp:%d NonLevel0Comp:%d SeekComp:%d", &memComp, &level0Comp, &nonLevel0Comp, &seekComp); n != 4 || err != nil {
            db.log.Error("Compaction count statistic not found")
            merr = err
            continue
        }
        db.memCompGauge.Update(int64(memComp))
        db.level0CompGauge.Update(int64(level0Comp))
        db.nonlevel0CompGauge.Update(int64(nonLevel0Comp))
        db.seekCompGauge.Update(int64(seekComp))

        // Sleep a bit, then repeat the stats collection
        select {
        case errc = <-db.quitChan:
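The new gauges are fed by parsing goleveldb's "leveldb.compcount" property string. A standalone sketch of just the parsing step (the payload literal is an assumed example of the format this diff expects):

package main

import "fmt"

func main() {
    compCount := "MemComp:5 Level0Comp:2 NonLevel0Comp:7 SeekComp:0" // example payload
    var memComp, level0Comp, nonLevel0Comp, seekComp uint32
    // Sscanf returns how many fields matched; anything short of four means the
    // property string did not have the expected shape.
    n, err := fmt.Sscanf(compCount, "MemComp:%d Level0Comp:%d NonLevel0Comp:%d SeekComp:%d",
        &memComp, &level0Comp, &nonLevel0Comp, &seekComp)
    if n != 4 || err != nil {
        panic("compaction count statistic not found")
    }
    fmt.Println(memComp, level0Comp, nonLevel0Comp, seekComp) // 5 2 7 0
}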
@ -52,7 +52,7 @@ func (h GraphiQL) ServeHTTP(w http.ResponseWriter, r *http.Request) {
        respond(w, errorJSON("only GET requests are supported"), http.StatusMethodNotAllowed)
        return
    }

    w.Header().Set("Content-Type", "text/html")
    w.Write(graphiql)
}

@ -36,20 +36,19 @@ import (
)

var (
    errOnlyOnMainChain = errors.New("this operation is only available for blocks on the canonical chain")
    errBlockInvariant  = errors.New("block objects must be instantiated with at least one of num or hash")
    errBlockInvariant = errors.New("block objects must be instantiated with at least one of num or hash")
)

// Account represents an Ethereum account at a particular block.
type Account struct {
    backend     ethapi.Backend
    address     common.Address
    blockNumber rpc.BlockNumber
    backend       ethapi.Backend
    address       common.Address
    blockNrOrHash rpc.BlockNumberOrHash
}

// getState fetches the StateDB object for an account.
func (a *Account) getState(ctx context.Context) (*state.StateDB, error) {
    state, _, err := a.backend.StateAndHeaderByNumber(ctx, a.blockNumber)
    state, _, err := a.backend.StateAndHeaderByNumberOrHash(ctx, a.blockNrOrHash)
    return state, err
}

@ -102,9 +101,9 @@ func (l *Log) Transaction(ctx context.Context) *Transaction {

func (l *Log) Account(ctx context.Context, args BlockNumberArgs) *Account {
    return &Account{
        backend:     l.backend,
        address:     l.log.Address,
        blockNumber: args.Number(),
        backend:       l.backend,
        address:       l.log.Address,
        blockNrOrHash: args.NumberOrLatest(),
    }
}

@ -136,10 +135,10 @@ func (t *Transaction) resolve(ctx context.Context) (*types.Transaction, error) {
    tx, blockHash, _, index := rawdb.ReadTransaction(t.backend.ChainDb(), t.hash)
    if tx != nil {
        t.tx = tx
        blockNrOrHash := rpc.BlockNumberOrHashWithHash(blockHash, false)
        t.block = &Block{
            backend:   t.backend,
            hash:      blockHash,
            canonical: unknown,
            backend:      t.backend,
            numberOrHash: &blockNrOrHash,
        }
        t.index = index
    } else {
@ -203,9 +202,9 @@ func (t *Transaction) To(ctx context.Context, args BlockNumberArgs) (*Account, e
        return nil, nil
    }
    return &Account{
        backend:     t.backend,
        address:     *to,
        blockNumber: args.Number(),
        backend:       t.backend,
        address:       *to,
        blockNrOrHash: args.NumberOrLatest(),
    }, nil
}

@ -221,9 +220,9 @@ func (t *Transaction) From(ctx context.Context, args BlockNumberArgs) (*Account,
    from, _ := types.Sender(signer, tx)

    return &Account{
        backend:     t.backend,
        address:     from,
        blockNumber: args.Number(),
        backend:       t.backend,
        address:       from,
        blockNrOrHash: args.NumberOrLatest(),
    }, nil
}

@ -293,9 +292,9 @@ func (t *Transaction) CreatedContract(ctx context.Context, args BlockNumberArgs)
        return nil, err
    }
    return &Account{
        backend:     t.backend,
        address:     receipt.ContractAddress,
        blockNumber: args.Number(),
        backend:       t.backend,
        address:       receipt.ContractAddress,
        blockNrOrHash: args.NumberOrLatest(),
    }, nil
}

@ -317,45 +316,16 @@ func (t *Transaction) Logs(ctx context.Context) (*[]*Log, error) {

type BlockType int

const (
    unknown BlockType = iota
    isCanonical
    notCanonical
)

// Block represents an Ethereum block.
// backend, and either num or hash are mandatory. All other fields are lazily fetched
// backend, and numberOrHash are mandatory. All other fields are lazily fetched
// when required.
type Block struct {
    backend   ethapi.Backend
    num       *rpc.BlockNumber
    hash      common.Hash
    header    *types.Header
    block     *types.Block
    receipts  []*types.Receipt
    canonical BlockType // Indicates if this block is on the main chain or not.
}

func (b *Block) onMainChain(ctx context.Context) error {
    if b.canonical == unknown {
        header, err := b.resolveHeader(ctx)
        if err != nil {
            return err
        }
        canonHeader, err := b.backend.HeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64()))
        if err != nil {
            return err
        }
        if header.Hash() == canonHeader.Hash() {
            b.canonical = isCanonical
        } else {
            b.canonical = notCanonical
        }
    }
    if b.canonical != isCanonical {
        return errOnlyOnMainChain
    }
    return nil
    backend      ethapi.Backend
    numberOrHash *rpc.BlockNumberOrHash
    hash         common.Hash
    header       *types.Header
    block        *types.Block
    receipts     []*types.Receipt
}

// resolve returns the internal Block object representing this block, fetching
@ -364,14 +334,17 @@ func (b *Block) resolve(ctx context.Context) (*types.Block, error) {
    if b.block != nil {
        return b.block, nil
    }
    var err error
    if b.hash != (common.Hash{}) {
        b.block, err = b.backend.BlockByHash(ctx, b.hash)
    } else {
        b.block, err = b.backend.BlockByNumber(ctx, *b.num)
    if b.numberOrHash == nil {
        latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
        b.numberOrHash = &latest
    }
    var err error
    b.block, err = b.backend.BlockByNumberOrHash(ctx, *b.numberOrHash)
    if b.block != nil && b.header == nil {
        b.header = b.block.Header()
        if hash, ok := b.numberOrHash.Hash(); ok {
            b.hash = hash
        }
    }
    return b.block, err
}
@ -380,7 +353,7 @@ func (b *Block) resolve(ctx context.Context) (*types.Block, error) {
// if necessary. Call this function instead of `resolve` unless you need the
// additional data (transactions and uncles).
func (b *Block) resolveHeader(ctx context.Context) (*types.Header, error) {
    if b.num == nil && b.hash == (common.Hash{}) {
    if b.numberOrHash == nil && b.hash == (common.Hash{}) {
        return nil, errBlockInvariant
    }
    var err error
@ -388,7 +361,7 @@ func (b *Block) resolveHeader(ctx context.Context) (*types.Header, error) {
        if b.hash != (common.Hash{}) {
            b.header, err = b.backend.HeaderByHash(ctx, b.hash)
        } else {
            b.header, err = b.backend.HeaderByNumber(ctx, *b.num)
            b.header, err = b.backend.HeaderByNumberOrHash(ctx, *b.numberOrHash)
        }
    }
    return b.header, err
@ -416,15 +389,12 @@ func (b *Block) resolveReceipts(ctx context.Context) ([]*types.Receipt, error) {
}

func (b *Block) Number(ctx context.Context) (hexutil.Uint64, error) {
    if b.num == nil || *b.num == rpc.LatestBlockNumber {
        header, err := b.resolveHeader(ctx)
        if err != nil {
            return 0, err
        }
        num := rpc.BlockNumber(header.Number.Uint64())
        b.num = &num
    header, err := b.resolveHeader(ctx)
    if err != nil {
        return 0, err
    }
    return hexutil.Uint64(*b.num), nil

    return hexutil.Uint64(header.Number.Uint64()), nil
}

func (b *Block) Hash(ctx context.Context) (common.Hash, error) {
@ -456,26 +426,17 @@ func (b *Block) GasUsed(ctx context.Context) (hexutil.Uint64, error) {

func (b *Block) Parent(ctx context.Context) (*Block, error) {
    // If the block header hasn't been fetched, and we'll need it, fetch it.
    if b.num == nil && b.hash != (common.Hash{}) && b.header == nil {
    if b.numberOrHash == nil && b.header == nil {
        if _, err := b.resolveHeader(ctx); err != nil {
            return nil, err
        }
    }
    if b.header != nil && b.header.Number.Uint64() > 0 {
        num := rpc.BlockNumber(b.header.Number.Uint64() - 1)
        num := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(b.header.Number.Uint64() - 1))
        return &Block{
            backend:   b.backend,
            num:       &num,
            hash:      b.header.ParentHash,
            canonical: unknown,
        }, nil
    }
    if b.num != nil && *b.num != 0 {
        num := *b.num - 1
        return &Block{
            backend:   b.backend,
            num:       &num,
            canonical: isCanonical,
            backend:      b.backend,
            numberOrHash: &num,
            hash:         b.header.ParentHash,
        }, nil
    }
    return nil, nil
@ -561,13 +522,11 @@ func (b *Block) Ommers(ctx context.Context) (*[]*Block, error) {
    }
    ret := make([]*Block, 0, len(block.Uncles()))
    for _, uncle := range block.Uncles() {
        blockNumber := rpc.BlockNumber(uncle.Number.Uint64())
        blockNumberOrHash := rpc.BlockNumberOrHashWithHash(uncle.Hash(), false)
        ret = append(ret, &Block{
            backend:   b.backend,
            num:       &blockNumber,
            hash:      uncle.Hash(),
            header:    uncle,
            canonical: notCanonical,
            backend:      b.backend,
            numberOrHash: &blockNumberOrHash,
            header:       uncle,
        })
    }
    return &ret, nil
@ -603,16 +562,26 @@ func (b *Block) TotalDifficulty(ctx context.Context) (hexutil.Big, error) {

// BlockNumberArgs encapsulates arguments to accessors that specify a block number.
type BlockNumberArgs struct {
    // TODO: Ideally we could use input unions to allow the query to specify the
    // block parameter by hash, block number, or tag but input unions aren't part of the
    // standard GraphQL schema SDL yet, see: https://github.com/graphql/graphql-spec/issues/488
    Block *hexutil.Uint64
}

// Number returns the provided block number, or rpc.LatestBlockNumber if none
// NumberOr returns the provided block number argument, or the "current" block number or hash if none
// was provided.
func (a BlockNumberArgs) Number() rpc.BlockNumber {
func (a BlockNumberArgs) NumberOr(current rpc.BlockNumberOrHash) rpc.BlockNumberOrHash {
    if a.Block != nil {
        return rpc.BlockNumber(*a.Block)
        blockNr := rpc.BlockNumber(*a.Block)
        return rpc.BlockNumberOrHashWithNumber(blockNr)
    }
    return rpc.LatestBlockNumber
    return current
}

// NumberOrLatest returns the provided block number argument, or the "latest" block number if none
// was provided.
func (a BlockNumberArgs) NumberOrLatest() rpc.BlockNumberOrHash {
    return a.NumberOr(rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber))
}

func (b *Block) Miner(ctx context.Context, args BlockNumberArgs) (*Account, error) {
@ -621,9 +590,9 @@ func (b *Block) Miner(ctx context.Context, args BlockNumberArgs) (*Account, erro
        return nil, err
    }
    return &Account{
        backend:     b.backend,
        address:     header.Coinbase,
        blockNumber: args.Number(),
        backend:       b.backend,
        address:       header.Coinbase,
        blockNrOrHash: args.NumberOrLatest(),
    }, nil
}

@ -683,13 +652,11 @@ func (b *Block) OmmerAt(ctx context.Context, args struct{ Index int32 }) (*Block
        return nil, nil
    }
    uncle := uncles[args.Index]
    blockNumber := rpc.BlockNumber(uncle.Number.Uint64())
    blockNumberOrHash := rpc.BlockNumberOrHashWithHash(uncle.Hash(), false)
    return &Block{
        backend:   b.backend,
        num:       &blockNumber,
        hash:      uncle.Hash(),
        header:    uncle,
        canonical: notCanonical,
        backend:      b.backend,
        numberOrHash: &blockNumberOrHash,
        header:       uncle,
    }, nil
}

@ -757,20 +724,16 @@ func (b *Block) Logs(ctx context.Context, args struct{ Filter BlockFilterCriteri
func (b *Block) Account(ctx context.Context, args struct {
    Address common.Address
}) (*Account, error) {
    err := b.onMainChain(ctx)
    if err != nil {
        return nil, err
    }
    if b.num == nil {
    if b.numberOrHash == nil {
        _, err := b.resolveHeader(ctx)
        if err != nil {
            return nil, err
        }
    }
    return &Account{
        backend:     b.backend,
        address:     args.Address,
        blockNumber: *b.num,
        backend:       b.backend,
        address:       args.Address,
        blockNrOrHash: *b.numberOrHash,
    }, nil
}

@ -807,17 +770,13 @@ func (c *CallResult) Status() hexutil.Uint64 {
func (b *Block) Call(ctx context.Context, args struct {
    Data ethapi.CallArgs
}) (*CallResult, error) {
    err := b.onMainChain(ctx)
    if err != nil {
        return nil, err
    }
    if b.num == nil {
        _, err := b.resolveHeader(ctx)
    if b.numberOrHash == nil {
        _, err := b.resolve(ctx)
        if err != nil {
            return nil, err
        }
    }
    result, gas, failed, err := ethapi.DoCall(ctx, b.backend, args.Data, *b.num, nil, vm.Config{}, 5*time.Second, b.backend.RPCGasCap())
    result, gas, failed, err := ethapi.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, vm.Config{}, 5*time.Second, b.backend.RPCGasCap())
    status := hexutil.Uint64(1)
    if failed {
        status = 0
@ -832,17 +791,13 @@ func (b *Block) Call(ctx context.Context, args struct {
func (b *Block) EstimateGas(ctx context.Context, args struct {
    Data ethapi.CallArgs
}) (hexutil.Uint64, error) {
    err := b.onMainChain(ctx)
    if err != nil {
        return hexutil.Uint64(0), err
    }
    if b.num == nil {
    if b.numberOrHash == nil {
        _, err := b.resolveHeader(ctx)
        if err != nil {
            return hexutil.Uint64(0), err
        }
    }
    gas, err := ethapi.DoEstimateGas(ctx, b.backend, args.Data, *b.num, b.backend.RPCGasCap())
    gas, err := ethapi.DoEstimateGas(ctx, b.backend, args.Data, *b.numberOrHash, b.backend.RPCGasCap())
    return gas, err
}

@ -875,17 +830,19 @@ func (p *Pending) Transactions(ctx context.Context) (*[]*Transaction, error) {
func (p *Pending) Account(ctx context.Context, args struct {
    Address common.Address
}) *Account {
    pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
    return &Account{
        backend:     p.backend,
        address:     args.Address,
        blockNumber: rpc.PendingBlockNumber,
        backend:       p.backend,
        address:       args.Address,
        blockNrOrHash: pendingBlockNr,
    }
}

func (p *Pending) Call(ctx context.Context, args struct {
    Data ethapi.CallArgs
}) (*CallResult, error) {
    result, gas, failed, err := ethapi.DoCall(ctx, p.backend, args.Data, rpc.PendingBlockNumber, nil, vm.Config{}, 5*time.Second, p.backend.RPCGasCap())
    pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
    result, gas, failed, err := ethapi.DoCall(ctx, p.backend, args.Data, pendingBlockNr, nil, vm.Config{}, 5*time.Second, p.backend.RPCGasCap())
    status := hexutil.Uint64(1)
    if failed {
        status = 0
@ -900,7 +857,8 @@ func (p *Pending) Call(ctx context.Context, args struct {
func (p *Pending) EstimateGas(ctx context.Context, args struct {
    Data ethapi.CallArgs
}) (hexutil.Uint64, error) {
    return ethapi.DoEstimateGas(ctx, p.backend, args.Data, rpc.PendingBlockNumber, p.backend.RPCGasCap())
    pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
    return ethapi.DoEstimateGas(ctx, p.backend, args.Data, pendingBlockNr, p.backend.RPCGasCap())
}

// Resolver is the top-level object in the GraphQL hierarchy.
@ -914,24 +872,23 @@ func (r *Resolver) Block(ctx context.Context, args struct {
}) (*Block, error) {
    var block *Block
    if args.Number != nil {
        num := rpc.BlockNumber(uint64(*args.Number))
        number := rpc.BlockNumber(uint64(*args.Number))
        numberOrHash := rpc.BlockNumberOrHashWithNumber(number)
        block = &Block{
            backend:   r.backend,
            num:       &num,
            canonical: isCanonical,
            backend:      r.backend,
            numberOrHash: &numberOrHash,
        }
    } else if args.Hash != nil {
        numberOrHash := rpc.BlockNumberOrHashWithHash(*args.Hash, false)
        block = &Block{
            backend:   r.backend,
            hash:      *args.Hash,
            canonical: unknown,
            backend:      r.backend,
            numberOrHash: &numberOrHash,
        }
    } else {
        num := rpc.LatestBlockNumber
        numberOrHash := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
        block = &Block{
            backend:   r.backend,
            num:       &num,
            canonical: isCanonical,
            backend:      r.backend,
            numberOrHash: &numberOrHash,
        }
    }
    // Resolve the header, return nil if it doesn't exist.
@ -963,11 +920,10 @@ func (r *Resolver) Blocks(ctx context.Context, args struct {
    }
    ret := make([]*Block, 0, to-from+1)
    for i := from; i <= to; i++ {
        num := i
        numberOrHash := rpc.BlockNumberOrHashWithNumber(i)
        ret = append(ret, &Block{
            backend:   r.backend,
            num:       &num,
            canonical: isCanonical,
            backend:      r.backend,
            numberOrHash: &numberOrHash,
        })
    }
    return ret, nil
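This refactor replaces the num/hash/canonical triple with a single rpc.BlockNumberOrHash, which is why the onMainChain check and the BlockType enum could be dropped. A small sketch of the two constructors used throughout (the hash value is a placeholder):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/rpc"
)

func main() {
    // Pin a query to an explicit block number...
    byNumber := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(4000000))
    // ...or to a hash; `false` means the block is not required to be
    // canonical, matching how the GraphQL resolvers above construct it.
    byHash := rpc.BlockNumberOrHashWithHash(common.HexToHash("0x00"), false)

    if n, ok := byNumber.Number(); ok {
        fmt.Println("number:", n)
    }
    if h, ok := byHash.Hash(); ok {
        fmt.Println("hash:", h)
    }
}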
@ -183,3 +183,49 @@ func (a *TarballArchive) Close() error {
    }
    return a.file.Close()
}

func ExtractTarballArchive(archive string, dest string) error {
    // We're only interested in gzipped archives, wrap the reader now
    ar, err := os.Open(archive)
    if err != nil {
        return err
    }
    defer ar.Close()

    gzr, err := gzip.NewReader(ar)
    if err != nil {
        return err
    }
    defer gzr.Close()

    // Iterate over all the files in the tarball
    tr := tar.NewReader(gzr)
    for {
        // Fetch the next tarball header and abort if needed
        header, err := tr.Next()
        if err != nil {
            if err == io.EOF {
                return nil
            }
            return err
        }
        // Figure out the target and create it
        target := filepath.Join(dest, header.Name)

        switch header.Typeflag {
        case tar.TypeDir:
            if err := os.MkdirAll(target, 0755); err != nil {
                return err
            }
        case tar.TypeReg:
            file, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
            if err != nil {
                return err
            }
            if _, err := io.Copy(file, tr); err != nil {
                return err
            }
            file.Close()
        }
    }
}
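A hedged usage sketch of the new extraction helper from inside this repository's build tooling (the paths are placeholders; internal/build is only importable from within go-ethereum itself):

package main

import (
    "log"

    "github.com/ethereum/go-ethereum/internal/build"
)

func main() {
    // Unpack a previously bundled Go source tarball next to the build scripts.
    if err := build.ExtractTarballArchive("go.tar.gz", "go-src"); err != nil {
        log.Fatalf("extract failed: %v", err)
    }
}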
81
internal/build/gosrc.go
Normal file
@ -0,0 +1,81 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package build

import (
    "bytes"
    "crypto/sha256"
    "fmt"
    "io/ioutil"
    "net/http"
    "os"
    "path/filepath"
    "strings"
)

// EnsureGoSources ensures that path contains a file with the given SHA256 hash,
// and if not, it downloads a fresh Go source package from upstream and replaces
// path with it (if the hash matches).
func EnsureGoSources(version string, hash []byte, path string) error {
    // Sanity check the destination path to ensure we don't do weird things
    if !strings.HasSuffix(path, ".tar.gz") {
        return fmt.Errorf("destination path (%s) must end with .tar.gz", path)
    }
    // If the file exists, validate its hash
    if archive, err := ioutil.ReadFile(path); err == nil { // Go sources are ~20MB, it's fine to read all
        hasher := sha256.New()
        hasher.Write(archive)
        have := hasher.Sum(nil)

        if bytes.Equal(have, hash) {
            fmt.Printf("Go %s [%x] available at %s\n", version, hash, path)
            return nil
        }
        fmt.Printf("Go %s hash mismatch (have %x, want %x) at %s, deleting old archive\n", version, have, hash, path)
        if err := os.Remove(path); err != nil {
            return err
        }
    }
    // Archive missing or bad hash, download a new one
    fmt.Printf("Downloading Go %s [want %x] into %s\n", version, hash, path)

    res, err := http.Get(fmt.Sprintf("https://dl.google.com/go/go%s.src.tar.gz", version))
    if err != nil || res.StatusCode != http.StatusOK {
        return fmt.Errorf("failed to access Go sources: code %d, err %v", res.StatusCode, err)
    }
    defer res.Body.Close()

    archive, err := ioutil.ReadAll(res.Body)
    if err != nil {
        return err
    }
    // Sanity check the downloaded archive, save if checks out
    hasher := sha256.New()
    hasher.Write(archive)

    if have := hasher.Sum(nil); !bytes.Equal(have, hash) {
        return fmt.Errorf("downloaded Go %s hash mismatch (have %x, want %x)", version, have, hash)
    }
    if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
        return err
    }
    if err := ioutil.WriteFile(path, archive, 0644); err != nil {
        return err
    }
    fmt.Printf("Downloaded Go %s [%x] into %s\n", version, hash, path)
    return nil
}
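And a matching sketch of how a builder might pin the Go toolchain with the new helper (the version string and all-zero digest are placeholders; the real digest would be the published SHA256 of the go<version>.src.tar.gz being cached):

package main

import (
    "encoding/hex"
    "log"
    "os"
    "path/filepath"

    "github.com/ethereum/go-ethereum/internal/build"
)

func main() {
    // Placeholder digest: substitute the upstream-published checksum.
    hash, _ := hex.DecodeString("0000000000000000000000000000000000000000000000000000000000000000")
    dst := filepath.Join(os.Getenv("HOME"), ".gobundle", "go.tar.gz")
    if err := build.EnsureGoSources("1.13.4", hash, dst); err != nil {
        log.Fatalf("bundling Go sources: %v", err)
    }
}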