Write state diff to CSV #2
.github/CODEOWNERS (vendored, 24 changes)
@@ -2,6 +2,7 @@
 # Each line is a file pattern followed by one or more owners.
 
 accounts/usbwallet @karalabe
+accounts/abi @gballet
 consensus @karalabe
 core/ @karalabe @holiman
 eth/ @karalabe
@@ -9,27 +10,4 @@ les/ @zsfelfoldi
 light/ @zsfelfoldi
 mobile/ @karalabe
 p2p/ @fjl @zsfelfoldi
-p2p/simulations @lmars
-p2p/protocols @zelig
-swarm/api/http @justelad
-swarm/bmt @zelig
-swarm/dev @lmars
-swarm/fuse @jmozah @holisticode
-swarm/grafana_dashboards @nonsense
-swarm/metrics @nonsense @holisticode
-swarm/multihash @nolash
-swarm/network/bitvector @zelig @janos
-swarm/network/priorityqueue @zelig @janos
-swarm/network/simulations @zelig @janos
-swarm/network/stream @janos @zelig @holisticode @justelad
-swarm/network/stream/intervals @janos
-swarm/network/stream/testing @zelig
-swarm/pot @zelig
-swarm/pss @nolash @zelig @nonsense
-swarm/services @zelig
-swarm/state @justelad
-swarm/storage/encryption @zelig @nagydani
-swarm/storage/mock @janos
-swarm/storage/feed @nolash @jpeletier
-swarm/testutil @lmars
 whisper/ @gballet @gluk256
.github/no-response.yml (vendored, 2 changes)
@@ -1,7 +1,7 @@
 # Number of days of inactivity before an Issue is closed for lack of response
 daysUntilClose: 30
 # Label requiring a response
-responseRequiredLabel: more-information-needed
+responseRequiredLabel: "need:more-information"
 # Comment to post when closing an Issue for lack of response. Set to `false` to disable
 closeComment: >
   This issue has been automatically closed because there has been no response
.github/stale.yml (vendored, 2 changes)
@@ -7,7 +7,7 @@ exemptLabels:
 - pinned
 - security
 # Label to use when marking an issue as stale
-staleLabel: stale
+staleLabel: "status:inactive"
 # Comment to post when marking an issue as stale. Set to `false` to disable
 markComment: >
   This issue has been automatically marked as stale because it has not had
.travis.yml (10 changes)
@@ -29,6 +29,14 @@ matrix:
     - os: osx
       go: 1.11.x
      script:
+        - echo "Increase the maximum number of open file descriptors on macOS"
+        - NOFILE=20480
+        - sudo sysctl -w kern.maxfiles=$NOFILE
+        - sudo sysctl -w kern.maxfilesperproc=$NOFILE
+        - sudo launchctl limit maxfiles $NOFILE $NOFILE
+        - sudo launchctl limit maxfiles
+        - ulimit -S -n $NOFILE
+        - ulimit -n
        - unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
        - go run build/ci.go install
        - go run build/ci.go test -coverage $TEST_PACKAGES
@@ -148,7 +156,7 @@ matrix:
      git:
        submodules: false # avoid cloning ethereum/tests
      before_install:
-        - curl https://storage.googleapis.com/golang/go1.11.2.linux-amd64.tar.gz | tar -xz
+        - curl https://storage.googleapis.com/golang/go1.11.4.linux-amd64.tar.gz | tar -xz
        - export PATH=`pwd`/go/bin:$PATH
        - export GOROOT=`pwd`/go
        - export GOPATH=$HOME/go
README.md
@@ -168,7 +168,7 @@ HTTP based JSON-RPC API options:
 * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
 
 You'll need to use your own programming environments' capabilities (libraries, tools, etc) to connect
-via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](http://www.jsonrpc.org/specification)
+via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](https://www.jsonrpc.org/specification)
 on all transports. You can reuse the same connection for multiple requests!
 
 **Note: Please understand the security implications of opening up an HTTP/WS based transport before
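For readers following along, speaking JSON-RPC over the HTTP transport boils down to a few lines of Go. A minimal sketch, assuming a node started with `--rpc` on the default endpoint (the endpoint and method shown are illustrative, not part of this diff):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Dial the HTTP transport exposed by `geth --rpc` (default endpoint assumed).
	client, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// The same connection can be reused for multiple requests.
	var blockNumber string // hex-encoded quantity, e.g. "0x4b7"
	if err := client.Call(&blockNumber, "eth_blockNumber"); err != nil {
		panic(err)
	}
	fmt.Println("latest block:", blockNumber)
}
```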
accounts/abi/abi.go
@@ -58,13 +58,11 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
 			return nil, err
 		}
 		return arguments, nil
-
 	}
 	method, exist := abi.Methods[name]
 	if !exist {
 		return nil, fmt.Errorf("method '%s' not found", name)
 	}
-
 	arguments, err := method.Inputs.Pack(args...)
 	if err != nil {
 		return nil, err
@@ -82,7 +80,7 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) (err error) {
 	// we need to decide whether we're calling a method or an event
 	if method, ok := abi.Methods[name]; ok {
 		if len(output)%32 != 0 {
-			return fmt.Errorf("abi: improperly formatted output")
+			return fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(output), output)
 		}
 		return method.Outputs.Unpack(v, output)
 	} else if event, ok := abi.Events[name]; ok {
accounts/abi/abi_test.go
@@ -22,11 +22,10 @@ import (
 	"fmt"
 	"log"
 	"math/big"
+	"reflect"
 	"strings"
 	"testing"
 
-	"reflect"
-
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 )
@@ -52,11 +51,14 @@ const jsondata2 = `
 	{ "type" : "function", "name" : "slice", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
 	{ "type" : "function", "name" : "slice256", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] },
 	{ "type" : "function", "name" : "sliceAddress", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] },
-	{ "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] }
+	{ "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] },
+	{ "type" : "function", "name" : "nestedArray", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint256[2][2]" }, { "name" : "b", "type" : "address[]" } ] },
+	{ "type" : "function", "name" : "nestedArray2", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][2]" } ] },
+	{ "type" : "function", "name" : "nestedSlice", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] }
 ]`
 
 func TestReader(t *testing.T) {
-	Uint256, _ := NewType("uint256")
+	Uint256, _ := NewType("uint256", nil)
 	exp := ABI{
 		Methods: map[string]Method{
 			"balance": {
@@ -177,7 +179,7 @@ func TestTestSlice(t *testing.T) {
 }
 
 func TestMethodSignature(t *testing.T) {
-	String, _ := NewType("string")
+	String, _ := NewType("string", nil)
 	m := Method{"foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil}
 	exp := "foo(string,string)"
 	if m.Sig() != exp {
@@ -189,12 +191,31 @@ func TestMethodSignature(t *testing.T) {
 		t.Errorf("expected ids to match %x != %x", m.Id(), idexp)
 	}
 
-	uintt, _ := NewType("uint256")
+	uintt, _ := NewType("uint256", nil)
 	m = Method{"foo", false, []Argument{{"bar", uintt, false}}, nil}
 	exp = "foo(uint256)"
 	if m.Sig() != exp {
 		t.Error("signature mismatch", exp, "!=", m.Sig())
 	}
+
+	// Method with tuple arguments
+	s, _ := NewType("tuple", []ArgumentMarshaling{
+		{Name: "a", Type: "int256"},
+		{Name: "b", Type: "int256[]"},
+		{Name: "c", Type: "tuple[]", Components: []ArgumentMarshaling{
+			{Name: "x", Type: "int256"},
+			{Name: "y", Type: "int256"},
+		}},
+		{Name: "d", Type: "tuple[2]", Components: []ArgumentMarshaling{
+			{Name: "x", Type: "int256"},
+			{Name: "y", Type: "int256"},
+		}},
+	})
+	m = Method{"foo", false, []Argument{{"s", s, false}, {"bar", String, false}}, nil}
+	exp = "foo((int256,int256[],(int256,int256)[],(int256,int256)[2]),string)"
+	if m.Sig() != exp {
+		t.Error("signature mismatch", exp, "!=", m.Sig())
+	}
 }
 
 func TestMultiPack(t *testing.T) {
@@ -564,11 +585,13 @@ func TestBareEvents(t *testing.T) {
 	const definition = `[
 	{ "type" : "event", "name" : "balance" },
 	{ "type" : "event", "name" : "anon", "anonymous" : true},
-	{ "type" : "event", "name" : "args", "inputs" : [{ "indexed":false, "name":"arg0", "type":"uint256" }, { "indexed":true, "name":"arg1", "type":"address" }] }
+	{ "type" : "event", "name" : "args", "inputs" : [{ "indexed":false, "name":"arg0", "type":"uint256" }, { "indexed":true, "name":"arg1", "type":"address" }] },
+	{ "type" : "event", "name" : "tuple", "inputs" : [{ "indexed":false, "name":"t", "type":"tuple", "components":[{"name":"a", "type":"uint256"}] }, { "indexed":true, "name":"arg1", "type":"address" }] }
 	]`
 
-	arg0, _ := NewType("uint256")
-	arg1, _ := NewType("address")
+	arg0, _ := NewType("uint256", nil)
+	arg1, _ := NewType("address", nil)
+	tuple, _ := NewType("tuple", []ArgumentMarshaling{{Name: "a", Type: "uint256"}})
 
 	expectedEvents := map[string]struct {
 		Anonymous bool
@@ -580,6 +603,10 @@ func TestBareEvents(t *testing.T) {
 			{Name: "arg0", Type: arg0, Indexed: false},
 			{Name: "arg1", Type: arg1, Indexed: true},
 		}},
+		"tuple": {false, []Argument{
+			{Name: "t", Type: tuple, Indexed: false},
+			{Name: "arg1", Type: arg1, Indexed: true},
+		}},
 	}
 
 	abi, err := JSON(strings.NewReader(definition))
@@ -646,28 +673,24 @@ func TestUnpackEvent(t *testing.T) {
 	}
 
 	type ReceivedEvent struct {
-		Address common.Address
+		Sender common.Address
 		Amount *big.Int
 		Memo   []byte
 	}
 	var ev ReceivedEvent
 
 	err = abi.Unpack(&ev, "received", data)
 	if err != nil {
 		t.Error(err)
-	} else {
-		t.Logf("len(data): %d; received event: %+v", len(data), ev)
 	}
 
 	type ReceivedAddrEvent struct {
-		Address common.Address
+		Sender common.Address
 	}
 	var receivedAddrEv ReceivedAddrEvent
 	err = abi.Unpack(&receivedAddrEv, "receivedAddr", data)
 	if err != nil {
 		t.Error(err)
-	} else {
-		t.Logf("len(data): %d; received event: %+v", len(data), receivedAddrEv)
 	}
 }
 
accounts/abi/argument.go
@@ -33,24 +33,27 @@ type Argument struct {
 
 type Arguments []Argument
 
+type ArgumentMarshaling struct {
+	Name       string
+	Type       string
+	Components []ArgumentMarshaling
+	Indexed    bool
+}
+
 // UnmarshalJSON implements json.Unmarshaler interface
 func (argument *Argument) UnmarshalJSON(data []byte) error {
-	var extarg struct {
-		Name    string
-		Type    string
-		Indexed bool
-	}
-	err := json.Unmarshal(data, &extarg)
+	var arg ArgumentMarshaling
+	err := json.Unmarshal(data, &arg)
 	if err != nil {
 		return fmt.Errorf("argument json err: %v", err)
 	}
 
-	argument.Type, err = NewType(extarg.Type)
+	argument.Type, err = NewType(arg.Type, arg.Components)
 	if err != nil {
 		return err
 	}
-	argument.Name = extarg.Name
-	argument.Indexed = extarg.Indexed
+	argument.Name = arg.Name
+	argument.Indexed = arg.Indexed
 
 	return nil
 }
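With the new `ArgumentMarshaling` type above, a `components` field in the ABI JSON is decoded recursively and handed to `NewType`, which is what makes tuple arguments parseable. A minimal sketch of what that looks like end to end (the contract definition is made up purely for illustration):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// A hypothetical method taking a tuple; "components" describes the
	// tuple's fields and is what UnmarshalJSON now forwards to NewType.
	const def = `[{"type":"function","name":"set","inputs":[
		{"name":"p","type":"tuple","components":[
			{"name":"x","type":"uint256"},
			{"name":"y","type":"uint256"}]}]}]`

	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// Tuples flatten to parenthesised component lists in the signature.
	fmt.Println(parsed.Methods["set"].Sig()) // set((uint256,uint256))
}
```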
@@ -85,7 +88,6 @@ func (arguments Arguments) isTuple() bool {
 
 // Unpack performs the operation hexdata -> Go format
 func (arguments Arguments) Unpack(v interface{}, data []byte) error {
-
 	// make sure the passed value is arguments pointer
 	if reflect.Ptr != reflect.ValueOf(v).Kind() {
 		return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
@@ -97,52 +99,134 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
 	if arguments.isTuple() {
 		return arguments.unpackTuple(v, marshalledValues)
 	}
-	return arguments.unpackAtomic(v, marshalledValues)
+	return arguments.unpackAtomic(v, marshalledValues[0])
 }
 
-func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
+// unpack sets the unmarshalled value to go format.
+// Note the dst here must be settable.
+func unpack(t *Type, dst interface{}, src interface{}) error {
+	var (
+		dstVal = reflect.ValueOf(dst).Elem()
+		srcVal = reflect.ValueOf(src)
+	)
+
+	if t.T != TupleTy && !((t.T == SliceTy || t.T == ArrayTy) && t.Elem.T == TupleTy) {
+		return set(dstVal, srcVal)
+	}
+
+	switch t.T {
+	case TupleTy:
+		if dstVal.Kind() != reflect.Struct {
+			return fmt.Errorf("abi: invalid dst value for unpack, want struct, got %s", dstVal.Kind())
+		}
+		fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, dstVal)
+		if err != nil {
+			return err
+		}
+		for i, elem := range t.TupleElems {
+			fname := fieldmap[t.TupleRawNames[i]]
+			field := dstVal.FieldByName(fname)
+			if !field.IsValid() {
+				return fmt.Errorf("abi: field %s can't found in the given value", t.TupleRawNames[i])
+			}
+			if err := unpack(elem, field.Addr().Interface(), srcVal.Field(i).Interface()); err != nil {
+				return err
+			}
+		}
+		return nil
+	case SliceTy:
+		if dstVal.Kind() != reflect.Slice {
+			return fmt.Errorf("abi: invalid dst value for unpack, want slice, got %s", dstVal.Kind())
+		}
+		slice := reflect.MakeSlice(dstVal.Type(), srcVal.Len(), srcVal.Len())
+		for i := 0; i < slice.Len(); i++ {
+			if err := unpack(t.Elem, slice.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil {
+				return err
+			}
+		}
+		dstVal.Set(slice)
+	case ArrayTy:
+		if dstVal.Kind() != reflect.Array {
+			return fmt.Errorf("abi: invalid dst value for unpack, want array, got %s", dstVal.Kind())
+		}
+		array := reflect.New(dstVal.Type()).Elem()
+		for i := 0; i < array.Len(); i++ {
+			if err := unpack(t.Elem, array.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil {
+				return err
+			}
+		}
+		dstVal.Set(array)
+	}
+	return nil
+}
+
+// unpackAtomic unpacks ( hexdata -> go ) a single value
+func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues interface{}) error {
+	if arguments.LengthNonIndexed() == 0 {
+		return nil
+	}
+	argument := arguments.NonIndexed()[0]
+	elem := reflect.ValueOf(v).Elem()
+
+	if elem.Kind() == reflect.Struct {
+		fieldmap, err := mapArgNamesToStructFields([]string{argument.Name}, elem)
+		if err != nil {
+			return err
+		}
+		field := elem.FieldByName(fieldmap[argument.Name])
+		if !field.IsValid() {
+			return fmt.Errorf("abi: field %s can't be found in the given value", argument.Name)
+		}
+		return unpack(&argument.Type, field.Addr().Interface(), marshalledValues)
+	}
+	return unpack(&argument.Type, elem.Addr().Interface(), marshalledValues)
+}
+
+// unpackTuple unpacks ( hexdata -> go ) a batch of values.
+func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
 	var (
 		value = reflect.ValueOf(v).Elem()
 		typ   = value.Type()
 		kind  = value.Kind()
 	)
 
 	if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
 		return err
 	}
 
 	// If the interface is a struct, get of abi->struct_field mapping
 
 	var abi2struct map[string]string
 	if kind == reflect.Struct {
-		var err error
-		abi2struct, err = mapAbiToStructFields(arguments, value)
+		var (
+			argNames []string
+			err      error
+		)
+		for _, arg := range arguments.NonIndexed() {
+			argNames = append(argNames, arg.Name)
+		}
+		abi2struct, err = mapArgNamesToStructFields(argNames, value)
 		if err != nil {
 			return err
 		}
 	}
 	for i, arg := range arguments.NonIndexed() {
-
-		reflectValue := reflect.ValueOf(marshalledValues[i])
-
 		switch kind {
 		case reflect.Struct:
-			if structField, ok := abi2struct[arg.Name]; ok {
-				if err := set(value.FieldByName(structField), reflectValue, arg); err != nil {
-					return err
-				}
+			field := value.FieldByName(abi2struct[arg.Name])
+			if !field.IsValid() {
+				return fmt.Errorf("abi: field %s can't be found in the given value", arg.Name)
+			}
+			if err := unpack(&arg.Type, field.Addr().Interface(), marshalledValues[i]); err != nil {
+				return err
 			}
 		case reflect.Slice, reflect.Array:
 			if value.Len() < i {
 				return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(arguments), value.Len())
 			}
 			v := value.Index(i)
-			if err := requireAssignable(v, reflectValue); err != nil {
+			if err := requireAssignable(v, reflect.ValueOf(marshalledValues[i])); err != nil {
 				return err
 			}
-			if err := set(v.Elem(), reflectValue, arg); err != nil {
+			if err := unpack(&arg.Type, v.Addr().Interface(), marshalledValues[i]); err != nil {
 				return err
 			}
 		default:
@@ -150,48 +234,7 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
 		}
 	}
 	return nil
-}
-
-// unpackAtomic unpacks ( hexdata -> go ) a single value
-func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues []interface{}) error {
-	if len(marshalledValues) != 1 {
-		return fmt.Errorf("abi: wrong length, expected single value, got %d", len(marshalledValues))
-	}
-
-	elem := reflect.ValueOf(v).Elem()
-	kind := elem.Kind()
-	reflectValue := reflect.ValueOf(marshalledValues[0])
-
-	var abi2struct map[string]string
-	if kind == reflect.Struct {
-		var err error
-		if abi2struct, err = mapAbiToStructFields(arguments, elem); err != nil {
-			return err
-		}
-		arg := arguments.NonIndexed()[0]
-		if structField, ok := abi2struct[arg.Name]; ok {
-			return set(elem.FieldByName(structField), reflectValue, arg)
-		}
-		return nil
-	}
-
-	return set(elem, reflectValue, arguments.NonIndexed()[0])
-}
-
-// Computes the full size of an array;
-// i.e. counting nested arrays, which count towards size for unpacking.
-func getArraySize(arr *Type) int {
-	size := arr.Size
-	// Arrays can be nested, with each element being the same size
-	arr = arr.Elem
-	for arr.T == ArrayTy {
-		// Keep multiplying by elem.Size while the elem is an array.
-		size *= arr.Size
-		arr = arr.Elem
-	}
-	// Now we have the full array size, including its children.
-	return size
 }
 
 // UnpackValues can be used to unpack ABI-encoded hexdata according to the ABI-specification,
@@ -202,7 +245,7 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
 	virtualArgs := 0
 	for index, arg := range arguments.NonIndexed() {
 		marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
-		if arg.Type.T == ArrayTy {
+		if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) {
 			// If we have a static array, like [3]uint256, these are coded as
 			// just like uint256,uint256,uint256.
 			// This means that we need to add two 'virtual' arguments when
@@ -213,7 +256,11 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
 			//
 			// Calculate the full array size to get the correct offset for the next argument.
 			// Decrement it by 1, as the normal index increment is still applied.
-			virtualArgs += getArraySize(&arg.Type) - 1
+			virtualArgs += getTypeSize(arg.Type)/32 - 1
+		} else if arg.Type.T == TupleTy && !isDynamicType(arg.Type) {
+			// If we have a static tuple, like (uint256, bool, uint256), these are
+			// coded as just like uint256,bool,uint256
+			virtualArgs += getTypeSize(arg.Type)/32 - 1
 		}
 		if err != nil {
 			return nil, err
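The head-size arithmetic here is worth spelling out: a static composite occupies `getTypeSize(type)/32` words inline, and since the loop already advances one index per argument, only the excess words are added to `virtualArgs`. A self-contained sketch of that bookkeeping (`headWords` is a stand-in for `getTypeSize/32`, not the real helper):

```go
package main

import "fmt"

// headWords stands in for getTypeSize(t)/32: how many 32-byte words a
// statically encoded argument occupies inline in the calldata head.
func headWords(staticByteSize int) int { return staticByteSize / 32 }

func main() {
	// A static [3]uint256 is encoded exactly like three consecutive
	// uint256 arguments: 96 bytes, i.e. 3 words in the head.
	arrayWords := headWords(3 * 32)
	fmt.Println(arrayWords - 1) // 2 extra "virtual" arguments

	// Likewise a static tuple (uint256,bool,uint256) occupies 3 words.
	tupleWords := headWords(3 * 32)
	fmt.Println(tupleWords - 1) // 2
}
```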
@@ -243,11 +290,7 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
 	// input offset is the bytes offset for packed output
 	inputOffset := 0
 	for _, abiArg := range abiArgs {
-		if abiArg.Type.T == ArrayTy {
-			inputOffset += 32 * abiArg.Type.Size
-		} else {
-			inputOffset += 32
-		}
+		inputOffset += getTypeSize(abiArg.Type)
 	}
 	var ret []byte
 	for i, a := range args {
@@ -257,14 +300,13 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
 		if err != nil {
 			return nil, err
 		}
-		// check for a slice type (string, bytes, slice)
-		if input.Type.requiresLengthPrefix() {
-			// calculate the offset
-			offset := inputOffset + len(variableInput)
+		// check for dynamic types
+		if isDynamicType(input.Type) {
 			// set the offset
-			ret = append(ret, packNum(reflect.ValueOf(offset))...)
-			// Append the packed output to the variable input. The variable input
-			// will be appended at the end of the input.
+			ret = append(ret, packNum(reflect.ValueOf(inputOffset))...)
+			// calculate next offset
+			inputOffset += len(packed)
+			// append to variable input
 			variableInput = append(variableInput, packed...)
 		} else {
 			// append the packed value to the input
@@ -277,14 +319,13 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
 	return ret, nil
 }
 
-// capitalise makes the first character of a string upper case, also removing any
-// prefixing underscores from the variable names.
-func capitalise(input string) string {
-	for len(input) > 0 && input[0] == '_' {
-		input = input[1:]
-	}
-	if len(input) == 0 {
-		return ""
-	}
-	return strings.ToUpper(input[:1]) + input[1:]
+// ToCamelCase converts an under-score string to a camel-case string
+func ToCamelCase(input string) string {
+	parts := strings.Split(input, "_")
+	for i, s := range parts {
+		if len(s) > 0 {
+			parts[i] = strings.ToUpper(s[:1]) + s[1:]
+		}
+	}
+	return strings.Join(parts, "")
 }
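Since `ToCamelCase` is now exported and reused by the bindings generator further down, its edge-case behaviour matters. A quick standalone check of the same algorithm (copied from the diff for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

// toCamelCase reproduces the exported abi.ToCamelCase from this diff:
// split on underscores, capitalise each non-empty part, re-join.
func toCamelCase(input string) string {
	parts := strings.Split(input, "_")
	for i, s := range parts {
		if len(s) > 0 {
			parts[i] = strings.ToUpper(s[:1]) + s[1:]
		}
	}
	return strings.Join(parts, "")
}

func main() {
	fmt.Println(toCamelCase("transfer_from")) // TransferFrom
	fmt.Println(toCamelCase("_owner"))        // Owner (leading underscore drops out)
	fmt.Println(toCamelCase(""))              // empty stays empty
}
```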
accounts/abi/bind/base.go
@@ -36,10 +36,10 @@ type SignerFn func(types.Signer, common.Address, *types.Transaction) (*types.Transaction, error)
 
 // CallOpts is the collection of options to fine tune a contract call request.
 type CallOpts struct {
 	Pending     bool            // Whether to operate on the pending state or the last known one
 	From        common.Address  // Optional the sender address, otherwise the first account is used
+	BlockNumber *big.Int        // Optional the block number on which the call should be performed
 	Context     context.Context // Network context to support cancellation and timeouts (nil = no timeout)
 }
 
 // TransactOpts is the collection of authorization data required to create a
@@ -148,10 +148,10 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string,
 			}
 		}
 	} else {
-		output, err = c.caller.CallContract(ctx, msg, nil)
+		output, err = c.caller.CallContract(ctx, msg, opts.BlockNumber)
 		if err == nil && len(output) == 0 {
 			// Make sure we have a contract to operate on, and bail out otherwise.
-			if code, err = c.caller.CodeAt(ctx, c.address, nil); err != nil {
+			if code, err = c.caller.CodeAt(ctx, c.address, opts.BlockNumber); err != nil {
 				return err
 			} else if len(code) == 0 {
 				return ErrNoCode
accounts/abi/bind/base_test.go (new file, 64 lines)
@@ -0,0 +1,64 @@
+package bind_test
+
+import (
+	"context"
+	"math/big"
+	"testing"
+
+	ethereum "github.com/ethereum/go-ethereum"
+	"github.com/ethereum/go-ethereum/accounts/abi"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+)
+
+type mockCaller struct {
+	codeAtBlockNumber       *big.Int
+	callContractBlockNumber *big.Int
+}
+
+func (mc *mockCaller) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
+	mc.codeAtBlockNumber = blockNumber
+	return []byte{1, 2, 3}, nil
+}
+
+func (mc *mockCaller) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
+	mc.callContractBlockNumber = blockNumber
+	return nil, nil
+}
+
+func TestPassingBlockNumber(t *testing.T) {
+
+	mc := &mockCaller{}
+
+	bc := bind.NewBoundContract(common.HexToAddress("0x0"), abi.ABI{
+		Methods: map[string]abi.Method{
+			"something": {
+				Name:    "something",
+				Outputs: abi.Arguments{},
+			},
+		},
+	}, mc, nil, nil)
+	var ret string
+
+	blockNumber := big.NewInt(42)
+
+	bc.Call(&bind.CallOpts{BlockNumber: blockNumber}, &ret, "something")
+
+	if mc.callContractBlockNumber != blockNumber {
+		t.Fatalf("CallContract() was not passed the block number")
+	}
+
+	if mc.codeAtBlockNumber != blockNumber {
+		t.Fatalf("CodeAt() was not passed the block number")
+	}
+
+	bc.Call(&bind.CallOpts{}, &ret, "something")
+
+	if mc.callContractBlockNumber != nil {
+		t.Fatalf("CallContract() was passed a block number when it should not have been")
+	}
+
+	if mc.codeAtBlockNumber != nil {
+		t.Fatalf("CodeAt() was passed a block number when it should not have been")
+	}
+}
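The test above exercises the plumbing with a mock caller; in application code the new field is used the same way. A minimal sketch, assuming an already-bound contract (the contract shape, method name, and block height are illustrative, not from this diff):

```go
package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
)

// balanceAt queries a hypothetical ERC20-style contract at a fixed block:
// CallOpts.BlockNumber routes both the eth_call and the CodeAt existence
// check to that historical block instead of the latest one.
func balanceAt(contract *bind.BoundContract, holder common.Address, block int64) (*big.Int, error) {
	opts := &bind.CallOpts{BlockNumber: big.NewInt(block)}
	var balance *big.Int
	if err := contract.Call(opts, &balance, "balanceOf", holder); err != nil {
		return nil, err
	}
	return balance, nil
}
```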
accounts/abi/bind/bind.go
@@ -381,54 +381,23 @@ func namedTypeJava(javaKind string, solKind abi.Type) string {
 // methodNormalizer is a name transformer that modifies Solidity method names to
 // conform to target language naming concentions.
 var methodNormalizer = map[Lang]func(string) string{
-	LangGo:   capitalise,
+	LangGo:   abi.ToCamelCase,
 	LangJava: decapitalise,
 }
 
 // capitalise makes a camel-case string which starts with an upper case character.
 func capitalise(input string) string {
-	for len(input) > 0 && input[0] == '_' {
-		input = input[1:]
-	}
-	if len(input) == 0 {
-		return ""
-	}
-	return toCamelCase(strings.ToUpper(input[:1]) + input[1:])
+	return abi.ToCamelCase(input)
 }
 
 // decapitalise makes a camel-case string which starts with a lower case character.
 func decapitalise(input string) string {
-	for len(input) > 0 && input[0] == '_' {
-		input = input[1:]
-	}
 	if len(input) == 0 {
-		return ""
+		return input
 	}
-	return toCamelCase(strings.ToLower(input[:1]) + input[1:])
-}
-
-// toCamelCase converts an under-score string to a camel-case string
-func toCamelCase(input string) string {
-	toupper := false
-
-	result := ""
-	for k, v := range input {
-		switch {
-		case k == 0:
-			result = strings.ToUpper(string(input[0]))
-
-		case toupper:
-			result += strings.ToUpper(string(v))
-			toupper = false
-
-		case v == '_':
-			toupper = true
-
-		default:
-			result += string(v)
-		}
-	}
-	return result
+
+	goForm := abi.ToCamelCase(input)
+	return strings.ToLower(goForm[:1]) + goForm[1:]
 }
 
 // structured checks whether a list of ABI data types has enough information to
accounts/abi/event.go
@@ -36,12 +36,12 @@ type Event struct {
 func (e Event) String() string {
 	inputs := make([]string, len(e.Inputs))
 	for i, input := range e.Inputs {
-		inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
+		inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
 		if input.Indexed {
-			inputs[i] = fmt.Sprintf("%v indexed %v", input.Name, input.Type)
+			inputs[i] = fmt.Sprintf("%v indexed %v", input.Type, input.Name)
 		}
 	}
-	return fmt.Sprintf("e %v(%v)", e.Name, strings.Join(inputs, ", "))
+	return fmt.Sprintf("event %v(%v)", e.Name, strings.Join(inputs, ", "))
 }
 
 // Id returns the canonical representation of the event's signature used by the
|
@ -87,12 +87,12 @@ func TestEventId(t *testing.T) {
|
|||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
definition: `[
|
definition: `[
|
||||||
{ "type" : "event", "name" : "balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
|
{ "type" : "event", "name" : "Balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
|
||||||
{ "type" : "event", "name" : "check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
|
{ "type" : "event", "name" : "Check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
|
||||||
]`,
|
]`,
|
||||||
expectations: map[string]common.Hash{
|
expectations: map[string]common.Hash{
|
||||||
"balance": crypto.Keccak256Hash([]byte("balance(uint256)")),
|
"Balance": crypto.Keccak256Hash([]byte("Balance(uint256)")),
|
||||||
"check": crypto.Keccak256Hash([]byte("check(address,uint256)")),
|
"Check": crypto.Keccak256Hash([]byte("Check(address,uint256)")),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -111,6 +111,39 @@ func TestEventId(t *testing.T) {
 	}
 }
 
+func TestEventString(t *testing.T) {
+	var table = []struct {
+		definition   string
+		expectations map[string]string
+	}{
+		{
+			definition: `[
+			{ "type" : "event", "name" : "Balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
+			{ "type" : "event", "name" : "Check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] },
+			{ "type" : "event", "name" : "Transfer", "inputs": [{ "name": "from", "type": "address", "indexed": true }, { "name": "to", "type": "address", "indexed": true }, { "name": "value", "type": "uint256" }] }
+			]`,
+			expectations: map[string]string{
+				"Balance":  "event Balance(uint256 in)",
+				"Check":    "event Check(address t, uint256 b)",
+				"Transfer": "event Transfer(address indexed from, address indexed to, uint256 value)",
+			},
+		},
+	}
+
+	for _, test := range table {
+		abi, err := JSON(strings.NewReader(test.definition))
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		for name, event := range abi.Events {
+			if event.String() != test.expectations[name] {
+				t.Errorf("expected string to be %s, got %s", test.expectations[name], event.String())
+			}
+		}
+	}
+}
+
 // TestEventMultiValueWithArrayUnpack verifies that array fields will be counted after parsing array.
 func TestEventMultiValueWithArrayUnpack(t *testing.T) {
 	definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
accounts/abi/method.go
@@ -56,14 +56,14 @@ func (method Method) Sig() string {
 func (method Method) String() string {
 	inputs := make([]string, len(method.Inputs))
 	for i, input := range method.Inputs {
-		inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
+		inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
 	}
 	outputs := make([]string, len(method.Outputs))
 	for i, output := range method.Outputs {
+		outputs[i] = output.Type.String()
 		if len(output.Name) > 0 {
-			outputs[i] = fmt.Sprintf("%v ", output.Name)
+			outputs[i] += fmt.Sprintf(" %v", output.Name)
 		}
-		outputs[i] += output.Type.String()
 	}
 	constant := ""
 	if method.Const {
accounts/abi/method_test.go (new file, 61 lines)
@@ -0,0 +1,61 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package abi
+
+import (
+	"strings"
+	"testing"
+)
+
+const methoddata = `
+[
+	{ "type" : "function", "name" : "balance", "constant" : true },
+	{ "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
+	{ "type" : "function", "name" : "transfer", "constant" : false, "inputs" : [ { "name" : "from", "type" : "address" }, { "name" : "to", "type" : "address" }, { "name" : "value", "type" : "uint256" } ], "outputs" : [ { "name" : "success", "type" : "bool" } ] }
+]`
+
+func TestMethodString(t *testing.T) {
+	var table = []struct {
+		method      string
+		expectation string
+	}{
+		{
+			method:      "balance",
+			expectation: "function balance() constant returns()",
+		},
+		{
+			method:      "send",
+			expectation: "function send(uint256 amount) returns()",
+		},
+		{
+			method:      "transfer",
+			expectation: "function transfer(address from, address to, uint256 value) returns(bool success)",
+		},
+	}
+
+	abi, err := JSON(strings.NewReader(methoddata))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, test := range table {
+		got := abi.Methods[test.method].String()
+		if got != test.expectation {
+			t.Errorf("expected string to be %s, got %s", test.expectation, got)
+		}
+	}
+}
accounts/abi/pack_test.go
@@ -29,314 +29,601 @@ import (
 
 func TestPack(t *testing.T) {
 	for i, test := range []struct {
 		typ        string
+		components []ArgumentMarshaling
 		input      interface{}
 		output     []byte
 	}{
 		{
 			"uint8",
+			nil,
 			uint8(2),
 			common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"uint8[]",
+			nil,
 			[]uint8{1, 2},
 			common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"uint16",
+			nil,
 			uint16(2),
 			common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"uint16[]",
+			nil,
 			[]uint16{1, 2},
 			common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"uint32",
+			nil,
 			uint32(2),
 			common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"uint32[]",
+			nil,
 			[]uint32{1, 2},
 			common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"uint64",
+			nil,
 			uint64(2),
 			common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"uint64[]",
+			nil,
 			[]uint64{1, 2},
 			common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"uint256",
+			nil,
 			big.NewInt(2),
 			common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"uint256[]",
+			nil,
 			[]*big.Int{big.NewInt(1), big.NewInt(2)},
 			common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"int8",
+			nil,
 			int8(2),
 			common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"int8[]",
+			nil,
 			[]int8{1, 2},
 			common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"int16",
+			nil,
 			int16(2),
 			common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"int16[]",
+			nil,
 			[]int16{1, 2},
 			common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"int32",
+			nil,
 			int32(2),
 			common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"int32[]",
+			nil,
 			[]int32{1, 2},
 			common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"int64",
+			nil,
 			int64(2),
 			common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"int64[]",
+			nil,
 			[]int64{1, 2},
 			common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"int256",
+			nil,
 			big.NewInt(2),
 			common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"int256[]",
+			nil,
 			[]*big.Int{big.NewInt(1), big.NewInt(2)},
 			common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
 		},
 		{
 			"bytes1",
+			nil,
 			[1]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes2",
+			nil,
 			[2]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes3",
+			nil,
 			[3]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes4",
+			nil,
 			[4]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes5",
+			nil,
 			[5]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes6",
+			nil,
 			[6]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes7",
+			nil,
 			[7]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes8",
+			nil,
 			[8]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes9",
+			nil,
 			[9]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes10",
+			nil,
 			[10]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes11",
+			nil,
 			[11]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes12",
+			nil,
 			[12]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes13",
+			nil,
 			[13]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes14",
+			nil,
 			[14]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes15",
+			nil,
 			[15]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes16",
+			nil,
 			[16]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes17",
+			nil,
 			[17]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes18",
+			nil,
 			[18]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes19",
+			nil,
 			[19]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
 		},
 		{
 			"bytes20",
+			nil,
 			[20]byte{1},
 			common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"bytes21",
|
"bytes21",
|
||||||
|
nil,
|
||||||
[21]byte{1},
|
[21]byte{1},
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"bytes22",
|
"bytes22",
|
||||||
|
nil,
|
||||||
[22]byte{1},
|
[22]byte{1},
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"bytes23",
|
"bytes23",
|
||||||
|
nil,
|
||||||
[23]byte{1},
|
[23]byte{1},
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"bytes24",
|
"bytes24",
|
||||||
[24]byte{1},
|
nil,
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"bytes24",
|
|
||||||
[24]byte{1},
|
[24]byte{1},
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"bytes25",
|
"bytes25",
|
||||||
|
nil,
|
||||||
[25]byte{1},
|
[25]byte{1},
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"bytes26",
|
"bytes26",
|
||||||
|
nil,
|
||||||
[26]byte{1},
|
[26]byte{1},
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"bytes27",
|
"bytes27",
|
||||||
|
nil,
|
||||||
[27]byte{1},
|
[27]byte{1},
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"bytes28",
|
"bytes28",
|
||||||
|
nil,
|
||||||
[28]byte{1},
|
[28]byte{1},
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"bytes29",
|
"bytes29",
|
||||||
|
nil,
|
||||||
[29]byte{1},
|
[29]byte{1},
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"bytes30",
|
"bytes30",
|
||||||
|
nil,
|
||||||
[30]byte{1},
|
[30]byte{1},
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"bytes31",
|
"bytes31",
|
||||||
|
nil,
|
||||||
[31]byte{1},
|
[31]byte{1},
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"bytes32",
|
"bytes32",
|
||||||
|
nil,
|
||||||
[32]byte{1},
|
[32]byte{1},
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"uint32[2][3][4]",
|
"uint32[2][3][4]",
|
||||||
|
nil,
|
||||||
[4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
|
[4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
|
||||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018"),
|
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"address[]",
|
"address[]",
|
||||||
|
nil,
|
||||||
[]common.Address{{1}, {2}},
|
[]common.Address{{1}, {2}},
|
||||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000"),
|
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"bytes32[]",
|
"bytes32[]",
|
||||||
|
nil,
|
||||||
[]common.Hash{{1}, {2}},
|
[]common.Hash{{1}, {2}},
|
||||||
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000201000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000201000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"function",
|
"function",
|
||||||
|
nil,
|
||||||
[24]byte{1},
|
[24]byte{1},
|
||||||
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"string",
|
"string",
|
||||||
|
nil,
|
||||||
"foobar",
|
"foobar",
|
||||||
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"),
|
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"),
|
||||||
},
|
},
|
||||||
+{
+	"string[]",
+	nil,
+	[]string{"hello", "foobar"},
+	common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
+		"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
+		"0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1
+		"0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
+		"68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
+		"0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
+		"666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
+},
+{
+	"string[2]",
+	nil,
+	[]string{"hello", "foobar"},
+	common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0
+		"0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1
+		"0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
+		"68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
+		"0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
+		"666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
+},
+{
+	"bytes32[][]",
+	nil,
+	[][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
+	common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
+		"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
+		"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
+		"0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
+		"0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
+		"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
+		"0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
+		"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
+		"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
+		"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
+},
+
+{
+	"bytes32[][2]",
+	nil,
+	[][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
+	common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
+		"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
+		"0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
+		"0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
+		"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
+		"0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
+		"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
+		"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
+		"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
+},
+
+{
+	"bytes32[3][2]",
+	nil,
+	[][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}},
+	common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
+		"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
+		"0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2]
+		"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
+		"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
+		"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
+},
+{
+	// static tuple
+	"tuple",
+	[]ArgumentMarshaling{
+		{Name: "a", Type: "int64"},
+		{Name: "b", Type: "int256"},
+		{Name: "c", Type: "int256"},
+		{Name: "d", Type: "bool"},
+		{Name: "e", Type: "bytes32[3][2]"},
+	},
+	struct {
+		A int64
+		B *big.Int
+		C *big.Int
+		D bool
+		E [][]common.Hash
+	}{1, big.NewInt(1), big.NewInt(-1), true, [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}}},
+	common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001" + // struct[a]
+		"0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
+		"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c]
+		"0000000000000000000000000000000000000000000000000000000000000001" + // struct[d]
+		"0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0]
+		"0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1]
+		"0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][2]
+		"0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][0]
+		"0400000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][1]
+		"0500000000000000000000000000000000000000000000000000000000000000"), // struct[e] array[1][2]
+},
+{
+	// dynamic tuple
+	"tuple",
+	[]ArgumentMarshaling{
+		{Name: "a", Type: "string"},
+		{Name: "b", Type: "int64"},
+		{Name: "c", Type: "bytes"},
+		{Name: "d", Type: "string[]"},
+		{Name: "e", Type: "int256[]"},
+		{Name: "f", Type: "address[]"},
+	},
+	struct {
+		FieldA string `abi:"a"` // Test whether abi tag works
+		FieldB int64  `abi:"b"`
+		C      []byte
+		D      []string
+		E      []*big.Int
+		F      []common.Address
+	}{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}},
+	common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset
+		"0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
+		"0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset
+		"0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset
+		"0000000000000000000000000000000000000000000000000000000000000220" + // struct[e] offset
+		"0000000000000000000000000000000000000000000000000000000000000280" + // struct[f] offset
+		"0000000000000000000000000000000000000000000000000000000000000006" + // struct[a] length
+		"666f6f6261720000000000000000000000000000000000000000000000000000" + // struct[a] "foobar"
+		"0000000000000000000000000000000000000000000000000000000000000001" + // struct[c] length
+		"0100000000000000000000000000000000000000000000000000000000000000" + // []byte{1}
+		"0000000000000000000000000000000000000000000000000000000000000002" + // struct[d] length
+		"0000000000000000000000000000000000000000000000000000000000000040" + // foo offset
+		"0000000000000000000000000000000000000000000000000000000000000080" + // bar offset
+		"0000000000000000000000000000000000000000000000000000000000000003" + // foo length
+		"666f6f0000000000000000000000000000000000000000000000000000000000" + // foo
+		"0000000000000000000000000000000000000000000000000000000000000003" + // bar length
+		"6261720000000000000000000000000000000000000000000000000000000000" + // bar
+		"0000000000000000000000000000000000000000000000000000000000000002" + // struct[e] length
+		"0000000000000000000000000000000000000000000000000000000000000001" + // 1
+		"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // -1
+		"0000000000000000000000000000000000000000000000000000000000000002" + // struct[f] length
+		"0000000000000000000000000100000000000000000000000000000000000000" + // common.Address{1}
+		"0000000000000000000000000200000000000000000000000000000000000000"), // common.Address{2}
+},
+{
+	// nested tuple
+	"tuple",
+	[]ArgumentMarshaling{
+		{Name: "a", Type: "tuple", Components: []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256[]"}}},
+		{Name: "b", Type: "int256[]"},
+	},
+	struct {
+		A struct {
+			FieldA *big.Int `abi:"a"`
+			B      []*big.Int
+		}
+		B []*big.Int
+	}{
+		A: struct {
+			FieldA *big.Int `abi:"a"` // Test whether abi tag works for nested tuple
+			B      []*big.Int
+		}{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
+		B: []*big.Int{big.NewInt(1), big.NewInt(0)}},
+	common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // a offset
+		"00000000000000000000000000000000000000000000000000000000000000e0" + // b offset
+		"0000000000000000000000000000000000000000000000000000000000000001" + // a.a value
+		"0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset
+		"0000000000000000000000000000000000000000000000000000000000000002" + // a.b length
+		"0000000000000000000000000000000000000000000000000000000000000001" + // a.b[0] value
+		"0000000000000000000000000000000000000000000000000000000000000000" + // a.b[1] value
+		"0000000000000000000000000000000000000000000000000000000000000002" + // b length
+		"0000000000000000000000000000000000000000000000000000000000000001" + // b[0] value
+		"0000000000000000000000000000000000000000000000000000000000000000"), // b[1] value
+},
+{
+	// tuple slice
+	"tuple[]",
+	[]ArgumentMarshaling{
+		{Name: "a", Type: "int256"},
+		{Name: "b", Type: "int256[]"},
+	},
+	[]struct {
+		A *big.Int
+		B []*big.Int
+	}{
+		{big.NewInt(-1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
+		{big.NewInt(1), []*big.Int{big.NewInt(2), big.NewInt(-1)}},
+	},
+	common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // tuple length
+		"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
+		"00000000000000000000000000000000000000000000000000000000000000e0" + // tuple[1] offset
+		"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A
+		"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0].B offset
+		"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].B length
+		"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].B[0] value
+		"0000000000000000000000000000000000000000000000000000000000000000" + // tuple[0].B[1] value
+		"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A
+		"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[1].B offset
+		"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B length
+		"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B[0] value
+		"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].B[1] value
+},
+{
+	// static tuple array
+	"tuple[2]",
+	[]ArgumentMarshaling{
+		{Name: "a", Type: "int256"},
+		{Name: "b", Type: "int256"},
+	},
+	[2]struct {
+		A *big.Int
+		B *big.Int
+	}{
+		{big.NewInt(-1), big.NewInt(1)},
+		{big.NewInt(1), big.NewInt(-1)},
+	},
+	common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].a
+		"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].b
+		"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].a
+		"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].b
+},
+{
+	// dynamic tuple array
+	"tuple[2]",
+	[]ArgumentMarshaling{
+		{Name: "a", Type: "int256[]"},
+	},
+	[2]struct {
+		A []*big.Int
+	}{
+		{[]*big.Int{big.NewInt(-1), big.NewInt(1)}},
+		{[]*big.Int{big.NewInt(1), big.NewInt(-1)}},
+	},
+	common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
+		"00000000000000000000000000000000000000000000000000000000000000c0" + // tuple[1] offset
+		"0000000000000000000000000000000000000000000000000000000000000020" + // tuple[0].A offset
+		"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].A length
+		"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A[0]
+		"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].A[1]
+		"0000000000000000000000000000000000000000000000000000000000000020" + // tuple[1].A offset
+		"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].A length
+		"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A[0]
+		"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].A[1]
+},
} {
-	typ, err := NewType(test.typ)
+	typ, err := NewType(test.typ, test.components)
	if err != nil {
		t.Fatalf("%v failed. Unexpected parse error: %v", i, err)
	}

	output, err := typ.pack(reflect.ValueOf(test.input))
	if err != nil {
		t.Fatalf("%v failed. Unexpected pack error: %v", i, err)
	}

	if !bytes.Equal(output, test.output) {
-		t.Errorf("%d failed. Expected bytes: '%x' Got: '%x'", i, test.output, output)
+		t.Errorf("input %d for typ: %v failed. Expected bytes: '%x' Got: '%x'", i, typ.String(), test.output, output)
	}
}
}
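The signature change in the loop above is the crux of this diff: NewType now takes the tuple's components alongside the type string. A minimal stand-alone sketch of the new call, assuming the two-argument signature introduced here (the tuple layout below is illustrative, not one of the table's cases; later go-ethereum versions add a third internalType parameter):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Build the type for the Solidity tuple (uint256 a, string b); the
	// canonical expression used for signatures should print as "(uint256,string)".
	typ, err := abi.NewType("tuple", []abi.ArgumentMarshaling{
		{Name: "a", Type: "uint256"},
		{Name: "b", Type: "string"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(typ.String())
}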
@ -406,6 +693,59 @@ func TestMethodPack(t *testing.T) {
	if !bytes.Equal(packed, sig) {
		t.Errorf("expected %x got %x", sig, packed)
	}

+	a := [2][2]*big.Int{{big.NewInt(1), big.NewInt(1)}, {big.NewInt(2), big.NewInt(0)}}
+	sig = abi.Methods["nestedArray"].Id()
+	sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{0}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{0xa0}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+	sig = append(sig, common.LeftPadBytes(addrC[:], 32)...)
+	sig = append(sig, common.LeftPadBytes(addrD[:], 32)...)
+	packed, err = abi.Pack("nestedArray", a, []common.Address{addrC, addrD})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(packed, sig) {
+		t.Errorf("expected %x got %x", sig, packed)
+	}
+
+	sig = abi.Methods["nestedArray2"].Id()
+	sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{0x80}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+	packed, err = abi.Pack("nestedArray2", [2][]uint8{{1}, {1}})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(packed, sig) {
+		t.Errorf("expected %x got %x", sig, packed)
+	}
+
+	sig = abi.Methods["nestedSlice"].Id()
+	sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{0x02}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{0xa0}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+	sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+	packed, err = abi.Pack("nestedSlice", [][]uint8{{1, 2}, {1, 2}})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(packed, sig) {
+		t.Errorf("expected %x got %x", sig, packed)
+	}
}
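For reference, the expected nestedSlice encoding above can be reproduced by hand: after the 4-byte selector comes 0x20 (the offset to the argument), then 0x02 (the outer length), then two element offsets relative to the start of the outer array's data area (just after its length word). The first inner slice sits at 2*32 = 64 = 0x40; it occupies one 32-byte length word plus 2*32 bytes of data (0x60 in total), so the second inner slice sits at 0x40 + 0x60 = 0xa0. Each inner slice then follows as length 2 and the padded values 1 and 2, which is exactly the sequence of LeftPadBytes appends building sig.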

func TestPackNumber(t *testing.T) {
@ -71,22 +71,36 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
//
// set is a bit more lenient when it comes to assignment and doesn't enforce
// as strict a ruleset as bare `reflect` does.
-func set(dst, src reflect.Value, output Argument) error {
+func set(dst, src reflect.Value) error {
-	dstType := dst.Type()
+	dstType, srcType := dst.Type(), src.Type()
-	srcType := src.Type()
	switch {
-	case dstType.AssignableTo(srcType):
-		dst.Set(src)
	case dstType.Kind() == reflect.Interface:
+		return set(dst.Elem(), src)
+	case dstType.Kind() == reflect.Ptr && dstType.Elem() != derefbigT:
+		return set(dst.Elem(), src)
+	case srcType.AssignableTo(dstType) && dst.CanSet():
		dst.Set(src)
-	case dstType.Kind() == reflect.Ptr:
+	case dstType.Kind() == reflect.Slice && srcType.Kind() == reflect.Slice:
-		return set(dst.Elem(), src, output)
+		return setSlice(dst, src)
	default:
		return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
	}
	return nil
}

+// setSlice attempts to assign src to dst when slices are not assignable by default
+// e.g. src: [][]byte -> dst: [][15]byte
+func setSlice(dst, src reflect.Value) error {
+	slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len())
+	for i := 0; i < src.Len(); i++ {
+		v := src.Index(i)
+		reflect.Copy(slice.Index(i), v)
+	}
+
+	dst.Set(slice)
+	return nil
+}

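The new setSlice path is easy to exercise in isolation. Below is a self-contained sketch that re-creates the helper outside the package (a copy for illustration, not the package's exported API) and converts [][]byte into [][15]byte:

package main

import (
	"fmt"
	"reflect"
)

// setSliceDemo mirrors the setSlice helper added above: it allocates a slice
// of dst's type and element-copies each entry, covering cases like
// [][]byte -> [][15]byte where direct assignment is not possible.
func setSliceDemo(dst, src reflect.Value) {
	slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len())
	for i := 0; i < src.Len(); i++ {
		reflect.Copy(slice.Index(i), src.Index(i))
	}
	dst.Set(slice)
}

func main() {
	src := [][]byte{{1, 2, 3}, {4, 5}}
	var dst [][15]byte
	setSliceDemo(reflect.ValueOf(&dst).Elem(), reflect.ValueOf(src))
	fmt.Println(dst[0][:3], dst[1][:2]) // [1 2 3] [4 5]
}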
// requireAssignable assures that `dest` is a pointer and it's not an interface.
func requireAssignable(dst, src reflect.Value) error {
	if dst.Kind() != reflect.Ptr && dst.Kind() != reflect.Interface {
@ -112,14 +126,14 @@ func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
	return nil
}

-// mapAbiToStringField maps abi to struct fields.
+// mapArgNamesToStructFields maps a slice of argument names to struct fields.
// first round: for each Exportable field that contains a `abi:""` tag
-// and this field name exists in the arguments, pair them together.
+// and this field name exists in the given argument name list, pair them together.
-// second round: for each argument field that has not been already linked,
+// second round: for each argument name that has not been already linked,
// find what variable is expected to be mapped into, if it exists and has not been
// used, pair them.
-func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]string, error) {
+// Note this function assumes the given value is a struct value.
+func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) {
	typ := value.Type()

	abi2struct := make(map[string]string)
@ -133,45 +147,39 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
	if structFieldName[:1] != strings.ToUpper(structFieldName[:1]) {
		continue
	}

	// skip fields that have no abi:"" tag.
	var ok bool
	var tagName string
	if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok {
		continue
	}

	// check if tag is empty.
	if tagName == "" {
		return nil, fmt.Errorf("struct: abi tag in '%s' is empty", structFieldName)
	}

	// check which argument field matches with the abi tag.
	found := false
-	for _, abiField := range args.NonIndexed() {
+	for _, arg := range argNames {
-		if abiField.Name == tagName {
+		if arg == tagName {
-			if abi2struct[abiField.Name] != "" {
+			if abi2struct[arg] != "" {
				return nil, fmt.Errorf("struct: abi tag in '%s' already mapped", structFieldName)
			}
			// pair them
-			abi2struct[abiField.Name] = structFieldName
+			abi2struct[arg] = structFieldName
-			struct2abi[structFieldName] = abiField.Name
+			struct2abi[structFieldName] = arg
			found = true
		}
	}

	// check if this tag has been mapped.
	if !found {
		return nil, fmt.Errorf("struct: abi tag '%s' defined but not found in abi", tagName)
	}

}

// second round ~~~
-for _, arg := range args {
+for _, argName := range argNames {

-	abiFieldName := arg.Name
+	structFieldName := ToCamelCase(argName)
-	structFieldName := capitalise(abiFieldName)

	if structFieldName == "" {
		return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
@ -181,11 +189,11 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
	// struct field with the same field name. If so, raise an error:
	//     abi: [ { "name": "value" } ]
	//     struct { Value *big.Int , Value1 *big.Int `abi:"value"`}
-	if abi2struct[abiFieldName] != "" {
+	if abi2struct[argName] != "" {
-		if abi2struct[abiFieldName] != structFieldName &&
+		if abi2struct[argName] != structFieldName &&
			struct2abi[structFieldName] == "" &&
			value.FieldByName(structFieldName).IsValid() {
-			return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", abiFieldName)
+			return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", argName)
		}
		continue
	}
@ -197,16 +205,14 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
	if value.FieldByName(structFieldName).IsValid() {
		// pair them
-		abi2struct[abiFieldName] = structFieldName
+		abi2struct[argName] = structFieldName
-		struct2abi[structFieldName] = abiFieldName
+		struct2abi[structFieldName] = argName
	} else {
		// not paired, but annotate as used, to detect cases like
		//     abi : [ { "name": "value" }, { "name": "_value" } ]
		//     struct { Value *big.Int }
-		struct2abi[structFieldName] = abiFieldName
+		struct2abi[structFieldName] = argName
	}

}

return abi2struct, nil
}
191
accounts/abi/reflect_test.go
Normal file
@ -0,0 +1,191 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package abi

import (
	"reflect"
	"testing"
)

type reflectTest struct {
	name  string
	args  []string
	struc interface{}
	want  map[string]string
	err   string
}

var reflectTests = []reflectTest{
	{
		name: "OneToOneCorrespondance",
		args: []string{"fieldA"},
		struc: struct {
			FieldA int `abi:"fieldA"`
		}{},
		want: map[string]string{
			"fieldA": "FieldA",
		},
	},
	{
		name: "MissingFieldsInStruct",
		args: []string{"fieldA", "fieldB"},
		struc: struct {
			FieldA int `abi:"fieldA"`
		}{},
		want: map[string]string{
			"fieldA": "FieldA",
		},
	},
	{
		name: "MoreFieldsInStructThanArgs",
		args: []string{"fieldA"},
		struc: struct {
			FieldA int `abi:"fieldA"`
			FieldB int
		}{},
		want: map[string]string{
			"fieldA": "FieldA",
		},
	},
	{
		name: "MissingFieldInArgs",
		args: []string{"fieldA"},
		struc: struct {
			FieldA int `abi:"fieldA"`
			FieldB int `abi:"fieldB"`
		}{},
		err: "struct: abi tag 'fieldB' defined but not found in abi",
	},
	{
		name: "NoAbiDescriptor",
		args: []string{"fieldA"},
		struc: struct {
			FieldA int
		}{},
		want: map[string]string{
			"fieldA": "FieldA",
		},
	},
	{
		name: "NoArgs",
		args: []string{},
		struc: struct {
			FieldA int `abi:"fieldA"`
		}{},
		err: "struct: abi tag 'fieldA' defined but not found in abi",
	},
	{
		name: "DifferentName",
		args: []string{"fieldB"},
		struc: struct {
			FieldA int `abi:"fieldB"`
		}{},
		want: map[string]string{
			"fieldB": "FieldA",
		},
	},
	{
		name: "MultipleFields",
		args: []string{"fieldA", "fieldB"},
		struc: struct {
			FieldA int `abi:"fieldA"`
			FieldB int `abi:"fieldB"`
		}{},
		want: map[string]string{
			"fieldA": "FieldA",
			"fieldB": "FieldB",
		},
	},
	{
		name: "MultipleFieldsABIMissing",
		args: []string{"fieldA", "fieldB"},
		struc: struct {
			FieldA int `abi:"fieldA"`
			FieldB int
		}{},
		want: map[string]string{
			"fieldA": "FieldA",
			"fieldB": "FieldB",
		},
	},
	{
		name: "NameConflict",
		args: []string{"fieldB"},
		struc: struct {
			FieldA int `abi:"fieldB"`
			FieldB int
		}{},
		err: "abi: multiple variables maps to the same abi field 'fieldB'",
	},
	{
		name: "Underscored",
		args: []string{"_"},
		struc: struct {
			FieldA int
		}{},
		err: "abi: purely underscored output cannot unpack to struct",
	},
	{
		name: "DoubleMapping",
		args: []string{"fieldB", "fieldC", "fieldA"},
		struc: struct {
			FieldA int `abi:"fieldC"`
			FieldB int
		}{},
		err: "abi: multiple outputs mapping to the same struct field 'FieldA'",
	},
	{
		name: "AlreadyMapped",
		args: []string{"fieldB", "fieldB"},
		struc: struct {
			FieldB int `abi:"fieldB"`
		}{},
		err: "struct: abi tag in 'FieldB' already mapped",
	},
}

func TestReflectNameToStruct(t *testing.T) {
	for _, test := range reflectTests {
		t.Run(test.name, func(t *testing.T) {
			m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))
			if len(test.err) > 0 {
				if err == nil || err.Error() != test.err {
					t.Fatalf("Invalid error: expected %v, got %v", test.err, err)
				}
			} else {
				if err != nil {
					t.Fatalf("Unexpected error: %v", err)
				}
				for fname := range test.want {
					if m[fname] != test.want[fname] {
						t.Fatalf("Incorrect value for field %s: expected %v, got %v", fname, test.want[fname], m[fname])
					}
				}
			}
		})
	}
}
@ -17,6 +17,7 @@
package abi

import (
+	"errors"
	"fmt"
	"reflect"
	"regexp"
@ -32,6 +33,7 @@ const (
	StringTy
	SliceTy
	ArrayTy
+	TupleTy
	AddressTy
	FixedBytesTy
	BytesTy
@ -43,13 +45,16 @@ const (
// Type is the reflection of the supported argument type
type Type struct {
	Elem *Type

	Kind reflect.Kind
	Type reflect.Type
	Size int
	T    byte // Our own type checking

	stringKind string // holds the unparsed string for deriving signatures

+	// Tuple relative fields
+	TupleElems    []*Type  // Type information of all tuple fields
+	TupleRawNames []string // Raw field name of all tuple fields
}

var (
@ -58,7 +63,7 @@ var (
)

// NewType creates a new reflection type of abi type given in t.
-func NewType(t string) (typ Type, err error) {
+func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
	// check that array brackets are equal if they exist
	if strings.Count(t, "[") != strings.Count(t, "]") {
		return Type{}, fmt.Errorf("invalid arg type in abi")
@ -71,7 +76,7 @@ func NewType(t string) (typ Type, err error) {
	if strings.Count(t, "[") != 0 {
		i := strings.LastIndex(t, "[")
		// recursively embed the type
-		embeddedType, err := NewType(t[:i])
+		embeddedType, err := NewType(t[:i], components)
		if err != nil {
			return Type{}, err
		}
@ -87,6 +92,9 @@ func NewType(t string) (typ Type, err error) {
		typ.Kind = reflect.Slice
		typ.Elem = &embeddedType
		typ.Type = reflect.SliceOf(embeddedType.Type)
+		if embeddedType.T == TupleTy {
+			typ.stringKind = embeddedType.stringKind + sliced
+		}
	} else if len(intz) == 1 {
		// is an array
		typ.T = ArrayTy
@ -97,6 +105,9 @@ func NewType(t string) (typ Type, err error) {
			return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
		}
		typ.Type = reflect.ArrayOf(typ.Size, embeddedType.Type)
+		if embeddedType.T == TupleTy {
+			typ.stringKind = embeddedType.stringKind + sliced
+		}
	} else {
		return Type{}, fmt.Errorf("invalid formatting of array type")
	}
@ -158,6 +169,40 @@ func NewType(t string) (typ Type, err error) {
		typ.Size = varSize
		typ.Type = reflect.ArrayOf(varSize, reflect.TypeOf(byte(0)))
	}
+	case "tuple":
+		var (
+			fields     []reflect.StructField
+			elems      []*Type
+			names      []string
+			expression string // canonical parameter expression
+		)
+		expression += "("
+		for idx, c := range components {
+			cType, err := NewType(c.Type, c.Components)
+			if err != nil {
+				return Type{}, err
+			}
+			if ToCamelCase(c.Name) == "" {
+				return Type{}, errors.New("abi: purely anonymous or underscored field is not supported")
+			}
+			fields = append(fields, reflect.StructField{
+				Name: ToCamelCase(c.Name), // reflect.StructOf will panic for any unexported field name.
+				Type: cType.Type,
+			})
+			elems = append(elems, &cType)
+			names = append(names, c.Name)
+			expression += cType.stringKind
+			if idx != len(components)-1 {
+				expression += ","
+			}
+		}
+		expression += ")"
+		typ.Kind = reflect.Struct
+		typ.Type = reflect.StructOf(fields)
+		typ.TupleElems = elems
+		typ.TupleRawNames = names
+		typ.T = TupleTy
+		typ.stringKind = expression
	case "function":
		typ.Kind = reflect.Array
		typ.T = FunctionTy
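The tuple branch above leans on reflect.StructOf to build a Go struct type on the fly; the component names are camel-cased first because StructOf panics when handed an unexported (lower-case) field name. A tiny stand-alone illustration of that building block (not go-ethereum code, just the underlying reflect mechanism):

package main

import (
	"fmt"
	"math/big"
	"reflect"
)

func main() {
	// Two exported fields standing in for the tuple components (uint256 a, string b).
	fields := []reflect.StructField{
		{Name: "A", Type: reflect.TypeOf(new(big.Int))},
		{Name: "B", Type: reflect.TypeOf("")},
	}
	t := reflect.StructOf(fields)
	fmt.Println(t) // struct { A *big.Int; B string }
}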
@ -178,28 +223,82 @@ func (t Type) String() (out string) {
func (t Type) pack(v reflect.Value) ([]byte, error) {
	// dereference pointer first if it's a pointer
	v = indirect(v)

	if err := typeCheck(t, v); err != nil {
		return nil, err
	}

-	if t.T == SliceTy || t.T == ArrayTy {
+	switch t.T {
-		var packed []byte
+	case SliceTy, ArrayTy:
+		var ret []byte
+
+		if t.requiresLengthPrefix() {
+			// append length
+			ret = append(ret, packNum(reflect.ValueOf(v.Len()))...)
+		}
+
+		// calculate offset if any
+		offset := 0
+		offsetReq := isDynamicType(*t.Elem)
+		if offsetReq {
+			offset = getTypeSize(*t.Elem) * v.Len()
+		}
+		var tail []byte
		for i := 0; i < v.Len(); i++ {
			val, err := t.Elem.pack(v.Index(i))
			if err != nil {
				return nil, err
			}
-			packed = append(packed, val...)
+			if !offsetReq {
+				ret = append(ret, val...)
+				continue
+			}
+			ret = append(ret, packNum(reflect.ValueOf(offset))...)
+			offset += len(val)
+			tail = append(tail, val...)
		}
-		if t.T == SliceTy {
+		return append(ret, tail...), nil
-			return packBytesSlice(packed, v.Len()), nil
+	case TupleTy:
-		} else if t.T == ArrayTy {
+		// (T1,...,Tk) for k >= 0 and any types T1, …, Tk
-			return packed, nil
+		// enc(X) = head(X(1)) ... head(X(k)) tail(X(1)) ... tail(X(k))
+		// where X = (X(1), ..., X(k)) and head and tail are defined for Ti being a static
+		// type as
+		// head(X(i)) = enc(X(i)) and tail(X(i)) = "" (the empty string)
+		// and as
+		// head(X(i)) = enc(len(head(X(1)) ... head(X(k)) tail(X(1)) ... tail(X(i-1))))
+		// tail(X(i)) = enc(X(i))
+		// otherwise, i.e. if Ti is a dynamic type.
+		fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, v)
+		if err != nil {
+			return nil, err
+		}
+		// Calculate prefix occupied size.
+		offset := 0
+		for _, elem := range t.TupleElems {
+			offset += getTypeSize(*elem)
+		}
+		var ret, tail []byte
+		for i, elem := range t.TupleElems {
+			field := v.FieldByName(fieldmap[t.TupleRawNames[i]])
+			if !field.IsValid() {
+				return nil, fmt.Errorf("field %s for tuple not found in the given struct", t.TupleRawNames[i])
+			}
+			val, err := elem.pack(field)
+			if err != nil {
+				return nil, err
+			}
+			if isDynamicType(*elem) {
+				ret = append(ret, packNum(reflect.ValueOf(offset))...)
+				tail = append(tail, val...)
+				offset += len(val)
+			} else {
+				ret = append(ret, val...)
+			}
+		}
+		return append(ret, tail...), nil
+
+	default:
+		return packElement(t, v), nil
	}
-	return packElement(t, v), nil
}

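This offset bookkeeping is what produces the head words seen in the test vectors earlier in the diff. Taking the string[2] case as a worked example: the element type string is dynamic, so each of the two elements contributes a 32-byte head, and getTypeSize(string) * 2 = 64 = 0x40 is the offset of the first element. "hello" then packs to 64 bytes (a length word plus one padded data word), so the second element's offset is 0x40 + 0x40 = 0x80 — exactly the two head words in that case's expected bytes.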
// requireLengthPrefix returns whether the type requires any sort of length
|
// requireLengthPrefix returns whether the type requires any sort of length
|
||||||
@ -207,3 +306,47 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
|
|||||||
func (t Type) requiresLengthPrefix() bool {
|
func (t Type) requiresLengthPrefix() bool {
|
||||||
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy
|
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// isDynamicType returns true if the type is dynamic.
|
||||||
|
// The following types are called “dynamic”:
|
||||||
|
// * bytes
|
||||||
|
// * string
|
||||||
|
// * T[] for any T
|
||||||
|
// * T[k] for any dynamic T and any k >= 0
|
||||||
|
// * (T1,...,Tk) if Ti is dynamic for some 1 <= i <= k
|
||||||
|
func isDynamicType(t Type) bool {
|
||||||
|
if t.T == TupleTy {
|
||||||
|
for _, elem := range t.TupleElems {
|
||||||
|
if isDynamicType(*elem) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy || (t.T == ArrayTy && isDynamicType(*t.Elem))
|
||||||
|
}
|
||||||
|
|
||||||
// getTypeSize returns the size that this type needs to occupy.
// We distinguish static and dynamic types. Static types are encoded in-place
// and dynamic types are encoded at a separately allocated location after the
// current block.
// So for a static variable, the size returned represents the size that the
// variable actually occupies.
// For a dynamic variable, the returned size is a fixed 32 bytes, which is used
// to store the location reference for the actual value storage.
func getTypeSize(t Type) int {
	if t.T == ArrayTy && !isDynamicType(*t.Elem) {
		// Recursively calculate type size if it is a nested array
		if t.Elem.T == ArrayTy {
			return t.Size * getTypeSize(*t.Elem)
		}
		return t.Size * 32
	} else if t.T == TupleTy && !isDynamicType(t) {
		total := 0
		for _, elem := range t.TupleElems {
			total += getTypeSize(*elem)
		}
		return total
	}
	return 32
}

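Worked sizes, again as a hypothetical in-package snippet; each value follows from one of the branches above:

func getTypeSizeDemo() {
	for _, s := range []string{"uint256", "uint256[3]", "uint256[2][3]", "string", "string[2]"} {
		typ, _ := NewType(s, nil)
		fmt.Println(s, getTypeSize(typ))
	}
	// uint256 32         (static, encoded in-place)
	// uint256[3] 96      (3 * 32)
	// uint256[2][3] 192  (3 * getTypeSize(uint256[2]) = 3 * 64)
	// string 32          (dynamic: one offset word)
	// string[2] 32       (array of dynamic elements is itself dynamic)
}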
@ -32,72 +32,75 @@ type typeWithoutStringer Type

// Tests that all allowed types get recognized by the type parser.
func TestTypeRegexp(t *testing.T) {
	tests := []struct {
		blob string
		kind Type
		components []ArgumentMarshaling
		kind Type
	}{
		{"bool", Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}},
		{"bool", nil, Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}},
		{"bool[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}},
		{"bool[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}},
		{"bool[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}},
		{"bool[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}},
		{"bool[2][]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
		{"bool[2][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
		{"bool[][]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
		{"bool[][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
		{"bool[][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
		{"bool[][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
		{"bool[2][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
		{"bool[2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
		{"bool[2][][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
		{"bool[2][][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
		{"bool[2][2][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
		{"bool[2][2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
		{"bool[][][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
		{"bool[][][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
		{"bool[][2][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
		{"bool[][2][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
		{"int8", Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}},
		{"int8", nil, Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}},
		{"int16", Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}},
		{"int16", nil, Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}},
		{"int32", Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}},
		{"int32", nil, Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}},
		{"int64", Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}},
		{"int64", nil, Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}},
		{"int256", Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}},
		{"int256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}},
		{"int8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
		{"int8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
		{"int8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
		{"int8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
		{"int16[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
		{"int16[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
		{"int16[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
		{"int16[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
		{"int32[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
		{"int32[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
		{"int32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
		{"int32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
		{"int64[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
		{"int64[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
		{"int64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
		{"int64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
		{"int256[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
		{"int256[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
		{"int256[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
		{"int256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
		{"uint8", Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}},
		{"uint8", nil, Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}},
		{"uint16", Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}},
		{"uint16", nil, Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}},
		{"uint32", Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}},
		{"uint32", nil, Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}},
		{"uint64", Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}},
		{"uint64", nil, Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}},
		{"uint256", Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}},
		{"uint256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}},
		{"uint8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
		{"uint8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
		{"uint8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
		{"uint8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
		{"uint16[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
		{"uint16[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
		{"uint16[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
		{"uint16[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
		{"uint32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
		{"uint32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
		{"uint32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
		{"uint32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
		{"uint64[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
		{"uint64[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
		{"uint64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
		{"uint64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
		{"uint256[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
		{"uint256[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
		{"uint256[2]", Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
		{"uint256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
		{"bytes32", Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
		{"bytes32", nil, Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
		{"bytes[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
		{"bytes[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
		{"bytes[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
		{"bytes[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
		{"bytes32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
		{"bytes32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
		{"bytes32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
		{"bytes32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
		{"string", Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
		{"string", nil, Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
		{"string[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
		{"string[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
		{"string[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
		{"string[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
		{"address", Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}},
		{"address", nil, Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}},
		{"address[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
		{"address[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
		{"address[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
		{"address[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
		// TODO when fixed types are implemented properly
		// {"fixed", Type{}},
		// {"fixed", nil, Type{}},
		// {"fixed128x128", Type{}},
		// {"fixed128x128", nil, Type{}},
		// {"fixed[]", Type{}},
		// {"fixed[]", nil, Type{}},
		// {"fixed[2]", Type{}},
		// {"fixed[2]", nil, Type{}},
		// {"fixed128x128[]", Type{}},
		// {"fixed128x128[]", nil, Type{}},
		// {"fixed128x128[2]", Type{}},
		// {"fixed128x128[2]", nil, Type{}},
		{"tuple", []ArgumentMarshaling{{Name: "a", Type: "int64"}}, Type{Kind: reflect.Struct, T: TupleTy, Type: reflect.TypeOf(struct{ A int64 }{}), stringKind: "(int64)",
			TupleElems: []*Type{{Kind: reflect.Int64, T: IntTy, Type: reflect.TypeOf(int64(0)), Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"a"}}},
	}

	for _, tt := range tests {
		typ, err := NewType(tt.blob)
		typ, err := NewType(tt.blob, tt.components)
		if err != nil {
			t.Errorf("type %q: failed to parse type string: %v", tt.blob, err)
		}
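For reference, the new components argument is how callers describe a tuple's members. A hedged sketch of the call shape, inferred from the tuple test case above:

components := []ArgumentMarshaling{{Name: "a", Type: "int64"}}
typ, err := NewType("tuple", components)
if err != nil {
	// handle the parse error
}
// Expected per the test case: typ.T == TupleTy,
// typ.TupleRawNames == []string{"a"}, and typ.Type is
// the generated struct{ A int64 }.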
@ -109,154 +112,170 @@ func TestTypeRegexp(t *testing.T) {

func TestTypeCheck(t *testing.T) {
	for i, test := range []struct {
		typ string
		input interface{}
		components []ArgumentMarshaling
		err string
		input interface{}
		err string
	}{
		{"uint", big.NewInt(1), "unsupported arg type: uint"},
		{"uint", nil, big.NewInt(1), "unsupported arg type: uint"},
		{"int", big.NewInt(1), "unsupported arg type: int"},
		{"int", nil, big.NewInt(1), "unsupported arg type: int"},
		{"uint256", big.NewInt(1), ""},
		{"uint256", nil, big.NewInt(1), ""},
		{"uint256[][3][]", [][3][]*big.Int{{{}}}, ""},
		{"uint256[][3][]", nil, [][3][]*big.Int{{{}}}, ""},
		{"uint256[][][3]", [3][][]*big.Int{{{}}}, ""},
		{"uint256[][][3]", nil, [3][][]*big.Int{{{}}}, ""},
		{"uint256[3][][]", [][][3]*big.Int{{{}}}, ""},
		{"uint256[3][][]", nil, [][][3]*big.Int{{{}}}, ""},
		{"uint256[3][3][3]", [3][3][3]*big.Int{{{}}}, ""},
		{"uint256[3][3][3]", nil, [3][3][3]*big.Int{{{}}}, ""},
		{"uint8[][]", [][]uint8{}, ""},
		{"uint8[][]", nil, [][]uint8{}, ""},
		{"int256", big.NewInt(1), ""},
		{"int256", nil, big.NewInt(1), ""},
		{"uint8", uint8(1), ""},
		{"uint8", nil, uint8(1), ""},
		{"uint16", uint16(1), ""},
		{"uint16", nil, uint16(1), ""},
		{"uint32", uint32(1), ""},
		{"uint32", nil, uint32(1), ""},
		{"uint64", uint64(1), ""},
		{"uint64", nil, uint64(1), ""},
		{"int8", int8(1), ""},
		{"int8", nil, int8(1), ""},
		{"int16", int16(1), ""},
		{"int16", nil, int16(1), ""},
		{"int32", int32(1), ""},
		{"int32", nil, int32(1), ""},
		{"int64", int64(1), ""},
		{"int64", nil, int64(1), ""},
		{"uint24", big.NewInt(1), ""},
		{"uint24", nil, big.NewInt(1), ""},
		{"uint40", big.NewInt(1), ""},
		{"uint40", nil, big.NewInt(1), ""},
		{"uint48", big.NewInt(1), ""},
		{"uint48", nil, big.NewInt(1), ""},
		{"uint56", big.NewInt(1), ""},
		{"uint56", nil, big.NewInt(1), ""},
		{"uint72", big.NewInt(1), ""},
		{"uint72", nil, big.NewInt(1), ""},
		{"uint80", big.NewInt(1), ""},
		{"uint80", nil, big.NewInt(1), ""},
		{"uint88", big.NewInt(1), ""},
		{"uint88", nil, big.NewInt(1), ""},
		{"uint96", big.NewInt(1), ""},
		{"uint96", nil, big.NewInt(1), ""},
		{"uint104", big.NewInt(1), ""},
		{"uint104", nil, big.NewInt(1), ""},
		{"uint112", big.NewInt(1), ""},
		{"uint112", nil, big.NewInt(1), ""},
		{"uint120", big.NewInt(1), ""},
		{"uint120", nil, big.NewInt(1), ""},
		{"uint128", big.NewInt(1), ""},
		{"uint128", nil, big.NewInt(1), ""},
		{"uint136", big.NewInt(1), ""},
		{"uint136", nil, big.NewInt(1), ""},
		{"uint144", big.NewInt(1), ""},
		{"uint144", nil, big.NewInt(1), ""},
		{"uint152", big.NewInt(1), ""},
		{"uint152", nil, big.NewInt(1), ""},
		{"uint160", big.NewInt(1), ""},
		{"uint160", nil, big.NewInt(1), ""},
		{"uint168", big.NewInt(1), ""},
		{"uint168", nil, big.NewInt(1), ""},
		{"uint176", big.NewInt(1), ""},
		{"uint176", nil, big.NewInt(1), ""},
		{"uint184", big.NewInt(1), ""},
		{"uint184", nil, big.NewInt(1), ""},
		{"uint192", big.NewInt(1), ""},
		{"uint192", nil, big.NewInt(1), ""},
		{"uint200", big.NewInt(1), ""},
		{"uint200", nil, big.NewInt(1), ""},
		{"uint208", big.NewInt(1), ""},
		{"uint208", nil, big.NewInt(1), ""},
		{"uint216", big.NewInt(1), ""},
		{"uint216", nil, big.NewInt(1), ""},
		{"uint224", big.NewInt(1), ""},
		{"uint224", nil, big.NewInt(1), ""},
		{"uint232", big.NewInt(1), ""},
		{"uint232", nil, big.NewInt(1), ""},
		{"uint240", big.NewInt(1), ""},
		{"uint240", nil, big.NewInt(1), ""},
		{"uint248", big.NewInt(1), ""},
		{"uint248", nil, big.NewInt(1), ""},
		{"int24", big.NewInt(1), ""},
		{"int24", nil, big.NewInt(1), ""},
		{"int40", big.NewInt(1), ""},
		{"int40", nil, big.NewInt(1), ""},
		{"int48", big.NewInt(1), ""},
		{"int48", nil, big.NewInt(1), ""},
		{"int56", big.NewInt(1), ""},
		{"int56", nil, big.NewInt(1), ""},
		{"int72", big.NewInt(1), ""},
		{"int72", nil, big.NewInt(1), ""},
		{"int80", big.NewInt(1), ""},
		{"int80", nil, big.NewInt(1), ""},
		{"int88", big.NewInt(1), ""},
		{"int88", nil, big.NewInt(1), ""},
		{"int96", big.NewInt(1), ""},
		{"int96", nil, big.NewInt(1), ""},
		{"int104", big.NewInt(1), ""},
		{"int104", nil, big.NewInt(1), ""},
		{"int112", big.NewInt(1), ""},
		{"int112", nil, big.NewInt(1), ""},
		{"int120", big.NewInt(1), ""},
		{"int120", nil, big.NewInt(1), ""},
		{"int128", big.NewInt(1), ""},
		{"int128", nil, big.NewInt(1), ""},
		{"int136", big.NewInt(1), ""},
		{"int136", nil, big.NewInt(1), ""},
		{"int144", big.NewInt(1), ""},
		{"int144", nil, big.NewInt(1), ""},
		{"int152", big.NewInt(1), ""},
		{"int152", nil, big.NewInt(1), ""},
		{"int160", big.NewInt(1), ""},
		{"int160", nil, big.NewInt(1), ""},
		{"int168", big.NewInt(1), ""},
		{"int168", nil, big.NewInt(1), ""},
		{"int176", big.NewInt(1), ""},
		{"int176", nil, big.NewInt(1), ""},
		{"int184", big.NewInt(1), ""},
		{"int184", nil, big.NewInt(1), ""},
		{"int192", big.NewInt(1), ""},
		{"int192", nil, big.NewInt(1), ""},
		{"int200", big.NewInt(1), ""},
		{"int200", nil, big.NewInt(1), ""},
		{"int208", big.NewInt(1), ""},
		{"int208", nil, big.NewInt(1), ""},
		{"int216", big.NewInt(1), ""},
		{"int216", nil, big.NewInt(1), ""},
		{"int224", big.NewInt(1), ""},
		{"int224", nil, big.NewInt(1), ""},
		{"int232", big.NewInt(1), ""},
		{"int232", nil, big.NewInt(1), ""},
		{"int240", big.NewInt(1), ""},
		{"int240", nil, big.NewInt(1), ""},
		{"int248", big.NewInt(1), ""},
		{"int248", nil, big.NewInt(1), ""},
		{"uint30", uint8(1), "abi: cannot use uint8 as type ptr as argument"},
		{"uint30", nil, uint8(1), "abi: cannot use uint8 as type ptr as argument"},
		{"uint8", uint16(1), "abi: cannot use uint16 as type uint8 as argument"},
		{"uint8", nil, uint16(1), "abi: cannot use uint16 as type uint8 as argument"},
		{"uint8", uint32(1), "abi: cannot use uint32 as type uint8 as argument"},
		{"uint8", nil, uint32(1), "abi: cannot use uint32 as type uint8 as argument"},
		{"uint8", uint64(1), "abi: cannot use uint64 as type uint8 as argument"},
		{"uint8", nil, uint64(1), "abi: cannot use uint64 as type uint8 as argument"},
		{"uint8", int8(1), "abi: cannot use int8 as type uint8 as argument"},
		{"uint8", nil, int8(1), "abi: cannot use int8 as type uint8 as argument"},
		{"uint8", int16(1), "abi: cannot use int16 as type uint8 as argument"},
		{"uint8", nil, int16(1), "abi: cannot use int16 as type uint8 as argument"},
		{"uint8", int32(1), "abi: cannot use int32 as type uint8 as argument"},
		{"uint8", nil, int32(1), "abi: cannot use int32 as type uint8 as argument"},
		{"uint8", int64(1), "abi: cannot use int64 as type uint8 as argument"},
		{"uint8", nil, int64(1), "abi: cannot use int64 as type uint8 as argument"},
		{"uint16", uint16(1), ""},
		{"uint16", nil, uint16(1), ""},
		{"uint16", uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
		{"uint16", nil, uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
		{"uint16[]", []uint16{1, 2, 3}, ""},
		{"uint16[]", nil, []uint16{1, 2, 3}, ""},
		{"uint16[]", [3]uint16{1, 2, 3}, ""},
		{"uint16[]", nil, [3]uint16{1, 2, 3}, ""},
		{"uint16[]", []uint32{1, 2, 3}, "abi: cannot use []uint32 as type [0]uint16 as argument"},
		{"uint16[]", nil, []uint32{1, 2, 3}, "abi: cannot use []uint32 as type [0]uint16 as argument"},
		{"uint16[3]", [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
		{"uint16[3]", nil, [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
		{"uint16[3]", [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
		{"uint16[3]", nil, [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
		{"uint16[3]", []uint16{1, 2, 3}, ""},
		{"uint16[3]", nil, []uint16{1, 2, 3}, ""},
		{"uint16[3]", []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
		{"uint16[3]", nil, []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
		{"address[]", []common.Address{{1}}, ""},
		{"address[]", nil, []common.Address{{1}}, ""},
		{"address[1]", []common.Address{{1}}, ""},
		{"address[1]", nil, []common.Address{{1}}, ""},
		{"address[1]", [1]common.Address{{1}}, ""},
		{"address[1]", nil, [1]common.Address{{1}}, ""},
		{"address[2]", [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
		{"address[2]", nil, [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
		{"bytes32", [32]byte{}, ""},
		{"bytes32", nil, [32]byte{}, ""},
		{"bytes31", [31]byte{}, ""},
		{"bytes31", nil, [31]byte{}, ""},
		{"bytes30", [30]byte{}, ""},
		{"bytes30", nil, [30]byte{}, ""},
		{"bytes29", [29]byte{}, ""},
		{"bytes29", nil, [29]byte{}, ""},
		{"bytes28", [28]byte{}, ""},
		{"bytes28", nil, [28]byte{}, ""},
		{"bytes27", [27]byte{}, ""},
		{"bytes27", nil, [27]byte{}, ""},
		{"bytes26", [26]byte{}, ""},
		{"bytes26", nil, [26]byte{}, ""},
		{"bytes25", [25]byte{}, ""},
		{"bytes25", nil, [25]byte{}, ""},
		{"bytes24", [24]byte{}, ""},
		{"bytes24", nil, [24]byte{}, ""},
		{"bytes23", [23]byte{}, ""},
		{"bytes23", nil, [23]byte{}, ""},
		{"bytes22", [22]byte{}, ""},
		{"bytes22", nil, [22]byte{}, ""},
		{"bytes21", [21]byte{}, ""},
		{"bytes21", nil, [21]byte{}, ""},
		{"bytes20", [20]byte{}, ""},
		{"bytes20", nil, [20]byte{}, ""},
		{"bytes19", [19]byte{}, ""},
		{"bytes19", nil, [19]byte{}, ""},
		{"bytes18", [18]byte{}, ""},
		{"bytes18", nil, [18]byte{}, ""},
		{"bytes17", [17]byte{}, ""},
		{"bytes17", nil, [17]byte{}, ""},
		{"bytes16", [16]byte{}, ""},
		{"bytes16", nil, [16]byte{}, ""},
		{"bytes15", [15]byte{}, ""},
		{"bytes15", nil, [15]byte{}, ""},
		{"bytes14", [14]byte{}, ""},
		{"bytes14", nil, [14]byte{}, ""},
		{"bytes13", [13]byte{}, ""},
		{"bytes13", nil, [13]byte{}, ""},
		{"bytes12", [12]byte{}, ""},
		{"bytes12", nil, [12]byte{}, ""},
		{"bytes11", [11]byte{}, ""},
		{"bytes11", nil, [11]byte{}, ""},
		{"bytes10", [10]byte{}, ""},
		{"bytes10", nil, [10]byte{}, ""},
		{"bytes9", [9]byte{}, ""},
		{"bytes9", nil, [9]byte{}, ""},
		{"bytes8", [8]byte{}, ""},
		{"bytes8", nil, [8]byte{}, ""},
		{"bytes7", [7]byte{}, ""},
		{"bytes7", nil, [7]byte{}, ""},
		{"bytes6", [6]byte{}, ""},
		{"bytes6", nil, [6]byte{}, ""},
		{"bytes5", [5]byte{}, ""},
		{"bytes5", nil, [5]byte{}, ""},
		{"bytes4", [4]byte{}, ""},
		{"bytes4", nil, [4]byte{}, ""},
		{"bytes3", [3]byte{}, ""},
		{"bytes3", nil, [3]byte{}, ""},
		{"bytes2", [2]byte{}, ""},
		{"bytes2", nil, [2]byte{}, ""},
		{"bytes1", [1]byte{}, ""},
		{"bytes1", nil, [1]byte{}, ""},
		{"bytes32", [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
		{"bytes32", nil, [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
		{"bytes32", common.Hash{1}, ""},
		{"bytes32", nil, common.Hash{1}, ""},
		{"bytes31", common.Hash{1}, "abi: cannot use common.Hash as type [31]uint8 as argument"},
		{"bytes31", nil, common.Hash{1}, "abi: cannot use common.Hash as type [31]uint8 as argument"},
		{"bytes31", [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
		{"bytes31", nil, [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
		{"bytes", []byte{0, 1}, ""},
		{"bytes", nil, []byte{0, 1}, ""},
		{"bytes", [2]byte{0, 1}, "abi: cannot use array as type slice as argument"},
		{"bytes", nil, [2]byte{0, 1}, "abi: cannot use array as type slice as argument"},
		{"bytes", common.Hash{1}, "abi: cannot use array as type slice as argument"},
		{"bytes", nil, common.Hash{1}, "abi: cannot use array as type slice as argument"},
		{"string", "hello world", ""},
		{"string", nil, "hello world", ""},
		{"string", string(""), ""},
		{"string", nil, string(""), ""},
		{"string", []byte{}, "abi: cannot use slice as type string as argument"},
		{"string", nil, []byte{}, "abi: cannot use slice as type string as argument"},
		{"bytes32[]", [][32]byte{{}}, ""},
		{"bytes32[]", nil, [][32]byte{{}}, ""},
		{"function", [24]byte{}, ""},
		{"function", nil, [24]byte{}, ""},
		{"bytes20", common.Address{}, ""},
		{"bytes20", nil, common.Address{}, ""},
		{"address", [20]byte{}, ""},
		{"address", nil, [20]byte{}, ""},
		{"address", common.Address{}, ""},
		{"address", nil, common.Address{}, ""},
		{"bytes32[]]", "", "invalid arg type in abi"},
		{"bytes32[]]", nil, "", "invalid arg type in abi"},
		{"invalidType", "", "unsupported arg type: invalidType"},
		{"invalidType", nil, "", "unsupported arg type: invalidType"},
		{"invalidSlice[]", "", "unsupported arg type: invalidSlice"},
		{"invalidSlice[]", nil, "", "unsupported arg type: invalidSlice"},
		// simple tuple
		{"tuple", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, struct {
			A *big.Int
			B *big.Int
		}{}, ""},
		// tuple slice
		{"tuple[]", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, []struct {
			A *big.Int
			B *big.Int
		}{}, ""},
		// tuple array
		{"tuple[2]", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, []struct {
			A *big.Int
			B *big.Int
		}{{big.NewInt(0), big.NewInt(0)}, {big.NewInt(0), big.NewInt(0)}}, ""},
	} {
		typ, err := NewType(test.typ)
		typ, err := NewType(test.typ, test.components)
		if err != nil && len(test.err) == 0 {
			t.Fatal("unexpected parse error:", err)
		} else if err != nil && len(test.err) != 0 {
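The three tuple cases at the end exercise the struct mapping: each tuple member is matched to the exported struct field carrying the capitalised form of its name. A sketch of what such a check looks like, assuming the package's unexported typeCheck(Type, reflect.Value) helper keeps its existing shape:

typ, _ := NewType("tuple", []ArgumentMarshaling{
	{Name: "a", Type: "uint256"},
	{Name: "b", Type: "uint256"},
})
v := struct {
	A *big.Int
	B *big.Int
}{big.NewInt(1), big.NewInt(2)}
err := typeCheck(typ, reflect.ValueOf(v)) // expected: err == nil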
@ -115,17 +115,6 @@ func readFixedBytes(t Type, word []byte) (interface{}, error) {
}

func getFullElemSize(elem *Type) int {
	//all other should be counted as 32 (slices have pointers to respective elements)
	size := 32
	//arrays wrap it, each element being the same size
	for elem.T == ArrayTy {
		size *= elem.Size
		elem = elem.Elem
	}
	return size
}

// iteratively unpack elements
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
	if size < 0 {
@ -150,13 +139,9 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)

	// Arrays have packed elements, resulting in longer unpack steps.
	// Slices have just 32 bytes per element (pointing to the contents).
	elemSize := 32
	elemSize := getTypeSize(*t.Elem)
	if t.T == ArrayTy {
		elemSize = getFullElemSize(t.Elem)
	}

	for i, j := start, 0; j < size; i, j = i+elemSize, j+1 {
		inter, err := toGoType(i, *t.Elem, output)
		if err != nil {
			return nil, err
@ -170,6 +155,36 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
	return refSlice.Interface(), nil
}

func forTupleUnpack(t Type, output []byte) (interface{}, error) {
	retval := reflect.New(t.Type).Elem()
	virtualArgs := 0
	for index, elem := range t.TupleElems {
		marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output)
		if elem.T == ArrayTy && !isDynamicType(*elem) {
			// If we have a static array, like [3]uint256, it is encoded inline,
			// just like uint256,uint256,uint256.
			// This means that we need to add two 'virtual' arguments when
			// we count the index from now on.
			//
			// Array values nested multiple levels deep are also encoded inline:
			// [2][3]uint256: uint256,uint256,uint256,uint256,uint256,uint256
			//
			// Calculate the full array size to get the correct offset for the next argument.
			// Decrement it by 1, as the normal index increment is still applied.
			virtualArgs += getTypeSize(*elem)/32 - 1
		} else if elem.T == TupleTy && !isDynamicType(*elem) {
			// If we have a static tuple, like (uint256, bool, uint256), it is
			// encoded inline, just like uint256,bool,uint256.
			virtualArgs += getTypeSize(*elem)/32 - 1
		}
		if err != nil {
			return nil, err
		}
		retval.Field(index).Set(reflect.ValueOf(marshalledValue))
	}
	return retval.Interface(), nil
}

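The virtualArgs bookkeeping is plain word arithmetic. This standalone fragment reproduces it for a tuple (uint256 a, uint256[3] b, uint256 c), where b is inlined over three words:

sizes := []int{32, 96, 32} // getTypeSize of a, b, c
virtualArgs := 0
for i, sz := range sizes {
	fmt.Printf("member %d read at byte offset %d\n", i, (i+virtualArgs)*32)
	virtualArgs += sz/32 - 1 // 0 for a and c, 2 for the inlined [3]uint256
}
// member 0 read at byte offset 0
// member 1 read at byte offset 32
// member 2 read at byte offset 128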
// toGoType parses the output bytes and recursively assigns the value of these bytes
// into a go type in accordance with the ABI spec.
func toGoType(index int, t Type, output []byte) (interface{}, error) {
@ -178,14 +193,14 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
	}

	var (
		returnOutput []byte
		begin, end int
		begin, length int
		err error
	)

	// if we require a length prefix, find the beginning word and size returned.
	if t.requiresLengthPrefix() {
		begin, end, err = lengthPrefixPointsTo(index, output)
		begin, length, err = lengthPrefixPointsTo(index, output)
		if err != nil {
			return nil, err
		}
@ -194,12 +209,26 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
	}

	switch t.T {
	case TupleTy:
		if isDynamicType(t) {
			begin, err := tuplePointsTo(index, output)
			if err != nil {
				return nil, err
			}
			return forTupleUnpack(t, output[begin:])
		} else {
			return forTupleUnpack(t, output[index:])
		}
	case SliceTy:
		return forEachUnpack(t, output, begin, end)
		return forEachUnpack(t, output[begin:], 0, length)
	case ArrayTy:
		return forEachUnpack(t, output, index, t.Size)
		if isDynamicType(*t.Elem) {
			offset := int64(binary.BigEndian.Uint64(returnOutput[len(returnOutput)-8:]))
			return forEachUnpack(t, output[offset:], 0, t.Size)
		}
		return forEachUnpack(t, output[index:], 0, t.Size)
	case StringTy: // variable arrays are written at the end of the return bytes
		return string(output[begin : begin+end]), nil
		return string(output[begin : begin+length]), nil
	case IntTy, UintTy:
		return readInteger(t.T, t.Kind, returnOutput), nil
	case BoolTy:
@ -209,7 +238,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
	case HashTy:
		return common.BytesToHash(returnOutput), nil
	case BytesTy:
		return output[begin : begin+end], nil
		return output[begin : begin+length], nil
	case FixedBytesTy:
		return readFixedBytes(t, returnOutput)
	case FunctionTy:
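Note how the new ArrayTy branch reads the element-area offset from the low eight bytes of the 32-byte head word, so offsets are assumed to fit in 64 bits. In isolation (with "encoding/binary" and "fmt" imported):

word := make([]byte, 32) // one ABI head word
word[31] = 0x40          // big-endian offset 64
offset := int64(binary.BigEndian.Uint64(word[len(word)-8:]))
fmt.Println(offset) // 64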
@ -250,3 +279,17 @@ func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err
	length = int(lengthBig.Uint64())
	return
}

// tuplePointsTo resolves the location reference for a dynamic tuple.
func tuplePointsTo(index int, output []byte) (start int, err error) {
	offset := big.NewInt(0).SetBytes(output[index : index+32])
	outputLen := big.NewInt(int64(len(output)))

	if offset.Cmp(outputLen) > 0 {
		return 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", offset, outputLen)
	}
	if offset.BitLen() > 63 {
		return 0, fmt.Errorf("abi offset larger than int64: %v", offset)
	}
	return int(offset.Uint64()), nil
}

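Usage sketch for the helper above: the word at index holds a byte offset into output; offsets past the end of the buffer, or wider than 63 bits, are rejected.

out := make([]byte, 96)
out[31] = 0x20 // head word at index 0: tuple body starts at byte 32
start, err := tuplePointsTo(0, out)
// start == 32, err == nil; an offset beyond len(out) errors instead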
@ -173,9 +173,14 @@ var unpackTests = []unpackTest{
	// multi dimensional, if these pass, all types that don't require length prefix should pass
	{
		def: `[{"type": "uint8[][]"}]`,
		enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000E0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
		enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
		want: [][]uint8{{1, 2}, {1, 2}},
	},
	{
		def: `[{"type": "uint8[][]"}]`,
		enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
		want: [][]uint8{{1, 2}, {1, 2, 3}},
	},
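Word by word, the corrected uint8[][] vector decodes as follows (annotation mine). The old vector's 0x80/0xe0 offsets were measured from the start of the whole buffer; the fix makes the inner offsets relative to the start of the element area, which is what the reworked decoder, slicing output[begin:], now expects:

// word 0: 0x20  offset of the outer slice payload
// word 1: 0x02  outer length
// word 2: 0x40  offset of inner[0], counted from word 2
// word 3: 0xa0  offset of inner[1], counted from word 2
// word 4: 0x02  len(inner[0]); words 5-6 hold 1, 2
// word 7: 0x02  len(inner[1]); words 8-9 hold 1, 2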
	{
		def: `[{"type": "uint8[2][2]"}]`,
		enc: "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@ -183,7 +188,7 @@ var unpackTests = []unpackTest{
	},
	{
		def: `[{"type": "uint8[][2]"}]`,
		enc: "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
		enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
		want: [2][]uint8{{1}, {1}},
	},
	{
@ -191,6 +196,11 @@ var unpackTests = []unpackTest{
		enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
		want: [][2]uint8{{1, 2}},
	},
	{
		def: `[{"type": "uint8[2][]"}]`,
		enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
		want: [][2]uint8{{1, 2}, {1, 2}},
	},
	{
		def: `[{"type": "uint16[]"}]`,
		enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@ -236,6 +246,26 @@ var unpackTests = []unpackTest{
		enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
		want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
	},
	{
		def: `[{"type": "string[4]"}]`,
		enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000000548656c6c6f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005576f726c64000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b476f2d657468657265756d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000",
		want: [4]string{"Hello", "World", "Go-ethereum", "Ethereum"},
	},
	{
		def: `[{"type": "string[]"}]`,
		enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b676f2d657468657265756d000000000000000000000000000000000000000000",
		want: []string{"Ethereum", "go-ethereum"},
	},
	{
		def: `[{"type": "bytes[]"}]`,
		enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000003f0f0f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003f0f0f00000000000000000000000000000000000000000000000000000000000",
		want: [][]byte{{0xf0, 0xf0, 0xf0}, {0xf0, 0xf0, 0xf0}},
	},
	{
		def: `[{"type": "uint256[2][][]"}]`,
		enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e80000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e8",
		want: [][][2]*big.Int{{{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}, {{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}},
	},
{
|
{
|
||||||
def: `[{"type": "int8[]"}]`,
|
def: `[{"type": "int8[]"}]`,
|
||||||
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||||
@@ -295,6 +325,53 @@ var unpackTests = []unpackTest{
 			Int2 *big.Int
 		}{big.NewInt(1), big.NewInt(2)},
 	},
+	{
+		def: `[{"name":"int_one","type":"int256"}]`,
+		enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+		want: struct {
+			IntOne *big.Int
+		}{big.NewInt(1)},
+	},
+	{
+		def: `[{"name":"int__one","type":"int256"}]`,
+		enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+		want: struct {
+			IntOne *big.Int
+		}{big.NewInt(1)},
+	},
+	{
+		def: `[{"name":"int_one_","type":"int256"}]`,
+		enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+		want: struct {
+			IntOne *big.Int
+		}{big.NewInt(1)},
+	},
+	{
+		def: `[{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}]`,
+		enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+		want: struct {
+			IntOne *big.Int
+			Intone *big.Int
+		}{big.NewInt(1), big.NewInt(2)},
+	},
+	{
+		def: `[{"name":"___","type":"int256"}]`,
+		enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+		want: struct {
+			IntOne *big.Int
+			Intone *big.Int
+		}{},
+		err: "abi: purely underscored output cannot unpack to struct",
+	},
+	{
+		def: `[{"name":"int_one","type":"int256"},{"name":"IntOne","type":"int256"}]`,
+		enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+		want: struct {
+			Int1 *big.Int
+			Int2 *big.Int
+		}{},
+		err: "abi: multiple outputs mapping to the same struct field 'IntOne'",
+	},
 	{
 		def: `[{"name":"int","type":"int256"},{"name":"Int","type":"int256"}]`,
 		enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@@ -359,6 +436,55 @@ func TestUnpack(t *testing.T) {
 	}
 }
+
+func TestUnpackSetDynamicArrayOutput(t *testing.T) {
+	abi, err := JSON(strings.NewReader(`[{"constant":true,"inputs":[],"name":"testDynamicFixedBytes15","outputs":[{"name":"","type":"bytes15[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"testDynamicFixedBytes32","outputs":[{"name":"","type":"bytes32[]"}],"payable":false,"stateMutability":"view","type":"function"}]`))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var (
+		marshalledReturn32 = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000230783132333435363738393000000000000000000000000000000000000000003078303938373635343332310000000000000000000000000000000000000000")
+		marshalledReturn15 = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000230783031323334350000000000000000000000000000000000000000000000003078393837363534000000000000000000000000000000000000000000000000")
+
+		out32 [][32]byte
+		out15 [][15]byte
+	)
+
+	// test 32
+	err = abi.Unpack(&out32, "testDynamicFixedBytes32", marshalledReturn32)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(out32) != 2 {
+		t.Fatalf("expected array with 2 values, got %d", len(out32))
+	}
+	expected := common.Hex2Bytes("3078313233343536373839300000000000000000000000000000000000000000")
+	if !bytes.Equal(out32[0][:], expected) {
+		t.Errorf("expected %x, got %x\n", expected, out32[0])
+	}
+	expected = common.Hex2Bytes("3078303938373635343332310000000000000000000000000000000000000000")
+	if !bytes.Equal(out32[1][:], expected) {
+		t.Errorf("expected %x, got %x\n", expected, out32[1])
+	}
+
+	// test 15
+	err = abi.Unpack(&out15, "testDynamicFixedBytes15", marshalledReturn15)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(out15) != 2 {
+		t.Fatalf("expected array with 2 values, got %d", len(out15))
+	}
+	expected = common.Hex2Bytes("307830313233343500000000000000")
+	if !bytes.Equal(out15[0][:], expected) {
+		t.Errorf("expected %x, got %x\n", expected, out15[0])
+	}
+	expected = common.Hex2Bytes("307839383736353400000000000000")
+	if !bytes.Equal(out15[1][:], expected) {
+		t.Errorf("expected %x, got %x\n", expected, out15[1])
+	}
+}
+
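Note: the test added above decodes a dynamic array of fixed-size byte arrays straight into a Go slice of arrays. A minimal self-contained sketch of the same call pattern, assuming the accounts/abi package at this revision (the method definition and hex payload below are illustrative, not taken from this diff):

package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical single-method ABI returning bytes32[].
	def := `[{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"","type":"bytes32[]"}],"payable":false,"stateMutability":"view","type":"function"}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// ABI encoding: offset to the array, its length, then one 32-byte element.
	data := common.Hex2Bytes(
		"0000000000000000000000000000000000000000000000000000000000000020" +
			"0000000000000000000000000000000000000000000000000000000000000001" +
			"3078313233343536373839300000000000000000000000000000000000000000")

	var out [][32]byte
	if err := parsed.Unpack(&out, "get", data); err != nil {
		panic(err)
	}
	fmt.Printf("%d element(s), first = %x\n", len(out), out[0])
}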
 type methodMultiOutput struct {
 	Int    *big.Int
 	String string

@@ -462,6 +588,68 @@ func TestMultiReturnWithArray(t *testing.T) {
 	}
 }
+
+func TestMultiReturnWithStringArray(t *testing.T) {
+	const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]`
+	abi, err := JSON(strings.NewReader(definition))
+	if err != nil {
+		t.Fatal(err)
+	}
+	buff := new(bytes.Buffer)
+	buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000005c1b78ea0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000001a055690d9db80000000000000000000000000000ab1257528b3782fb40d7ed5f72e624b744dffb2f00000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001048656c6c6f2c20457468657265756d2100000000000000000000000000000000"))
+	temp, _ := big.NewInt(0).SetString("30000000000000000000", 10)
+	ret1, ret1Exp := new([3]*big.Int), [3]*big.Int{big.NewInt(1545304298), big.NewInt(6), temp}
+	ret2, ret2Exp := new(common.Address), common.HexToAddress("ab1257528b3782fb40d7ed5f72e624b744dffb2f")
+	ret3, ret3Exp := new([2]string), [2]string{"Ethereum", "Hello, Ethereum!"}
+	ret4, ret4Exp := new(bool), false
+	if err := abi.Unpack(&[]interface{}{ret1, ret2, ret3, ret4}, "multi", buff.Bytes()); err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(*ret1, ret1Exp) {
+		t.Error("big.Int array result", *ret1, "!= Expected", ret1Exp)
+	}
+	if !reflect.DeepEqual(*ret2, ret2Exp) {
+		t.Error("address result", *ret2, "!= Expected", ret2Exp)
+	}
+	if !reflect.DeepEqual(*ret3, ret3Exp) {
+		t.Error("string array result", *ret3, "!= Expected", ret3Exp)
+	}
+	if !reflect.DeepEqual(*ret4, ret4Exp) {
+		t.Error("bool result", *ret4, "!= Expected", ret4Exp)
+	}
+}
+
+func TestMultiReturnWithStringSlice(t *testing.T) {
+	const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]`
+	abi, err := JSON(strings.NewReader(definition))
+	if err != nil {
+		t.Fatal(err)
+	}
+	buff := new(bytes.Buffer)
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // output[0] offset
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000120")) // output[1] offset
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // output[0] length
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // output[0][0] offset
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // output[0][1] offset
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000008")) // output[0][0] length
+	buff.Write(common.Hex2Bytes("657468657265756d000000000000000000000000000000000000000000000000")) // output[0][0] value
+	buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000b")) // output[0][1] length
+	buff.Write(common.Hex2Bytes("676f2d657468657265756d000000000000000000000000000000000000000000")) // output[0][1] value
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // output[1] length
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000064")) // output[1][0] value
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000065")) // output[1][1] value
+	ret1, ret1Exp := new([]string), []string{"ethereum", "go-ethereum"}
+	ret2, ret2Exp := new([]*big.Int), []*big.Int{big.NewInt(100), big.NewInt(101)}
+	if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(*ret1, ret1Exp) {
+		t.Error("string slice result", *ret1, "!= Expected", ret1Exp)
+	}
+	if !reflect.DeepEqual(*ret2, ret2Exp) {
+		t.Error("uint256 slice result", *ret2, "!= Expected", ret2Exp)
+	}
+}
+
 func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
 	// Similar to TestMultiReturnWithArray, but with a special case in mind:
 	// values of nested static arrays count towards the size as well, and any element following

@@ -751,6 +939,108 @@ func TestUnmarshal(t *testing.T) {
 	}
 }
+
+func TestUnpackTuple(t *testing.T) {
+	const simpleTuple = `[{"name":"tuple","constant":false,"outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]`
+	abi, err := JSON(strings.NewReader(simpleTuple))
+	if err != nil {
+		t.Fatal(err)
+	}
+	buff := new(bytes.Buffer)
+
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // ret[a] = 1
+	buff.Write(common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) // ret[b] = -1
+
+	v := struct {
+		Ret struct {
+			A *big.Int
+			B *big.Int
+		}
+	}{Ret: struct {
+		A *big.Int
+		B *big.Int
+	}{new(big.Int), new(big.Int)}}
+
+	err = abi.Unpack(&v, "tuple", buff.Bytes())
+	if err != nil {
+		t.Error(err)
+	} else {
+		if v.Ret.A.Cmp(big.NewInt(1)) != 0 {
+			t.Errorf("unexpected value unpacked: want %x, got %x", 1, v.Ret.A)
+		}
+		if v.Ret.B.Cmp(big.NewInt(-1)) != 0 {
+			t.Errorf("unexpected value unpacked: want %x, got %x", -1, v.Ret.B)
+		}
+	}
+
+	// Test nested tuple
+	const nestedTuple = `[{"name":"tuple","constant":false,"outputs":[
+		{"type":"tuple","name":"s","components":[{"type":"uint256","name":"a"},{"type":"uint256[]","name":"b"},{"type":"tuple[]","name":"c","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]}]},
+		{"type":"tuple","name":"t","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]},
+		{"type":"uint256","name":"a"}
+	]}]`
+
+	abi, err = JSON(strings.NewReader(nestedTuple))
+	if err != nil {
+		t.Fatal(err)
+	}
+	buff.Reset()
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // s offset
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")) // t.X = 0
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // t.Y = 1
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // a = 1
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.A = 1
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000060")) // s.B offset
+	buff.Write(common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0")) // s.C offset
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.B length
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.B[0] = 1
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.B[1] = 2
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C length
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.C[0].X
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C[0].Y
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C[1].X
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.C[1].Y
+
+	type T struct {
+		X *big.Int `abi:"x"`
+		Z *big.Int `abi:"y"` // Test whether the abi tag works.
+	}
+
+	type S struct {
+		A *big.Int
+		B []*big.Int
+		C []T
+	}
+
+	type Ret struct {
+		FieldS S `abi:"s"`
+		FieldT T `abi:"t"`
+		A      *big.Int
+	}
+	var ret Ret
+	var expected = Ret{
+		FieldS: S{
+			A: big.NewInt(1),
+			B: []*big.Int{big.NewInt(1), big.NewInt(2)},
+			C: []T{
+				{big.NewInt(1), big.NewInt(2)},
+				{big.NewInt(2), big.NewInt(1)},
+			},
+		},
+		FieldT: T{
+			big.NewInt(0), big.NewInt(1),
+		},
+		A: big.NewInt(1),
+	}
+
+	err = abi.Unpack(&ret, "tuple", buff.Bytes())
+	if err != nil {
+		t.Error(err)
+	}
+	if !reflect.DeepEqual(ret, expected) {
+		t.Error("unexpected unpack value")
+	}
+}
+
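The nested-tuple test above leans on the `abi:"..."` struct tag to map tuple components whose names differ from the Go field names. A minimal self-contained sketch of that mechanism, assuming the same package API (the destination field names are illustrative):

package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	def := `[{"name":"tuple","constant":false,"outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	data := common.Hex2Bytes(
		"0000000000000000000000000000000000000000000000000000000000000001" + // a = 1
			"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") // b = -1

	// Field names may differ from component names via `abi:"..."` tags.
	out := struct {
		Ret struct {
			First  *big.Int `abi:"a"`
			Second *big.Int `abi:"b"`
		}
	}{}
	out.Ret.First, out.Ret.Second = new(big.Int), new(big.Int) // pre-allocate, as the test does
	if err := parsed.Unpack(&out, "tuple", data); err != nil {
		panic(err)
	}
	fmt.Println(out.Ret.First, out.Ret.Second) // 1 -1
}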
 func TestOOMMaliciousInput(t *testing.T) {
 	oomTests := []unpackTest{
 		{
@@ -265,7 +265,10 @@ func (ac *accountCache) scanAccounts() error {
 		case (addr == common.Address{}):
 			log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address")
 		default:
-			return &accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}}
+			return &accounts.Account{
+				Address: addr,
+				URL:     accounts.URL{Scheme: KeyStoreScheme, Path: path},
+			}
 		}
 		return nil
 	}
@@ -171,7 +171,10 @@ func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Accou
 	if err != nil {
 		return nil, accounts.Account{}, err
 	}
-	a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))}}
+	a := accounts.Account{
+		Address: key.Address,
+		URL:     accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))},
+	}
 	if err := ks.StoreKey(a.URL.Path, key, auth); err != nil {
 		zeroKey(key.PrivateKey)
 		return nil, a, err
@ -224,5 +227,6 @@ func toISO8601(t time.Time) string {
|
|||||||
} else {
|
} else {
|
||||||
tz = fmt.Sprintf("%03d00", offset/3600)
|
tz = fmt.Sprintf("%03d00", offset/3600)
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
|
return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s",
|
||||||
|
t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
|
||||||
}
|
}
|
||||||
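The reflowed Sprintf keeps the layout unchanged: an ISO-8601-like stamp whose time fields are separated by dashes rather than colons, so it can be embedded in key file names. An illustration of the resulting format (assuming the UTC branch, not shown in this hunk, sets tz to "Z"):

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2018, 12, 10, 14, 32, 7, 543210987, time.UTC)
	// Same verbs as toISO8601 above, with "Z" standing in for the tz suffix.
	fmt.Printf("%04d-%02d-%02dT%02d-%02d-%02d.%09dZ\n",
		t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond())
	// Output: 2018-12-10T14-32-07.543210987Z
}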
@@ -233,6 +233,7 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
 		PrivateKey: key,
 	}, nil
 }
+
 func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) {
 	if cryptoJson.Cipher != "aes-128-ctr" {
 		return nil, fmt.Errorf("Cipher not supported: %v", cryptoJson.Cipher)
@@ -38,7 +38,13 @@ func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (accou
 		return accounts.Account{}, nil, err
 	}
 	key.Id = uuid.NewRandom()
-	a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: keyStore.JoinPath(keyFileName(key.Address))}}
+	a := accounts.Account{
+		Address: key.Address,
+		URL: accounts.URL{
+			Scheme: KeyStoreScheme,
+			Path:   keyStore.JoinPath(keyFileName(key.Address)),
+		},
+	}
 	err = keyStore.StoreKey(a.URL.Path, key, password)
 	return a, key, err
 }
@@ -52,8 +52,8 @@ func (w *keystoreWallet) Status() (string, error) {
 // is no connection or decryption step necessary to access the list of accounts.
 func (w *keystoreWallet) Open(passphrase string) error { return nil }

-// Close implements accounts.Wallet, but is a noop for plain wallets since is no
-// meaningful open operation.
+// Close implements accounts.Wallet, but is a noop for plain wallets since there
+// is no meaningful open operation.
 func (w *keystoreWallet) Close() error { return nil }

 // Accounts implements accounts.Wallet, returning an account list consisting of
@@ -84,10 +84,7 @@ func (w *keystoreWallet) SelfDerive(base accounts.DerivationPath, chain ethereum
 // able to sign via our shared keystore backend).
 func (w *keystoreWallet) SignHash(account accounts.Account, hash []byte) ([]byte, error) {
 	// Make sure the requested account is contained within
-	if account.Address != w.account.Address {
-		return nil, accounts.ErrUnknownAccount
-	}
-	if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
+	if !w.Contains(account) {
 		return nil, accounts.ErrUnknownAccount
 	}
 	// Account seems valid, request the keystore to sign

@@ -100,10 +97,7 @@ func (w *keystoreWallet) SignHash(account accounts.Account, hash []byte) ([]byte
 // be able to sign via our shared keystore backend).
 func (w *keystoreWallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
 	// Make sure the requested account is contained within
-	if account.Address != w.account.Address {
-		return nil, accounts.ErrUnknownAccount
-	}
-	if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
+	if !w.Contains(account) {
 		return nil, accounts.ErrUnknownAccount
 	}
 	// Account seems valid, request the keystore to sign

@@ -114,10 +108,7 @@ func (w *keystoreWallet) SignTx(account accounts.Account, tx *types.Transaction,
 // given hash with the given account using passphrase as extra authentication.
 func (w *keystoreWallet) SignHashWithPassphrase(account accounts.Account, passphrase string, hash []byte) ([]byte, error) {
 	// Make sure the requested account is contained within
-	if account.Address != w.account.Address {
-		return nil, accounts.ErrUnknownAccount
-	}
-	if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
+	if !w.Contains(account) {
 		return nil, accounts.ErrUnknownAccount
 	}
 	// Account seems valid, request the keystore to sign

@@ -128,10 +119,7 @@ func (w *keystoreWallet) SignHashWithPassphrase(account accounts.Account, passph
 // transaction with the given account using passphrase as extra authentication.
 func (w *keystoreWallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
 	// Make sure the requested account is contained within
-	if account.Address != w.account.Address {
-		return nil, accounts.ErrUnknownAccount
-	}
-	if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
+	if !w.Contains(account) {
 		return nil, accounts.ErrUnknownAccount
 	}
 	// Account seems valid, request the keystore to sign
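The four signing methods above now share one membership check. Contains itself is not part of this diff; a sketch of what it presumably does, matching the inlined conditions it replaces (address must match, and a non-empty URL must match too, with an empty URL acting as a wildcard):

// Sketch only, shown for context.
func (w *keystoreWallet) Contains(account accounts.Account) bool {
	return account.Address == w.account.Address &&
		(account.URL == (accounts.URL{}) || account.URL == w.account.URL)
}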
@@ -257,7 +257,9 @@ func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, er

 	// Decode the hex string into an Ethereum address and return
 	var address common.Address
-	hex.Decode(address[:], hexstr)
+	if _, err = hex.Decode(address[:], hexstr); err != nil {
+		return common.Address{}, err
+	}
 	return address, nil
 }

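The added check matters because hex.Decode reports malformed input rather than panicking, and the old code dropped that report: a corrupt device reply would have yielded a zeroed or half-filled address as if it were real. A self-contained illustration using only the standard library:

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	var addr [20]byte
	// "zz" is not valid hex, so Decode returns an error; without the check
	// above, addr would be used anyway.
	if _, err := hex.Decode(addr[:], []byte("zz")); err != nil {
		fmt.Println("decode failed:", err)
	}
}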
@@ -23,8 +23,8 @@ environment:
 install:
   - git submodule update --init
   - rmdir C:\go /s /q
-  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.2.windows-%GETH_ARCH%.zip
-  - 7z x go1.11.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.4.windows-%GETH_ARCH%.zip
+  - 7z x go1.11.4.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
   - go version
   - gcc --version

@@ -1,3 +1,19 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
 // +build none

 /*
@@ -89,7 +89,7 @@ func runCmd(ctx *cli.Context) error {
 		genesisConfig *core.Genesis
 	)
 	if ctx.GlobalBool(MachineFlag.Name) {
-		tracer = NewJSONLogger(logconfig, os.Stdout)
+		tracer = vm.NewJSONLogger(logconfig, os.Stdout)
 	} else if ctx.GlobalBool(DebugFlag.Name) {
 		debugLogger = vm.NewStructLogger(logconfig)
 		tracer = debugLogger

@@ -206,6 +206,7 @@ func runCmd(ctx *cli.Context) error {
 	execTime := time.Since(tstart)

 	if ctx.GlobalBool(DumpFlag.Name) {
+		statedb.Commit(true)
 		statedb.IntermediateRoot(true)
 		fmt.Println(string(statedb.Dump()))
 	}
@@ -68,7 +68,7 @@ func stateTestCmd(ctx *cli.Context) error {
 	)
 	switch {
 	case ctx.GlobalBool(MachineFlag.Name):
-		tracer = NewJSONLogger(config, os.Stderr)
+		tracer = vm.NewJSONLogger(config, os.Stderr)

 	case ctx.GlobalBool(DebugFlag.Name):
 		debugger = vm.NewStructLogger(config)
@@ -256,7 +256,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
 	}
 	for _, boot := range enodes {
 		old, err := enode.ParseV4(boot.String())
-		if err != nil {
+		if err == nil {
 			stack.Server().AddPeer(old)
 		}
 	}
@@ -20,7 +20,7 @@ import (
 	"bufio"
 	"errors"
 	"fmt"
-	"io"
+	"math/big"
 	"os"
 	"reflect"
 	"unicode"
@@ -152,7 +152,9 @@ func enableWhisper(ctx *cli.Context) bool {

 func makeFullNode(ctx *cli.Context) *node.Node {
 	stack, cfg := makeConfigNode(ctx)
+	if ctx.GlobalIsSet(utils.ConstantinopleOverrideFlag.Name) {
+		cfg.Eth.ConstantinopleOverride = new(big.Int).SetUint64(ctx.GlobalUint64(utils.ConstantinopleOverrideFlag.Name))
+	}
 	utils.RegisterEthService(stack, &cfg.Eth)

 	if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
@@ -199,7 +201,17 @@ func dumpConfig(ctx *cli.Context) error {
 	if err != nil {
 		return err
 	}
-	io.WriteString(os.Stdout, comment)
-	os.Stdout.Write(out)
+	dump := os.Stdout
+	if ctx.NArg() > 0 {
+		dump, err = os.OpenFile(ctx.Args().Get(0), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+		if err != nil {
+			return err
+		}
+		defer dump.Close()
+	}
+	dump.WriteString(comment)
+	dump.Write(out)
+
 	return nil
 }
@@ -87,6 +87,7 @@ var (
 		utils.LightServFlag,
 		utils.LightPeersFlag,
 		utils.LightKDFFlag,
+		utils.WhitelistFlag,
 		utils.CacheFlag,
 		utils.CacheDatabaseFlag,
 		utils.CacheTrieFlag,
|
|||||||
utils.RinkebyFlag,
|
utils.RinkebyFlag,
|
||||||
utils.VMEnableDebugFlag,
|
utils.VMEnableDebugFlag,
|
||||||
utils.NetworkIdFlag,
|
utils.NetworkIdFlag,
|
||||||
|
utils.ConstantinopleOverrideFlag,
|
||||||
utils.RPCCORSDomainFlag,
|
utils.RPCCORSDomainFlag,
|
||||||
utils.RPCVirtualHostsFlag,
|
utils.RPCVirtualHostsFlag,
|
||||||
utils.EthStatsURLFlag,
|
utils.EthStatsURLFlag,
|
||||||
|
@@ -81,6 +81,7 @@ var AppHelpFlagGroups = []flagGroup{
 			utils.LightServFlag,
 			utils.LightPeersFlag,
 			utils.LightKDFFlag,
+			utils.WhitelistFlag,
 		},
 	},
 	{
@@ -20,35 +20,41 @@ import (
 	"encoding/binary"
 	"errors"
 	"math"
+	"math/big"
+	"strings"

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
+	math2 "github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/params"
 )

-// cppEthereumGenesisSpec represents the genesis specification format used by the
+// alethGenesisSpec represents the genesis specification format used by the
 // C++ Ethereum implementation.
-type cppEthereumGenesisSpec struct {
+type alethGenesisSpec struct {
 	SealEngine string `json:"sealEngine"`
 	Params     struct {
-		AccountStartNonce       hexutil.Uint64 `json:"accountStartNonce"`
-		HomesteadForkBlock      hexutil.Uint64 `json:"homesteadForkBlock"`
-		EIP150ForkBlock         hexutil.Uint64 `json:"EIP150ForkBlock"`
-		EIP158ForkBlock         hexutil.Uint64 `json:"EIP158ForkBlock"`
-		ByzantiumForkBlock      hexutil.Uint64 `json:"byzantiumForkBlock"`
-		ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
-		NetworkID               hexutil.Uint64 `json:"networkID"`
-		ChainID                 hexutil.Uint64 `json:"chainID"`
-		MaximumExtraDataSize    hexutil.Uint64 `json:"maximumExtraDataSize"`
-		MinGasLimit             hexutil.Uint64 `json:"minGasLimit"`
-		MaxGasLimit             hexutil.Uint64 `json:"maxGasLimit"`
-		GasLimitBoundDivisor    hexutil.Uint64 `json:"gasLimitBoundDivisor"`
+		AccountStartNonce       math2.HexOrDecimal64 `json:"accountStartNonce"`
+		MaximumExtraDataSize    hexutil.Uint64       `json:"maximumExtraDataSize"`
+		HomesteadForkBlock      hexutil.Uint64       `json:"homesteadForkBlock"`
+		DaoHardforkBlock        math2.HexOrDecimal64 `json:"daoHardforkBlock"`
+		EIP150ForkBlock         hexutil.Uint64       `json:"EIP150ForkBlock"`
+		EIP158ForkBlock         hexutil.Uint64       `json:"EIP158ForkBlock"`
+		ByzantiumForkBlock      hexutil.Uint64       `json:"byzantiumForkBlock"`
+		ConstantinopleForkBlock hexutil.Uint64       `json:"constantinopleForkBlock"`
+		MinGasLimit             hexutil.Uint64       `json:"minGasLimit"`
+		MaxGasLimit             hexutil.Uint64       `json:"maxGasLimit"`
+		TieBreakingGas          bool                 `json:"tieBreakingGas"`
+		GasLimitBoundDivisor    math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
 		MinimumDifficulty       *hexutil.Big          `json:"minimumDifficulty"`
-		DifficultyBoundDivisor  *hexutil.Big          `json:"difficultyBoundDivisor"`
-		DurationLimit           *hexutil.Big          `json:"durationLimit"`
+		DifficultyBoundDivisor  *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"`
+		DurationLimit           *math2.HexOrDecimal256 `json:"durationLimit"`
 		BlockReward             *hexutil.Big          `json:"blockReward"`
+		NetworkID               hexutil.Uint64        `json:"networkID"`
+		ChainID                 hexutil.Uint64        `json:"chainID"`
+		AllowFutureBlocks       bool                  `json:"allowFutureBlocks"`
 	} `json:"params"`

 	Genesis struct {
@@ -62,57 +68,68 @@ type cppEthereumGenesisSpec struct {
 		GasLimit   hexutil.Uint64 `json:"gasLimit"`
 	} `json:"genesis"`

-	Accounts map[common.Address]*cppEthereumGenesisSpecAccount `json:"accounts"`
+	Accounts map[common.UnprefixedAddress]*alethGenesisSpecAccount `json:"accounts"`
 }

-// cppEthereumGenesisSpecAccount is the prefunded genesis account and/or precompiled
+// alethGenesisSpecAccount is the prefunded genesis account and/or precompiled
 // contract definition.
-type cppEthereumGenesisSpecAccount struct {
-	Balance     *hexutil.Big                   `json:"balance"`
-	Nonce       uint64                         `json:"nonce,omitempty"`
-	Precompiled *cppEthereumGenesisSpecBuiltin `json:"precompiled,omitempty"`
+type alethGenesisSpecAccount struct {
+	Balance     *math2.HexOrDecimal256   `json:"balance"`
+	Nonce       uint64                   `json:"nonce,omitempty"`
+	Precompiled *alethGenesisSpecBuiltin `json:"precompiled,omitempty"`
 }

-// cppEthereumGenesisSpecBuiltin is the precompiled contract definition.
-type cppEthereumGenesisSpecBuiltin struct {
+// alethGenesisSpecBuiltin is the precompiled contract definition.
+type alethGenesisSpecBuiltin struct {
 	Name          string                         `json:"name,omitempty"`
 	StartingBlock hexutil.Uint64                 `json:"startingBlock,omitempty"`
-	Linear        *cppEthereumGenesisSpecLinearPricing `json:"linear,omitempty"`
+	Linear        *alethGenesisSpecLinearPricing `json:"linear,omitempty"`
 }

-type cppEthereumGenesisSpecLinearPricing struct {
+type alethGenesisSpecLinearPricing struct {
 	Base uint64 `json:"base"`
 	Word uint64 `json:"word"`
 }

-// newCppEthereumGenesisSpec converts a go-ethereum genesis block into a Parity specific
+// newAlethGenesisSpec converts a go-ethereum genesis block into an Aleth-specific
 // chain specification format.
-func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEthereumGenesisSpec, error) {
-	// Only ethash is currently supported between go-ethereum and cpp-ethereum
+func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSpec, error) {
+	// Only ethash is currently supported between go-ethereum and aleth
 	if genesis.Config.Ethash == nil {
 		return nil, errors.New("unsupported consensus engine")
 	}
-	// Reconstruct the chain spec in Parity's format
-	spec := &cppEthereumGenesisSpec{
+	// Reconstruct the chain spec in Aleth format
+	spec := &alethGenesisSpec{
 		SealEngine: "Ethash",
 	}
+	// Some defaults
 	spec.Params.AccountStartNonce = 0
+	spec.Params.TieBreakingGas = false
+	spec.Params.AllowFutureBlocks = false
+	spec.Params.DaoHardforkBlock = 0
+
 	spec.Params.HomesteadForkBlock = (hexutil.Uint64)(genesis.Config.HomesteadBlock.Uint64())
 	spec.Params.EIP150ForkBlock = (hexutil.Uint64)(genesis.Config.EIP150Block.Uint64())
 	spec.Params.EIP158ForkBlock = (hexutil.Uint64)(genesis.Config.EIP158Block.Uint64())
-	spec.Params.ByzantiumForkBlock = (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())
-	spec.Params.ConstantinopleForkBlock = (hexutil.Uint64)(math.MaxUint64)
+
+	// Byzantium
+	if num := genesis.Config.ByzantiumBlock; num != nil {
+		spec.setByzantium(num)
+	}
+	// Constantinople
+	if num := genesis.Config.ConstantinopleBlock; num != nil {
+		spec.setConstantinople(num)
+	}

 	spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
 	spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())

 	spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
 	spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
-	spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxUint64)
+	spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxInt64)
 	spec.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
-	spec.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
-	spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
-	spec.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
+	spec.Params.DifficultyBoundDivisor = (*math2.HexOrDecimal256)(params.DifficultyBoundDivisor)
+	spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
+	spec.Params.DurationLimit = (*math2.HexOrDecimal256)(params.DurationLimit)
 	spec.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)

 	spec.Genesis.Nonce = (hexutil.Bytes)(make([]byte, 8))
@@ -126,77 +143,108 @@ func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEther
 	spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
 	spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)

-	spec.Accounts = make(map[common.Address]*cppEthereumGenesisSpecAccount)
 	for address, account := range genesis.Alloc {
-		spec.Accounts[address] = &cppEthereumGenesisSpecAccount{
-			Balance: (*hexutil.Big)(account.Balance),
-			Nonce:   account.Nonce,
-		}
+		spec.setAccount(address, account)
 	}
-	spec.Accounts[common.BytesToAddress([]byte{1})].Precompiled = &cppEthereumGenesisSpecBuiltin{
-		Name: "ecrecover", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 3000},
-	}
-	spec.Accounts[common.BytesToAddress([]byte{2})].Precompiled = &cppEthereumGenesisSpecBuiltin{
-		Name: "sha256", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 60, Word: 12},
-	}
-	spec.Accounts[common.BytesToAddress([]byte{3})].Precompiled = &cppEthereumGenesisSpecBuiltin{
-		Name: "ripemd160", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 600, Word: 120},
-	}
-	spec.Accounts[common.BytesToAddress([]byte{4})].Precompiled = &cppEthereumGenesisSpecBuiltin{
-		Name: "identity", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 15, Word: 3},
-	}
+
+	spec.setPrecompile(1, &alethGenesisSpecBuiltin{Name: "ecrecover",
+		Linear: &alethGenesisSpecLinearPricing{Base: 3000}})
+	spec.setPrecompile(2, &alethGenesisSpecBuiltin{Name: "sha256",
+		Linear: &alethGenesisSpecLinearPricing{Base: 60, Word: 12}})
+	spec.setPrecompile(3, &alethGenesisSpecBuiltin{Name: "ripemd160",
+		Linear: &alethGenesisSpecLinearPricing{Base: 600, Word: 120}})
+	spec.setPrecompile(4, &alethGenesisSpecBuiltin{Name: "identity",
+		Linear: &alethGenesisSpecLinearPricing{Base: 15, Word: 3}})
 	if genesis.Config.ByzantiumBlock != nil {
-		spec.Accounts[common.BytesToAddress([]byte{5})].Precompiled = &cppEthereumGenesisSpecBuiltin{
-			Name: "modexp", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
-		}
-		spec.Accounts[common.BytesToAddress([]byte{6})].Precompiled = &cppEthereumGenesisSpecBuiltin{
-			Name: "alt_bn128_G1_add", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 500},
-		}
-		spec.Accounts[common.BytesToAddress([]byte{7})].Precompiled = &cppEthereumGenesisSpecBuiltin{
-			Name: "alt_bn128_G1_mul", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 40000},
-		}
-		spec.Accounts[common.BytesToAddress([]byte{8})].Precompiled = &cppEthereumGenesisSpecBuiltin{
-			Name: "alt_bn128_pairing_product", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
-		}
+		spec.setPrecompile(5, &alethGenesisSpecBuiltin{Name: "modexp",
+			StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
+		spec.setPrecompile(6, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_add",
+			StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
+			Linear:        &alethGenesisSpecLinearPricing{Base: 500}})
+		spec.setPrecompile(7, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_mul",
+			StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
+			Linear:        &alethGenesisSpecLinearPricing{Base: 40000}})
+		spec.setPrecompile(8, &alethGenesisSpecBuiltin{Name: "alt_bn128_pairing_product",
+			StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
 	}
 	return spec, nil
 }

+func (spec *alethGenesisSpec) setPrecompile(address byte, data *alethGenesisSpecBuiltin) {
+	if spec.Accounts == nil {
+		spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
+	}
+	addr := common.UnprefixedAddress(common.BytesToAddress([]byte{address}))
+	if _, exist := spec.Accounts[addr]; !exist {
+		spec.Accounts[addr] = &alethGenesisSpecAccount{}
+	}
+	spec.Accounts[addr].Precompiled = data
+}
+
+func (spec *alethGenesisSpec) setAccount(address common.Address, account core.GenesisAccount) {
+	if spec.Accounts == nil {
+		spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
+	}
+
+	a, exist := spec.Accounts[common.UnprefixedAddress(address)]
+	if !exist {
+		a = &alethGenesisSpecAccount{}
+		spec.Accounts[common.UnprefixedAddress(address)] = a
+	}
+	a.Balance = (*math2.HexOrDecimal256)(account.Balance)
+	a.Nonce = account.Nonce
+}
+
+func (spec *alethGenesisSpec) setByzantium(num *big.Int) {
+	spec.Params.ByzantiumForkBlock = hexutil.Uint64(num.Uint64())
+}
+
+func (spec *alethGenesisSpec) setConstantinople(num *big.Int) {
+	spec.Params.ConstantinopleForkBlock = hexutil.Uint64(num.Uint64())
+}
+
|
// parityChainSpec is the chain specification format used by Parity.
|
||||||
type parityChainSpec struct {
|
type parityChainSpec struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Engine struct {
|
Datadir string `json:"dataDir"`
|
||||||
|
Engine struct {
|
||||||
Ethash struct {
|
Ethash struct {
|
||||||
Params struct {
|
Params struct {
|
||||||
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
|
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
|
||||||
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
|
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
|
||||||
DurationLimit *hexutil.Big `json:"durationLimit"`
|
DurationLimit *hexutil.Big `json:"durationLimit"`
|
||||||
BlockReward *hexutil.Big `json:"blockReward"`
|
BlockReward map[string]string `json:"blockReward"`
|
||||||
HomesteadTransition uint64 `json:"homesteadTransition"`
|
DifficultyBombDelays map[string]string `json:"difficultyBombDelays"`
|
||||||
EIP150Transition uint64 `json:"eip150Transition"`
|
HomesteadTransition hexutil.Uint64 `json:"homesteadTransition"`
|
||||||
EIP160Transition uint64 `json:"eip160Transition"`
|
EIP100bTransition hexutil.Uint64 `json:"eip100bTransition"`
|
||||||
EIP161abcTransition uint64 `json:"eip161abcTransition"`
|
|
||||||
EIP161dTransition uint64 `json:"eip161dTransition"`
|
|
||||||
EIP649Reward *hexutil.Big `json:"eip649Reward"`
|
|
||||||
EIP100bTransition uint64 `json:"eip100bTransition"`
|
|
||||||
EIP649Transition uint64 `json:"eip649Transition"`
|
|
||||||
} `json:"params"`
|
} `json:"params"`
|
||||||
} `json:"Ethash"`
|
} `json:"Ethash"`
|
||||||
} `json:"engine"`
|
} `json:"engine"`
|
||||||
|
|
||||||
Params struct {
|
Params struct {
|
||||||
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
|
AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
|
||||||
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
|
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
|
||||||
GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
|
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
|
||||||
NetworkID hexutil.Uint64 `json:"networkID"`
|
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
|
||||||
MaxCodeSize uint64 `json:"maxCodeSize"`
|
NetworkID hexutil.Uint64 `json:"networkID"`
|
||||||
EIP155Transition uint64 `json:"eip155Transition"`
|
ChainID hexutil.Uint64 `json:"chainID"`
|
||||||
EIP98Transition uint64 `json:"eip98Transition"`
|
MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"`
|
||||||
EIP86Transition uint64 `json:"eip86Transition"`
|
MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"`
|
||||||
EIP140Transition uint64 `json:"eip140Transition"`
|
EIP98Transition hexutil.Uint64 `json:"eip98Transition"`
|
||||||
EIP211Transition uint64 `json:"eip211Transition"`
|
EIP150Transition hexutil.Uint64 `json:"eip150Transition"`
|
||||||
EIP214Transition uint64 `json:"eip214Transition"`
|
EIP160Transition hexutil.Uint64 `json:"eip160Transition"`
|
||||||
EIP658Transition uint64 `json:"eip658Transition"`
|
EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"`
|
||||||
|
EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"`
|
||||||
|
EIP155Transition hexutil.Uint64 `json:"eip155Transition"`
|
||||||
|
EIP140Transition hexutil.Uint64 `json:"eip140Transition"`
|
||||||
|
EIP211Transition hexutil.Uint64 `json:"eip211Transition"`
|
||||||
|
EIP214Transition hexutil.Uint64 `json:"eip214Transition"`
|
||||||
|
EIP658Transition hexutil.Uint64 `json:"eip658Transition"`
|
||||||
|
EIP145Transition hexutil.Uint64 `json:"eip145Transition"`
|
||||||
|
EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"`
|
||||||
|
EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"`
|
||||||
|
EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"`
|
||||||
} `json:"params"`
|
} `json:"params"`
|
||||||
|
|
||||||
Genesis struct {
|
Genesis struct {
|
||||||
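A note on the math2.HexOrDecimal64 / HexOrDecimal256 types that replace hexutil fields above and in the hunks below: they accept either 0x-prefixed hex or plain decimal when a spec is parsed back in, which is why they suit foreign chain-spec formats better than the stricter hexutil types. A small sketch (assuming the common/math package's UnmarshalText, as used elsewhere in go-ethereum):

package main

import (
	"fmt"
	"math/big"

	math2 "github.com/ethereum/go-ethereum/common/math"
)

func main() {
	var bal math2.HexOrDecimal256
	for _, in := range []string{"0x2710", "10000"} { // both decode to 10000
		if err := bal.UnmarshalText([]byte(in)); err != nil {
			panic(err)
		}
		fmt.Println((*big.Int)(&bal))
	}
}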
@ -215,22 +263,22 @@ type parityChainSpec struct {
|
|||||||
GasLimit hexutil.Uint64 `json:"gasLimit"`
|
GasLimit hexutil.Uint64 `json:"gasLimit"`
|
||||||
} `json:"genesis"`
|
} `json:"genesis"`
|
||||||
|
|
||||||
Nodes []string `json:"nodes"`
|
Nodes []string `json:"nodes"`
|
||||||
Accounts map[common.Address]*parityChainSpecAccount `json:"accounts"`
|
Accounts map[common.UnprefixedAddress]*parityChainSpecAccount `json:"accounts"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// parityChainSpecAccount is the prefunded genesis account and/or precompiled
|
// parityChainSpecAccount is the prefunded genesis account and/or precompiled
|
||||||
// contract definition.
|
// contract definition.
|
||||||
type parityChainSpecAccount struct {
|
type parityChainSpecAccount struct {
|
||||||
Balance *hexutil.Big `json:"balance"`
|
Balance math2.HexOrDecimal256 `json:"balance"`
|
||||||
Nonce uint64 `json:"nonce,omitempty"`
|
Nonce math2.HexOrDecimal64 `json:"nonce,omitempty"`
|
||||||
Builtin *parityChainSpecBuiltin `json:"builtin,omitempty"`
|
Builtin *parityChainSpecBuiltin `json:"builtin,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// parityChainSpecBuiltin is the precompiled contract definition.
|
// parityChainSpecBuiltin is the precompiled contract definition.
|
||||||
type parityChainSpecBuiltin struct {
|
type parityChainSpecBuiltin struct {
|
||||||
Name string `json:"name,omitempty"`
|
Name string `json:"name,omitempty"`
|
||||||
ActivateAt uint64 `json:"activate_at,omitempty"`
|
ActivateAt math2.HexOrDecimal64 `json:"activate_at,omitempty"`
|
 	Pricing *parityChainSpecPricing `json:"pricing,omitempty"`
 }
 
@@ -265,34 +313,51 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
 	}
 	// Reconstruct the chain spec in Parity's format
 	spec := &parityChainSpec{
 		Name:    network,
 		Nodes:   bootnodes,
+		Datadir: strings.ToLower(network),
 	}
+	spec.Engine.Ethash.Params.BlockReward = make(map[string]string)
+	spec.Engine.Ethash.Params.DifficultyBombDelays = make(map[string]string)
+	// Frontier
 	spec.Engine.Ethash.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
 	spec.Engine.Ethash.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
 	spec.Engine.Ethash.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
-	spec.Engine.Ethash.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
-	spec.Engine.Ethash.Params.HomesteadTransition = genesis.Config.HomesteadBlock.Uint64()
-	spec.Engine.Ethash.Params.EIP150Transition = genesis.Config.EIP150Block.Uint64()
-	spec.Engine.Ethash.Params.EIP160Transition = genesis.Config.EIP155Block.Uint64()
-	spec.Engine.Ethash.Params.EIP161abcTransition = genesis.Config.EIP158Block.Uint64()
-	spec.Engine.Ethash.Params.EIP161dTransition = genesis.Config.EIP158Block.Uint64()
-	spec.Engine.Ethash.Params.EIP649Reward = (*hexutil.Big)(ethash.ByzantiumBlockReward)
-	spec.Engine.Ethash.Params.EIP100bTransition = genesis.Config.ByzantiumBlock.Uint64()
-	spec.Engine.Ethash.Params.EIP649Transition = genesis.Config.ByzantiumBlock.Uint64()
+	spec.Engine.Ethash.Params.BlockReward["0x0"] = hexutil.EncodeBig(ethash.FrontierBlockReward)
 
+	// Homestead
+	spec.Engine.Ethash.Params.HomesteadTransition = hexutil.Uint64(genesis.Config.HomesteadBlock.Uint64())
+
+	// Tangerine Whistle : 150
+	// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-608.md
+	spec.Params.EIP150Transition = hexutil.Uint64(genesis.Config.EIP150Block.Uint64())
+
+	// Spurious Dragon: 155, 160, 161, 170
+	// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-607.md
+	spec.Params.EIP155Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
+	spec.Params.EIP160Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
+	spec.Params.EIP161abcTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
+	spec.Params.EIP161dTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
+
+	// Byzantium
+	if num := genesis.Config.ByzantiumBlock; num != nil {
+		spec.setByzantium(num)
+	}
+	// Constantinople
+	if num := genesis.Config.ConstantinopleBlock; num != nil {
+		spec.setConstantinople(num)
+	}
 	spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
 	spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
-	spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
+	spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
 	spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
+	spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
 	spec.Params.MaxCodeSize = params.MaxCodeSize
-	spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
-	spec.Params.EIP98Transition = math.MaxUint64
-	spec.Params.EIP86Transition = math.MaxUint64
-	spec.Params.EIP140Transition = genesis.Config.ByzantiumBlock.Uint64()
-	spec.Params.EIP211Transition = genesis.Config.ByzantiumBlock.Uint64()
-	spec.Params.EIP214Transition = genesis.Config.ByzantiumBlock.Uint64()
-	spec.Params.EIP658Transition = genesis.Config.ByzantiumBlock.Uint64()
+	// geth has it set from zero
+	spec.Params.MaxCodeSizeTransition = 0
+	// Disable this one
+	spec.Params.EIP98Transition = math.MaxInt64
 
 	spec.Genesis.Seal.Ethereum.Nonce = (hexutil.Bytes)(make([]byte, 8))
 	binary.LittleEndian.PutUint64(spec.Genesis.Seal.Ethereum.Nonce[:], genesis.Nonce)
@@ -305,42 +370,77 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
 	spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
 	spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
 
-	spec.Accounts = make(map[common.Address]*parityChainSpecAccount)
+	spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
 	for address, account := range genesis.Alloc {
-		spec.Accounts[address] = &parityChainSpecAccount{
-			Balance: (*hexutil.Big)(account.Balance),
-			Nonce:   account.Nonce,
+		bal := math2.HexOrDecimal256(*account.Balance)
+
+		spec.Accounts[common.UnprefixedAddress(address)] = &parityChainSpecAccount{
+			Balance: bal,
+			Nonce:   math2.HexOrDecimal64(account.Nonce),
 		}
 	}
-	spec.Accounts[common.BytesToAddress([]byte{1})].Builtin = &parityChainSpecBuiltin{
-		Name: "ecrecover", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}},
-	}
-	spec.Accounts[common.BytesToAddress([]byte{2})].Builtin = &parityChainSpecBuiltin{
+	spec.setPrecompile(1, &parityChainSpecBuiltin{Name: "ecrecover",
+		Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}}})
+
+	spec.setPrecompile(2, &parityChainSpecBuiltin{
 		Name: "sha256", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 60, Word: 12}},
-	}
-	spec.Accounts[common.BytesToAddress([]byte{3})].Builtin = &parityChainSpecBuiltin{
+	})
+	spec.setPrecompile(3, &parityChainSpecBuiltin{
 		Name: "ripemd160", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 600, Word: 120}},
-	}
-	spec.Accounts[common.BytesToAddress([]byte{4})].Builtin = &parityChainSpecBuiltin{
+	})
+	spec.setPrecompile(4, &parityChainSpecBuiltin{
 		Name: "identity", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 15, Word: 3}},
-	}
+	})
 	if genesis.Config.ByzantiumBlock != nil {
-		spec.Accounts[common.BytesToAddress([]byte{5})].Builtin = &parityChainSpecBuiltin{
-			Name: "modexp", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
-		}
-		spec.Accounts[common.BytesToAddress([]byte{6})].Builtin = &parityChainSpecBuiltin{
-			Name: "alt_bn128_add", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
-		}
-		spec.Accounts[common.BytesToAddress([]byte{7})].Builtin = &parityChainSpecBuiltin{
-			Name: "alt_bn128_mul", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
-		}
-		spec.Accounts[common.BytesToAddress([]byte{8})].Builtin = &parityChainSpecBuiltin{
-			Name: "alt_bn128_pairing", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
-		}
+		blnum := math2.HexOrDecimal64(genesis.Config.ByzantiumBlock.Uint64())
+		spec.setPrecompile(5, &parityChainSpecBuiltin{
+			Name: "modexp", ActivateAt: blnum, Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
+		})
+		spec.setPrecompile(6, &parityChainSpecBuiltin{
+			Name: "alt_bn128_add", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
+		})
+		spec.setPrecompile(7, &parityChainSpecBuiltin{
+			Name: "alt_bn128_mul", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
+		})
+		spec.setPrecompile(8, &parityChainSpecBuiltin{
+			Name: "alt_bn128_pairing", ActivateAt: blnum, Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
+		})
 	}
 	return spec, nil
 }
 
+func (spec *parityChainSpec) setPrecompile(address byte, data *parityChainSpecBuiltin) {
+	if spec.Accounts == nil {
+		spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
+	}
+	a := common.UnprefixedAddress(common.BytesToAddress([]byte{address}))
+	if _, exist := spec.Accounts[a]; !exist {
+		spec.Accounts[a] = &parityChainSpecAccount{}
+	}
+	spec.Accounts[a].Builtin = data
+}
+
+func (spec *parityChainSpec) setByzantium(num *big.Int) {
+	spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ByzantiumBlockReward)
+	spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(3000000)
+	n := hexutil.Uint64(num.Uint64())
+	spec.Engine.Ethash.Params.EIP100bTransition = n
+	spec.Params.EIP140Transition = n
+	spec.Params.EIP211Transition = n
+	spec.Params.EIP214Transition = n
+	spec.Params.EIP658Transition = n
+}
+
+func (spec *parityChainSpec) setConstantinople(num *big.Int) {
+	spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ConstantinopleBlockReward)
+	spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(2000000)
+	n := hexutil.Uint64(num.Uint64())
+	spec.Params.EIP145Transition = n
+	spec.Params.EIP1014Transition = n
+	spec.Params.EIP1052Transition = n
+	spec.Params.EIP1283Transition = n
+}
+
 // pyEthereumGenesisSpec represents the genesis specification format used by the
 // Python Ethereum implementation.
 type pyEthereumGenesisSpec struct {
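Aside (illustration, not part of the PR): the hunks above replace Parity's single blockReward value with maps keyed by activation block, so each fork's reward and difficulty-bomb delay can coexist in one spec. A minimal standalone sketch of how such maps serialize; the ethashParams type and its field names are assumed for illustration, the values mirror the stureby testdata below.

package main

import (
	"encoding/json"
	"fmt"
)

// ethashParams mirrors just the shape Parity expects: each activation
// block (hex) maps to the reward (or bomb delay) valid from that block on.
type ethashParams struct {
	BlockReward          map[string]string `json:"blockReward"`
	DifficultyBombDelays map[string]string `json:"difficultyBombDelays"`
}

func main() {
	p := ethashParams{
		BlockReward: map[string]string{
			"0x0":    "0x4563918244f40000", // Frontier: 5 ether
			"0x7530": "0x29a2241af62c0000", // Byzantium: 3 ether
			"0x9c40": "0x1bc16d674ec80000", // Constantinople: 2 ether
		},
		DifficultyBombDelays: map[string]string{
			"0x7530": "0x2dc6c0", // 3,000,000 blocks
			"0x9c40": "0x1e8480", // 2,000,000 blocks
		},
	}
	out, _ := json.MarshalIndent(p, "", "  ")
	fmt.Println(string(out))
}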
109
cmd/puppeth/genesis_test.go
Normal file
@@ -0,0 +1,109 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/ethereum/go-ethereum/core"
+)
+
+// Tests the go-ethereum to Aleth chainspec conversion for the Stureby testnet.
+func TestAlethSturebyConverter(t *testing.T) {
+	blob, err := ioutil.ReadFile("testdata/stureby_geth.json")
+	if err != nil {
+		t.Fatalf("could not read file: %v", err)
+	}
+	var genesis core.Genesis
+	if err := json.Unmarshal(blob, &genesis); err != nil {
+		t.Fatalf("failed parsing genesis: %v", err)
+	}
+	spec, err := newAlethGenesisSpec("stureby", &genesis)
+	if err != nil {
+		t.Fatalf("failed creating chainspec: %v", err)
+	}
+
+	expBlob, err := ioutil.ReadFile("testdata/stureby_aleth.json")
+	if err != nil {
+		t.Fatalf("could not read file: %v", err)
+	}
+	expspec := &alethGenesisSpec{}
+	if err := json.Unmarshal(expBlob, expspec); err != nil {
+		t.Fatalf("failed parsing genesis: %v", err)
+	}
+	if !reflect.DeepEqual(expspec, spec) {
+		t.Errorf("chainspec mismatch")
+		c := spew.ConfigState{
+			DisablePointerAddresses: true,
+			SortKeys:                true,
+		}
+		exp := strings.Split(c.Sdump(expspec), "\n")
+		got := strings.Split(c.Sdump(spec), "\n")
+		for i := 0; i < len(exp) && i < len(got); i++ {
+			if exp[i] != got[i] {
+				fmt.Printf("got: %v\nexp: %v\n", exp[i], got[i])
+			}
+		}
+	}
+}
+
+// Tests the go-ethereum to Parity chainspec conversion for the Stureby testnet.
+func TestParitySturebyConverter(t *testing.T) {
+	blob, err := ioutil.ReadFile("testdata/stureby_geth.json")
+	if err != nil {
+		t.Fatalf("could not read file: %v", err)
+	}
+	var genesis core.Genesis
+	if err := json.Unmarshal(blob, &genesis); err != nil {
+		t.Fatalf("failed parsing genesis: %v", err)
+	}
+	spec, err := newParityChainSpec("Stureby", &genesis, []string{})
+	if err != nil {
+		t.Fatalf("failed creating chainspec: %v", err)
+	}
+
+	expBlob, err := ioutil.ReadFile("testdata/stureby_parity.json")
+	if err != nil {
+		t.Fatalf("could not read file: %v", err)
+	}
+	expspec := &parityChainSpec{}
+	if err := json.Unmarshal(expBlob, expspec); err != nil {
+		t.Fatalf("failed parsing genesis: %v", err)
+	}
+	expspec.Nodes = []string{}
+
+	if !reflect.DeepEqual(expspec, spec) {
+		t.Errorf("chainspec mismatch")
+		c := spew.ConfigState{
+			DisablePointerAddresses: true,
+			SortKeys:                true,
+		}
+		exp := strings.Split(c.Sdump(expspec), "\n")
+		got := strings.Split(c.Sdump(spec), "\n")
+		for i := 0; i < len(exp) && i < len(got); i++ {
+			if exp[i] != got[i] {
+				fmt.Printf("got: %v\nexp: %v\n", exp[i], got[i])
+			}
+		}
+	}
+}
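Aside (illustration, not part of the PR): both converter tests repeat the same deep-equal-then-spew comparison loop. A possible shared helper is sketched below; checkSpec is a hypothetical name, not in this PR, and it assumes the same imports as the test file above.

// checkSpec is a hypothetical helper (not part of this PR) factoring out the
// comparison both tests repeat: deep-equal first, then dump both values with
// spew and print the first diverging lines to ease debugging a mismatch.
func checkSpec(t *testing.T, exp, got interface{}) {
	t.Helper()
	if reflect.DeepEqual(exp, got) {
		return
	}
	t.Errorf("chainspec mismatch")
	c := spew.ConfigState{DisablePointerAddresses: true, SortKeys: true}
	expLines := strings.Split(c.Sdump(exp), "\n")
	gotLines := strings.Split(c.Sdump(got), "\n")
	for i := 0; i < len(expLines) && i < len(gotLines); i++ {
		if expLines[i] != gotLines[i] {
			fmt.Printf("got: %v\nexp: %v\n", gotLines[i], expLines[i])
		}
	}
}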
@@ -640,7 +640,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
 	files[filepath.Join(workdir, network+".json")] = genesis
 
 	if conf.Genesis.Config.Ethash != nil {
-		cppSpec, err := newCppEthereumGenesisSpec(network, conf.Genesis)
+		cppSpec, err := newAlethGenesisSpec(network, conf.Genesis)
 		if err != nil {
 			return nil, err
 		}
@@ -43,7 +43,8 @@ version: '2'
 services:
   ethstats:
     build: .
-    image: {{.Network}}/ethstats{{if not .VHost}}
+    image: {{.Network}}/ethstats
+    container_name: {{.Network}}_ethstats_1{{if not .VHost}}
     ports:
       - "{{.Port}}:3000"{{end}}
     environment:
|
|||||||
explorer:
|
explorer:
|
||||||
build: .
|
build: .
|
||||||
image: {{.Network}}/explorer
|
image: {{.Network}}/explorer
|
||||||
|
container_name: {{.Network}}_explorer_1
|
||||||
ports:
|
ports:
|
||||||
- "{{.NodePort}}:{{.NodePort}}"
|
- "{{.NodePort}}:{{.NodePort}}"
|
||||||
- "{{.NodePort}}:{{.NodePort}}/udp"{{if not .VHost}}
|
- "{{.NodePort}}:{{.NodePort}}/udp"{{if not .VHost}}
|
||||||
|
@ -56,8 +56,10 @@ services:
|
|||||||
faucet:
|
faucet:
|
||||||
build: .
|
build: .
|
||||||
image: {{.Network}}/faucet
|
image: {{.Network}}/faucet
|
||||||
|
container_name: {{.Network}}_faucet_1
|
||||||
ports:
|
ports:
|
||||||
- "{{.EthPort}}:{{.EthPort}}"{{if not .VHost}}
|
- "{{.EthPort}}:{{.EthPort}}"
|
||||||
|
- "{{.EthPort}}:{{.EthPort}}/udp"{{if not .VHost}}
|
||||||
- "{{.ApiPort}}:8080"{{end}}
|
- "{{.ApiPort}}:8080"{{end}}
|
||||||
volumes:
|
volumes:
|
||||||
- {{.Datadir}}:/root/.faucet
|
- {{.Datadir}}:/root/.faucet
|
||||||
|
@@ -40,6 +40,7 @@ services:
   nginx:
     build: .
     image: {{.Network}}/nginx
+    container_name: {{.Network}}_nginx_1
     ports:
       - "{{.Port}}:80"
     volumes:
|
|||||||
{{.Type}}:
|
{{.Type}}:
|
||||||
build: .
|
build: .
|
||||||
image: {{.Network}}/{{.Type}}
|
image: {{.Network}}/{{.Type}}
|
||||||
|
container_name: {{.Network}}_{{.Type}}_1
|
||||||
ports:
|
ports:
|
||||||
- "{{.Port}}:{{.Port}}"
|
- "{{.Port}}:{{.Port}}"
|
||||||
- "{{.Port}}:{{.Port}}/udp"
|
- "{{.Port}}:{{.Port}}/udp"
|
||||||
|
@ -57,6 +57,7 @@ services:
|
|||||||
wallet:
|
wallet:
|
||||||
build: .
|
build: .
|
||||||
image: {{.Network}}/wallet
|
image: {{.Network}}/wallet
|
||||||
|
container_name: {{.Network}}_wallet_1
|
||||||
ports:
|
ports:
|
||||||
- "{{.NodePort}}:{{.NodePort}}"
|
- "{{.NodePort}}:{{.NodePort}}"
|
||||||
- "{{.NodePort}}:{{.NodePort}}/udp"
|
- "{{.NodePort}}:{{.NodePort}}/udp"
|
||||||
|
@@ -43,18 +43,23 @@ func main() {
 			Usage: "log level to emit to the screen",
 		},
 	}
-	app.Action = func(c *cli.Context) error {
+	app.Before = func(c *cli.Context) error {
 		// Set up the logger to print everything and the random generator
 		log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(c.Int("loglevel")), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))
 		rand.Seed(time.Now().UnixNano())
 
-		network := c.String("network")
-		if strings.Contains(network, " ") || strings.Contains(network, "-") {
-			log.Crit("No spaces or hyphens allowed in network name")
-		}
-		// Start the wizard and relinquish control
-		makeWizard(c.String("network")).run()
 		return nil
 	}
+	app.Action = runWizard
 	app.Run(os.Args)
 }
 
+// runWizard start the wizard and relinquish control to it.
+func runWizard(c *cli.Context) error {
+	network := c.String("network")
+	if strings.Contains(network, " ") || strings.Contains(network, "-") || strings.ToLower(network) != network {
+		log.Crit("No spaces, hyphens or capital letters allowed in network name")
+	}
+	makeWizard(c.String("network")).run()
+	return nil
+}
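Aside (illustration, not part of the PR): the hunk above moves global setup into app.Before, which the cli framework runs ahead of any action, so the action itself shrinks to the wizard launch. A minimal sketch of that wiring; the import path gopkg.in/urfave/cli.v1 is an assumption based on what go-ethereum vendored at the time.

package main

import (
	"fmt"
	"os"

	"gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	// Before runs ahead of every action, so one-time setup such as logging
	// and RNG seeding no longer has to live inside the action itself.
	app.Before = func(c *cli.Context) error {
		fmt.Println("global setup runs first")
		return nil
	}
	app.Action = func(c *cli.Context) error {
		fmt.Println("action runs after Before")
		return nil
	}
	app.Run(os.Args)
}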
112
cmd/puppeth/testdata/stureby_aleth.json
vendored
Normal file
@@ -0,0 +1,112 @@
+{
+  "sealEngine":"Ethash",
+  "params":{
+    "accountStartNonce":"0x00",
+    "maximumExtraDataSize":"0x20",
+    "homesteadForkBlock":"0x2710",
+    "daoHardforkBlock":"0x00",
+    "EIP150ForkBlock":"0x3a98",
+    "EIP158ForkBlock":"0x59d8",
+    "byzantiumForkBlock":"0x7530",
+    "constantinopleForkBlock":"0x9c40",
+    "minGasLimit":"0x1388",
+    "maxGasLimit":"0x7fffffffffffffff",
+    "tieBreakingGas":false,
+    "gasLimitBoundDivisor":"0x0400",
+    "minimumDifficulty":"0x20000",
+    "difficultyBoundDivisor":"0x0800",
+    "durationLimit":"0x0d",
+    "blockReward":"0x4563918244F40000",
+    "networkID":"0x4cb2e",
+    "chainID":"0x4cb2e",
+    "allowFutureBlocks":false
+  },
+  "genesis":{
+    "nonce":"0x0000000000000000",
+    "difficulty":"0x20000",
+    "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+    "author":"0x0000000000000000000000000000000000000000",
+    "timestamp":"0x59a4e76d",
+    "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+    "extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
+    "gasLimit":"0x47b760"
+  },
+  "accounts":{
+    "0000000000000000000000000000000000000001":{
+      "balance":"1",
+      "precompiled":{
+        "name":"ecrecover",
+        "linear":{
+          "base":3000,
+          "word":0
+        }
+      }
+    },
+    "0000000000000000000000000000000000000002":{
+      "balance":"1",
+      "precompiled":{
+        "name":"sha256",
+        "linear":{
+          "base":60,
+          "word":12
+        }
+      }
+    },
+    "0000000000000000000000000000000000000003":{
+      "balance":"1",
+      "precompiled":{
+        "name":"ripemd160",
+        "linear":{
+          "base":600,
+          "word":120
+        }
+      }
+    },
+    "0000000000000000000000000000000000000004":{
+      "balance":"1",
+      "precompiled":{
+        "name":"identity",
+        "linear":{
+          "base":15,
+          "word":3
+        }
+      }
+    },
+    "0000000000000000000000000000000000000005":{
+      "balance":"1",
+      "precompiled":{
+        "name":"modexp",
+        "startingBlock":"0x7530"
+      }
+    },
+    "0000000000000000000000000000000000000006":{
+      "balance":"1",
+      "precompiled":{
+        "name":"alt_bn128_G1_add",
+        "startingBlock":"0x7530",
+        "linear":{
+          "base":500,
+          "word":0
+        }
+      }
+    },
+    "0000000000000000000000000000000000000007":{
+      "balance":"1",
+      "precompiled":{
+        "name":"alt_bn128_G1_mul",
+        "startingBlock":"0x7530",
+        "linear":{
+          "base":40000,
+          "word":0
+        }
+      }
+    },
+    "0000000000000000000000000000000000000008":{
+      "balance":"1",
+      "precompiled":{
+        "name":"alt_bn128_pairing_product",
+        "startingBlock":"0x7530"
+      }
+    }
+  }
+}
47
cmd/puppeth/testdata/stureby_geth.json
vendored
Normal file
@@ -0,0 +1,47 @@
+{
+  "config": {
+    "ethash":{},
+    "chainId": 314158,
+    "homesteadBlock": 10000,
+    "eip150Block": 15000,
+    "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+    "eip155Block": 23000,
+    "eip158Block": 23000,
+    "byzantiumBlock": 30000,
+    "constantinopleBlock": 40000
+  },
+  "nonce": "0x0",
+  "timestamp": "0x59a4e76d",
+  "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+  "extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
+  "gasLimit": "0x47b760",
+  "difficulty": "0x20000",
+  "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+  "coinbase": "0x0000000000000000000000000000000000000000",
+  "alloc": {
+    "0000000000000000000000000000000000000001": {
+      "balance": "0x01"
+    },
+    "0000000000000000000000000000000000000002": {
+      "balance": "0x01"
+    },
+    "0000000000000000000000000000000000000003": {
+      "balance": "0x01"
+    },
+    "0000000000000000000000000000000000000004": {
+      "balance": "0x01"
+    },
+    "0000000000000000000000000000000000000005": {
+      "balance": "0x01"
+    },
+    "0000000000000000000000000000000000000006": {
+      "balance": "0x01"
+    },
+    "0000000000000000000000000000000000000007": {
+      "balance": "0x01"
+    },
+    "0000000000000000000000000000000000000008": {
+      "balance": "0x01"
+    }
+  }
+}
181
cmd/puppeth/testdata/stureby_parity.json
vendored
Normal file
@@ -0,0 +1,181 @@
+{
+  "name":"Stureby",
+  "dataDir":"stureby",
+  "engine":{
+    "Ethash":{
+      "params":{
+        "minimumDifficulty":"0x20000",
+        "difficultyBoundDivisor":"0x800",
+        "durationLimit":"0xd",
+        "blockReward":{
+          "0x0":"0x4563918244f40000",
+          "0x7530":"0x29a2241af62c0000",
+          "0x9c40":"0x1bc16d674ec80000"
+        },
+        "homesteadTransition":"0x2710",
+        "eip100bTransition":"0x7530",
+        "difficultyBombDelays":{
+          "0x7530":"0x2dc6c0",
+          "0x9c40":"0x1e8480"
+        }
+      }
+    }
+  },
+  "params":{
+    "accountStartNonce":"0x0",
+    "maximumExtraDataSize":"0x20",
+    "gasLimitBoundDivisor":"0x400",
+    "minGasLimit":"0x1388",
+    "networkID":"0x4cb2e",
+    "chainID":"0x4cb2e",
+    "maxCodeSize":"0x6000",
+    "maxCodeSizeTransition":"0x0",
+    "eip98Transition": "0x7fffffffffffffff",
+    "eip150Transition":"0x3a98",
+    "eip160Transition":"0x59d8",
+    "eip161abcTransition":"0x59d8",
+    "eip161dTransition":"0x59d8",
+    "eip155Transition":"0x59d8",
+    "eip140Transition":"0x7530",
+    "eip211Transition":"0x7530",
+    "eip214Transition":"0x7530",
+    "eip658Transition":"0x7530",
+    "eip145Transition":"0x9c40",
+    "eip1014Transition":"0x9c40",
+    "eip1052Transition":"0x9c40",
+    "eip1283Transition":"0x9c40"
+  },
+  "genesis":{
+    "seal":{
+      "ethereum":{
+        "nonce":"0x0000000000000000",
+        "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000"
+      }
+    },
+    "difficulty":"0x20000",
+    "author":"0x0000000000000000000000000000000000000000",
+    "timestamp":"0x59a4e76d",
+    "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+    "extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
+    "gasLimit":"0x47b760"
+  },
+  "nodes":[
+    "enode://dfa7aca3f5b635fbfe7d0b20575f25e40d9e27b4bfbb3cf74364a42023ad9f25c1a4383bcc8cced86ee511a7d03415345a4df05be37f1dff040e4c780699f1c0@168.61.153.255:31303",
+    "enode://ef441b20dd70aeabf0eac35c3b8a2854e5ce04db0e30be9152ea9fd129359dcbb3f803993303ff5781c755dfd7223f3fe43505f583cccb740949407677412ba9@40.74.91.252:31303",
+    "enode://953b5ea1c8987cf46008232a0160324fd00d41320ecf00e23af86ec8f5396b19eb57ddab37c78141be56f62e9077de4f4dfa0747fa768ed8c8531bbfb1046237@40.70.214.166:31303",
+    "enode://276e613dd4b277a66591e565711e6c8bb107f0905248a9f8f8228c1a87992e156e5114bb9937c02824a9d9d25f76340442cf86e2028bf5293cae19904fb2b98e@35.178.251.52:30303",
+    "enode://064c820d41e52ed7d426ac64b60506c2998235bedc7e67cb497c6faf7bb4fc54fe56fc82d0add3180b747c0c4f40a1108a6f84d7d0629ed606d504528e61cc57@3.8.5.3:30303",
+    "enode://90069fdabcc5e684fa5d59430bebbb12755d9362dfe5006a1485b13d71a78a3812d36e74dd7d88e50b51add01e097ea80f16263aeaa4f0230db6c79e2a97e7ca@217.29.191.142:30303",
+    "enode://0aac74b7fd28726275e466acb5e03bc88a95927e9951eb66b5efb239b2f798ada0690853b2f2823fe4efa408f0f3d4dd258430bc952a5ff70677b8625b3e3b14@40.115.33.57:40404",
+    "enode://0b96415a10f835106d83e090a0528eed5e7887e5c802a6d084e9f1993a9d0fc713781e6e4101f6365e9b91259712f291acc0a9e6e667e22023050d602c36fbe2@40.115.33.57:40414"
+  ],
+  "accounts":{
+    "0000000000000000000000000000000000000001":{
+      "balance":"1",
+      "nonce":"0",
+      "builtin":{
+        "name":"ecrecover",
+        "pricing":{
+          "linear":{
+            "base":3000,
+            "word":0
+          }
+        }
+      }
+    },
+    "0000000000000000000000000000000000000002":{
+      "balance":"1",
+      "nonce":"0",
+      "builtin":{
+        "name":"sha256",
+        "pricing":{
+          "linear":{
+            "base":60,
+            "word":12
+          }
+        }
+      }
+    },
+    "0000000000000000000000000000000000000003":{
+      "balance":"1",
+      "nonce":"0",
+      "builtin":{
+        "name":"ripemd160",
+        "pricing":{
+          "linear":{
+            "base":600,
+            "word":120
+          }
+        }
+      }
+    },
+    "0000000000000000000000000000000000000004":{
+      "balance":"1",
+      "nonce":"0",
+      "builtin":{
+        "name":"identity",
+        "pricing":{
+          "linear":{
+            "base":15,
+            "word":3
+          }
+        }
+      }
+    },
+    "0000000000000000000000000000000000000005":{
+      "balance":"1",
+      "nonce":"0",
+      "builtin":{
+        "name":"modexp",
+        "activate_at":"0x7530",
+        "pricing":{
+          "modexp":{
+            "divisor":20
+          }
+        }
+      }
+    },
+    "0000000000000000000000000000000000000006":{
+      "balance":"1",
+      "nonce":"0",
+      "builtin":{
+        "name":"alt_bn128_add",
+        "activate_at":"0x7530",
+        "pricing":{
+          "linear":{
+            "base":500,
+            "word":0
+          }
+        }
+      }
+    },
+    "0000000000000000000000000000000000000007":{
+      "balance":"1",
+      "nonce":"0",
+      "builtin":{
+        "name":"alt_bn128_mul",
+        "activate_at":"0x7530",
+        "pricing":{
+          "linear":{
+            "base":40000,
+            "word":0
+          }
+        }
+      }
+    },
+    "0000000000000000000000000000000000000008":{
+      "balance":"1",
+      "nonce":"0",
+      "builtin":{
+        "name":"alt_bn128_pairing",
+        "activate_at":"0x7530",
+        "pricing":{
+          "alt_bn128_pairing":{
+            "base":100000,
+            "pair":80000
+          }
+        }
+      }
+    }
+  }
+}
@@ -23,6 +23,7 @@ import (
 	"io/ioutil"
 	"math/big"
 	"net"
+	"net/url"
 	"os"
 	"path/filepath"
 	"sort"
@@ -118,6 +119,47 @@ func (w *wizard) readDefaultString(def string) string {
 	return def
 }
 
+// readDefaultYesNo reads a single line from stdin, trimming if from spaces and
+// interpreting it as a 'yes' or a 'no'. If an empty line is entered, the default
+// value is returned.
+func (w *wizard) readDefaultYesNo(def bool) bool {
+	for {
+		fmt.Printf("> ")
+		text, err := w.in.ReadString('\n')
+		if err != nil {
+			log.Crit("Failed to read user input", "err", err)
+		}
+		if text = strings.ToLower(strings.TrimSpace(text)); text == "" {
+			return def
+		}
+		if text == "y" || text == "yes" {
+			return true
+		}
+		if text == "n" || text == "no" {
+			return false
+		}
+		log.Error("Invalid input, expected 'y', 'yes', 'n', 'no' or empty")
+	}
+}
+
+// readURL reads a single line from stdin, trimming if from spaces and trying to
+// interpret it as a URL (http, https or file).
+func (w *wizard) readURL() *url.URL {
+	for {
+		fmt.Printf("> ")
+		text, err := w.in.ReadString('\n')
+		if err != nil {
+			log.Crit("Failed to read user input", "err", err)
+		}
+		uri, err := url.Parse(strings.TrimSpace(text))
+		if err != nil {
+			log.Error("Invalid input, expected URL", "err", err)
+			continue
+		}
+		return uri
+	}
+}
+
 // readInt reads a single line from stdin, trimming if from spaces, enforcing it
 // to parse into an integer.
 func (w *wizard) readInt() int {
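Aside (illustration, not part of the PR): the new readDefaultYesNo normalizes input to lowercase and only accepts y/yes/n/no or an empty line for the default, which is what lets all the readDefaultString("y") == "y" call sites below collapse to a boolean. A self-contained sketch of the same loop outside the wizard type; promptYesNo is a hypothetical name.

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// promptYesNo mirrors the readDefaultYesNo loop from the diff above: empty
// input returns the default, otherwise only y/yes/n/no are accepted.
func promptYesNo(in *bufio.Reader, def bool) bool {
	for {
		fmt.Printf("> ")
		text, err := in.ReadString('\n')
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to read input: %v\n", err)
			os.Exit(1)
		}
		switch strings.ToLower(strings.TrimSpace(text)) {
		case "":
			return def
		case "y", "yes":
			return true
		case "n", "no":
			return false
		}
		fmt.Println("invalid input, expected 'y', 'yes', 'n', 'no' or empty")
	}
}

func main() {
	fmt.Println("Proceed (y/n)? (default = yes)")
	fmt.Println(promptYesNo(bufio.NewReader(os.Stdin), true))
}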
@@ -137,14 +137,14 @@ func (w *wizard) deployDashboard() {
 	if w.conf.ethstats != "" {
 		fmt.Println()
 		fmt.Println("Include ethstats secret on dashboard (y/n)? (default = yes)")
-		infos.trusted = w.readDefaultString("y") == "y"
+		infos.trusted = w.readDefaultYesNo(true)
 	}
 	// Try to deploy the dashboard container on the host
 	nocache := false
 	if existed {
 		fmt.Println()
 		fmt.Printf("Should the dashboard be built from scratch (y/n)? (default = no)\n")
-		nocache = w.readDefaultString("n") != "n"
+		nocache = w.readDefaultYesNo(false)
 	}
 	if out, err := deployDashboard(client, w.network, &w.conf, infos, nocache); err != nil {
 		log.Error("Failed to deploy dashboard container", "err", err)
@@ -67,11 +67,11 @@ func (w *wizard) deployEthstats() {
 	if existed {
 		fmt.Println()
 		fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned)
-		if w.readDefaultString("y") != "y" {
+		if !w.readDefaultYesNo(true) {
 			// The user might want to clear the entire list, although generally probably not
 			fmt.Println()
 			fmt.Printf("Clear out blacklist and start over (y/n)? (default = no)\n")
-			if w.readDefaultString("n") != "n" {
+			if w.readDefaultYesNo(false) {
 				infos.banned = nil
 			}
 			// Offer the user to explicitly add/remove certain IP addresses
@@ -106,7 +106,7 @@ func (w *wizard) deployEthstats() {
 	if existed {
 		fmt.Println()
 		fmt.Printf("Should the ethstats be built from scratch (y/n)? (default = no)\n")
-		nocache = w.readDefaultString("n") != "n"
+		nocache = w.readDefaultYesNo(false)
 	}
 	trusted := make([]string, 0, len(w.servers))
 	for _, client := range w.servers {
@@ -100,7 +100,7 @@ func (w *wizard) deployExplorer() {
 	if existed {
 		fmt.Println()
 		fmt.Printf("Should the explorer be built from scratch (y/n)? (default = no)\n")
-		nocache = w.readDefaultString("n") != "n"
+		nocache = w.readDefaultYesNo(false)
 	}
 	if out, err := deployExplorer(client, w.network, chain, infos, nocache); err != nil {
 		log.Error("Failed to deploy explorer container", "err", err)
@@ -81,7 +81,7 @@ func (w *wizard) deployFaucet() {
 	if infos.captchaToken != "" {
 		fmt.Println()
 		fmt.Println("Reuse previous reCaptcha API authorization (y/n)? (default = yes)")
-		if w.readDefaultString("y") != "y" {
+		if !w.readDefaultYesNo(true) {
 			infos.captchaToken, infos.captchaSecret = "", ""
 		}
 	}
@@ -89,7 +89,7 @@ func (w *wizard) deployFaucet() {
 		// No previous authorization (or old one discarded)
 		fmt.Println()
 		fmt.Println("Enable reCaptcha protection against robots (y/n)? (default = no)")
-		if w.readDefaultString("n") == "n" {
+		if !w.readDefaultYesNo(false) {
 			log.Warn("Users will be able to requests funds via automated scripts")
 		} else {
 			// Captcha protection explicitly requested, read the site and secret keys
@@ -132,7 +132,7 @@ func (w *wizard) deployFaucet() {
 		} else {
 			fmt.Println()
 			fmt.Printf("Reuse previous (%s) funding account (y/n)? (default = yes)\n", key.Address.Hex())
-			if w.readDefaultString("y") != "y" {
+			if !w.readDefaultYesNo(true) {
 				infos.node.keyJSON, infos.node.keyPass = "", ""
 			}
 		}
@@ -166,7 +166,7 @@ func (w *wizard) deployFaucet() {
 	if existed {
 		fmt.Println()
 		fmt.Printf("Should the faucet be built from scratch (y/n)? (default = no)\n")
-		nocache = w.readDefaultString("n") != "n"
+		nocache = w.readDefaultYesNo(false)
 	}
 	if out, err := deployFaucet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
 		log.Error("Failed to deploy faucet container", "err", err)
@@ -20,9 +20,13 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"math/big"
 	"math/rand"
+	"net/http"
+	"os"
+	"path/filepath"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -40,11 +44,12 @@ func (w *wizard) makeGenesis() {
 		Difficulty: big.NewInt(524288),
 		Alloc:      make(core.GenesisAlloc),
 		Config: &params.ChainConfig{
 			HomesteadBlock: big.NewInt(1),
 			EIP150Block:    big.NewInt(2),
 			EIP155Block:    big.NewInt(3),
 			EIP158Block:    big.NewInt(3),
 			ByzantiumBlock: big.NewInt(4),
+			ConstantinopleBlock: big.NewInt(5),
 		},
 	}
 	// Figure out which consensus engine to choose
@@ -114,9 +119,13 @@ func (w *wizard) makeGenesis() {
 		}
 		break
 	}
-	// Add a batch of precompile balances to avoid them getting deleted
-	for i := int64(0); i < 256; i++ {
-		genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)}
+	fmt.Println()
+	fmt.Println("Should the precompile-addresses (0x1 .. 0xff) be pre-funded with 1 wei? (advisable yes)")
+	if w.readDefaultYesNo(true) {
+		// Add a batch of precompile balances to avoid them getting deleted
+		for i := int64(0); i < 256; i++ {
+			genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)}
+		}
 	}
 	// Query the user for some custom extras
 	fmt.Println()
@@ -130,53 +139,130 @@ func (w *wizard) makeGenesis() {
 	w.conf.flush()
 }
 
+// importGenesis imports a Geth genesis spec into puppeth.
+func (w *wizard) importGenesis() {
+	// Request the genesis JSON spec URL from the user
+	fmt.Println()
+	fmt.Println("Where's the genesis file? (local file or http/https url)")
+	url := w.readURL()
+
+	// Convert the various allowed URLs to a reader stream
+	var reader io.Reader
+
+	switch url.Scheme {
+	case "http", "https":
+		// Remote web URL, retrieve it via an HTTP client
+		res, err := http.Get(url.String())
+		if err != nil {
+			log.Error("Failed to retrieve remote genesis", "err", err)
+			return
+		}
+		defer res.Body.Close()
+		reader = res.Body
+
+	case "":
+		// Schemaless URL, interpret as a local file
+		file, err := os.Open(url.String())
+		if err != nil {
+			log.Error("Failed to open local genesis", "err", err)
+			return
+		}
+		defer file.Close()
+		reader = file
+
+	default:
+		log.Error("Unsupported genesis URL scheme", "scheme", url.Scheme)
+		return
+	}
+	// Parse the genesis file and inject it successful
+	var genesis core.Genesis
+	if err := json.NewDecoder(reader).Decode(&genesis); err != nil {
+		log.Error("Invalid genesis spec: %v", err)
+		return
+	}
+	log.Info("Imported genesis block")
+
+	w.conf.Genesis = &genesis
+	w.conf.flush()
+}
+
 // manageGenesis permits the modification of chain configuration parameters in
 // a genesis config and the export of the entire genesis spec.
 func (w *wizard) manageGenesis() {
 	// Figure out whether to modify or export the genesis
 	fmt.Println()
 	fmt.Println(" 1. Modify existing fork rules")
-	fmt.Println(" 2. Export genesis configuration")
+	fmt.Println(" 2. Export genesis configurations")
 	fmt.Println(" 3. Remove genesis configuration")
 
 	choice := w.read()
-	switch {
-	case choice == "1":
+	switch choice {
+	case "1":
 		// Fork rule updating requested, iterate over each fork
 		fmt.Println()
 		fmt.Printf("Which block should Homestead come into effect? (default = %v)\n", w.conf.Genesis.Config.HomesteadBlock)
 		w.conf.Genesis.Config.HomesteadBlock = w.readDefaultBigInt(w.conf.Genesis.Config.HomesteadBlock)
 
 		fmt.Println()
-		fmt.Printf("Which block should EIP150 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block)
+		fmt.Printf("Which block should EIP150 (Tangerine Whistle) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block)
 		w.conf.Genesis.Config.EIP150Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP150Block)
 
 		fmt.Println()
-		fmt.Printf("Which block should EIP155 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block)
+		fmt.Printf("Which block should EIP155 (Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block)
 		w.conf.Genesis.Config.EIP155Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP155Block)
 
 		fmt.Println()
-		fmt.Printf("Which block should EIP158 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block)
+		fmt.Printf("Which block should EIP158/161 (also Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block)
 		w.conf.Genesis.Config.EIP158Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP158Block)
 
 		fmt.Println()
 		fmt.Printf("Which block should Byzantium come into effect? (default = %v)\n", w.conf.Genesis.Config.ByzantiumBlock)
 		w.conf.Genesis.Config.ByzantiumBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ByzantiumBlock)
 
+		fmt.Println()
+		fmt.Printf("Which block should Constantinople come into effect? (default = %v)\n", w.conf.Genesis.Config.ConstantinopleBlock)
+		w.conf.Genesis.Config.ConstantinopleBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ConstantinopleBlock)
+
 		out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", "  ")
 		fmt.Printf("Chain configuration updated:\n\n%s\n", out)
 
-	case choice == "2":
+	case "2":
 		// Save whatever genesis configuration we currently have
 		fmt.Println()
-		fmt.Printf("Which file to save the genesis into? (default = %s.json)\n", w.network)
-		out, _ := json.MarshalIndent(w.conf.Genesis, "", "  ")
-		if err := ioutil.WriteFile(w.readDefaultString(fmt.Sprintf("%s.json", w.network)), out, 0644); err != nil {
-			log.Error("Failed to save genesis file", "err", err)
-		}
-		log.Info("Exported existing genesis block")
+		fmt.Printf("Which folder to save the genesis specs into? (default = current)\n")
+		fmt.Printf("  Will create %s.json, %s-aleth.json, %s-harmony.json, %s-parity.json\n", w.network, w.network, w.network, w.network)
 
-	case choice == "3":
+		folder := w.readDefaultString(".")
+		if err := os.MkdirAll(folder, 0755); err != nil {
+			log.Error("Failed to create spec folder", "folder", folder, "err", err)
+			return
+		}
+		out, _ := json.MarshalIndent(w.conf.Genesis, "", "  ")
+
+		// Export the native genesis spec used by puppeth and Geth
+		gethJson := filepath.Join(folder, fmt.Sprintf("%s.json", w.network))
+		if err := ioutil.WriteFile((gethJson), out, 0644); err != nil {
+			log.Error("Failed to save genesis file", "err", err)
+			return
+		}
+		log.Info("Saved native genesis chain spec", "path", gethJson)
+
+		// Export the genesis spec used by Aleth (formerly C++ Ethereum)
+		if spec, err := newAlethGenesisSpec(w.network, w.conf.Genesis); err != nil {
+			log.Error("Failed to create Aleth chain spec", "err", err)
+		} else {
+			saveGenesis(folder, w.network, "aleth", spec)
+		}
+		// Export the genesis spec used by Parity
+		if spec, err := newParityChainSpec(w.network, w.conf.Genesis, []string{}); err != nil {
+			log.Error("Failed to create Parity chain spec", "err", err)
+		} else {
+			saveGenesis(folder, w.network, "parity", spec)
+		}
+		// Export the genesis spec used by Harmony (formerly EthereumJ)
+		saveGenesis(folder, w.network, "harmony", w.conf.Genesis)
+
+	case "3":
 		// Make sure we don't have any services running
 		if len(w.conf.servers()) > 0 {
 			log.Error("Genesis reset requires all services and servers torn down")
@@ -186,8 +272,20 @@ func (w *wizard) manageGenesis() {
 
 		w.conf.Genesis = nil
 		w.conf.flush()
+
 	default:
 		log.Error("That's not something I can do")
+		return
 	}
 }
+
+// saveGenesis JSON encodes an arbitrary genesis spec into a pre-defined file.
+func saveGenesis(folder, network, client string, spec interface{}) {
+	path := filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client))
+
+	out, _ := json.Marshal(spec)
+	if err := ioutil.WriteFile(path, out, 0644); err != nil {
+		log.Error("Failed to save genesis file", "client", client, "err", err)
+		return
+	}
+	log.Info("Saved genesis chain spec", "client", client, "path", path)
+}
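Aside (illustration, not part of the PR): importGenesis above normalizes either an http/https URL or a bare file path into an io.Reader before JSON-decoding. A trimmed sketch of just that dispatch, returning the reader instead of decoding in place; openGenesis is a hypothetical name and the local path in main is an assumed example.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
)

// openGenesis sketches the reader dispatch importGenesis performs: http/https
// URLs stream the response body, schemeless strings open a local file. The
// caller is responsible for closing the returned reader.
func openGenesis(raw string) (io.ReadCloser, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return nil, err
	}
	switch u.Scheme {
	case "http", "https":
		res, err := http.Get(u.String())
		if err != nil {
			return nil, err
		}
		return res.Body, nil
	case "":
		return os.Open(u.String())
	default:
		return nil, fmt.Errorf("unsupported scheme %q", u.Scheme)
	}
}

func main() {
	r, err := openGenesis("stureby.json") // hypothetical local path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer r.Close()
	io.Copy(os.Stdout, r)
}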
@@ -61,14 +61,14 @@ func (w *wizard) run() {
 	// Make sure we have a good network name to work with	fmt.Println()
 	// Docker accepts hyphens in image names, but doesn't like it for container names
 	if w.network == "" {
-		fmt.Println("Please specify a network name to administer (no spaces or hyphens, please)")
+		fmt.Println("Please specify a network name to administer (no spaces, hyphens or capital letters please)")
 		for {
 			w.network = w.readString()
-			if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") {
+			if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") && strings.ToLower(w.network) == w.network {
 				fmt.Printf("\nSweet, you can set this via --network=%s next time!\n\n", w.network)
 				break
 			}
-			log.Error("I also like to live dangerously, still no spaces or hyphens")
+			log.Error("I also like to live dangerously, still no spaces, hyphens or capital letters")
 		}
 	}
 	log.Info("Administering Ethereum network", "name", w.network)
@@ -131,7 +131,20 @@ func (w *wizard) run() {
 
 		case choice == "2":
 			if w.conf.Genesis == nil {
-				w.makeGenesis()
+				fmt.Println()
+				fmt.Println("What would you like to do? (default = create)")
+				fmt.Println(" 1. Create new genesis from scratch")
+				fmt.Println(" 2. Import already existing genesis")
+
+				choice := w.read()
+				switch {
+				case choice == "" || choice == "1":
+					w.makeGenesis()
+				case choice == "2":
+					w.importGenesis()
+				default:
+					log.Error("That's not something I can do")
+				}
 			} else {
 				w.manageGenesis()
 			}
@@ -149,7 +162,6 @@ func (w *wizard) run() {
 			} else {
 				w.manageComponents()
 			}
-
 		default:
 			log.Error("That's not something I can do")
 		}
@@ -41,12 +41,12 @@ func (w *wizard) ensureVirtualHost(client *sshClient, port int, def string) (str
 	// Reverse proxy is not running, offer to deploy a new one
 	fmt.Println()
 	fmt.Println("Allow sharing the port with other services (y/n)? (default = yes)")
-	if w.readDefaultString("y") == "y" {
+	if w.readDefaultYesNo(true) {
 		nocache := false
 		if proxy != nil {
 			fmt.Println()
 			fmt.Printf("Should the reverse-proxy be rebuilt from scratch (y/n)? (default = no)\n")
-			nocache = w.readDefaultString("n") != "n"
+			nocache = w.readDefaultYesNo(false)
 		}
 		if out, err := deployNginx(client, w.network, port, nocache); err != nil {
 			log.Error("Failed to deploy reverse-proxy", "err", err)
@@ -126,7 +126,7 @@ func (w *wizard) deployNode(boot bool) {
 	} else {
 		fmt.Println()
 		fmt.Printf("Reuse previous (%s) signing account (y/n)? (default = yes)\n", key.Address.Hex())
-		if w.readDefaultString("y") != "y" {
+		if !w.readDefaultYesNo(true) {
 			infos.keyJSON, infos.keyPass = "", ""
 		}
 	}
@@ -165,7 +165,7 @@ func (w *wizard) deployNode(boot bool) {
 	if existed {
 		fmt.Println()
 		fmt.Printf("Should the node be built from scratch (y/n)? (default = no)\n")
-		nocache = w.readDefaultString("n") != "n"
+		nocache = w.readDefaultYesNo(false)
 	}
 	if out, err := deployNode(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
 		log.Error("Failed to deploy Ethereum node container", "err", err)
@@ -96,7 +96,7 @@ func (w *wizard) deployWallet() {
 	if existed {
 		fmt.Println()
 		fmt.Printf("Should the wallet be built from scratch (y/n)? (default = no)\n")
-		nocache = w.readDefaultString("n") != "n"
+		nocache = w.readDefaultYesNo(false)
 	}
 	if out, err := deployWallet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
 		log.Error("Failed to deploy wallet container", "err", err)
@@ -33,11 +33,11 @@ import (
 
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/crypto/ecies"
-	"github.com/ethereum/go-ethereum/crypto/sha3"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/api"
 	swarmapi "github.com/ethereum/go-ethereum/swarm/api/client"
 	"github.com/ethereum/go-ethereum/swarm/testutil"
+	"golang.org/x/crypto/sha3"
 )
 
 const (
@@ -598,7 +598,7 @@ func TestKeypairSanity(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	hasher := sha3.NewKeccak256()
+	hasher := sha3.NewLegacyKeccak256()
 	hasher.Write(salt)
 	shared, err := hex.DecodeString(sharedSecret)
 	if err != nil {
@@ -26,14 +26,14 @@ import (
     "testing"
     "time"

+    "github.com/docker/docker/pkg/reexec"
+    "github.com/ethereum/go-ethereum/cmd/utils"
     "github.com/ethereum/go-ethereum/rpc"
     "github.com/ethereum/go-ethereum/swarm"
     "github.com/ethereum/go-ethereum/swarm/api"
-
-    "github.com/docker/docker/pkg/reexec"
 )

-func TestDumpConfig(t *testing.T) {
+func TestConfigDump(t *testing.T) {
     swarm := runSwarm(t, "dumpconfig")
     defaultConf := api.NewConfig()
     out, err := tomlSettings.Marshal(&defaultConf)
@@ -91,8 +91,8 @@ func TestConfigCmdLineOverrides(t *testing.T) {
         fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
         fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name),
         fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
-        "--datadir", dir,
-        "--ipcpath", conf.IPCPath,
+        fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
+        fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
     }
     node.Cmd = runSwarm(t, flags...)
     node.Cmd.InputLine(testPassphrase)
@@ -189,9 +189,9 @@ func TestConfigFileOverrides(t *testing.T) {
     flags := []string{
         fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
         fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
-        "--ens-api", "",
-        "--ipcpath", conf.IPCPath,
-        "--datadir", dir,
+        fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
+        fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
+        fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
     }
     node.Cmd = runSwarm(t, flags...)
     node.Cmd.InputLine(testPassphrase)
@@ -407,9 +407,9 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
         fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
         fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
         fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
-        "--ens-api", "",
-        "--datadir", dir,
-        "--ipcpath", conf.IPCPath,
+        fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
+        fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
+        fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
     }
     node.Cmd = runSwarm(t, flags...)
     node.Cmd.InputLine(testPassphrase)
@@ -466,7 +466,7 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
         node.Shutdown()
     }

-func TestValidateConfig(t *testing.T) {
+func TestConfigValidate(t *testing.T) {
     for _, c := range []struct {
         cfg *api.Config
         err string
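Note: the config tests now derive CLI arguments from the flag definitions instead of hard-coding strings such as "--datadir", so a renamed flag breaks loudly rather than silently. A stdlib-only sketch of the pattern with stand-in flag types (the real code uses cli.StringFlag values from gopkg.in/urfave/cli.v1 and cmd/utils):

    package main

    import "fmt"

    // Minimal stand-in for a cli flag definition, for illustration only.
    type StringFlag struct{ Name string }

    var (
        DataDirFlag = StringFlag{Name: "datadir"}
        IPCPathFlag = StringFlag{Name: "ipcpath"}
    )

    func main() {
        dir, ipc := "/tmp/datadir", "bzzd.ipc" // hypothetical values
        flags := []string{
            // Deriving "--datadir" from the flag definition keeps the test
            // in sync with the flag's actual name.
            fmt.Sprintf("--%s", DataDirFlag.Name), dir,
            fmt.Sprintf("--%s", IPCPathFlag.Name), ipc,
        }
        fmt.Println(flags)
    }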
@@ -169,7 +169,6 @@ func feedUpdate(ctx *cli.Context) {
         query = new(feed.Query)
         query.User = signer.Address()
         query.Topic = getTopic(ctx)
-
     }

     // Retrieve a feed update request
@@ -178,6 +177,11 @@ func feedUpdate(ctx *cli.Context) {
         utils.Fatalf("Error retrieving feed status: %s", err.Error())
     }

+    // Check that the provided signer matches the request to sign
+    if updateRequest.User != signer.Address() {
+        utils.Fatalf("Signer address does not match the update request")
+    }
+
     // set the new data
     updateRequest.SetData(data)

@@ -19,7 +19,6 @@ package main
 import (
     "bytes"
     "encoding/json"
-    "fmt"
     "io/ioutil"
     "os"
     "testing"
@@ -69,7 +68,7 @@ func TestCLIFeedUpdate(t *testing.T) {
         hexData}

     // create an update and expect an exit without errors
-    log.Info(fmt.Sprintf("updating a feed with 'swarm feed update'"))
+    log.Info("updating a feed with 'swarm feed update'")
     cmd := runSwarm(t, flags...)
     cmd.ExpectExit()

@@ -116,7 +115,7 @@ func TestCLIFeedUpdate(t *testing.T) {
         "--user", address.Hex(),
     }

-    log.Info(fmt.Sprintf("getting feed info with 'swarm feed info'"))
+    log.Info("getting feed info with 'swarm feed info'")
     cmd = runSwarm(t, flags...)
     _, matches := cmd.ExpectRegexp(`.*`) // regex hack to extract stdout
     cmd.ExpectExit()
@@ -141,9 +140,9 @@ func TestCLIFeedUpdate(t *testing.T) {
         "--topic", topic.Hex(),
     }

-    log.Info(fmt.Sprintf("Publishing manifest with 'swarm feed create'"))
+    log.Info("Publishing manifest with 'swarm feed create'")
     cmd = runSwarm(t, flags...)
-    _, matches = cmd.ExpectRegexp(`[a-f\d]{64}`) // regex hack to extract stdout
+    _, matches = cmd.ExpectRegexp(`[a-f\d]{64}`)
     cmd.ExpectExit()

     manifestAddress := matches[0] // read the received feed manifest
@@ -162,4 +161,36 @@ func TestCLIFeedUpdate(t *testing.T) {
     if !bytes.Equal(data, retrieved) {
         t.Fatalf("Received %s, expected %s", retrieved, data)
     }
+
+    // test publishing a manifest for a different user
+    flags = []string{
+        "--bzzapi", srv.URL,
+        "feed", "create",
+        "--topic", topic.Hex(),
+        "--user", "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // different user
+    }
+
+    log.Info("Publishing manifest with 'swarm feed create' for a different user")
+    cmd = runSwarm(t, flags...)
+    _, matches = cmd.ExpectRegexp(`[a-f\d]{64}`)
+    cmd.ExpectExit()
+
+    manifestAddress = matches[0] // read the received feed manifest
+
+    // now let's try to update that user's manifest which we don't have the private key for
+    flags = []string{
+        "--bzzapi", srv.URL,
+        "--bzzaccount", pkFileName,
+        "feed", "update",
+        "--manifest", manifestAddress,
+        hexData}
+
+    // create an update and expect an error given there is a user mismatch
+    log.Info("updating a feed with 'swarm feed update'")
+    cmd = runSwarm(t, flags...)
+    cmd.ExpectRegexp("Fatal:.*") // best way so far to detect a failure.
+    cmd.ExpectExit()
+    if cmd.ExitStatus() == 0 {
+        t.Fatal("Expected nonzero exit code when updating a manifest with the wrong user. Got 0.")
+    }
 }
@@ -164,10 +164,6 @@ var (
         Name:  "topic",
         Usage: "User-defined topic this feed is tracking, hex encoded. Limited to 64 hexadecimal characters",
     }
-    SwarmFeedDataOnCreateFlag = cli.StringFlag{
-        Name:  "data",
-        Usage: "Initializes the feed with the given hex-encoded data. Data must be prefixed by 0x",
-    }
     SwarmFeedManifestFlag = cli.StringFlag{
         Name:  "manifest",
         Usage: "Refers to the feed through a manifest",
@@ -24,7 +24,7 @@ import (
     "time"

     "github.com/ethereum/go-ethereum/cmd/utils"
-    "github.com/ethereum/go-ethereum/node"
+    "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/rpc"
     "github.com/ethereum/go-ethereum/swarm/fuse"
     "gopkg.in/urfave/cli.v1"
@@ -41,27 +41,24 @@ var fsCommand = cli.Command{
         Action:             mount,
         CustomHelpTemplate: helpTemplate,
         Name:               "mount",
-        Flags:              []cli.Flag{utils.IPCPathFlag},
         Usage:              "mount a swarm hash to a mount point",
-        ArgsUsage:          "swarm fs mount --ipcpath <path to bzzd.ipc> <manifest hash> <mount point>",
+        ArgsUsage:          "swarm fs mount <manifest hash> <mount point>",
         Description:        "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
     },
     {
         Action:             unmount,
         CustomHelpTemplate: helpTemplate,
         Name:               "unmount",
-        Flags:              []cli.Flag{utils.IPCPathFlag},
         Usage:              "unmount a swarmfs mount",
-        ArgsUsage:          "swarm fs unmount --ipcpath <path to bzzd.ipc> <mount point>",
+        ArgsUsage:          "swarm fs unmount <mount point>",
         Description:        "Unmounts a swarmfs mount residing at <mount point>. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
     },
     {
         Action:             listMounts,
         CustomHelpTemplate: helpTemplate,
         Name:               "list",
-        Flags:              []cli.Flag{utils.IPCPathFlag},
        Usage:              "list swarmfs mounts",
-        ArgsUsage:          "swarm fs list --ipcpath <path to bzzd.ipc>",
+        ArgsUsage:          "swarm fs list",
         Description:        "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
     },
     },
@@ -70,7 +67,7 @@ var fsCommand = cli.Command{
 func mount(cliContext *cli.Context) {
     args := cliContext.Args()
     if len(args) < 2 {
-        utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifestHash> <file name>")
+        utils.Fatalf("Usage: swarm fs mount <manifestHash> <file name>")
     }

     client, err := dialRPC(cliContext)
@@ -97,7 +94,7 @@ func unmount(cliContext *cli.Context) {
     args := cliContext.Args()

     if len(args) < 1 {
-        utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount path>")
+        utils.Fatalf("Usage: swarm fs unmount <mount path>")
     }
     client, err := dialRPC(cliContext)
     if err != nil {
@@ -145,20 +142,21 @@ func listMounts(cliContext *cli.Context) {
 }

 func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
-    var endpoint string
-
-    if ctx.IsSet(utils.IPCPathFlag.Name) {
-        endpoint = ctx.String(utils.IPCPathFlag.Name)
-    } else {
-        utils.Fatalf("swarm ipc endpoint not specified")
-    }
+    endpoint := getIPCEndpoint(ctx)
+    log.Info("IPC endpoint", "path", endpoint)
+    return rpc.Dial(endpoint)
+}

-    if endpoint == "" {
-        endpoint = node.DefaultIPCEndpoint(clientIdentifier)
-    } else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
+func getIPCEndpoint(ctx *cli.Context) string {
+    cfg := defaultNodeConfig
+    utils.SetNodeConfig(ctx, &cfg)
+
+    endpoint := cfg.IPCEndpoint()
+
+    if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
         // Backwards compatibility with geth < 1.5 which required
         // these prefixes.
         endpoint = endpoint[4:]
     }
-    return rpc.Dial(endpoint)
+    return endpoint
 }
@@ -20,6 +20,7 @@ package main

 import (
     "bytes"
+    "fmt"
     "io"
     "io/ioutil"
     "os"
@@ -28,6 +29,7 @@ import (
     "testing"
     "time"

+    "github.com/ethereum/go-ethereum/cmd/utils"
     "github.com/ethereum/go-ethereum/log"
 )

@@ -36,6 +38,26 @@ type testFile struct {
     content string
 }

+// TestCLISwarmFsDefaultIPCPath tests if the most basic fs command, i.e., list
+// can find and correctly connect to a running Swarm node on the default
+// IPCPath.
+func TestCLISwarmFsDefaultIPCPath(t *testing.T) {
+    cluster := newTestCluster(t, 1)
+    defer cluster.Shutdown()
+
+    handlingNode := cluster.Nodes[0]
+    list := runSwarm(t, []string{
+        "--datadir", handlingNode.Dir,
+        "fs",
+        "list",
+    }...)
+
+    list.WaitExit()
+    if list.Err != nil {
+        t.Fatal(list.Err)
+    }
+}
+
 // TestCLISwarmFs is a high-level test of swarmfs
 //
 // This test fails on travis for macOS as this executable exits with code 1
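Note: the new TestCLISwarmFsDefaultIPCPath depends on getIPCEndpoint (above) resolving the IPC socket from the node configuration when --ipcpath is not given. A rough sketch of that resolution using go-ethereum's node package; the concrete directory and socket name here are hypothetical:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/node"
    )

    func main() {
        // Start from a node.Config, point it at the data directory, and let
        // IPCEndpoint() resolve the platform-specific default path
        // (e.g. <datadir>/bzzd.ipc on Unix, a named pipe on Windows).
        cfg := node.DefaultConfig
        cfg.DataDir = "/tmp/swarm-datadir" // hypothetical --datadir value
        cfg.IPCPath = "bzzd.ipc"           // assumed socket name
        fmt.Println(cfg.IPCEndpoint())
    }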
@@ -59,9 +81,9 @@ func TestCLISwarmFs(t *testing.T) {
     log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

     mount := runSwarm(t, []string{
+        fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
         "fs",
         "mount",
-        "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
         mhash,
         mountPoint,
     }...)
@@ -101,9 +123,9 @@ func TestCLISwarmFs(t *testing.T) {
     log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

     unmount := runSwarm(t, []string{
+        fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
         "fs",
         "unmount",
-        "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
         mountPoint,
     }...)
     _, matches := unmount.ExpectRegexp(hashRegexp)
@@ -136,9 +158,9 @@ func TestCLISwarmFs(t *testing.T) {

     //remount, check files
     newMount := runSwarm(t, []string{
+        fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
         "fs",
         "mount",
-        "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
         hash, // the latest hash
         secondMountPoint,
     }...)
@@ -172,9 +194,9 @@ func TestCLISwarmFs(t *testing.T) {
     log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))

     unmountSec := runSwarm(t, []string{
+        fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
         "fs",
         "unmount",
-        "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
         secondMountPoint,
     }...)

@@ -2,11 +2,13 @@ package main

 import (
     "bytes"
+    "context"
     "crypto/md5"
     "fmt"
     "io"
     "io/ioutil"
     "net/http"
+    "net/http/httptrace"
     "os"
     "os/exec"
     "strings"
@@ -16,9 +18,13 @@ import (
     "github.com/ethereum/go-ethereum/common/hexutil"
     "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/log"
-    "github.com/ethereum/go-ethereum/swarm/multihash"
+    "github.com/ethereum/go-ethereum/metrics"
+    "github.com/ethereum/go-ethereum/swarm/api/client"
+    "github.com/ethereum/go-ethereum/swarm/spancontext"
     "github.com/ethereum/go-ethereum/swarm/storage/feed"
+    "github.com/ethereum/go-ethereum/swarm/testutil"
     colorable "github.com/mattn/go-colorable"
+    opentracing "github.com/opentracing/opentracing-go"
     "github.com/pborman/uuid"
     cli "gopkg.in/urfave/cli.v1"
 )
@@ -27,16 +33,34 @@ const (
     feedRandomDataLength = 8
 )

-// TODO: retrieve with manifest + extract repeating code
 func cliFeedUploadAndSync(c *cli.Context) error {
+    metrics.GetOrRegisterCounter("feed-and-sync", nil).Inc(1)
     log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))))
+
+    errc := make(chan error)
+    go func() {
+        errc <- feedUploadAndSync(c)
+    }()
+
+    select {
+    case err := <-errc:
+        if err != nil {
+            metrics.GetOrRegisterCounter("feed-and-sync.fail", nil).Inc(1)
+        }
+        return err
+    case <-time.After(time.Duration(timeout) * time.Second):
+        metrics.GetOrRegisterCounter("feed-and-sync.timeout", nil).Inc(1)
+        return fmt.Errorf("timeout after %v sec", timeout)
+    }
+}
+
+// TODO: retrieve with manifest + extract repeating code
+func feedUploadAndSync(c *cli.Context) error {
     defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())

-    generateEndpoints(scheme, cluster, from, to)
+    generateEndpoints(scheme, cluster, appName, from, to)

-    log.Info("generating and uploading MRUs to " + endpoints[0] + " and syncing")
+    log.Info("generating and uploading feeds to " + endpoints[0] + " and syncing")

     // create a random private key to sign updates with and derive the address
     pkFile, err := ioutil.TempFile("", "swarm-feed-smoke-test")
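Note: both smoke-test entry points are now thin wrappers that race the real worker against a deadline. The pattern in isolation, as a runnable sketch; the buffered channel here is a small deviation from the diff's unbuffered one, so the worker goroutine cannot leak after a timeout:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // runWithTimeout races a worker against a deadline, mirroring the
    // errc/select pattern the smoke tests adopt. Names are illustrative.
    func runWithTimeout(work func() error, timeout time.Duration) error {
        errc := make(chan error, 1) // buffered: the send succeeds even after timeout
        go func() {
            errc <- work()
        }()

        select {
        case err := <-errc:
            return err // worker finished first (err may be nil)
        case <-time.After(timeout):
            return errors.New("timeout")
        }
    }

    func main() {
        err := runWithTimeout(func() error {
            time.Sleep(2 * time.Second) // pretend to upload and sync
            return nil
        }, 1*time.Second)
        fmt.Println(err) // prints "timeout"
    }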
@@ -205,12 +229,12 @@ func cliFeedUploadAndSync(c *cli.Context) error {
     log.Info("all endpoints synced random data successfully")

     // upload test file
-    log.Info("uploading to " + endpoints[0] + " and syncing")
+    seed := int(time.Now().UnixNano() / 1e6)
+    log.Info("feed uploading to "+endpoints[0]+" and syncing", "seed", seed)

-    f, cleanup := generateRandomFile(filesize * 1000)
-    defer cleanup()
+    randomBytes := testutil.RandomBytes(seed, filesize*1000)

-    hash, err := upload(f, endpoints[0])
+    hash, err := upload(&randomBytes, endpoints[0])
     if err != nil {
         return err
     }
@@ -218,9 +242,8 @@ func cliFeedUploadAndSync(c *cli.Context) error {
     if err != nil {
         return err
     }
-    multihashHex := hexutil.Encode(multihash.ToMultihash(hashBytes))
-    fileHash, err := digest(f)
+    multihashHex := hexutil.Encode(hashBytes)
+    fileHash, err := digest(bytes.NewReader(randomBytes))
     if err != nil {
         return err
     }
@@ -286,14 +309,37 @@ func cliFeedUploadAndSync(c *cli.Context) error {
 }

 func fetchFeed(topic string, user string, endpoint string, original []byte, ruid string) error {
+    ctx, sp := spancontext.StartSpan(context.Background(), "feed-and-sync.fetch")
+    defer sp.Finish()
+
     log.Trace("sleeping", "ruid", ruid)
     time.Sleep(3 * time.Second)

     log.Trace("http get request (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user)
-    res, err := http.Get(endpoint + "/bzz-feed:/?topic=" + topic + "&user=" + user)
+
+    var tn time.Time
+    reqUri := endpoint + "/bzz-feed:/?topic=" + topic + "&user=" + user
+    req, _ := http.NewRequest("GET", reqUri, nil)
+
+    opentracing.GlobalTracer().Inject(
+        sp.Context(),
+        opentracing.HTTPHeaders,
+        opentracing.HTTPHeadersCarrier(req.Header))
+
+    trace := client.GetClientTrace("feed-and-sync - http get", "feed-and-sync", ruid, &tn)
+
+    req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
+    transport := http.DefaultTransport
+
+    //transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
+
+    tn = time.Now()
+    res, err := transport.RoundTrip(req)
     if err != nil {
+        log.Error(err.Error(), "ruid", ruid)
         return err
     }

     log.Trace("http get response (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user, "code", res.StatusCode, "len", res.ContentLength)

     if res.StatusCode != 200 {
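Note: fetchFeed (and fetch, later in this diff) now propagate an OpenTracing span through the request headers and hook net/http/httptrace for client-side timing. spancontext.StartSpan and client.GetClientTrace are swarm helpers not shown in this excerpt; the sketch below substitutes the plain global tracer and a hand-rolled ClientTrace to show the two mechanisms in isolation (the endpoint URL is made up):

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptrace"
        "time"

        opentracing "github.com/opentracing/opentracing-go"
    )

    func main() {
        sp := opentracing.GlobalTracer().StartSpan("fetch") // no-op tracer unless one is installed
        defer sp.Finish()

        req, _ := http.NewRequest("GET", "http://localhost:8500/bzz:/somehash/", nil) // hypothetical

        // Propagate the span context to the server via HTTP headers.
        opentracing.GlobalTracer().Inject(
            sp.Context(),
            opentracing.HTTPHeaders,
            opentracing.HTTPHeadersCarrier(req.Header))

        // Hook client-side connection events for timing.
        var start time.Time
        trace := &httptrace.ClientTrace{
            GotFirstResponseByte: func() {
                fmt.Println("time to first byte:", time.Since(start))
            },
        }
        req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

        start = time.Now()
        if res, err := http.DefaultTransport.RoundTrip(req); err == nil {
            res.Body.Close()
        }
    }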
@@ -17,23 +17,38 @@
 package main

 import (
+    "fmt"
     "os"
     "sort"

+    "github.com/ethereum/go-ethereum/cmd/utils"
+    gethmetrics "github.com/ethereum/go-ethereum/metrics"
+    "github.com/ethereum/go-ethereum/metrics/influxdb"
+    swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
+    "github.com/ethereum/go-ethereum/swarm/tracing"
+
     "github.com/ethereum/go-ethereum/log"

     cli "gopkg.in/urfave/cli.v1"
 )

+var (
+    gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
+)
+
 var (
     endpoints        []string
     includeLocalhost bool
     cluster          string
+    appName          string
     scheme           string
     filesize         int
+    syncDelay        int
     from             int
     to               int
     verbosity        int
+    timeout          int
+    single           bool
 )

 func main() {
@@ -49,6 +64,12 @@ func main() {
         Usage:       "cluster to point to (prod or a given namespace)",
         Destination: &cluster,
     },
+    cli.StringFlag{
+        Name:        "app",
+        Value:       "swarm",
+        Usage:       "application to point to (swarm or swarm-private)",
+        Destination: &appName,
+    },
     cli.IntFlag{
         Name:  "cluster-from",
         Value: 8501,
@@ -78,14 +99,42 @@ func main() {
         Usage:       "file size for generated random file in KB",
         Destination: &filesize,
     },
+    cli.IntFlag{
+        Name:        "sync-delay",
+        Value:       5,
+        Usage:       "duration of delay in seconds to wait for content to be synced",
+        Destination: &syncDelay,
+    },
     cli.IntFlag{
         Name:        "verbosity",
         Value:       1,
         Usage:       "verbosity",
         Destination: &verbosity,
     },
+    cli.IntFlag{
+        Name:        "timeout",
+        Value:       120,
+        Usage:       "timeout in seconds after which kill the process",
+        Destination: &timeout,
+    },
+    cli.BoolFlag{
+        Name:        "single",
+        Usage:       "whether to fetch content from a single node or from all nodes",
+        Destination: &single,
+    },
     }

+    app.Flags = append(app.Flags, []cli.Flag{
+        utils.MetricsEnabledFlag,
+        swarmmetrics.MetricsInfluxDBEndpointFlag,
+        swarmmetrics.MetricsInfluxDBDatabaseFlag,
+        swarmmetrics.MetricsInfluxDBUsernameFlag,
+        swarmmetrics.MetricsInfluxDBPasswordFlag,
+        swarmmetrics.MetricsInfluxDBHostTagFlag,
+    }...)
+
+    app.Flags = append(app.Flags, tracing.Flags...)
+
     app.Commands = []cli.Command{
     {
         Name: "upload_and_sync",
@@ -104,8 +153,38 @@ func main() {
     sort.Sort(cli.FlagsByName(app.Flags))
     sort.Sort(cli.CommandsByName(app.Commands))

+    app.Before = func(ctx *cli.Context) error {
+        tracing.Setup(ctx)
+        return nil
+    }
+
+    app.After = func(ctx *cli.Context) error {
+        return emitMetrics(ctx)
+    }
+
     err := app.Run(os.Args)
     if err != nil {
         log.Error(err.Error())
+
+        os.Exit(1)
     }
 }

+func emitMetrics(ctx *cli.Context) error {
+    if gethmetrics.Enabled {
+        var (
+            endpoint = ctx.GlobalString(swarmmetrics.MetricsInfluxDBEndpointFlag.Name)
+            database = ctx.GlobalString(swarmmetrics.MetricsInfluxDBDatabaseFlag.Name)
+            username = ctx.GlobalString(swarmmetrics.MetricsInfluxDBUsernameFlag.Name)
+            password = ctx.GlobalString(swarmmetrics.MetricsInfluxDBPasswordFlag.Name)
+            hosttag  = ctx.GlobalString(swarmmetrics.MetricsInfluxDBHostTagFlag.Name)
+        )
+        return influxdb.InfluxDBWithTagsOnce(gethmetrics.DefaultRegistry, endpoint, database, username, password, "swarm-smoke.", map[string]string{
+            "host":     hosttag,
+            "version":  gitCommit,
+            "filesize": fmt.Sprintf("%v", filesize),
+        })
+    }
+
+    return nil
+}
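Note: the smoke tests now report through go-ethereum's metrics registry, flushed once to InfluxDB by emitMetrics via app.After. A minimal sketch of the registry API used by the counters and meters in these hunks; the InfluxDB flush itself is omitted since its parameters come from the flags above:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/metrics"
    )

    func main() {
        // Counters and meters are created on first use and live in the
        // default registry, which emitMetrics later flushes to InfluxDB.
        metrics.Enabled = true
        metrics.GetOrRegisterCounter("upload-and-sync", nil).Inc(1)
        metrics.GetOrRegisterMeter("upload-and-sync.single.fetch-time", nil).Mark(42) // hypothetical duration in ns

        metrics.DefaultRegistry.Each(func(name string, i interface{}) {
            fmt.Println("registered metric:", name)
        })
    }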
@@ -18,35 +18,40 @@ package main

 import (
     "bytes"
+    "context"
     "crypto/md5"
     crand "crypto/rand"
-    "crypto/tls"
     "errors"
     "fmt"
     "io"
     "io/ioutil"
+    "math/rand"
     "net/http"
+    "net/http/httptrace"
     "os"
-    "os/exec"
-    "strings"
     "sync"
     "time"

     "github.com/ethereum/go-ethereum/log"
-    colorable "github.com/mattn/go-colorable"
+    "github.com/ethereum/go-ethereum/metrics"
+    "github.com/ethereum/go-ethereum/swarm/api"
+    "github.com/ethereum/go-ethereum/swarm/api/client"
+    "github.com/ethereum/go-ethereum/swarm/spancontext"
+    "github.com/ethereum/go-ethereum/swarm/testutil"
+    opentracing "github.com/opentracing/opentracing-go"
     "github.com/pborman/uuid"

     cli "gopkg.in/urfave/cli.v1"
 )

-func generateEndpoints(scheme string, cluster string, from int, to int) {
+func generateEndpoints(scheme string, cluster string, app string, from int, to int) {
     if cluster == "prod" {
-        for port := from; port <= to; port++ {
+        for port := from; port < to; port++ {
             endpoints = append(endpoints, fmt.Sprintf("%s://%v.swarm-gateways.net", scheme, port))
         }
     } else {
-        for port := from; port <= to; port++ {
-            endpoints = append(endpoints, fmt.Sprintf("%s://swarm-%v-%s.stg.swarm-gateways.net", scheme, port, cluster))
+        for port := from; port < to; port++ {
+            endpoints = append(endpoints, fmt.Sprintf("%s://%s-%v-%s.stg.swarm-gateways.net", scheme, app, port, cluster))
         }
     }

@@ -57,24 +62,50 @@ func generateEndpoints(scheme string, cluster string, from int, to int) {

 func cliUploadAndSync(c *cli.Context) error {
     log.PrintOrigins(true)
-    log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
+    log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))

-    defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())
+    metrics.GetOrRegisterCounter("upload-and-sync", nil).Inc(1)

-    generateEndpoints(scheme, cluster, from, to)
-
-    log.Info("uploading to " + endpoints[0] + " and syncing")
-
-    f, cleanup := generateRandomFile(filesize * 1000)
-    defer cleanup()
-
-    hash, err := upload(f, endpoints[0])
+    errc := make(chan error)
+    go func() {
+        errc <- uploadAndSync(c)
+    }()
+
+    select {
+    case err := <-errc:
+        if err != nil {
+            metrics.GetOrRegisterCounter("upload-and-sync.fail", nil).Inc(1)
+        }
+        return err
+    case <-time.After(time.Duration(timeout) * time.Second):
+        metrics.GetOrRegisterCounter("upload-and-sync.timeout", nil).Inc(1)
+        return fmt.Errorf("timeout after %v sec", timeout)
+    }
+}
+
+func uploadAndSync(c *cli.Context) error {
+    defer func(now time.Time) {
+        totalTime := time.Since(now)
+
+        log.Info("total time", "time", totalTime, "kb", filesize)
+        metrics.GetOrRegisterCounter("upload-and-sync.total-time", nil).Inc(int64(totalTime))
+    }(time.Now())
+
+    generateEndpoints(scheme, cluster, appName, from, to)
+    seed := int(time.Now().UnixNano() / 1e6)
+    log.Info("uploading to "+endpoints[0]+" and syncing", "seed", seed)
+
+    randomBytes := testutil.RandomBytes(seed, filesize*1000)
+
+    t1 := time.Now()
+    hash, err := upload(&randomBytes, endpoints[0])
     if err != nil {
         log.Error(err.Error())
         return err
     }
+    metrics.GetOrRegisterCounter("upload-and-sync.upload-time", nil).Inc(int64(time.Since(t1)))

-    fhash, err := digest(f)
+    fhash, err := digest(bytes.NewReader(randomBytes))
     if err != nil {
         log.Error(err.Error())
         return err
@@ -82,23 +113,47 @@ func cliUploadAndSync(c *cli.Context) error {

     log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))

-    time.Sleep(3 * time.Second)
+    time.Sleep(time.Duration(syncDelay) * time.Second)

     wg := sync.WaitGroup{}
-    for _, endpoint := range endpoints {
+    if single {
+        rand.Seed(time.Now().UTC().UnixNano())
+        randIndex := 1 + rand.Intn(len(endpoints)-1)
         ruid := uuid.New()[:8]
         wg.Add(1)
         go func(endpoint string, ruid string) {
             for {
+                start := time.Now()
                 err := fetch(hash, endpoint, fhash, ruid)
+                fetchTime := time.Since(start)
                 if err != nil {
                     continue
                 }
+
+                metrics.GetOrRegisterMeter("upload-and-sync.single.fetch-time", nil).Mark(int64(fetchTime))
                 wg.Done()
                 return
             }
-        }(endpoint, ruid)
+        }(endpoints[randIndex], ruid)
+    } else {
+        for _, endpoint := range endpoints {
+            ruid := uuid.New()[:8]
+            wg.Add(1)
+            go func(endpoint string, ruid string) {
+                for {
+                    start := time.Now()
+                    err := fetch(hash, endpoint, fhash, ruid)
+                    fetchTime := time.Since(start)
+                    if err != nil {
+                        continue
+                    }
+
+                    metrics.GetOrRegisterMeter("upload-and-sync.each.fetch-time", nil).Mark(int64(fetchTime))
+                    wg.Done()
+                    return
+                }
+            }(endpoint, ruid)
+        }
     }
     wg.Wait()
     log.Info("all endpoints synced random file successfully")
@@ -108,16 +163,33 @@ func cliUploadAndSync(c *cli.Context) error {

 // fetch is getting the requested `hash` from the `endpoint` and compares it with the `original` file
 func fetch(hash string, endpoint string, original []byte, ruid string) error {
+    ctx, sp := spancontext.StartSpan(context.Background(), "upload-and-sync.fetch")
+    defer sp.Finish()
+
     log.Trace("sleeping", "ruid", ruid)
     time.Sleep(3 * time.Second)

     log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
-    client := &http.Client{Transport: &http.Transport{
-        TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
-    }}
-    res, err := client.Get(endpoint + "/bzz:/" + hash + "/")
+
+    var tn time.Time
+    reqUri := endpoint + "/bzz:/" + hash + "/"
+    req, _ := http.NewRequest("GET", reqUri, nil)
+
+    opentracing.GlobalTracer().Inject(
+        sp.Context(),
+        opentracing.HTTPHeaders,
+        opentracing.HTTPHeadersCarrier(req.Header))
+
+    trace := client.GetClientTrace("upload-and-sync - http get", "upload-and-sync", ruid, &tn)
+
+    req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
+    transport := http.DefaultTransport
+
+    //transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
+
+    tn = time.Now()
+    res, err := transport.RoundTrip(req)
     if err != nil {
-        log.Warn(err.Error(), "ruid", ruid)
+        log.Error(err.Error(), "ruid", ruid)
         return err
     }
     log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)
@@ -148,16 +220,19 @@ func fetch(hash string, endpoint string, original []byte, ruid string) error {
 }

 // upload is uploading a file `f` to `endpoint` via the `swarm up` cmd
-func upload(f *os.File, endpoint string) (string, error) {
-    var out bytes.Buffer
-    cmd := exec.Command("swarm", "--bzzapi", endpoint, "up", f.Name())
-    cmd.Stdout = &out
-    err := cmd.Run()
-    if err != nil {
-        return "", err
+func upload(dataBytes *[]byte, endpoint string) (string, error) {
+    swarm := client.NewClient(endpoint)
+    f := &client.File{
+        ReadCloser: ioutil.NopCloser(bytes.NewReader(*dataBytes)),
+        ManifestEntry: api.ManifestEntry{
+            ContentType: "text/plain",
+            Mode:        0660,
+            Size:        int64(len(*dataBytes)),
+        },
     }
-    hash := strings.TrimRight(out.String(), "\r\n")
-    return hash, nil
+
+    // upload data to bzz:// and retrieve the content-addressed manifest hash, hex-encoded.
+    return swarm.Upload(f, "", false)
 }

 func digest(r io.Reader) ([]byte, error) {
|
|||||||
}
|
}
|
||||||
return b, nil
|
return b, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// generateRandomFile is creating a temporary file with the requested byte size
|
|
||||||
func generateRandomFile(size int) (f *os.File, teardown func()) {
|
|
||||||
// create a tmp file
|
|
||||||
tmp, err := ioutil.TempFile("", "swarm-test")
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// callback for tmp file cleanup
|
|
||||||
teardown = func() {
|
|
||||||
tmp.Close()
|
|
||||||
os.Remove(tmp.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := make([]byte, size)
|
|
||||||
_, err = crand.Read(buf)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
ioutil.WriteFile(tmp.Name(), buf, 0755)
|
|
||||||
|
|
||||||
return tmp, teardown
|
|
||||||
}
|
|
||||||
|
@ -142,6 +142,10 @@ var (
|
|||||||
Name: "rinkeby",
|
Name: "rinkeby",
|
||||||
Usage: "Rinkeby network: pre-configured proof-of-authority test network",
|
Usage: "Rinkeby network: pre-configured proof-of-authority test network",
|
||||||
}
|
}
|
||||||
|
ConstantinopleOverrideFlag = cli.Uint64Flag{
|
||||||
|
Name: "override.constantinople",
|
||||||
|
Usage: "Manually specify constantinople fork-block, overriding the bundled setting",
|
||||||
|
}
|
||||||
DeveloperFlag = cli.BoolFlag{
|
DeveloperFlag = cli.BoolFlag{
|
||||||
Name: "dev",
|
Name: "dev",
|
||||||
Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
|
Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
|
||||||
@ -184,6 +188,10 @@ var (
|
|||||||
Name: "lightkdf",
|
Name: "lightkdf",
|
||||||
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
|
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
|
||||||
}
|
}
|
||||||
|
WhitelistFlag = cli.StringFlag{
|
||||||
|
Name: "whitelist",
|
||||||
|
Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>)",
|
||||||
|
}
|
||||||
// Dashboard settings
|
// Dashboard settings
|
||||||
DashboardEnabledFlag = cli.BoolFlag{
|
DashboardEnabledFlag = cli.BoolFlag{
|
||||||
Name: metrics.DashboardEnabledFlag,
|
Name: metrics.DashboardEnabledFlag,
|
||||||
@ -843,17 +851,12 @@ func setIPC(ctx *cli.Context, cfg *node.Config) {
|
|||||||
// makeDatabaseHandles raises out the number of allowed file handles per process
|
// makeDatabaseHandles raises out the number of allowed file handles per process
|
||||||
// for Geth and returns half of the allowance to assign to the database.
|
// for Geth and returns half of the allowance to assign to the database.
|
||||||
func makeDatabaseHandles() int {
|
func makeDatabaseHandles() int {
|
||||||
limit, err := fdlimit.Current()
|
limit, err := fdlimit.Maximum()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Fatalf("Failed to retrieve file descriptor allowance: %v", err)
|
Fatalf("Failed to retrieve file descriptor allowance: %v", err)
|
||||||
}
|
}
|
||||||
if limit < 2048 {
|
if err := fdlimit.Raise(uint64(limit)); err != nil {
|
||||||
if err := fdlimit.Raise(2048); err != nil {
|
Fatalf("Failed to raise file descriptor allowance: %v", err)
|
||||||
Fatalf("Failed to raise file descriptor allowance: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if limit > 2048 { // cap database file descriptors even if more is available
|
|
||||||
limit = 2048
|
|
||||||
}
|
}
|
||||||
return limit / 2 // Leave half for networking and other stuff
|
return limit / 2 // Leave half for networking and other stuff
|
||||||
}
|
}
|
||||||
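Note: makeDatabaseHandles now queries the hard ceiling (fdlimit.Maximum) and raises the soft limit all the way to it, instead of capping everything at 2048. On Unix these helpers essentially wrap getrlimit/setrlimit on RLIMIT_NOFILE; the following is a hedged, Unix-only illustration of the underlying calls, not go-ethereum's actual fdlimit code:

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        var limit syscall.Rlimit
        if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
            panic(err)
        }
        fmt.Println("current soft limit:", limit.Cur, "hard ceiling:", limit.Max)

        // Raise the soft limit to the hard ceiling, roughly what
        // fdlimit.Raise(fdlimit.Maximum()) achieves on Unix.
        limit.Cur = limit.Max
        if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
            panic(err)
        }
        fmt.Println("database would get half:", limit.Cur/2)
    }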
@@ -997,16 +1000,7 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
     setWS(ctx, cfg)
     setNodeUserIdent(ctx, cfg)

-    switch {
-    case ctx.GlobalIsSet(DataDirFlag.Name):
-        cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
-    case ctx.GlobalBool(DeveloperFlag.Name):
-        cfg.DataDir = "" // unless explicitly requested, use memory databases
-    case ctx.GlobalBool(TestnetFlag.Name):
-        cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
-    case ctx.GlobalBool(RinkebyFlag.Name):
-        cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
-    }
+    setDataDir(ctx, cfg)

     if ctx.GlobalIsSet(KeyStoreDirFlag.Name) {
         cfg.KeyStoreDir = ctx.GlobalString(KeyStoreDirFlag.Name)
@@ -1019,6 +1013,19 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
     }
 }

+func setDataDir(ctx *cli.Context, cfg *node.Config) {
+    switch {
+    case ctx.GlobalIsSet(DataDirFlag.Name):
+        cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
+    case ctx.GlobalBool(DeveloperFlag.Name):
+        cfg.DataDir = "" // unless explicitly requested, use memory databases
+    case ctx.GlobalBool(TestnetFlag.Name):
+        cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
+    case ctx.GlobalBool(RinkebyFlag.Name):
+        cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
+    }
+}
+
 func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
     if ctx.GlobalIsSet(GpoBlocksFlag.Name) {
         cfg.Blocks = ctx.GlobalInt(GpoBlocksFlag.Name)
@@ -1092,6 +1099,29 @@ func setEthash(ctx *cli.Context, cfg *eth.Config) {
     }
 }

+func setWhitelist(ctx *cli.Context, cfg *eth.Config) {
+    whitelist := ctx.GlobalString(WhitelistFlag.Name)
+    if whitelist == "" {
+        return
+    }
+    cfg.Whitelist = make(map[uint64]common.Hash)
+    for _, entry := range strings.Split(whitelist, ",") {
+        parts := strings.Split(entry, "=")
+        if len(parts) != 2 {
+            Fatalf("Invalid whitelist entry: %s", entry)
+        }
+        number, err := strconv.ParseUint(parts[0], 0, 64)
+        if err != nil {
+            Fatalf("Invalid whitelist block number %s: %v", parts[0], err)
+        }
+        var hash common.Hash
+        if err = hash.UnmarshalText([]byte(parts[1])); err != nil {
+            Fatalf("Invalid whitelist hash %s: %v", parts[1], err)
+        }
+        cfg.Whitelist[number] = hash
+    }
+}
+
 // checkExclusive verifies that only a single instance of the provided flags was
 // set by the user. Each flag might optionally be followed by a string type to
 // specialize it further.
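Note: --whitelist takes comma-separated <number>=<hash> pairs, enforced during sync. A stdlib-only sketch of the same parsing, with a plain [32]byte standing in for common.Hash and errors returned instead of Fatalf:

    package main

    import (
        "encoding/hex"
        "fmt"
        "strconv"
        "strings"
    )

    func parseWhitelist(s string) (map[uint64][32]byte, error) {
        whitelist := make(map[uint64][32]byte)
        for _, entry := range strings.Split(s, ",") {
            parts := strings.Split(entry, "=")
            if len(parts) != 2 {
                return nil, fmt.Errorf("invalid entry: %s", entry)
            }
            // Base 0 accepts decimal, 0x-hex and octal block numbers.
            number, err := strconv.ParseUint(parts[0], 0, 64)
            if err != nil {
                return nil, fmt.Errorf("invalid block number %s: %v", parts[0], err)
            }
            raw, err := hex.DecodeString(strings.TrimPrefix(parts[1], "0x"))
            if err != nil || len(raw) != 32 {
                return nil, fmt.Errorf("invalid hash %s", parts[1])
            }
            var hash [32]byte
            copy(hash[:], raw)
            whitelist[number] = hash
        }
        return whitelist, nil
    }

    func main() {
        // Hypothetical mapping: block 4370000 must carry this made-up hash.
        wl, err := parseWhitelist("4370000=0x" + strings.Repeat("ab", 32))
        fmt.Println(wl, err)
    }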
@ -1157,6 +1187,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
|||||||
setGPO(ctx, &cfg.GPO)
|
setGPO(ctx, &cfg.GPO)
|
||||||
setTxPool(ctx, &cfg.TxPool)
|
setTxPool(ctx, &cfg.TxPool)
|
||||||
setEthash(ctx, cfg)
|
setEthash(ctx, cfg)
|
||||||
|
setWhitelist(ctx, cfg)
|
||||||
|
|
||||||
if ctx.GlobalIsSet(SyncModeFlag.Name) {
|
if ctx.GlobalIsSet(SyncModeFlag.Name) {
|
||||||
cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
|
cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
|
||||||
@ -1170,7 +1201,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
|||||||
if ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
if ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||||
cfg.NetworkId = ctx.GlobalUint64(NetworkIdFlag.Name)
|
cfg.NetworkId = ctx.GlobalUint64(NetworkIdFlag.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) {
|
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) {
|
||||||
cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
|
cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
|
||||||
}
|
}
|
||||||
@ -1423,7 +1453,6 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
|
|||||||
func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chainDb ethdb.Database) {
|
func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chainDb ethdb.Database) {
|
||||||
var err error
|
var err error
|
||||||
chainDb = MakeChainDatabase(ctx, stack)
|
chainDb = MakeChainDatabase(ctx, stack)
|
||||||
|
|
||||||
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
|
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Fatalf("%v", err)
|
Fatalf("%v", err)
|
||||||
|
@ -27,7 +27,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
"golang.org/x/crypto/sha3"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Lengths of hashes and addresses in bytes.
|
// Lengths of hashes and addresses in bytes.
|
||||||
@ -196,7 +196,7 @@ func (a Address) Hash() Hash { return BytesToHash(a[:]) }
|
|||||||
// Hex returns an EIP55-compliant hex string representation of the address.
|
// Hex returns an EIP55-compliant hex string representation of the address.
|
||||||
func (a Address) Hex() string {
|
func (a Address) Hex() string {
|
||||||
unchecksummed := hex.EncodeToString(a[:])
|
unchecksummed := hex.EncodeToString(a[:])
|
||||||
sha := sha3.NewKeccak256()
|
sha := sha3.NewLegacyKeccak256()
|
||||||
sha.Write([]byte(unchecksummed))
|
sha.Write([]byte(unchecksummed))
|
||||||
hash := sha.Sum(nil)
|
hash := sha.Sum(nil)
|
||||||
|
|
||||||
|
@ -33,13 +33,13 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
lru "github.com/hashicorp/golang-lru"
|
lru "github.com/hashicorp/golang-lru"
|
||||||
|
"golang.org/x/crypto/sha3"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -148,7 +148,7 @@ type SignerFn func(accounts.Account, []byte) ([]byte, error)
|
|||||||
// panics. This is done to avoid accidentally using both forms (signature present
|
// panics. This is done to avoid accidentally using both forms (signature present
|
||||||
// or not), which could be abused to produce different hashes for the same header.
|
// or not), which could be abused to produce different hashes for the same header.
|
||||||
func sigHash(header *types.Header) (hash common.Hash) {
|
func sigHash(header *types.Header) (hash common.Hash) {
|
||||||
hasher := sha3.NewKeccak256()
|
hasher := sha3.NewLegacyKeccak256()
|
||||||
|
|
||||||
rlp.Encode(hasher, []interface{}{
|
rlp.Encode(hasher, []interface{}{
|
||||||
header.ParentHash,
|
header.ParentHash,
|
||||||
|
@ -30,8 +30,8 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/bitutil"
|
"github.com/ethereum/go-ethereum/common/bitutil"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"golang.org/x/crypto/sha3"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -123,7 +123,7 @@ func seedHash(block uint64) []byte {
|
|||||||
if block < epochLength {
|
if block < epochLength {
|
||||||
return seed
|
return seed
|
||||||
}
|
}
|
||||||
keccak256 := makeHasher(sha3.NewKeccak256())
|
keccak256 := makeHasher(sha3.NewLegacyKeccak256())
|
||||||
for i := 0; i < int(block/epochLength); i++ {
|
for i := 0; i < int(block/epochLength); i++ {
|
||||||
keccak256(seed, seed)
|
keccak256(seed, seed)
|
||||||
}
|
}
|
||||||
@ -177,7 +177,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
// Create a hasher to reuse between invocations
|
// Create a hasher to reuse between invocations
|
||||||
keccak512 := makeHasher(sha3.NewKeccak512())
|
keccak512 := makeHasher(sha3.NewLegacyKeccak512())
|
||||||
|
|
||||||
// Sequentially produce the initial dataset
|
// Sequentially produce the initial dataset
|
||||||
keccak512(cache, seed)
|
keccak512(cache, seed)
|
||||||
@ -301,7 +301,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
|
|||||||
defer pend.Done()
|
defer pend.Done()
|
||||||
|
|
||||||
// Create a hasher to reuse between invocations
|
// Create a hasher to reuse between invocations
|
||||||
keccak512 := makeHasher(sha3.NewKeccak512())
|
keccak512 := makeHasher(sha3.NewLegacyKeccak512())
|
||||||
|
|
||||||
// Calculate the data segment this thread should generate
|
// Calculate the data segment this thread should generate
|
||||||
batch := uint32((size + hashBytes*uint64(threads) - 1) / (hashBytes * uint64(threads)))
|
batch := uint32((size + hashBytes*uint64(threads) - 1) / (hashBytes * uint64(threads)))
|
||||||
@ -375,7 +375,7 @@ func hashimoto(hash []byte, nonce uint64, size uint64, lookup func(index uint32)
|
|||||||
// in-memory cache) in order to produce our final value for a particular header
|
// in-memory cache) in order to produce our final value for a particular header
|
||||||
// hash and nonce.
|
// hash and nonce.
|
||||||
func hashimotoLight(size uint64, cache []uint32, hash []byte, nonce uint64) ([]byte, []byte) {
|
func hashimotoLight(size uint64, cache []uint32, hash []byte, nonce uint64) ([]byte, []byte) {
|
||||||
keccak512 := makeHasher(sha3.NewKeccak512())
|
keccak512 := makeHasher(sha3.NewLegacyKeccak512())
|
||||||
|
|
||||||
lookup := func(index uint32) []uint32 {
|
lookup := func(index uint32) []uint32 {
|
||||||
rawData := generateDatasetItem(cache, index, keccak512)
|
rawData := generateDatasetItem(cache, index, keccak512)
|
||||||
|
@@ -31,9 +31,9 @@ import (
 	"github.com/ethereum/go-ethereum/consensus/misc"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/crypto/sha3"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
+	"golang.org/x/crypto/sha3"
 )

 // Ethash proof-of-work protocol constants.
@@ -575,7 +575,7 @@ func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header

 // SealHash returns the hash of a block prior to it being sealed.
 func (ethash *Ethash) SealHash(header *types.Header) (hash common.Hash) {
-	hasher := sha3.NewKeccak256()
+	hasher := sha3.NewLegacyKeccak256()

 	rlp.Encode(hasher, []interface{}{
 		header.ParentHash,
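SealHash streams the header fields through the hasher rather than buffering an encoding first. A hedged sketch of that pattern (not from the PR), with placeholder values standing in for the real header fields:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
	"golang.org/x/crypto/sha3"
)

func main() {
	hasher := sha3.NewLegacyKeccak256()
	// rlp.Encode writes straight into any io.Writer, so the fields are hashed
	// as they are encoded, with no intermediate buffer.
	rlp.Encode(hasher, []interface{}{"parent-hash", uint64(42)})

	var hash [32]byte
	hasher.Sum(hash[:0])
	fmt.Printf("%x\n", hash)
}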
@@ -47,7 +47,10 @@ import (
 )

 var (
 	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
+	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
+	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
+	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)

 	ErrNoGenesis = errors.New("Genesis not found in chain")
 )
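The three new timers split the old single insert metric into validation, execution and write phases. A minimal sketch of how such registered timers are fed, assuming the go-ethereum metrics package; the metric name "example/phase" is made up:

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

// nil selects the default registry.
var phaseTimer = metrics.NewRegisteredTimer("example/phase", nil)

func main() {
	start := time.Now()
	time.Sleep(10 * time.Millisecond) // stand-in for real work
	phaseTimer.UpdateSince(start)     // record the whole phase

	checkpoint := time.Now()
	time.Sleep(5 * time.Millisecond)
	phaseTimer.Update(time.Since(checkpoint)) // record one segment as a delta
}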
@@ -62,7 +65,7 @@ const (
 	triesInMemory = 128

 	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
-	BlockChainVersion = 3
+	BlockChainVersion uint64 = 3
 )

 // CacheConfig contains the configuration values for the trie caching/pruning
@@ -207,6 +210,11 @@ func (bc *BlockChain) getProcInterrupt() bool {
 	return atomic.LoadInt32(&bc.procInterrupt) == 1
 }

+// GetVMConfig returns the block chain VM config.
+func (bc *BlockChain) GetVMConfig() *vm.Config {
+	return &bc.vmConfig
+}
+
 // loadLastState loads the last known chain state from the database. This method
 // assumes that the chain manager mutex is held.
 func (bc *BlockChain) loadLastState() error {
@@ -445,7 +453,11 @@ func (bc *BlockChain) repair(head **types.Block) error {
 			return nil
 		}
 		// Otherwise rewind one block and recheck state availability there
-		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
+		block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
+		if block == nil {
+			return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
+		}
+		(*head) = block
 	}
 }

@@ -1036,6 +1048,18 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
 	return status, nil
 }

+// addFutureBlock checks if the block is within the max allowed window to get
+// accepted for future processing, and returns an error if the block is too far
+// ahead and was not added.
+func (bc *BlockChain) addFutureBlock(block *types.Block) error {
+	max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
+	if block.Time().Cmp(max) > 0 {
+		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
+	}
+	bc.futureBlocks.Add(block.Hash(), block)
+	return nil
+}
+
 // InsertChain attempts to insert the given batch of blocks in to the canonical
 // chain or, otherwise, create a fork. If an error is returned it will return
 // the index number of the failing block as well an error describing what went
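addFutureBlock centralises the future-block window check that was previously inlined in the import loop. A standalone sketch of the same check; the 30-second constant mirrors geth's maxTimeFutureBlocks but is hard-coded here as an assumption:

package main

import (
	"fmt"
	"math/big"
	"time"
)

const maxTimeFutureBlocks = 30 // seconds; assumed to match geth's constant

func acceptFuture(blockTime *big.Int) error {
	max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
	if blockTime.Cmp(max) > 0 {
		return fmt.Errorf("future block timestamp %v > allowed %v", blockTime, max)
	}
	return nil // caller would queue the block in the futureBlocks cache
}

func main() {
	fmt.Println(acceptFuture(big.NewInt(time.Now().Unix() + 5)))   // accepted: <nil>
	fmt.Println(acceptFuture(big.NewInt(time.Now().Unix() + 120))) // rejected
}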
@@ -1043,18 +1067,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
 //
 // After insertion is done, all accumulated events will be fired.
 func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
-	n, events, logs, err := bc.insertChain(chain)
-	bc.PostChainEvents(events, logs)
-	return n, err
-}
-
-// insertChain will execute the actual chain insertion and event aggregation. The
-// only reason this method exists as a separate one is to make locking cleaner
-// with deferred statements.
-func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
 	// Sanity check that we have something meaningful to import
 	if len(chain) == 0 {
-		return 0, nil, nil, nil
+		return 0, nil
 	}
 	// Do a sanity check that the provided chain is actually ordered and linked
 	for i := 1; i < len(chain); i++ {
@@ -1063,16 +1078,36 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
 			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
 				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())

-			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
+			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
 				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
 		}
 	}
 	// Pre-checks passed, start the full block imports
 	bc.wg.Add(1)
-	defer bc.wg.Done()
-
 	bc.chainmu.Lock()
-	defer bc.chainmu.Unlock()
+	n, events, logs, err := bc.insertChain(chain, true)
+	bc.chainmu.Unlock()
+	bc.wg.Done()
+
+	bc.PostChainEvents(events, logs)
+	return n, err
+}
+
+// insertChain is the internal implementation of insertChain, which assumes that
+// 1) chains are contiguous, and 2) The chain mutex is held.
+//
+// This method is split out so that import batches that require re-injecting
+// historical blocks can do so without releasing the lock, which could lead to
+// racey behaviour. If a sidechain import is in progress, and the historic state
+// is imported, but then new canon-head is added before the actual sidechain
+// completes, then the historic state could be pruned again
+func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
+	// If the chain is terminating, don't even bother starting up
+	if atomic.LoadInt32(&bc.procInterrupt) == 1 {
+		return 0, nil, nil, nil
+	}
+	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
+	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

 	// A queued approach to delivering events. This is generally
 	// faster than direct delivery and requires much less mutex
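InsertChain is now a thin wrapper that takes the lock and delegates to the internal insertChain, which can recurse without re-locking. A toy sketch of that split (mine, with stand-in types, not the PR's code):

package main

import (
	"fmt"
	"sync"
)

type chain struct {
	mu sync.Mutex
	wg sync.WaitGroup
}

// Insert owns the mutex and the post-insert event delivery.
func (c *chain) Insert(items []int) int {
	c.wg.Add(1)
	c.mu.Lock()
	n := c.insert(items) // internal implementation runs with the lock held
	c.mu.Unlock()
	c.wg.Done()
	// events would be posted here, outside the lock
	return n
}

// insert assumes c.mu is held, so it may call itself for nested batches
// without releasing the lock in between.
func (c *chain) insert(items []int) int { return len(items) }

func main() {
	c := &chain{}
	fmt.Println(c.Insert([]int{1, 2, 3})) // 3
}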
@@ -1089,16 +1124,56 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty

 	for i, block := range chain {
 		headers[i] = block.Header()
-		seals[i] = true
+		seals[i] = verifySeals
 	}
 	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
 	defer close(abort)

-	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
-	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
+	// Peek the error for the first block to decide the directing import logic
+	it := newInsertIterator(chain, results, bc.Validator())

-	// Iterate over the blocks and insert when the verifier permits
-	for i, block := range chain {
+	block, err := it.next()
+	switch {
+	// First block is pruned, insert as sidechain and reorg only if TD grows enough
+	case err == consensus.ErrPrunedAncestor:
+		return bc.insertSidechain(it)
+
+	// First block is future, shove it (and all children) to the future queue (unknown ancestor)
+	case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
+		for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
+			if err := bc.addFutureBlock(block); err != nil {
+				return it.index, events, coalescedLogs, err
+			}
+			block, err = it.next()
+		}
+		stats.queued += it.processed()
+		stats.ignored += it.remaining()
+
+		// If there are any still remaining, mark as ignored
+		return it.index, events, coalescedLogs, err
+
+	// First block (and state) is known
+	//   1. We did a roll-back, and should now do a re-import
+	//   2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot
+	//      from the canonical chain, which has not been verified.
+	case err == ErrKnownBlock:
+		// Skip all known blocks that behind us
+		current := bc.CurrentBlock().NumberU64()
+
+		for block != nil && err == ErrKnownBlock && current >= block.NumberU64() {
+			stats.ignored++
+			block, err = it.next()
+		}
+		// Falls through to the block import
+
+	// Some other error occurred, abort
+	case err != nil:
+		stats.ignored += len(it.chain)
+		bc.reportBlock(block, nil, err)
+		return it.index, events, coalescedLogs, err
+	}
+	// No validation errors for the first block (or chain prefix skipped)
+	for ; block != nil && err == nil; block, err = it.next() {
 		// If the chain is terminating, stop processing blocks
 		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
 			log.Debug("Premature abort during blocks processing")
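The import path still leans on the engine's VerifyHeaders contract: one error per header arrives on a channel, and closing the abort channel (deferred above, exactly as in the diff) cancels the remaining work. A self-contained sketch of that contract with a dummy verifier, not geth's implementation:

package main

import "fmt"

func verifyAll(headers []string) error {
	abort := make(chan struct{})
	results := make(chan error, len(headers))
	go func() { // stand-in for engine.VerifyHeaders
		for range headers {
			select {
			case results <- nil: // deliver one verdict per header, in order
			case <-abort:
				return
			}
		}
	}()
	defer close(abort) // an early return below cancels the verifier

	for range headers {
		if err := <-results; err != nil {
			return err
		}
	}
	return nil
}

func main() {
	fmt.Println(verifyAll([]string{"h1", "h2", "h3"})) // <nil>
}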
@@ -1107,115 +1182,53 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
 		// If the header is a banned one, straight out abort
 		if BadHashes[block.Hash()] {
 			bc.reportBlock(block, nil, ErrBlacklistedHash)
-			return i, events, coalescedLogs, ErrBlacklistedHash
+			return it.index, events, coalescedLogs, ErrBlacklistedHash
 		}
-		// Wait for the block's verification to complete
-		bstart := time.Now()
-
-		err := <-results
-		if err == nil {
-			err = bc.Validator().ValidateBody(block)
-		}
-		switch {
-		case err == ErrKnownBlock:
-			// Block and state both already known. However if the current block is below
-			// this number we did a rollback and we should reimport it nonetheless.
-			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
-				stats.ignored++
-				continue
-			}
-
-		case err == consensus.ErrFutureBlock:
-			// Allow up to MaxFuture second in the future blocks. If this limit is exceeded
-			// the chain is discarded and processed at a later time if given.
-			max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
-			if block.Time().Cmp(max) > 0 {
-				return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
-			}
-			bc.futureBlocks.Add(block.Hash(), block)
-			stats.queued++
-			continue
-
-		case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
-			bc.futureBlocks.Add(block.Hash(), block)
-			stats.queued++
-			continue
-
-		case err == consensus.ErrPrunedAncestor:
-			// Block competing with the canonical chain, store in the db, but don't process
-			// until the competitor TD goes above the canonical TD
-			currentBlock := bc.CurrentBlock()
-			localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
-			externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
-			if localTd.Cmp(externTd) > 0 {
-				if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
-					return i, events, coalescedLogs, err
-				}
-				continue
-			}
-			// Competitor chain beat canonical, gather all blocks from the common ancestor
-			var winner []*types.Block
-
-			parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
-			for !bc.HasState(parent.Root()) {
-				winner = append(winner, parent)
-				parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
-			}
-			for j := 0; j < len(winner)/2; j++ {
-				winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
-			}
-			// Import all the pruned blocks to make the state available
-			bc.chainmu.Unlock()
-			_, evs, logs, err := bc.insertChain(winner)
-			bc.chainmu.Lock()
-			events, coalescedLogs = evs, logs
-
-			if err != nil {
-				return i, events, coalescedLogs, err
-			}
-
-		case err != nil:
-			bc.reportBlock(block, nil, err)
-			return i, events, coalescedLogs, err
-		}
-		// Create a new statedb using the parent block and report an
-		// error if it fails.
-		var parent *types.Block
-		if i == 0 {
-			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
-		} else {
-			parent = chain[i-1]
+		// Retrieve the parent block and it's state to execute on top
+		start := time.Now()
+
+		parent := it.previous()
+		if parent == nil {
+			parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
 		}
 		state, err := state.New(parent.Root(), bc.stateCache)
 		if err != nil {
-			return i, events, coalescedLogs, err
+			return it.index, events, coalescedLogs, err
 		}
 		// Process block using the parent state as reference point.
+		t0 := time.Now()
 		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
+		t1 := time.Now()
 		if err != nil {
 			bc.reportBlock(block, receipts, err)
-			return i, events, coalescedLogs, err
+			return it.index, events, coalescedLogs, err
 		}
 		// Validate the state using the default validator
-		err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
-		if err != nil {
+		if err := bc.Validator().ValidateState(block, parent, state, receipts, usedGas); err != nil {
 			bc.reportBlock(block, receipts, err)
-			return i, events, coalescedLogs, err
+			return it.index, events, coalescedLogs, err
 		}
-		proctime := time.Since(bstart)
+		t2 := time.Now()
+		proctime := time.Since(start)

 		// Write the block to the chain and get the status.
 		status, err := bc.WriteBlockWithState(block, receipts, state)
+		t3 := time.Now()
 		if err != nil {
-			return i, events, coalescedLogs, err
+			return it.index, events, coalescedLogs, err
 		}
+		blockInsertTimer.UpdateSince(start)
+		blockExecutionTimer.Update(t1.Sub(t0))
+		blockValidationTimer.Update(t2.Sub(t1))
+		blockWriteTimer.Update(t3.Sub(t2))
 		switch status {
 		case CanonStatTy:
-			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
-				"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
+			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
+				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
+				"elapsed", common.PrettyDuration(time.Since(start)),
+				"root", block.Root())

 			coalescedLogs = append(coalescedLogs, logs...)
-			blockInsertTimer.UpdateSince(bstart)
 			events = append(events, ChainEvent{block, block.Hash(), logs})
 			lastCanon = block
|
|||||||
bc.gcproc += proctime
|
bc.gcproc += proctime
|
||||||
|
|
||||||
case SideStatTy:
|
case SideStatTy:
|
||||||
log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
|
log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
|
||||||
common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
|
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
|
||||||
|
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
|
||||||
blockInsertTimer.UpdateSince(bstart)
|
"root", block.Root())
|
||||||
events = append(events, ChainSideEvent{block})
|
events = append(events, ChainSideEvent{block})
|
||||||
}
|
}
|
||||||
|
blockInsertTimer.UpdateSince(start)
|
||||||
stats.processed++
|
stats.processed++
|
||||||
stats.usedGas += usedGas
|
stats.usedGas += usedGas
|
||||||
|
|
||||||
cache, _ := bc.stateCache.TrieDB().Size()
|
cache, _ := bc.stateCache.TrieDB().Size()
|
||||||
stats.report(chain, i, cache)
|
stats.report(chain, it.index, cache)
|
||||||
}
|
}
|
||||||
|
// Any blocks remaining here? The only ones we care about are the future ones
|
||||||
|
if block != nil && err == consensus.ErrFutureBlock {
|
||||||
|
if err := bc.addFutureBlock(block); err != nil {
|
||||||
|
return it.index, events, coalescedLogs, err
|
||||||
|
}
|
||||||
|
block, err = it.next()
|
||||||
|
|
||||||
|
for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
|
||||||
|
if err := bc.addFutureBlock(block); err != nil {
|
||||||
|
return it.index, events, coalescedLogs, err
|
||||||
|
}
|
||||||
|
stats.queued++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stats.ignored += it.remaining()
|
||||||
|
|
||||||
// Append a single chain head event if we've progressed the chain
|
// Append a single chain head event if we've progressed the chain
|
||||||
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
|
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
|
||||||
events = append(events, ChainHeadEvent{lastCanon})
|
events = append(events, ChainHeadEvent{lastCanon})
|
||||||
}
|
}
|
||||||
return 0, events, coalescedLogs, nil
|
return it.index, events, coalescedLogs, err
|
||||||
}
|
}
|
||||||
|
|
||||||
-// insertStats tracks and reports on block insertion.
-type insertStats struct {
-	queued, processed, ignored int
-	usedGas                    uint64
-	lastIndex                  int
-	startTime                  mclock.AbsTime
-}
-
-// statsReportLimit is the time limit during import and export after which we
-// always print out progress. This avoids the user wondering what's going on.
-const statsReportLimit = 8 * time.Second
-
-// report prints statistics if some number of blocks have been processed
-// or more than a few seconds have passed since the last message.
-func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
-	// Fetch the timings for the batch
-	var (
-		now     = mclock.Now()
-		elapsed = time.Duration(now) - time.Duration(st.startTime)
-	)
-	// If we're at the last block of the batch or report period reached, log
-	if index == len(chain)-1 || elapsed >= statsReportLimit {
-		var (
-			end = chain[index]
-			txs = countTransactions(chain[st.lastIndex : index+1])
-		)
-		context := []interface{}{
-			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
-			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
-			"number", end.Number(), "hash", end.Hash(),
-		}
-		if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
-			context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
-		}
-		context = append(context, []interface{}{"cache", cache}...)
-
-		if st.queued > 0 {
-			context = append(context, []interface{}{"queued", st.queued}...)
-		}
-		if st.ignored > 0 {
-			context = append(context, []interface{}{"ignored", st.ignored}...)
-		}
-		log.Info("Imported new chain segment", context...)
-
-		*st = insertStats{startTime: now, lastIndex: index + 1}
-	}
-}
-
-func countTransactions(chain []*types.Block) (c int) {
-	for _, b := range chain {
-		c += len(b.Transactions())
-	}
-	return c
-}
+// insertSidechain is called when an import batch hits upon a pruned ancestor
+// error, which happens when a sidechain with a sufficiently old fork-block is
+// found.
+//
+// The method writes all (header-and-body-valid) blocks to disk, then tries to
+// switch over to the new chain if the TD exceeded the current chain.
+func (bc *BlockChain) insertSidechain(it *insertIterator) (int, []interface{}, []*types.Log, error) {
+	var (
+		externTd *big.Int
+		current  = bc.CurrentBlock().NumberU64()
+	)
+	// The first sidechain block error is already verified to be ErrPrunedAncestor.
+	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
+	// ones. Any other errors means that the block is invalid, and should not be written
+	// to disk.
+	block, err := it.current(), consensus.ErrPrunedAncestor
+	for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
+		// Check the canonical state root for that number
+		if number := block.NumberU64(); current >= number {
+			if canonical := bc.GetBlockByNumber(number); canonical != nil && canonical.Root() == block.Root() {
+				// This is most likely a shadow-state attack. When a fork is imported into the
+				// database, and it eventually reaches a block height which is not pruned, we
+				// just found that the state already exist! This means that the sidechain block
+				// refers to a state which already exists in our canon chain.
+				//
+				// If left unchecked, we would now proceed importing the blocks, without actually
+				// having verified the state of the previous blocks.
+				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
+
+				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
+				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
+				// mechanism.
+				return it.index, nil, nil, errors.New("sidechain ghost-state attack")
+			}
+		}
+		if externTd == nil {
+			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
+		}
+		externTd = new(big.Int).Add(externTd, block.Difficulty())
+
+		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
+			start := time.Now()
+			if err := bc.WriteBlockWithoutState(block, externTd); err != nil {
+				return it.index, nil, nil, err
+			}
+			log.Debug("Inserted sidechain block", "number", block.Number(), "hash", block.Hash(),
+				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
+				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
+				"root", block.Root())
+		}
+	}
+	// At this point, we've written all sidechain blocks to database. Loop ended
+	// either on some other error or all were processed. If there was some other
+	// error, we can ignore the rest of those blocks.
+	//
+	// If the externTd was larger than our local TD, we now need to reimport the previous
+	// blocks to regenerate the required state
+	localTd := bc.GetTd(bc.CurrentBlock().Hash(), current)
+	if localTd.Cmp(externTd) > 0 {
+		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().NumberU64(), "sidetd", externTd, "localtd", localTd)
+		return it.index, nil, nil, err
+	}
+	// Gather all the sidechain hashes (full blocks may be memory heavy)
+	var (
+		hashes  []common.Hash
+		numbers []uint64
+	)
+	parent := bc.GetHeader(it.previous().Hash(), it.previous().NumberU64())
+	for parent != nil && !bc.HasState(parent.Root) {
+		hashes = append(hashes, parent.Hash())
+		numbers = append(numbers, parent.Number.Uint64())
+
+		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
+	}
+	if parent == nil {
+		return it.index, nil, nil, errors.New("missing parent")
+	}
+	// Import all the pruned blocks to make the state available
+	var (
+		blocks []*types.Block
+		memory common.StorageSize
+	)
+	for i := len(hashes) - 1; i >= 0; i-- {
+		// Append the next block to our batch
+		block := bc.GetBlock(hashes[i], numbers[i])
+
+		blocks = append(blocks, block)
+		memory += block.Size()
+
+		// If memory use grew too large, import and continue. Sadly we need to discard
+		// all raised events and logs from notifications since we're too heavy on the
+		// memory here.
+		if len(blocks) >= 2048 || memory > 64*1024*1024 {
+			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
+			if _, _, _, err := bc.insertChain(blocks, false); err != nil {
+				return 0, nil, nil, err
+			}
+			blocks, memory = blocks[:0], 0
+
+			// If the chain is terminating, stop processing blocks
+			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
+				log.Debug("Premature abort during blocks processing")
+				return 0, nil, nil, nil
+			}
+		}
+	}
+	if len(blocks) > 0 {
+		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
+		return bc.insertChain(blocks, false)
+	}
+	return 0, nil, nil, nil
+}

 // reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
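insertSidechain re-imports pruned blocks in memory-capped batches (2048 blocks or ~64MB, per the diff). The flush policy in isolation, as a hedged sketch with ints standing in for blocks:

package main

import "fmt"

const (
	maxBatchBlocks = 2048             // same caps as the diff
	maxBatchBytes  = 64 * 1024 * 1024 // ~64MB of block data
)

func importAll(blockSizes []int, insert func(batch []int) error) error {
	var batch []int
	memory := 0
	for _, size := range blockSizes {
		batch = append(batch, size)
		memory += size
		// Flush once either cap is hit, then reuse the slice.
		if len(batch) >= maxBatchBlocks || memory > maxBatchBytes {
			if err := insert(batch); err != nil {
				return err
			}
			batch, memory = batch[:0], 0
		}
	}
	if len(batch) > 0 {
		return insert(batch)
	}
	return nil
}

func main() {
	err := importAll([]int{10, 20, 30}, func(batch []int) error {
		fmt.Println("importing", len(batch), "blocks")
		return nil
	})
	fmt.Println(err)
}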
@@ -1469,8 +1557,10 @@ func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, e
 	bc.addBadBlock(block)

 	var receiptString string
-	for _, receipt := range receipts {
-		receiptString += fmt.Sprintf("\t%v\n", receipt)
+	for i, receipt := range receipts {
+		receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
+			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
+			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
 	}
 	log.Error(fmt.Sprintf(`
 ########## BAD BLOCK #########
core/blockchain_insert.go (new file, 143 lines)
@@ -0,0 +1,143 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/log"
+)
+
+// insertStats tracks and reports on block insertion.
+type insertStats struct {
+	queued, processed, ignored int
+	usedGas                    uint64
+	lastIndex                  int
+	startTime                  mclock.AbsTime
+}
+
+// statsReportLimit is the time limit during import and export after which we
+// always print out progress. This avoids the user wondering what's going on.
+const statsReportLimit = 8 * time.Second
+
+// report prints statistics if some number of blocks have been processed
+// or more than a few seconds have passed since the last message.
+func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
+	// Fetch the timings for the batch
+	var (
+		now     = mclock.Now()
+		elapsed = time.Duration(now) - time.Duration(st.startTime)
+	)
+	// If we're at the last block of the batch or report period reached, log
+	if index == len(chain)-1 || elapsed >= statsReportLimit {
+		// Count the number of transactions in this segment
+		var txs int
+		for _, block := range chain[st.lastIndex : index+1] {
+			txs += len(block.Transactions())
+		}
+		end := chain[index]
+
+		// Assemble the log context and send it to the logger
+		context := []interface{}{
+			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
+			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
+			"number", end.Number(), "hash", end.Hash(),
+		}
+		if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
+			context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
+		}
+		context = append(context, []interface{}{"cache", cache}...)
+
+		if st.queued > 0 {
+			context = append(context, []interface{}{"queued", st.queued}...)
+		}
+		if st.ignored > 0 {
+			context = append(context, []interface{}{"ignored", st.ignored}...)
+		}
+		log.Info("Imported new chain segment", context...)
+
+		// Bump the stats reported to the next section
+		*st = insertStats{startTime: now, lastIndex: index + 1}
+	}
+}
+
+// insertIterator is a helper to assist during chain import.
+type insertIterator struct {
+	chain     types.Blocks
+	results   <-chan error
+	index     int
+	validator Validator
+}
+
+// newInsertIterator creates a new iterator based on the given blocks, which are
+// assumed to be a contiguous chain.
+func newInsertIterator(chain types.Blocks, results <-chan error, validator Validator) *insertIterator {
+	return &insertIterator{
+		chain:     chain,
+		results:   results,
+		index:     -1,
+		validator: validator,
+	}
+}
+
+// next returns the next block in the iterator, along with any potential validation
+// error for that block. When the end is reached, it will return (nil, nil).
+func (it *insertIterator) next() (*types.Block, error) {
+	if it.index+1 >= len(it.chain) {
+		it.index = len(it.chain)
+		return nil, nil
+	}
+	it.index++
+	if err := <-it.results; err != nil {
+		return it.chain[it.index], err
+	}
+	return it.chain[it.index], it.validator.ValidateBody(it.chain[it.index])
+}
+
+// current returns the current block that's being processed.
+func (it *insertIterator) current() *types.Block {
+	if it.index < 0 || it.index+1 >= len(it.chain) {
+		return nil
+	}
+	return it.chain[it.index]
+}
+
+// previous returns the previous block was being processed, or nil
+func (it *insertIterator) previous() *types.Block {
+	if it.index < 1 {
+		return nil
+	}
+	return it.chain[it.index-1]
+}
+
+// first returns the first block in the it.
+func (it *insertIterator) first() *types.Block {
+	return it.chain[0]
+}
+
+// remaining returns the number of remaining blocks.
+func (it *insertIterator) remaining() int {
+	return len(it.chain) - it.index
+}
+
+// processed returns the number of processed blocks.
+func (it *insertIterator) processed() int {
+	return it.index + 1
+}
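A hedged, self-contained analogue of the new iterator's contract — next() advances, consumes one verification result, and signals the end with a zero value — since the real type is unexported in package core:

package main

import "fmt"

type iter struct {
	items   []string
	results <-chan error // one verification verdict per item, in order
	index   int
}

// next mirrors insertIterator.next: advance, consume one result, and signal
// the end with a zero value.
func (it *iter) next() (string, error) {
	if it.index+1 >= len(it.items) {
		it.index = len(it.items)
		return "", nil
	}
	it.index++
	return it.items[it.index], <-it.results
}

func main() {
	results := make(chan error, 2)
	results <- nil
	results <- fmt.Errorf("bad header")
	it := &iter{items: []string{"a", "b"}, index: -1, results: results}
	for item, err := it.next(); item != ""; item, err = it.next() {
		fmt.Println(item, err) // a <nil>, then b bad header
	}
}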
@@ -579,11 +579,11 @@ func testInsertNonceError(t *testing.T, full bool) {
 			blockchain.hc.engine = blockchain.engine
 			failRes, err = blockchain.InsertHeaderChain(headers, 1)
 		}
-		// Check that the returned error indicates the failure.
+		// Check that the returned error indicates the failure
 		if failRes != failAt {
-			t.Errorf("test %d: failure index mismatch: have %d, want %d", i, failRes, failAt)
+			t.Errorf("test %d: failure (%v) index mismatch: have %d, want %d", i, err, failRes, failAt)
 		}
-		// Check that all no blocks after the failing block have been inserted.
+		// Check that all blocks after the failing block have been inserted
 		for j := 0; j < i-failAt; j++ {
 			if full {
 				if block := blockchain.GetBlockByNumber(failNum + uint64(j)); block != nil {
@@ -1345,7 +1345,7 @@ func TestLargeReorgTrieGC(t *testing.T) {
 		t.Fatalf("failed to insert shared chain: %v", err)
 	}
 	if _, err := chain.InsertChain(original); err != nil {
-		t.Fatalf("failed to insert shared chain: %v", err)
+		t.Fatalf("failed to insert original chain: %v", err)
 	}
 	// Ensure that the state associated with the forking point is pruned away
 	if node, _ := chain.stateCache.TrieDB().Node(shared[len(shared)-1].Root()); node != nil {
@@ -151,6 +151,9 @@ func (e *GenesisMismatchError) Error() string {
 //
 // The returned chain configuration is never nil.
 func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
+	return SetupGenesisBlockWithOverride(db, genesis, nil)
+}
+func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, constantinopleOverride *big.Int) (*params.ChainConfig, common.Hash, error) {
 	if genesis != nil && genesis.Config == nil {
 		return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
 	}
@@ -178,6 +181,9 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis

 	// Get the existing chain configuration.
 	newcfg := genesis.configOrDefault(stored)
+	if constantinopleOverride != nil {
+		newcfg.ConstantinopleBlock = constantinopleOverride
+	}
 	storedcfg := rawdb.ReadChainConfig(db, stored)
 	if storedcfg == nil {
 		log.Warn("Found genesis block without chain config")
@@ -23,9 +23,9 @@ import (

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/crypto/sha3"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/rlp"
+	"golang.org/x/crypto/sha3"
 )

 // Tests block header storage and retrieval operations.
|
|||||||
if entry := ReadHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil {
|
if entry := ReadHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil {
|
||||||
t.Fatalf("Stored header RLP not found")
|
t.Fatalf("Stored header RLP not found")
|
||||||
} else {
|
} else {
|
||||||
hasher := sha3.NewKeccak256()
|
hasher := sha3.NewLegacyKeccak256()
|
||||||
hasher.Write(entry)
|
hasher.Write(entry)
|
||||||
|
|
||||||
if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
|
if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
|
||||||
@@ -68,7 +68,7 @@ func TestBodyStorage(t *testing.T) {
 	// Create a test body to move around the database and make sure it's really new
 	body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}

-	hasher := sha3.NewKeccak256()
+	hasher := sha3.NewLegacyKeccak256()
 	rlp.Encode(hasher, body)
 	hash := common.BytesToHash(hasher.Sum(nil))

@@ -85,7 +85,7 @@
 	if entry := ReadBodyRLP(db, hash, 0); entry == nil {
 		t.Fatalf("Stored body RLP not found")
 	} else {
-		hasher := sha3.NewKeccak256()
+		hasher := sha3.NewLegacyKeccak256()
 		hasher.Write(entry)

 		if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {
@@ -26,19 +26,27 @@ import (
 )

 // ReadDatabaseVersion retrieves the version number of the database.
-func ReadDatabaseVersion(db DatabaseReader) int {
-	var version int
+func ReadDatabaseVersion(db DatabaseReader) *uint64 {
+	var version uint64

 	enc, _ := db.Get(databaseVerisionKey)
-	rlp.DecodeBytes(enc, &version)
+	if len(enc) == 0 {
+		return nil
+	}
+	if err := rlp.DecodeBytes(enc, &version); err != nil {
+		return nil
+	}

-	return version
+	return &version
 }

 // WriteDatabaseVersion stores the version number of the database
-func WriteDatabaseVersion(db DatabaseWriter, version int) {
-	enc, _ := rlp.EncodeToBytes(version)
-	if err := db.Put(databaseVerisionKey, enc); err != nil {
+func WriteDatabaseVersion(db DatabaseWriter, version uint64) {
+	enc, err := rlp.EncodeToBytes(version)
+	if err != nil {
+		log.Crit("Failed to encode database version", "err", err)
+	}
+	if err = db.Put(databaseVerisionKey, enc); err != nil {
 		log.Crit("Failed to store the database version", "err", err)
 	}
 }
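Returning *uint64 lets callers distinguish "no version stored" from any stored value, which the old int return could not. A toy sketch of the convention (mine), with a map standing in for the database and the RLP decoding elided:

package main

import "fmt"

var store = map[string][]byte{} // stand-in for the database

func readVersion() *uint64 {
	enc, ok := store["DatabaseVersion"]
	if !ok || len(enc) == 0 {
		return nil // never written (or unreadable): not the same as version 0
	}
	v := uint64(enc[0]) // the real code RLP-decodes here
	return &v
}

func main() {
	if v := readVersion(); v == nil {
		fmt.Println("fresh database, writing current version")
	} else {
		fmt.Println("found version", *v)
	}
}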
@@ -468,9 +468,9 @@ func (self *StateDB) createObject(addr common.Address) (newobj, prev *stateObjec
 //
 // Carrying over the balance ensures that Ether doesn't disappear.
 func (self *StateDB) CreateAccount(addr common.Address) {
-	new, prev := self.createObject(addr)
+	newObj, prev := self.createObject(addr)
 	if prev != nil {
-		new.setBalance(prev.data.Balance)
+		newObj.setBalance(prev.data.Balance)
 	}
 }

@@ -22,7 +22,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 )

-// senderCacher is a concurrent transaction sender recoverer anc cacher.
+// senderCacher is a concurrent transaction sender recoverer and cacher.
 var senderCacher = newTxSenderCacher(runtime.NumCPU())

 // txSenderCacherRequest is a request for recovering transaction senders with a
@@ -172,6 +172,26 @@ func (config *TxPoolConfig) sanitize() TxPoolConfig {
 		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
 		conf.PriceBump = DefaultTxPoolConfig.PriceBump
 	}
+	if conf.AccountSlots < 1 {
+		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
+		conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
+	}
+	if conf.GlobalSlots < 1 {
+		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
+		conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
+	}
+	if conf.AccountQueue < 1 {
+		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
+		conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
+	}
+	if conf.GlobalQueue < 1 {
+		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
+		conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
+	}
+	if conf.Lifetime < 1 {
+		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
+		conf.Lifetime = DefaultTxPoolConfig.Lifetime
+	}
 	return conf
 }

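The sanitize additions clamp every zero-valued limit back to its default and log the correction, so a zero-valued config can no longer disable the pool outright. The pattern in miniature, as a hedged sketch with a two-field config:

package main

import "fmt"

type Config struct{ AccountSlots, GlobalSlots uint64 }

var Default = Config{AccountSlots: 16, GlobalSlots: 4096}

// sanitize clamps zero-valued limits back to their defaults, logging each fix.
func (c Config) sanitize() Config {
	conf := c
	if conf.AccountSlots < 1 {
		fmt.Println("sanitizing account slots:", conf.AccountSlots, "->", Default.AccountSlots)
		conf.AccountSlots = Default.AccountSlots
	}
	if conf.GlobalSlots < 1 {
		fmt.Println("sanitizing global slots:", conf.GlobalSlots, "->", Default.GlobalSlots)
		conf.GlobalSlots = Default.GlobalSlots
	}
	return conf
}

func main() { fmt.Println(Config{}.sanitize()) }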
@@ -1095,7 +1095,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}

 	config := testTxPoolConfig
-	config.GlobalSlots = 0
+	config.GlobalSlots = 1

 	pool := NewTxPool(config, params.TestChainConfig, blockchain)
 	defer pool.Stop()
@@ -28,8 +28,8 @@ import (

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
-	"github.com/ethereum/go-ethereum/crypto/sha3"
 	"github.com/ethereum/go-ethereum/rlp"
+	"golang.org/x/crypto/sha3"
 )

 var (
@@ -81,8 +81,8 @@ type Header struct {
 	GasUsed   uint64      `json:"gasUsed"   gencodec:"required"`
 	Time      *big.Int    `json:"timestamp" gencodec:"required"`
 	Extra     []byte      `json:"extraData" gencodec:"required"`
-	MixDigest common.Hash `json:"mixHash"   gencodec:"required"`
-	Nonce     BlockNonce  `json:"nonce"     gencodec:"required"`
+	MixDigest common.Hash `json:"mixHash"`
+	Nonce     BlockNonce  `json:"nonce"`
 }

 // field type overrides for gencodec
@@ -109,7 +109,7 @@ func (h *Header) Size() common.StorageSize {
 }

 func rlpHash(x interface{}) (h common.Hash) {
-	hw := sha3.NewKeccak256()
+	hw := sha3.NewLegacyKeccak256()
 	rlp.Encode(hw, x)
 	hw.Sum(h[:0])
 	return h
@@ -13,6 +13,7 @@ import (

 var _ = (*headerMarshaling)(nil)

+// MarshalJSON marshals as JSON.
 func (h Header) MarshalJSON() ([]byte, error) {
 	type Header struct {
 		ParentHash common.Hash `json:"parentHash" gencodec:"required"`
@@ -28,8 +29,8 @@ func (h Header) MarshalJSON() ([]byte, error) {
 		GasUsed   hexutil.Uint64 `json:"gasUsed"   gencodec:"required"`
 		Time      *hexutil.Big   `json:"timestamp" gencodec:"required"`
 		Extra     hexutil.Bytes  `json:"extraData" gencodec:"required"`
-		MixDigest common.Hash    `json:"mixHash"   gencodec:"required"`
-		Nonce     BlockNonce     `json:"nonce"     gencodec:"required"`
+		MixDigest common.Hash    `json:"mixHash"`
+		Nonce     BlockNonce     `json:"nonce"`
 		Hash      common.Hash    `json:"hash"`
 	}
 	var enc Header
@@ -52,6 +53,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
 	return json.Marshal(&enc)
 }

+// UnmarshalJSON unmarshals from JSON.
 func (h *Header) UnmarshalJSON(input []byte) error {
 	type Header struct {
 		ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
@@ -67,8 +69,8 @@ func (h *Header) UnmarshalJSON(input []byte) error {
 		GasUsed   *hexutil.Uint64 `json:"gasUsed"   gencodec:"required"`
 		Time      *hexutil.Big    `json:"timestamp" gencodec:"required"`
 		Extra     *hexutil.Bytes  `json:"extraData" gencodec:"required"`
-		MixDigest *common.Hash    `json:"mixHash"   gencodec:"required"`
-		Nonce     *BlockNonce     `json:"nonce"     gencodec:"required"`
+		MixDigest *common.Hash    `json:"mixHash"`
+		Nonce     *BlockNonce     `json:"nonce"`
 	}
 	var dec Header
 	if err := json.Unmarshal(input, &dec); err != nil {
@@ -126,13 +128,11 @@ func (h *Header) UnmarshalJSON(input []byte) error {
 		return errors.New("missing required field 'extraData' for Header")
 	}
 	h.Extra = *dec.Extra
-	if dec.MixDigest == nil {
-		return errors.New("missing required field 'mixHash' for Header")
+	if dec.MixDigest != nil {
+		h.MixDigest = *dec.MixDigest
 	}
-	h.MixDigest = *dec.MixDigest
-	if dec.Nonce == nil {
-		return errors.New("missing required field 'nonce' for Header")
+	if dec.Nonce != nil {
+		h.Nonce = *dec.Nonce
 	}
-	h.Nonce = *dec.Nonce
 	return nil
 }
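With the gencodec:"required" tags dropped, the generated UnmarshalJSON now treats mixHash and nonce as optional: a nil pointer is skipped instead of raising an error. A compact sketch (mine, with hypothetical field types) of the required-vs-optional pointer pattern:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type header struct {
	ParentHash *string `json:"parentHash"` // required
	MixHash    *string `json:"mixHash"`    // optional as of this diff
}

func parse(input []byte) (*header, error) {
	var dec header
	if err := json.Unmarshal(input, &dec); err != nil {
		return nil, err
	}
	if dec.ParentHash == nil {
		return nil, errors.New("missing required field 'parentHash'")
	}
	// A nil MixHash is simply skipped, leaving the zero value in place.
	return &dec, nil
}

func main() {
	h, err := parse([]byte(`{"parentHash":"0xabc"}`))
	fmt.Println(h.MixHash == nil, err) // true <nil>
}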
@@ -234,7 +234,7 @@ func (tx *Transaction) AsMessage(s Signer) (Message, error) {
 }

 // WithSignature returns a new transaction with the given signature.
-// This signature needs to be formatted as described in the yellow paper (v+27).
+// This signature needs to be in the [R || S || V] format where V is 0 or 1.
 func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, error) {
 	r, s, v, err := signer.SignatureValues(tx, sig)
 	if err != nil {
@@ -339,6 +339,12 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
 	contract := NewContract(caller, to, new(big.Int), gas)
 	contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))

+	// We do an AddBalance of zero here, just in order to trigger a touch.
+	// This doesn't matter on Mainnet, where all empties are gone at the time of Byzantium,
+	// but is the correct thing to do and matters on other networks, in tests, and potential
+	// future scenarios
+	evm.StateDB.AddBalance(addr, bigZero)
+
 	// When an error was returned by the EVM or when setting the creation code
 	// above we revert to the snapshot and consume any gas remaining. Additionally
 	// when we're in Homestead this also counts for code storage gas errors.
@@ -24,8 +24,8 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/crypto/sha3"
 	"github.com/ethereum/go-ethereum/params"
+	"golang.org/x/crypto/sha3"
 )

 var (
@@ -387,7 +387,7 @@ func opSha3(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory
 	data := memory.Get(offset.Int64(), size.Int64())

 	if interpreter.hasher == nil {
-		interpreter.hasher = sha3.NewKeccak256().(keccakState)
+		interpreter.hasher = sha3.NewLegacyKeccak256().(keccakState)
 	} else {
 		interpreter.hasher.Reset()
 	}
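opSha3 allocates the Keccak state once per interpreter and Resets it between SHA3 opcodes. A hedged sketch of that reuse via the plain hash.Hash interface (the diff additionally asserts a keccakState with a Read method to avoid the copy made by Sum):

package main

import (
	"fmt"
	"hash"

	"golang.org/x/crypto/sha3"
)

func main() {
	var hasher hash.Hash
	for _, input := range [][]byte{[]byte("first"), []byte("second")} {
		if hasher == nil {
			hasher = sha3.NewLegacyKeccak256() // allocate once
		} else {
			hasher.Reset() // reuse the keccak state on later iterations
		}
		hasher.Write(input)
		fmt.Printf("%x\n", hasher.Sum(nil))
	}
}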
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

-package main
+package vm

 import (
 	"encoding/json"
@@ -24,17 +24,16 @@ import (

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/math"
-	"github.com/ethereum/go-ethereum/core/vm"
 )

 type JSONLogger struct {
 	encoder *json.Encoder
-	cfg     *vm.LogConfig
+	cfg     *LogConfig
 }

 // NewJSONLogger creates a new EVM tracer that prints execution steps as JSON objects
 // into the provided stream.
-func NewJSONLogger(cfg *vm.LogConfig, writer io.Writer) *JSONLogger {
+func NewJSONLogger(cfg *LogConfig, writer io.Writer) *JSONLogger {
 	return &JSONLogger{json.NewEncoder(writer), cfg}
 }
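With the tracer moved from the evm command into package vm itself, it can be wired straight into the interpreter configuration. A hedged usage sketch (field names per the vm.Config of this era):

package main

import (
	"os"

	"github.com/ethereum/go-ethereum/core/vm"
)

func main() {
	// Emit one JSON object per executed opcode to stderr.
	logger := vm.NewJSONLogger(&vm.LogConfig{}, os.Stderr)
	cfg := vm.Config{Debug: true, Tracer: logger}
	_ = cfg // pass to vm.NewEVM, or to whatever backend constructs the EVM
}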
@@ -43,8 +42,8 @@ func (l *JSONLogger) CaptureStart(from common.Address, to common.Address, create
 }

 // CaptureState outputs state information on the logger.
-func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
-	log := vm.StructLog{
+func (l *JSONLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
+	log := StructLog{
 		Pc:      pc,
 		Op:      op,
 		Gas:     gas,
@@ -65,7 +64,7 @@ func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost
 }

 // CaptureFault outputs state information on the logger.
-func (l *JSONLogger) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
+func (l *JSONLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
 	return nil
 }
@@ -30,8 +30,8 @@ import (

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/math"
-	"github.com/ethereum/go-ethereum/crypto/sha3"
 	"github.com/ethereum/go-ethereum/rlp"
+	"golang.org/x/crypto/sha3"
 )

 var (
@@ -43,7 +43,7 @@ var errInvalidPubkey = errors.New("invalid secp256k1 public key")

 // Keccak256 calculates and returns the Keccak256 hash of the input data.
 func Keccak256(data ...[]byte) []byte {
-	d := sha3.NewKeccak256()
+	d := sha3.NewLegacyKeccak256()
 	for _, b := range data {
 		d.Write(b)
 	}
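NewLegacyKeccak256 is a drop-in rename for the old vendored constructor: it implements the original, pre-FIPS-202 Keccak padding that Ethereum uses, which differs from the finalized SHA3-256 that sha3.New256 implements. A small sketch showing the two variants disagree on the same input:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	msg := []byte("hello")

	legacy := sha3.NewLegacyKeccak256() // pre-standard Keccak, used by Ethereum
	legacy.Write(msg)

	std := sha3.New256() // FIPS-202 SHA3-256, different padding byte
	std.Write(msg)

	fmt.Printf("keccak256: %x\n", legacy.Sum(nil))
	fmt.Printf("sha3-256:  %x\n", std.Sum(nil)) // differs from the line above
}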
@@ -53,7 +53,7 @@ func Keccak256(data ...[]byte) []byte {
 // Keccak256Hash calculates and returns the Keccak256 hash of the input data,
 // converting it to an internal Hash data structure.
 func Keccak256Hash(data ...[]byte) (h common.Hash) {
-	d := sha3.NewKeccak256()
+	d := sha3.NewLegacyKeccak256()
 	for _, b := range data {
 		d.Write(b)
 	}
@@ -63,7 +63,7 @@ func Keccak256Hash(data ...[]byte) (h common.Hash) {

 // Keccak512 calculates and returns the Keccak512 hash of the input data.
 func Keccak512(data ...[]byte) []byte {
-	d := sha3.NewKeccak512()
+	d := sha3.NewLegacyKeccak512()
 	for _, b := range data {
 		d.Write(b)
 	}
@@ -310,7 +310,7 @@ var theCurve = new(BitCurve)
 func init() {
 	// See SEC 2 section 2.7.1
 	// curve parameters taken from:
-	// http://www.secg.org/collateral/sec2_final.pdf
+	// http://www.secg.org/sec2-v2.pdf
 	theCurve.P, _ = new(big.Int).SetString("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 0)
 	theCurve.N, _ = new(big.Int).SetString("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 0)
 	theCurve.B, _ = new(big.Int).SetString("0x0000000000000000000000000000000000000000000000000000000000000007", 0)
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
@@ -1,297 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// Tests include all the ShortMsgKATs provided by the Keccak team at
-// https://github.com/gvanas/KeccakCodePackage
-//
-// They only include the zero-bit case of the bitwise testvectors
-// published by NIST in the draft of FIPS-202.
-
-import (
-	"bytes"
-	"compress/flate"
-	"encoding/hex"
-	"encoding/json"
-	"hash"
-	"os"
-	"strings"
-	"testing"
-)
-
-const (
-	testString  = "brekeccakkeccak koax koax"
-	katFilename = "testdata/keccakKats.json.deflate"
-)
-
-// Internal-use instances of SHAKE used to test against KATs.
-func newHashShake128() hash.Hash {
-	return &state{rate: 168, dsbyte: 0x1f, outputLen: 512}
-}
-func newHashShake256() hash.Hash {
-	return &state{rate: 136, dsbyte: 0x1f, outputLen: 512}
-}
-
-// testDigests contains functions returning hash.Hash instances
-// with output-length equal to the KAT length for both SHA-3 and
-// SHAKE instances.
-var testDigests = map[string]func() hash.Hash{
-	"SHA3-224": New224,
-	"SHA3-256": New256,
-	"SHA3-384": New384,
-	"SHA3-512": New512,
-	"SHAKE128": newHashShake128,
-	"SHAKE256": newHashShake256,
-}
-
-// testShakes contains functions that return ShakeHash instances for
-// testing the ShakeHash-specific interface.
-var testShakes = map[string]func() ShakeHash{
-	"SHAKE128": NewShake128,
-	"SHAKE256": NewShake256,
-}
-
-// structs used to marshal JSON test-cases.
-type KeccakKats struct {
-	Kats map[string][]struct {
-		Digest  string `json:"digest"`
-		Length  int64  `json:"length"`
-		Message string `json:"message"`
-	}
-}
-
-func testUnalignedAndGeneric(t *testing.T, testf func(impl string)) {
-	xorInOrig, copyOutOrig := xorIn, copyOut
-	xorIn, copyOut = xorInGeneric, copyOutGeneric
-	testf("generic")
-	if xorImplementationUnaligned != "generic" {
-		xorIn, copyOut = xorInUnaligned, copyOutUnaligned
-		testf("unaligned")
-	}
-	xorIn, copyOut = xorInOrig, copyOutOrig
-}
-
-// TestKeccakKats tests the SHA-3 and Shake implementations against all the
-// ShortMsgKATs from https://github.com/gvanas/KeccakCodePackage
-// (The testvectors are stored in keccakKats.json.deflate due to their length.)
-func TestKeccakKats(t *testing.T) {
-	testUnalignedAndGeneric(t, func(impl string) {
-		// Read the KATs.
-		deflated, err := os.Open(katFilename)
-		if err != nil {
-			t.Errorf("error opening %s: %s", katFilename, err)
-		}
-		file := flate.NewReader(deflated)
-		dec := json.NewDecoder(file)
-		var katSet KeccakKats
-		err = dec.Decode(&katSet)
-		if err != nil {
-			t.Errorf("error decoding KATs: %s", err)
-		}
-
-		// Do the KATs.
-		for functionName, kats := range katSet.Kats {
-			d := testDigests[functionName]()
-			for _, kat := range kats {
-				d.Reset()
-				in, err := hex.DecodeString(kat.Message)
-				if err != nil {
-					t.Errorf("error decoding KAT: %s", err)
-				}
-				d.Write(in[:kat.Length/8])
-				got := strings.ToUpper(hex.EncodeToString(d.Sum(nil)))
-				if got != kat.Digest {
-					t.Errorf("function=%s, implementation=%s, length=%d\nmessage:\n %s\ngot:\n %s\nwanted:\n %s",
-						functionName, impl, kat.Length, kat.Message, got, kat.Digest)
-					t.Logf("wanted %+v", kat)
-					t.FailNow()
-				}
-				continue
-			}
-		}
-	})
-}
-
-// TestUnalignedWrite tests that writing data in an arbitrary pattern with
-// small input buffers.
-func TestUnalignedWrite(t *testing.T) {
-	testUnalignedAndGeneric(t, func(impl string) {
-		buf := sequentialBytes(0x10000)
-		for alg, df := range testDigests {
-			d := df()
-			d.Reset()
-			d.Write(buf)
-			want := d.Sum(nil)
-			d.Reset()
-			for i := 0; i < len(buf); {
-				// Cycle through offsets which make a 137 byte sequence.
-				// Because 137 is prime this sequence should exercise all corner cases.
-				offsets := [17]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1}
-				for _, j := range offsets {
-					if v := len(buf) - i; v < j {
-						j = v
-					}
-					d.Write(buf[i : i+j])
-					i += j
-				}
-			}
-			got := d.Sum(nil)
-			if !bytes.Equal(got, want) {
-				t.Errorf("Unaligned writes, implementation=%s, alg=%s\ngot %q, want %q", impl, alg, got, want)
-			}
-		}
-	})
-}
-
-// TestAppend checks that appending works when reallocation is necessary.
-func TestAppend(t *testing.T) {
-	testUnalignedAndGeneric(t, func(impl string) {
-		d := New224()
-
-		for capacity := 2; capacity <= 66; capacity += 64 {
-			// The first time around the loop, Sum will have to reallocate.
-			// The second time, it will not.
-			buf := make([]byte, 2, capacity)
-			d.Reset()
-			d.Write([]byte{0xcc})
-			buf = d.Sum(buf)
-			expected := "0000DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
-			if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
-				t.Errorf("got %s, want %s", got, expected)
-			}
-		}
-	})
-}
-
-// TestAppendNoRealloc tests that appending works when no reallocation is necessary.
-func TestAppendNoRealloc(t *testing.T) {
-	testUnalignedAndGeneric(t, func(impl string) {
-		buf := make([]byte, 1, 200)
-		d := New224()
-		d.Write([]byte{0xcc})
-		buf = d.Sum(buf)
-		expected := "00DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
-		if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
-			t.Errorf("%s: got %s, want %s", impl, got, expected)
-		}
-	})
-}
-
-// TestSqueezing checks that squeezing the full output a single time produces
-// the same output as repeatedly squeezing the instance.
-func TestSqueezing(t *testing.T) {
-	testUnalignedAndGeneric(t, func(impl string) {
-		for functionName, newShakeHash := range testShakes {
-			d0 := newShakeHash()
-			d0.Write([]byte(testString))
-			ref := make([]byte, 32)
-			d0.Read(ref)
-
-			d1 := newShakeHash()
-			d1.Write([]byte(testString))
-			var multiple []byte
-			for range ref {
-				one := make([]byte, 1)
-				d1.Read(one)
-				multiple = append(multiple, one...)
-			}
-			if !bytes.Equal(ref, multiple) {
-				t.Errorf("%s (%s): squeezing %d bytes one at a time failed", functionName, impl, len(ref))
-			}
-		}
-	})
-}
-
-// sequentialBytes produces a buffer of size consecutive bytes 0x00, 0x01, ..., used for testing.
-func sequentialBytes(size int) []byte {
-	result := make([]byte, size)
-	for i := range result {
-		result[i] = byte(i)
-	}
-	return result
-}
-
-// BenchmarkPermutationFunction measures the speed of the permutation function
-// with no input data.
-func BenchmarkPermutationFunction(b *testing.B) {
-	b.SetBytes(int64(200))
-	var lanes [25]uint64
-	for i := 0; i < b.N; i++ {
-		keccakF1600(&lanes)
-	}
-}
-
-// benchmarkHash tests the speed to hash num buffers of buflen each.
-func benchmarkHash(b *testing.B, h hash.Hash, size, num int) {
-	b.StopTimer()
-	h.Reset()
-	data := sequentialBytes(size)
-	b.SetBytes(int64(size * num))
-	b.StartTimer()
-
-	var state []byte
-	for i := 0; i < b.N; i++ {
-		for j := 0; j < num; j++ {
-			h.Write(data)
-		}
-		state = h.Sum(state[:0])
-	}
-	b.StopTimer()
-	h.Reset()
-}
-
-// benchmarkShake is specialized to the Shake instances, which don't
-// require a copy on reading output.
-func benchmarkShake(b *testing.B, h ShakeHash, size, num int) {
-	b.StopTimer()
-	h.Reset()
-	data := sequentialBytes(size)
-	d := make([]byte, 32)
-
-	b.SetBytes(int64(size * num))
-	b.StartTimer()
-
-	for i := 0; i < b.N; i++ {
-		h.Reset()
-		for j := 0; j < num; j++ {
-			h.Write(data)
-		}
-		h.Read(d)
-	}
-}
-
-func BenchmarkSha3_512_MTU(b *testing.B) { benchmarkHash(b, New512(), 1350, 1) }
-func BenchmarkSha3_384_MTU(b *testing.B) { benchmarkHash(b, New384(), 1350, 1) }
-func BenchmarkSha3_256_MTU(b *testing.B) { benchmarkHash(b, New256(), 1350, 1) }
-func BenchmarkSha3_224_MTU(b *testing.B) { benchmarkHash(b, New224(), 1350, 1) }
-
-func BenchmarkShake128_MTU(b *testing.B)  { benchmarkShake(b, NewShake128(), 1350, 1) }
-func BenchmarkShake256_MTU(b *testing.B)  { benchmarkShake(b, NewShake256(), 1350, 1) }
-func BenchmarkShake256_16x(b *testing.B)  { benchmarkShake(b, NewShake256(), 16, 1024) }
-func BenchmarkShake256_1MiB(b *testing.B) { benchmarkShake(b, NewShake256(), 1024, 1024) }
-
-func BenchmarkSha3_512_1MiB(b *testing.B) { benchmarkHash(b, New512(), 1024, 1024) }
-
-func Example_sum() {
-	buf := []byte("some data to hash")
-	// A hash needs to be 64 bytes long to have 256-bit collision resistance.
-	h := make([]byte, 64)
-	// Compute a 64-byte hash of buf and put it in h.
-	ShakeSum256(h, buf)
-}
-
-func Example_mac() {
-	k := []byte("this is a secret key; you should generate a strong random key that's at least 32 bytes long")
-	buf := []byte("and this is some data to authenticate")
-	// A MAC with 32 bytes of output has 256-bit security strength -- if you use at least a 32-byte-long key.
-	h := make([]byte, 32)
-	d := NewShake256()
-	// Write the key into the hash.
-	d.Write(k)
-	// Now write the data.
-	d.Write(buf)
-	// Read 32 bytes of output from the hash into h.
-	d.Read(h)
-}
BIN crypto/sha3/testdata/keccakKats.json.deflate (vendored): binary file not shown.
@@ -125,12 +125,12 @@ func (b *EthAPIBackend) GetTd(blockHash common.Hash) *big.Int {
 	return b.eth.blockchain.GetTdByHash(blockHash)
 }

-func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmCfg vm.Config) (*vm.EVM, func() error, error) {
+func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) {
 	state.SetBalance(msg.From(), math.MaxBig256)
 	vmError := func() error { return nil }

 	context := core.NewEVMContext(msg, header, b.eth.BlockChain(), nil)
-	return vm.NewEVM(context, state, b.eth.chainConfig, vmCfg), vmError, nil
+	return vm.NewEVM(context, state, b.eth.chainConfig, *b.eth.blockchain.GetVMConfig()), vmError, nil
 }

 func (b *EthAPIBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
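Callers of GetEVM now drop the explicit vm.Config argument; the backend pulls the interpreter settings from the blockchain via GetVMConfig. A hypothetical caller-side adjustment (the call site below is illustrative, not taken from this diff):

// Before: the caller supplied its own (usually empty) config.
//   evm, vmError, err := b.GetEVM(ctx, msg, stateDB, header, vm.Config{})
// After: the blockchain's configured vm.Config is applied automatically.
//   evm, vmError, err := b.GetEVM(ctx, msg, stateDB, header)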
Some files were not shown because too many files have changed in this diff.