Bump geth to 1.8.21 (#137)

* Bump geth to 1.8.21

* Bump vendored crypto library for go-ethereum
Edvard Hübinette, 2019-01-16 10:54:01 +01:00 (committed by GitHub)
parent 909d23e176
commit 518bfbaf54
239 changed files with 7356 additions and 5058 deletions

Gopkg.lock (generated)

@ -37,7 +37,7 @@
version = "v1.7.1"
[[projects]]
digest = "1:a9c8210eb5d36a9a6e66953dc3d3cabd3afbbfb4f50baab0db1af1b723254b82"
digest = "1:3d26f660432345429f6b09595e4707ee12745547323bcd1dc91457125aefeedc"
name = "github.com/ethereum/go-ethereum"
packages = [
".",
@ -56,7 +56,6 @@
"crypto",
"crypto/ecies",
"crypto/secp256k1",
"crypto/sha3",
"ethclient",
"ethdb",
"event",
@ -75,8 +74,8 @@
"trie",
]
pruneopts = ""
revision = "24d727b6d6e2c0cde222fa12155c4a6db5caaf2e"
version = "v1.8.20"
revision = "9dc5d1a915ac0e0bd8429d6ac41df50eec91de5f"
version = "v1.8.21"
[[projects]]
digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd"
@ -416,15 +415,16 @@
[[projects]]
branch = "master"
digest = "1:61a86f0be8b466d6e3fbdabb155aaa4006137cb5e3fd3b949329d103fa0ceb0f"
digest = "1:59b49c47c11a48f1054529207f65907c014ecf5f9a7c0d9c0f1616dec7b062ed"
name = "golang.org/x/crypto"
packages = [
"pbkdf2",
"scrypt",
"sha3",
"ssh/terminal",
]
pruneopts = ""
revision = "0e37d006457bf46f9e6692014ba72ef82c33022c"
revision = "ff983b9c42bc9fbf91556e191cc8efb585c16908"
[[projects]]
branch = "master"
@ -548,7 +548,6 @@
"github.com/ethereum/go-ethereum/crypto",
"github.com/ethereum/go-ethereum/ethclient",
"github.com/ethereum/go-ethereum/ethdb",
"github.com/ethereum/go-ethereum/log",
"github.com/ethereum/go-ethereum/p2p",
"github.com/ethereum/go-ethereum/p2p/discv5",
"github.com/ethereum/go-ethereum/params",


@ -51,4 +51,4 @@
[[constraint]]
name = "github.com/ethereum/go-ethereum"
version = "1.8.20"
version = "1.8.21"


@ -2,6 +2,7 @@
# Each line is a file pattern followed by one or more owners.
accounts/usbwallet @karalabe
accounts/abi @gballet
consensus @karalabe
core/ @karalabe @holiman
eth/ @karalabe
@ -9,27 +10,4 @@ les/ @zsfelfoldi
light/ @zsfelfoldi
mobile/ @karalabe
p2p/ @fjl @zsfelfoldi
p2p/simulations @lmars
p2p/protocols @zelig
swarm/api/http @justelad
swarm/bmt @zelig
swarm/dev @lmars
swarm/fuse @jmozah @holisticode
swarm/grafana_dashboards @nonsense
swarm/metrics @nonsense @holisticode
swarm/multihash @nolash
swarm/network/bitvector @zelig @janos
swarm/network/priorityqueue @zelig @janos
swarm/network/simulations @zelig @janos
swarm/network/stream @janos @zelig @holisticode @justelad
swarm/network/stream/intervals @janos
swarm/network/stream/testing @zelig
swarm/pot @zelig
swarm/pss @nolash @zelig @nonsense
swarm/services @zelig
swarm/state @justelad
swarm/storage/encryption @zelig @nagydani
swarm/storage/mock @janos
swarm/storage/feed @nolash @jpeletier
swarm/testutil @lmars
whisper/ @gballet @gluk256


@ -1,7 +1,7 @@
# Number of days of inactivity before an Issue is closed for lack of response
daysUntilClose: 30
# Label requiring a response
responseRequiredLabel: more-information-needed
responseRequiredLabel: "need:more-information"
# Comment to post when closing an Issue for lack of response. Set to `false` to disable
closeComment: >
This issue has been automatically closed because there has been no response


@ -7,7 +7,7 @@ exemptLabels:
- pinned
- security
# Label to use when marking an issue as stale
staleLabel: stale
staleLabel: "status:inactive"
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had


@ -156,7 +156,7 @@ matrix:
git:
submodules: false # avoid cloning ethereum/tests
before_install:
- curl https://storage.googleapis.com/golang/go1.11.2.linux-amd64.tar.gz | tar -xz
- curl https://storage.googleapis.com/golang/go1.11.4.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go


@ -58,13 +58,11 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
return nil, err
}
return arguments, nil
}
method, exist := abi.Methods[name]
if !exist {
return nil, fmt.Errorf("method '%s' not found", name)
}
arguments, err := method.Inputs.Pack(args...)
if err != nil {
return nil, err
@ -82,7 +80,7 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) (err error) {
// we need to decide whether we're calling a method or an event
if method, ok := abi.Methods[name]; ok {
if len(output)%32 != 0 {
return fmt.Errorf("abi: improperly formatted output")
return fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(output), output)
}
return method.Outputs.Unpack(v, output)
} else if event, ok := abi.Events[name]; ok {


@ -22,11 +22,10 @@ import (
"fmt"
"log"
"math/big"
"reflect"
"strings"
"testing"
"reflect"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
@ -52,11 +51,14 @@ const jsondata2 = `
{ "type" : "function", "name" : "slice", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
{ "type" : "function", "name" : "slice256", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] },
{ "type" : "function", "name" : "sliceAddress", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] },
{ "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] }
{ "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] },
{ "type" : "function", "name" : "nestedArray", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint256[2][2]" }, { "name" : "b", "type" : "address[]" } ] },
{ "type" : "function", "name" : "nestedArray2", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][2]" } ] },
{ "type" : "function", "name" : "nestedSlice", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] }
]`
func TestReader(t *testing.T) {
Uint256, _ := NewType("uint256")
Uint256, _ := NewType("uint256", nil)
exp := ABI{
Methods: map[string]Method{
"balance": {
@ -177,7 +179,7 @@ func TestTestSlice(t *testing.T) {
}
func TestMethodSignature(t *testing.T) {
String, _ := NewType("string")
String, _ := NewType("string", nil)
m := Method{"foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil}
exp := "foo(string,string)"
if m.Sig() != exp {
@ -189,12 +191,31 @@ func TestMethodSignature(t *testing.T) {
t.Errorf("expected ids to match %x != %x", m.Id(), idexp)
}
uintt, _ := NewType("uint256")
uintt, _ := NewType("uint256", nil)
m = Method{"foo", false, []Argument{{"bar", uintt, false}}, nil}
exp = "foo(uint256)"
if m.Sig() != exp {
t.Error("signature mismatch", exp, "!=", m.Sig())
}
// Method with tuple arguments
s, _ := NewType("tuple", []ArgumentMarshaling{
{Name: "a", Type: "int256"},
{Name: "b", Type: "int256[]"},
{Name: "c", Type: "tuple[]", Components: []ArgumentMarshaling{
{Name: "x", Type: "int256"},
{Name: "y", Type: "int256"},
}},
{Name: "d", Type: "tuple[2]", Components: []ArgumentMarshaling{
{Name: "x", Type: "int256"},
{Name: "y", Type: "int256"},
}},
})
m = Method{"foo", false, []Argument{{"s", s, false}, {"bar", String, false}}, nil}
exp = "foo((int256,int256[],(int256,int256)[],(int256,int256)[2]),string)"
if m.Sig() != exp {
t.Error("signature mismatch", exp, "!=", m.Sig())
}
}
func TestMultiPack(t *testing.T) {
@ -564,11 +585,13 @@ func TestBareEvents(t *testing.T) {
const definition = `[
{ "type" : "event", "name" : "balance" },
{ "type" : "event", "name" : "anon", "anonymous" : true},
{ "type" : "event", "name" : "args", "inputs" : [{ "indexed":false, "name":"arg0", "type":"uint256" }, { "indexed":true, "name":"arg1", "type":"address" }] }
{ "type" : "event", "name" : "args", "inputs" : [{ "indexed":false, "name":"arg0", "type":"uint256" }, { "indexed":true, "name":"arg1", "type":"address" }] },
{ "type" : "event", "name" : "tuple", "inputs" : [{ "indexed":false, "name":"t", "type":"tuple", "components":[{"name":"a", "type":"uint256"}] }, { "indexed":true, "name":"arg1", "type":"address" }] }
]`
arg0, _ := NewType("uint256")
arg1, _ := NewType("address")
arg0, _ := NewType("uint256", nil)
arg1, _ := NewType("address", nil)
tuple, _ := NewType("tuple", []ArgumentMarshaling{{Name: "a", Type: "uint256"}})
expectedEvents := map[string]struct {
Anonymous bool
@ -580,6 +603,10 @@ func TestBareEvents(t *testing.T) {
{Name: "arg0", Type: arg0, Indexed: false},
{Name: "arg1", Type: arg1, Indexed: true},
}},
"tuple": {false, []Argument{
{Name: "t", Type: tuple, Indexed: false},
{Name: "arg1", Type: arg1, Indexed: true},
}},
}
abi, err := JSON(strings.NewReader(definition))
@ -646,7 +673,7 @@ func TestUnpackEvent(t *testing.T) {
}
type ReceivedEvent struct {
Address common.Address
Sender common.Address
Amount *big.Int
Memo []byte
}
@ -655,19 +682,15 @@ func TestUnpackEvent(t *testing.T) {
err = abi.Unpack(&ev, "received", data)
if err != nil {
t.Error(err)
} else {
t.Logf("len(data): %d; received event: %+v", len(data), ev)
}
type ReceivedAddrEvent struct {
Address common.Address
Sender common.Address
}
var receivedAddrEv ReceivedAddrEvent
err = abi.Unpack(&receivedAddrEv, "receivedAddr", data)
if err != nil {
t.Error(err)
} else {
t.Logf("len(data): %d; received event: %+v", len(data), receivedAddrEv)
}
}


@ -33,24 +33,27 @@ type Argument struct {
type Arguments []Argument
// UnmarshalJSON implements json.Unmarshaler interface
func (argument *Argument) UnmarshalJSON(data []byte) error {
var extarg struct {
type ArgumentMarshaling struct {
Name string
Type string
Components []ArgumentMarshaling
Indexed bool
}
err := json.Unmarshal(data, &extarg)
}
// UnmarshalJSON implements json.Unmarshaler interface
func (argument *Argument) UnmarshalJSON(data []byte) error {
var arg ArgumentMarshaling
err := json.Unmarshal(data, &arg)
if err != nil {
return fmt.Errorf("argument json err: %v", err)
}
argument.Type, err = NewType(extarg.Type)
argument.Type, err = NewType(arg.Type, arg.Components)
if err != nil {
return err
}
argument.Name = extarg.Name
argument.Indexed = extarg.Indexed
argument.Name = arg.Name
argument.Indexed = arg.Indexed
return nil
}
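Side note: because "components" is now decoded into ArgumentMarshaling and forwarded to NewType, tuple parameters declared in contract JSON resolve to real types end to end. A minimal sketch of the effect; the setPoint ABI fragment is illustrative only, and the flattened signature follows the tuple cases added to TestMethodSignature above:

package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

// An illustrative ABI fragment with a tuple ("struct") input.
const def = `[{"type":"function","name":"setPoint","inputs":[
	{"name":"p","type":"tuple","components":[
		{"name":"x","type":"int256"},
		{"name":"y","type":"int256"}]}]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// Tuples are flattened to their component types in the canonical signature.
	fmt.Println(parsed.Methods["setPoint"].Sig()) // setPoint((int256,int256))
}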
@ -85,7 +88,6 @@ func (arguments Arguments) isTuple() bool {
// Unpack performs the operation hexdata -> Go format
func (arguments Arguments) Unpack(v interface{}, data []byte) error {
// make sure the passed value is arguments pointer
if reflect.Ptr != reflect.ValueOf(v).Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
@ -97,52 +99,134 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
if arguments.isTuple() {
return arguments.unpackTuple(v, marshalledValues)
}
return arguments.unpackAtomic(v, marshalledValues)
return arguments.unpackAtomic(v, marshalledValues[0])
}
func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
// unpack sets the unmarshalled value to go format.
// Note the dst here must be settable.
func unpack(t *Type, dst interface{}, src interface{}) error {
var (
dstVal = reflect.ValueOf(dst).Elem()
srcVal = reflect.ValueOf(src)
)
if t.T != TupleTy && !((t.T == SliceTy || t.T == ArrayTy) && t.Elem.T == TupleTy) {
return set(dstVal, srcVal)
}
switch t.T {
case TupleTy:
if dstVal.Kind() != reflect.Struct {
return fmt.Errorf("abi: invalid dst value for unpack, want struct, got %s", dstVal.Kind())
}
fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, dstVal)
if err != nil {
return err
}
for i, elem := range t.TupleElems {
fname := fieldmap[t.TupleRawNames[i]]
field := dstVal.FieldByName(fname)
if !field.IsValid() {
return fmt.Errorf("abi: field %s can't found in the given value", t.TupleRawNames[i])
}
if err := unpack(elem, field.Addr().Interface(), srcVal.Field(i).Interface()); err != nil {
return err
}
}
return nil
case SliceTy:
if dstVal.Kind() != reflect.Slice {
return fmt.Errorf("abi: invalid dst value for unpack, want slice, got %s", dstVal.Kind())
}
slice := reflect.MakeSlice(dstVal.Type(), srcVal.Len(), srcVal.Len())
for i := 0; i < slice.Len(); i++ {
if err := unpack(t.Elem, slice.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil {
return err
}
}
dstVal.Set(slice)
case ArrayTy:
if dstVal.Kind() != reflect.Array {
return fmt.Errorf("abi: invalid dst value for unpack, want array, got %s", dstVal.Kind())
}
array := reflect.New(dstVal.Type()).Elem()
for i := 0; i < array.Len(); i++ {
if err := unpack(t.Elem, array.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil {
return err
}
}
dstVal.Set(array)
}
return nil
}
// unpackAtomic unpacks ( hexdata -> go ) a single value
func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues interface{}) error {
if arguments.LengthNonIndexed() == 0 {
return nil
}
argument := arguments.NonIndexed()[0]
elem := reflect.ValueOf(v).Elem()
if elem.Kind() == reflect.Struct {
fieldmap, err := mapArgNamesToStructFields([]string{argument.Name}, elem)
if err != nil {
return err
}
field := elem.FieldByName(fieldmap[argument.Name])
if !field.IsValid() {
return fmt.Errorf("abi: field %s can't be found in the given value", argument.Name)
}
return unpack(&argument.Type, field.Addr().Interface(), marshalledValues)
}
return unpack(&argument.Type, elem.Addr().Interface(), marshalledValues)
}
// unpackTuple unpacks ( hexdata -> go ) a batch of values.
func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
var (
value = reflect.ValueOf(v).Elem()
typ = value.Type()
kind = value.Kind()
)
if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
return err
}
// If the interface is a struct, get of abi->struct_field mapping
var abi2struct map[string]string
if kind == reflect.Struct {
var err error
abi2struct, err = mapAbiToStructFields(arguments, value)
var (
argNames []string
err error
)
for _, arg := range arguments.NonIndexed() {
argNames = append(argNames, arg.Name)
}
abi2struct, err = mapArgNamesToStructFields(argNames, value)
if err != nil {
return err
}
}
for i, arg := range arguments.NonIndexed() {
reflectValue := reflect.ValueOf(marshalledValues[i])
switch kind {
case reflect.Struct:
if structField, ok := abi2struct[arg.Name]; ok {
if err := set(value.FieldByName(structField), reflectValue, arg); err != nil {
return err
field := value.FieldByName(abi2struct[arg.Name])
if !field.IsValid() {
return fmt.Errorf("abi: field %s can't be found in the given value", arg.Name)
}
if err := unpack(&arg.Type, field.Addr().Interface(), marshalledValues[i]); err != nil {
return err
}
case reflect.Slice, reflect.Array:
if value.Len() < i {
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(arguments), value.Len())
}
v := value.Index(i)
if err := requireAssignable(v, reflectValue); err != nil {
if err := requireAssignable(v, reflect.ValueOf(marshalledValues[i])); err != nil {
return err
}
if err := set(v.Elem(), reflectValue, arg); err != nil {
if err := unpack(&arg.Type, v.Addr().Interface(), marshalledValues[i]); err != nil {
return err
}
default:
@ -150,48 +234,7 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa
}
}
return nil
}
// unpackAtomic unpacks ( hexdata -> go ) a single value
func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues []interface{}) error {
if len(marshalledValues) != 1 {
return fmt.Errorf("abi: wrong length, expected single value, got %d", len(marshalledValues))
}
elem := reflect.ValueOf(v).Elem()
kind := elem.Kind()
reflectValue := reflect.ValueOf(marshalledValues[0])
var abi2struct map[string]string
if kind == reflect.Struct {
var err error
if abi2struct, err = mapAbiToStructFields(arguments, elem); err != nil {
return err
}
arg := arguments.NonIndexed()[0]
if structField, ok := abi2struct[arg.Name]; ok {
return set(elem.FieldByName(structField), reflectValue, arg)
}
return nil
}
return set(elem, reflectValue, arguments.NonIndexed()[0])
}
// Computes the full size of an array;
// i.e. counting nested arrays, which count towards size for unpacking.
func getArraySize(arr *Type) int {
size := arr.Size
// Arrays can be nested, with each element being the same size
arr = arr.Elem
for arr.T == ArrayTy {
// Keep multiplying by elem.Size while the elem is an array.
size *= arr.Size
arr = arr.Elem
}
// Now we have the full array size, including its children.
return size
}
// UnpackValues can be used to unpack ABI-encoded hexdata according to the ABI-specification,
@ -202,7 +245,7 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
virtualArgs := 0
for index, arg := range arguments.NonIndexed() {
marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
if arg.Type.T == ArrayTy {
if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) {
// If we have a static array, like [3]uint256, these are coded as
// just like uint256,uint256,uint256.
// This means that we need to add two 'virtual' arguments when
@ -213,7 +256,11 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
//
// Calculate the full array size to get the correct offset for the next argument.
// Decrement it by 1, as the normal index increment is still applied.
virtualArgs += getArraySize(&arg.Type) - 1
virtualArgs += getTypeSize(arg.Type)/32 - 1
} else if arg.Type.T == TupleTy && !isDynamicType(arg.Type) {
// If we have a static tuple, like (uint256, bool, uint256), these are
// coded as just like uint256,bool,uint256
virtualArgs += getTypeSize(arg.Type)/32 - 1
}
if err != nil {
return nil, err
@ -243,7 +290,7 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
// input offset is the bytes offset for packed output
inputOffset := 0
for _, abiArg := range abiArgs {
inputOffset += getDynamicTypeOffset(abiArg.Type)
inputOffset += getTypeSize(abiArg.Type)
}
var ret []byte
for i, a := range args {
@ -272,14 +319,13 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
return ret, nil
}
// capitalise makes the first character of a string upper case, also removing any
// prefixing underscores from the variable names.
func capitalise(input string) string {
for len(input) > 0 && input[0] == '_' {
input = input[1:]
// ToCamelCase converts an under-score string to a camel-case string
func ToCamelCase(input string) string {
parts := strings.Split(input, "_")
for i, s := range parts {
if len(s) > 0 {
parts[i] = strings.ToUpper(s[:1]) + s[1:]
}
if len(input) == 0 {
return ""
}
return strings.ToUpper(input[:1]) + input[1:]
return strings.Join(parts, "")
}
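For orientation, a minimal round-trip sketch of the reworked tuple path, assuming the Pack/Unpack behaviour exercised by the tests in this change; the Point type and the argument name "p" are purely illustrative:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

// Point mirrors a Solidity tuple (int256 x, int256 y); exported field names map
// onto the component names by capitalisation, as mapArgNamesToStructFields does.
type Point struct {
	X *big.Int
	Y *big.Int
}

func main() {
	// Build the tuple type with the new two-argument NewType.
	pointTy, err := abi.NewType("tuple", []abi.ArgumentMarshaling{
		{Name: "x", Type: "int256"},
		{Name: "y", Type: "int256"},
	})
	if err != nil {
		panic(err)
	}
	args := abi.Arguments{{Name: "p", Type: pointTy}}

	// Pack a Go struct, then unpack the encoded bytes back into a wrapper struct.
	enc, err := args.Pack(Point{X: big.NewInt(1), Y: big.NewInt(2)})
	if err != nil {
		panic(err)
	}
	var out struct{ P Point }
	if err := args.Unpack(&out, enc); err != nil {
		panic(err)
	}
	fmt.Println(out.P.X, out.P.Y) // 1 2
}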


@ -38,7 +38,7 @@ type SignerFn func(types.Signer, common.Address, *types.Transaction) (*types.Tra
type CallOpts struct {
Pending bool // Whether to operate on the pending state or the last known one
From common.Address // Optional the sender address, otherwise the first account is used
BlockNumber *big.Int // Optional the block number on which the call should be performed
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}
@ -148,10 +148,10 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string,
}
}
} else {
output, err = c.caller.CallContract(ctx, msg, nil)
output, err = c.caller.CallContract(ctx, msg, opts.BlockNumber)
if err == nil && len(output) == 0 {
// Make sure we have a contract to operate on, and bail out otherwise.
if code, err = c.caller.CodeAt(ctx, c.address, nil); err != nil {
if code, err = c.caller.CodeAt(ctx, c.address, opts.BlockNumber); err != nil {
return err
} else if len(code) == 0 {
return ErrNoCode


@ -0,0 +1,64 @@
package bind_test
import (
"context"
"math/big"
"testing"
ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
)
type mockCaller struct {
codeAtBlockNumber *big.Int
callContractBlockNumber *big.Int
}
func (mc *mockCaller) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
mc.codeAtBlockNumber = blockNumber
return []byte{1, 2, 3}, nil
}
func (mc *mockCaller) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
mc.callContractBlockNumber = blockNumber
return nil, nil
}
func TestPassingBlockNumber(t *testing.T) {
mc := &mockCaller{}
bc := bind.NewBoundContract(common.HexToAddress("0x0"), abi.ABI{
Methods: map[string]abi.Method{
"something": {
Name: "something",
Outputs: abi.Arguments{},
},
},
}, mc, nil, nil)
var ret string
blockNumber := big.NewInt(42)
bc.Call(&bind.CallOpts{BlockNumber: blockNumber}, &ret, "something")
if mc.callContractBlockNumber != blockNumber {
t.Fatalf("CallContract() was not passed the block number")
}
if mc.codeAtBlockNumber != blockNumber {
t.Fatalf("CodeAt() was not passed the block number")
}
bc.Call(&bind.CallOpts{}, &ret, "something")
if mc.callContractBlockNumber != nil {
t.Fatalf("CallContract() was passed a block number when it should not have been")
}
if mc.codeAtBlockNumber != nil {
t.Fatalf("CodeAt() was passed a block number when it should not have been")
}
}
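Beyond the unit test above, the same option can be exercised against a live backend; a hedged sketch, assuming an ethclient.Client (which satisfies the caller interfaces) and an illustrative ERC20-style balanceOf ABI — the endpoint and addresses are placeholders:

package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

const erc20BalanceOf = `[{"constant":true,"type":"function","name":"balanceOf",
	"inputs":[{"name":"owner","type":"address"}],
	"outputs":[{"name":"balance","type":"uint256"}]}]`

func main() {
	client, err := ethclient.Dial("https://example-node.invalid") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	parsed, err := abi.JSON(strings.NewReader(erc20BalanceOf))
	if err != nil {
		panic(err)
	}
	token := bind.NewBoundContract(common.HexToAddress("0x1"), parsed, client, client, client)

	// Query the balance as of a specific historical block instead of "latest".
	var balance *big.Int
	err = token.Call(
		&bind.CallOpts{BlockNumber: big.NewInt(6000000)},
		&balance, "balanceOf", common.HexToAddress("0x2"),
	)
	fmt.Println(balance, err)
}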


@ -381,54 +381,23 @@ func namedTypeJava(javaKind string, solKind abi.Type) string {
// methodNormalizer is a name transformer that modifies Solidity method names to
// conform to target language naming concentions.
var methodNormalizer = map[Lang]func(string) string{
LangGo: capitalise,
LangGo: abi.ToCamelCase,
LangJava: decapitalise,
}
// capitalise makes a camel-case string which starts with an upper case character.
func capitalise(input string) string {
for len(input) > 0 && input[0] == '_' {
input = input[1:]
}
if len(input) == 0 {
return ""
}
return toCamelCase(strings.ToUpper(input[:1]) + input[1:])
return abi.ToCamelCase(input)
}
// decapitalise makes a camel-case string which starts with a lower case character.
func decapitalise(input string) string {
for len(input) > 0 && input[0] == '_' {
input = input[1:]
}
if len(input) == 0 {
return ""
return input
}
return toCamelCase(strings.ToLower(input[:1]) + input[1:])
}
// toCamelCase converts an under-score string to a camel-case string
func toCamelCase(input string) string {
toupper := false
result := ""
for k, v := range input {
switch {
case k == 0:
result = strings.ToUpper(string(input[0]))
case toupper:
result += strings.ToUpper(string(v))
toupper = false
case v == '_':
toupper = true
default:
result += string(v)
}
}
return result
goForm := abi.ToCamelCase(input)
return strings.ToLower(goForm[:1]) + goForm[1:]
}
// structured checks whether a list of ABI data types has enough information to
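Both normalizers now route through the exported abi.ToCamelCase; a quick sketch of its behaviour (the inputs are illustrative), with decapitalise then lower-casing the first character for the Java bindings:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Underscore-separated segments are each capitalised and joined.
	fmt.Println(abi.ToCamelCase("claim_reward")) // ClaimReward
	// Leading underscores drop out, matching the old capitalise behaviour.
	fmt.Println(abi.ToCamelCase("_totalSupply")) // TotalSupply
	// For LangJava, decapitalise("claim_reward") would yield "claimReward".
}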


@ -36,12 +36,12 @@ type Event struct {
func (e Event) String() string {
inputs := make([]string, len(e.Inputs))
for i, input := range e.Inputs {
inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
if input.Indexed {
inputs[i] = fmt.Sprintf("%v indexed %v", input.Name, input.Type)
inputs[i] = fmt.Sprintf("%v indexed %v", input.Type, input.Name)
}
}
return fmt.Sprintf("e %v(%v)", e.Name, strings.Join(inputs, ", "))
return fmt.Sprintf("event %v(%v)", e.Name, strings.Join(inputs, ", "))
}
// Id returns the canonical representation of the event's signature used by the


@ -87,12 +87,12 @@ func TestEventId(t *testing.T) {
}{
{
definition: `[
{ "type" : "event", "name" : "balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
{ "type" : "event", "name" : "check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
{ "type" : "event", "name" : "Balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
{ "type" : "event", "name" : "Check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
]`,
expectations: map[string]common.Hash{
"balance": crypto.Keccak256Hash([]byte("balance(uint256)")),
"check": crypto.Keccak256Hash([]byte("check(address,uint256)")),
"Balance": crypto.Keccak256Hash([]byte("Balance(uint256)")),
"Check": crypto.Keccak256Hash([]byte("Check(address,uint256)")),
},
},
}
@ -111,6 +111,39 @@ func TestEventId(t *testing.T) {
}
}
func TestEventString(t *testing.T) {
var table = []struct {
definition string
expectations map[string]string
}{
{
definition: `[
{ "type" : "event", "name" : "Balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
{ "type" : "event", "name" : "Check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] },
{ "type" : "event", "name" : "Transfer", "inputs": [{ "name": "from", "type": "address", "indexed": true }, { "name": "to", "type": "address", "indexed": true }, { "name": "value", "type": "uint256" }] }
]`,
expectations: map[string]string{
"Balance": "event Balance(uint256 in)",
"Check": "event Check(address t, uint256 b)",
"Transfer": "event Transfer(address indexed from, address indexed to, uint256 value)",
},
},
}
for _, test := range table {
abi, err := JSON(strings.NewReader(test.definition))
if err != nil {
t.Fatal(err)
}
for name, event := range abi.Events {
if event.String() != test.expectations[name] {
t.Errorf("expected string to be %s, got %s", test.expectations[name], event.String())
}
}
}
}
// TestEventMultiValueWithArrayUnpack verifies that array fields will be counted after parsing array.
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`


@ -56,14 +56,14 @@ func (method Method) Sig() string {
func (method Method) String() string {
inputs := make([]string, len(method.Inputs))
for i, input := range method.Inputs {
inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
}
outputs := make([]string, len(method.Outputs))
for i, output := range method.Outputs {
outputs[i] = output.Type.String()
if len(output.Name) > 0 {
outputs[i] = fmt.Sprintf("%v ", output.Name)
outputs[i] += fmt.Sprintf(" %v", output.Name)
}
outputs[i] += output.Type.String()
}
constant := ""
if method.Const {


@ -0,0 +1,61 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"strings"
"testing"
)
const methoddata = `
[
{ "type" : "function", "name" : "balance", "constant" : true },
{ "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
{ "type" : "function", "name" : "transfer", "constant" : false, "inputs" : [ { "name" : "from", "type" : "address" }, { "name" : "to", "type" : "address" }, { "name" : "value", "type" : "uint256" } ], "outputs" : [ { "name" : "success", "type" : "bool" } ] }
]`
func TestMethodString(t *testing.T) {
var table = []struct {
method string
expectation string
}{
{
method: "balance",
expectation: "function balance() constant returns()",
},
{
method: "send",
expectation: "function send(uint256 amount) returns()",
},
{
method: "transfer",
expectation: "function transfer(address from, address to, uint256 value) returns(bool success)",
},
}
abi, err := JSON(strings.NewReader(methoddata))
if err != nil {
t.Fatal(err)
}
for _, test := range table {
got := abi.Methods[test.method].String()
if got != test.expectation {
t.Errorf("expected string to be %s, got %s", test.expectation, got)
}
}
}


@ -30,302 +30,355 @@ import (
func TestPack(t *testing.T) {
for i, test := range []struct {
typ string
components []ArgumentMarshaling
input interface{}
output []byte
}{
{
"uint8",
nil,
uint8(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint8[]",
nil,
[]uint8{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint16",
nil,
uint16(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint16[]",
nil,
[]uint16{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint32",
nil,
uint32(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint32[]",
nil,
[]uint32{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint64",
nil,
uint64(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint64[]",
nil,
[]uint64{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint256",
nil,
big.NewInt(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint256[]",
nil,
[]*big.Int{big.NewInt(1), big.NewInt(2)},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int8",
nil,
int8(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int8[]",
nil,
[]int8{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int16",
nil,
int16(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int16[]",
nil,
[]int16{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int32",
nil,
int32(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int32[]",
nil,
[]int32{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int64",
nil,
int64(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int64[]",
nil,
[]int64{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int256",
nil,
big.NewInt(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int256[]",
nil,
[]*big.Int{big.NewInt(1), big.NewInt(2)},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"bytes1",
nil,
[1]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes2",
nil,
[2]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes3",
nil,
[3]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes4",
nil,
[4]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes5",
nil,
[5]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes6",
nil,
[6]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes7",
nil,
[7]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes8",
nil,
[8]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes9",
nil,
[9]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes10",
nil,
[10]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes11",
nil,
[11]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes12",
nil,
[12]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes13",
nil,
[13]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes14",
nil,
[14]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes15",
nil,
[15]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes16",
nil,
[16]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes17",
nil,
[17]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes18",
nil,
[18]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes19",
nil,
[19]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes20",
nil,
[20]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes21",
nil,
[21]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes22",
nil,
[22]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes23",
nil,
[23]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes24",
[24]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes24",
nil,
[24]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes25",
nil,
[25]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes26",
nil,
[26]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes27",
nil,
[27]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes28",
nil,
[28]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes29",
nil,
[29]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes30",
nil,
[30]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes31",
nil,
[31]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes32",
nil,
[32]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"uint32[2][3][4]",
nil,
[4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018"),
},
{
"address[]",
nil,
[]common.Address{{1}, {2}},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000"),
},
{
"bytes32[]",
nil,
[]common.Hash{{1}, {2}},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000201000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000"),
},
{
"function",
nil,
[24]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"string",
nil,
"foobar",
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"),
},
{
"string[]",
nil,
[]string{"hello", "foobar"},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
@ -337,6 +390,7 @@ func TestPack(t *testing.T) {
},
{
"string[2]",
nil,
[]string{"hello", "foobar"},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0
"0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1
@ -347,6 +401,7 @@ func TestPack(t *testing.T) {
},
{
"bytes32[][]",
nil,
[][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
@ -362,6 +417,7 @@ func TestPack(t *testing.T) {
{
"bytes32[][2]",
nil,
[][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
@ -376,6 +432,7 @@ func TestPack(t *testing.T) {
{
"bytes32[3][2]",
nil,
[][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
@ -384,12 +441,182 @@ func TestPack(t *testing.T) {
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
},
{
// static tuple
"tuple",
[]ArgumentMarshaling{
{Name: "a", Type: "int64"},
{Name: "b", Type: "int256"},
{Name: "c", Type: "int256"},
{Name: "d", Type: "bool"},
{Name: "e", Type: "bytes32[3][2]"},
},
struct {
A int64
B *big.Int
C *big.Int
D bool
E [][]common.Hash
}{1, big.NewInt(1), big.NewInt(-1), true, [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}}},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001" + // struct[a]
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c]
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[d]
"0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0]
"0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1]
"0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][2]
"0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][0]
"0400000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][1]
"0500000000000000000000000000000000000000000000000000000000000000"), // struct[e] array[1][2]
},
{
// dynamic tuple
"tuple",
[]ArgumentMarshaling{
{Name: "a", Type: "string"},
{Name: "b", Type: "int64"},
{Name: "c", Type: "bytes"},
{Name: "d", Type: "string[]"},
{Name: "e", Type: "int256[]"},
{Name: "f", Type: "address[]"},
},
struct {
FieldA string `abi:"a"` // Test whether abi tag works
FieldB int64 `abi:"b"`
C []byte
D []string
E []*big.Int
F []common.Address
}{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}},
common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
"0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset
"0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset
"0000000000000000000000000000000000000000000000000000000000000220" + // struct[e] offset
"0000000000000000000000000000000000000000000000000000000000000280" + // struct[f] offset
"0000000000000000000000000000000000000000000000000000000000000006" + // struct[a] length
"666f6f6261720000000000000000000000000000000000000000000000000000" + // struct[a] "foobar"
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[c] length
"0100000000000000000000000000000000000000000000000000000000000000" + // []byte{1}
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[d] length
"0000000000000000000000000000000000000000000000000000000000000040" + // foo offset
"0000000000000000000000000000000000000000000000000000000000000080" + // bar offset
"0000000000000000000000000000000000000000000000000000000000000003" + // foo length
"666f6f0000000000000000000000000000000000000000000000000000000000" + // foo
"0000000000000000000000000000000000000000000000000000000000000003" + // bar offset
"6261720000000000000000000000000000000000000000000000000000000000" + // bar
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[e] length
"0000000000000000000000000000000000000000000000000000000000000001" + // 1
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // -1
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[f] length
"0000000000000000000000000100000000000000000000000000000000000000" + // common.Address{1}
"0000000000000000000000000200000000000000000000000000000000000000"), // common.Address{2}
},
{
// nested tuple
"tuple",
[]ArgumentMarshaling{
{Name: "a", Type: "tuple", Components: []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256[]"}}},
{Name: "b", Type: "int256[]"},
},
struct {
A struct {
FieldA *big.Int `abi:"a"`
B []*big.Int
}
B []*big.Int
}{
A: struct {
FieldA *big.Int `abi:"a"` // Test whether abi tag works for nested tuple
B []*big.Int
}{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
B: []*big.Int{big.NewInt(1), big.NewInt(0)}},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // a offset
"00000000000000000000000000000000000000000000000000000000000000e0" + // b offset
"0000000000000000000000000000000000000000000000000000000000000001" + // a.a value
"0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset
"0000000000000000000000000000000000000000000000000000000000000002" + // a.b length
"0000000000000000000000000000000000000000000000000000000000000001" + // a.b[0] value
"0000000000000000000000000000000000000000000000000000000000000000" + // a.b[1] value
"0000000000000000000000000000000000000000000000000000000000000002" + // b length
"0000000000000000000000000000000000000000000000000000000000000001" + // b[0] value
"0000000000000000000000000000000000000000000000000000000000000000"), // b[1] value
},
{
// tuple slice
"tuple[]",
[]ArgumentMarshaling{
{Name: "a", Type: "int256"},
{Name: "b", Type: "int256[]"},
},
[]struct {
A *big.Int
B []*big.Int
}{
{big.NewInt(-1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
{big.NewInt(1), []*big.Int{big.NewInt(2), big.NewInt(-1)}},
},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // tuple length
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
"00000000000000000000000000000000000000000000000000000000000000e0" + // tuple[1] offset
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0].B offset
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].B length
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].B[0] value
"0000000000000000000000000000000000000000000000000000000000000000" + // tuple[0].B[1] value
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[1].B offset
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B length
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B[0] value
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].B[1] value
},
{
// static tuple array
"tuple[2]",
[]ArgumentMarshaling{
{Name: "a", Type: "int256"},
{Name: "b", Type: "int256"},
},
[2]struct {
A *big.Int
B *big.Int
}{
{big.NewInt(-1), big.NewInt(1)},
{big.NewInt(1), big.NewInt(-1)},
},
common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].a
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].b
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].a
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].b
},
{
// dynamic tuple array
"tuple[2]",
[]ArgumentMarshaling{
{Name: "a", Type: "int256[]"},
},
[2]struct {
A []*big.Int
}{
{[]*big.Int{big.NewInt(-1), big.NewInt(1)}},
{[]*big.Int{big.NewInt(1), big.NewInt(-1)}},
},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
"00000000000000000000000000000000000000000000000000000000000000c0" + // tuple[1] offset
"0000000000000000000000000000000000000000000000000000000000000020" + // tuple[0].A offset
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].A length
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A[0]
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].A[1]
"0000000000000000000000000000000000000000000000000000000000000020" + // tuple[1].A offset
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].A length
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A[0]
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].A[1]
},
} {
typ, err := NewType(test.typ)
typ, err := NewType(test.typ, test.components)
if err != nil {
t.Fatalf("%v failed. Unexpected parse error: %v", i, err)
}
output, err := typ.pack(reflect.ValueOf(test.input))
if err != nil {
t.Fatalf("%v failed. Unexpected pack error: %v", i, err)
@ -466,6 +693,59 @@ func TestMethodPack(t *testing.T) {
if !bytes.Equal(packed, sig) {
t.Errorf("expected %x got %x", sig, packed)
}
a := [2][2]*big.Int{{big.NewInt(1), big.NewInt(1)}, {big.NewInt(2), big.NewInt(0)}}
sig = abi.Methods["nestedArray"].Id()
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0xa0}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
sig = append(sig, common.LeftPadBytes(addrC[:], 32)...)
sig = append(sig, common.LeftPadBytes(addrD[:], 32)...)
packed, err = abi.Pack("nestedArray", a, []common.Address{addrC, addrD})
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(packed, sig) {
t.Errorf("expected %x got %x", sig, packed)
}
sig = abi.Methods["nestedArray2"].Id()
sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x80}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
packed, err = abi.Pack("nestedArray2", [2][]uint8{{1}, {1}})
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(packed, sig) {
t.Errorf("expected %x got %x", sig, packed)
}
sig = abi.Methods["nestedSlice"].Id()
sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x02}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0xa0}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
packed, err = abi.Pack("nestedSlice", [][]uint8{{1, 2}, {1, 2}})
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(packed, sig) {
t.Errorf("expected %x got %x", sig, packed)
}
}
func TestPackNumber(t *testing.T) {


@ -71,22 +71,36 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
//
// set is a bit more lenient when it comes to assignment and doesn't force an as
// strict ruleset as bare `reflect` does.
func set(dst, src reflect.Value, output Argument) error {
dstType := dst.Type()
srcType := src.Type()
func set(dst, src reflect.Value) error {
dstType, srcType := dst.Type(), src.Type()
switch {
case dstType.AssignableTo(srcType):
dst.Set(src)
case dstType.Kind() == reflect.Interface:
return set(dst.Elem(), src)
case dstType.Kind() == reflect.Ptr && dstType.Elem() != derefbigT:
return set(dst.Elem(), src)
case srcType.AssignableTo(dstType) && dst.CanSet():
dst.Set(src)
case dstType.Kind() == reflect.Ptr:
return set(dst.Elem(), src, output)
case dstType.Kind() == reflect.Slice && srcType.Kind() == reflect.Slice:
return setSlice(dst, src)
default:
return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
}
return nil
}
// setSlice attempts to assign src to dst when slices are not assignable by default
// e.g. src: [][]byte -> dst: [][15]byte
func setSlice(dst, src reflect.Value) error {
slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len())
for i := 0; i < src.Len(); i++ {
v := src.Index(i)
reflect.Copy(slice.Index(i), v)
}
dst.Set(slice)
return nil
}
// requireAssignable assures that `dest` is a pointer and it's not an interface.
func requireAssignable(dst, src reflect.Value) error {
if dst.Kind() != reflect.Ptr && dst.Kind() != reflect.Interface {
@ -112,14 +126,14 @@ func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
return nil
}
// mapAbiToStringField maps abi to struct fields.
// mapArgNamesToStructFields maps a slice of argument names to struct fields.
// first round: for each Exportable field that contains a `abi:""` tag
// and this field name exists in the arguments, pair them together.
// second round: for each argument field that has not been already linked,
// and this field name exists in the given argument name list, pair them together.
// second round: for each argument name that has not been already linked,
// find what variable is expected to be mapped into, if it exists and has not been
// used, pair them.
func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]string, error) {
// Note this function assumes the given value is a struct value.
func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) {
typ := value.Type()
abi2struct := make(map[string]string)
@ -133,45 +147,39 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
if structFieldName[:1] != strings.ToUpper(structFieldName[:1]) {
continue
}
// skip fields that have no abi:"" tag.
var ok bool
var tagName string
if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok {
continue
}
// check if tag is empty.
if tagName == "" {
return nil, fmt.Errorf("struct: abi tag in '%s' is empty", structFieldName)
}
// check which argument field matches with the abi tag.
found := false
for _, abiField := range args.NonIndexed() {
if abiField.Name == tagName {
if abi2struct[abiField.Name] != "" {
for _, arg := range argNames {
if arg == tagName {
if abi2struct[arg] != "" {
return nil, fmt.Errorf("struct: abi tag in '%s' already mapped", structFieldName)
}
// pair them
abi2struct[abiField.Name] = structFieldName
struct2abi[structFieldName] = abiField.Name
abi2struct[arg] = structFieldName
struct2abi[structFieldName] = arg
found = true
}
}
// check if this tag has been mapped.
if !found {
return nil, fmt.Errorf("struct: abi tag '%s' defined but not found in abi", tagName)
}
}
// second round ~~~
for _, arg := range args {
for _, argName := range argNames {
abiFieldName := arg.Name
structFieldName := capitalise(abiFieldName)
structFieldName := ToCamelCase(argName)
if structFieldName == "" {
return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
@ -181,11 +189,11 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
// struct field with the same field name. If so, raise an error:
// abi: [ { "name": "value" } ]
// struct { Value *big.Int , Value1 *big.Int `abi:"value"`}
if abi2struct[abiFieldName] != "" {
if abi2struct[abiFieldName] != structFieldName &&
if abi2struct[argName] != "" {
if abi2struct[argName] != structFieldName &&
struct2abi[structFieldName] == "" &&
value.FieldByName(structFieldName).IsValid() {
return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", abiFieldName)
return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", argName)
}
continue
}
@ -197,16 +205,14 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
if value.FieldByName(structFieldName).IsValid() {
// pair them
abi2struct[abiFieldName] = structFieldName
struct2abi[structFieldName] = abiFieldName
abi2struct[argName] = structFieldName
struct2abi[structFieldName] = argName
} else {
// not paired, but annotate as used, to detect cases like
// abi : [ { "name": "value" }, { "name": "_value" } ]
// struct { Value *big.Int }
struct2abi[structFieldName] = abiFieldName
struct2abi[structFieldName] = argName
}
}
return abi2struct, nil
}


@ -0,0 +1,191 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"reflect"
"testing"
)
type reflectTest struct {
name string
args []string
struc interface{}
want map[string]string
err string
}
var reflectTests = []reflectTest{
{
name: "OneToOneCorrespondance",
args: []string{"fieldA"},
struc: struct {
FieldA int `abi:"fieldA"`
}{},
want: map[string]string{
"fieldA": "FieldA",
},
},
{
name: "MissingFieldsInStruct",
args: []string{"fieldA", "fieldB"},
struc: struct {
FieldA int `abi:"fieldA"`
}{},
want: map[string]string{
"fieldA": "FieldA",
},
},
{
name: "MoreFieldsInStructThanArgs",
args: []string{"fieldA"},
struc: struct {
FieldA int `abi:"fieldA"`
FieldB int
}{},
want: map[string]string{
"fieldA": "FieldA",
},
},
{
name: "MissingFieldInArgs",
args: []string{"fieldA"},
struc: struct {
FieldA int `abi:"fieldA"`
FieldB int `abi:"fieldB"`
}{},
err: "struct: abi tag 'fieldB' defined but not found in abi",
},
{
name: "NoAbiDescriptor",
args: []string{"fieldA"},
struc: struct {
FieldA int
}{},
want: map[string]string{
"fieldA": "FieldA",
},
},
{
name: "NoArgs",
args: []string{},
struc: struct {
FieldA int `abi:"fieldA"`
}{},
err: "struct: abi tag 'fieldA' defined but not found in abi",
},
{
name: "DifferentName",
args: []string{"fieldB"},
struc: struct {
FieldA int `abi:"fieldB"`
}{},
want: map[string]string{
"fieldB": "FieldA",
},
},
{
name: "DifferentName",
args: []string{"fieldB"},
struc: struct {
FieldA int `abi:"fieldB"`
}{},
want: map[string]string{
"fieldB": "FieldA",
},
},
{
name: "MultipleFields",
args: []string{"fieldA", "fieldB"},
struc: struct {
FieldA int `abi:"fieldA"`
FieldB int `abi:"fieldB"`
}{},
want: map[string]string{
"fieldA": "FieldA",
"fieldB": "FieldB",
},
},
{
name: "MultipleFieldsABIMissing",
args: []string{"fieldA", "fieldB"},
struc: struct {
FieldA int `abi:"fieldA"`
FieldB int
}{},
want: map[string]string{
"fieldA": "FieldA",
"fieldB": "FieldB",
},
},
{
name: "NameConflict",
args: []string{"fieldB"},
struc: struct {
FieldA int `abi:"fieldB"`
FieldB int
}{},
err: "abi: multiple variables maps to the same abi field 'fieldB'",
},
{
name: "Underscored",
args: []string{"_"},
struc: struct {
FieldA int
}{},
err: "abi: purely underscored output cannot unpack to struct",
},
{
name: "DoubleMapping",
args: []string{"fieldB", "fieldC", "fieldA"},
struc: struct {
FieldA int `abi:"fieldC"`
FieldB int
}{},
err: "abi: multiple outputs mapping to the same struct field 'FieldA'",
},
{
name: "AlreadyMapped",
args: []string{"fieldB", "fieldB"},
struc: struct {
FieldB int `abi:"fieldB"`
}{},
err: "struct: abi tag in 'FieldB' already mapped",
},
}
func TestReflectNameToStruct(t *testing.T) {
for _, test := range reflectTests {
t.Run(test.name, func(t *testing.T) {
m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))
if len(test.err) > 0 {
if err == nil || err.Error() != test.err {
t.Fatalf("Invalid error: expected %v, got %v", test.err, err)
}
} else {
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
for fname := range test.want {
if m[fname] != test.want[fname] {
t.Fatalf("Incorrect value for field %s: expected %v, got %v", fname, test.want[fname], m[fname])
}
}
}
})
}
}
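
The underscored names in these cases go through the same camel-casing that the untagged second round relies on. A standalone approximation of that behaviour (not the library's ToCamelCase itself) shows why a purely underscored name is rejected:

package main

import (
	"fmt"
	"strings"
)

// toCamelCase approximates the name normalisation used for untagged fields:
// underscore-separated pieces are capitalised and joined, so a name made only
// of underscores collapses to the empty string.
func toCamelCase(input string) string {
	parts := strings.Split(input, "_")
	for i, p := range parts {
		if len(p) > 0 {
			parts[i] = strings.ToUpper(p[:1]) + p[1:]
		}
	}
	return strings.Join(parts, "")
}

func main() {
	fmt.Println(toCamelCase("int_one"))  // IntOne
	fmt.Println(toCamelCase("int__one")) // IntOne
	fmt.Println(toCamelCase("_"))        // empty, hence the "purely underscored" error
}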

View File

@ -17,6 +17,7 @@
package abi
import (
"errors"
"fmt"
"reflect"
"regexp"
@ -32,6 +33,7 @@ const (
StringTy
SliceTy
ArrayTy
TupleTy
AddressTy
FixedBytesTy
BytesTy
@ -43,13 +45,16 @@ const (
// Type is the reflection of the supported argument type
type Type struct {
Elem *Type
Kind reflect.Kind
Type reflect.Type
Size int
T byte // Our own type checking
stringKind string // holds the unparsed string for deriving signatures
// Tuple relative fields
TupleElems []*Type // Type information of all tuple fields
TupleRawNames []string // Raw field name of all tuple fields
}
var (
@ -58,7 +63,7 @@ var (
)
// NewType creates a new reflection type of abi type given in t.
func NewType(t string) (typ Type, err error) {
func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
// check that array brackets are equal if they exist
if strings.Count(t, "[") != strings.Count(t, "]") {
return Type{}, fmt.Errorf("invalid arg type in abi")
@ -71,7 +76,7 @@ func NewType(t string) (typ Type, err error) {
if strings.Count(t, "[") != 0 {
i := strings.LastIndex(t, "[")
// recursively embed the type
embeddedType, err := NewType(t[:i])
embeddedType, err := NewType(t[:i], components)
if err != nil {
return Type{}, err
}
@ -87,6 +92,9 @@ func NewType(t string) (typ Type, err error) {
typ.Kind = reflect.Slice
typ.Elem = &embeddedType
typ.Type = reflect.SliceOf(embeddedType.Type)
if embeddedType.T == TupleTy {
typ.stringKind = embeddedType.stringKind + sliced
}
} else if len(intz) == 1 {
// is a array
typ.T = ArrayTy
@ -97,6 +105,9 @@ func NewType(t string) (typ Type, err error) {
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
}
typ.Type = reflect.ArrayOf(typ.Size, embeddedType.Type)
if embeddedType.T == TupleTy {
typ.stringKind = embeddedType.stringKind + sliced
}
} else {
return Type{}, fmt.Errorf("invalid formatting of array type")
}
@ -158,6 +169,40 @@ func NewType(t string) (typ Type, err error) {
typ.Size = varSize
typ.Type = reflect.ArrayOf(varSize, reflect.TypeOf(byte(0)))
}
case "tuple":
var (
fields []reflect.StructField
elems []*Type
names []string
expression string // canonical parameter expression
)
expression += "("
for idx, c := range components {
cType, err := NewType(c.Type, c.Components)
if err != nil {
return Type{}, err
}
if ToCamelCase(c.Name) == "" {
return Type{}, errors.New("abi: purely anonymous or underscored field is not supported")
}
fields = append(fields, reflect.StructField{
Name: ToCamelCase(c.Name), // reflect.StructOf will panic for any unexported field.
Type: cType.Type,
})
elems = append(elems, &cType)
names = append(names, c.Name)
expression += cType.stringKind
if idx != len(components)-1 {
expression += ","
}
}
expression += ")"
typ.Kind = reflect.Struct
typ.Type = reflect.StructOf(fields)
typ.TupleElems = elems
typ.TupleRawNames = names
typ.T = TupleTy
typ.stringKind = expression
case "function":
typ.Kind = reflect.Array
typ.T = FunctionTy
@ -178,7 +223,6 @@ func (t Type) String() (out string) {
func (t Type) pack(v reflect.Value) ([]byte, error) {
// dereference pointer first if it's a pointer
v = indirect(v)
if err := typeCheck(t, v); err != nil {
return nil, err
}
@ -196,7 +240,7 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
offset := 0
offsetReq := isDynamicType(*t.Elem)
if offsetReq {
offset = getDynamicTypeOffset(*t.Elem) * v.Len()
offset = getTypeSize(*t.Elem) * v.Len()
}
var tail []byte
for i := 0; i < v.Len(); i++ {
@ -213,6 +257,45 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
tail = append(tail, val...)
}
return append(ret, tail...), nil
case TupleTy:
// (T1,...,Tk) for k >= 0 and any types T1, …, Tk
// enc(X) = head(X(1)) ... head(X(k)) tail(X(1)) ... tail(X(k))
// where X = (X(1), ..., X(k)) and head and tail are defined for Ti being a static
// type as
// head(X(i)) = enc(X(i)) and tail(X(i)) = "" (the empty string)
// and as
// head(X(i)) = enc(len(head(X(1)) ... head(X(k)) tail(X(1)) ... tail(X(i-1))))
// tail(X(i)) = enc(X(i))
// otherwise, i.e. if Ti is a dynamic type.
fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, v)
if err != nil {
return nil, err
}
// Calculate prefix occupied size.
offset := 0
for _, elem := range t.TupleElems {
offset += getTypeSize(*elem)
}
var ret, tail []byte
for i, elem := range t.TupleElems {
field := v.FieldByName(fieldmap[t.TupleRawNames[i]])
if !field.IsValid() {
return nil, fmt.Errorf("field %s for tuple not found in the given struct", t.TupleRawNames[i])
}
val, err := elem.pack(field)
if err != nil {
return nil, err
}
if isDynamicType(*elem) {
ret = append(ret, packNum(reflect.ValueOf(offset))...)
tail = append(tail, val...)
offset += len(val)
} else {
ret = append(ret, val...)
}
}
return append(ret, tail...), nil
default:
return packElement(t, v), nil
}
@ -225,25 +308,45 @@ func (t Type) requiresLengthPrefix() bool {
}
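
The TupleTy branch above implements the head/tail rule quoted in its comment. A hedged, minimal sketch of the observable result, assuming the NewType and Arguments.Pack signatures shown in this change (the component names and the Go struct are illustrative):

package main

import (
	"encoding/hex"
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	tupleTy, err := abi.NewType("tuple", []abi.ArgumentMarshaling{
		{Name: "a", Type: "uint256"},
		{Name: "b", Type: "string"},
	})
	if err != nil {
		log.Fatal(err)
	}
	args := abi.Arguments{{Name: "x", Type: tupleTy}}

	packed, err := args.Pack(struct {
		A *big.Int
		B string
	}{big.NewInt(1), "hi"})
	if err != nil {
		log.Fatal(err)
	}
	// Expected shape: the tuple is dynamic because of the string, so word 0 is
	// the 0x20 offset to the tuple body, followed by head(a)=1, head(b)=0x40,
	// then the string length and its padded contents.
	fmt.Println(hex.EncodeToString(packed))
}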
// isDynamicType returns true if the type is dynamic.
// StringTy, BytesTy, and SliceTy(irrespective of slice element type) are dynamic types
// ArrayTy is considered dynamic if and only if the Array element is a dynamic type.
// This function recursively checks the type for slice and array elements.
// The following types are called “dynamic”:
// * bytes
// * string
// * T[] for any T
// * T[k] for any dynamic T and any k >= 0
// * (T1,...,Tk) if Ti is dynamic for some 1 <= i <= k
func isDynamicType(t Type) bool {
// dynamic types
// array is also a dynamic type if the array type is dynamic
if t.T == TupleTy {
for _, elem := range t.TupleElems {
if isDynamicType(*elem) {
return true
}
}
return false
}
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy || (t.T == ArrayTy && isDynamicType(*t.Elem))
}
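
The recursion above amounts to: a composite type is dynamic as soon as any reachable member is. A toy, standalone model of that rule (for intuition only, not the library code):

package main

import "fmt"

// shape is a toy model: a type is dynamic if it is intrinsically dynamic
// (bytes, string, any T[]) or if any member/element it contains is dynamic.
type shape struct {
	dynamic bool
	members []shape
}

func isDynamic(s shape) bool {
	if s.dynamic {
		return true
	}
	for _, m := range s.members {
		if isDynamic(m) {
			return true
		}
	}
	return false
}

func main() {
	uint256 := shape{}
	str := shape{dynamic: true}
	fmt.Println(isDynamic(shape{members: []shape{uint256, uint256}})) // (uint256,uint256): false
	fmt.Println(isDynamic(shape{members: []shape{uint256, str}}))     // (uint256,string):  true
}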
// getDynamicTypeOffset returns the offset for the type.
// See `isDynamicType` to know which types are considered dynamic.
// If the type t is an array and element type is not a dynamic type, then we consider it a static type and
// return 32 * size of array since length prefix is not required.
// If t is a dynamic type or element type(for slices and arrays) is dynamic, then we simply return 32 as offset.
func getDynamicTypeOffset(t Type) int {
// if it is an array and there are no dynamic types
// then the array is static type
// getTypeSize returns the size that this type needs to occupy.
// We distinguish static and dynamic types. Static types are encoded in-place
// and dynamic types are encoded at a separately allocated location after the
// current block.
// So for a static variable, the size returned represents the size that the
// variable actually occupies.
// For a dynamic variable, the returned size is fixed at 32 bytes, which is used
// to store the location reference for actual value storage.
func getTypeSize(t Type) int {
if t.T == ArrayTy && !isDynamicType(*t.Elem) {
return 32 * t.Size
// Recursively calculate type size if it is a nested array
if t.Elem.T == ArrayTy {
return t.Size * getTypeSize(*t.Elem)
}
return t.Size * 32
} else if t.T == TupleTy && !isDynamicType(t) {
total := 0
for _, elem := range t.TupleElems {
total += getTypeSize(*elem)
}
return total
}
return 32
}
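
Because the array and slice branches reuse the embedded tuple's stringKind, the canonical parameter expression propagates into composite types, and that expression is what ends up in method signatures. A hedged sketch, assuming the NewType signature shown in this change (component names are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// NewType recurses into the element type first, so the tuple's
	// "(uint256,bool)" expression is built before the "[2]" suffix is appended.
	typ, err := abi.NewType("tuple[2]", []abi.ArgumentMarshaling{
		{Name: "x", Type: "uint256"},
		{Name: "y", Type: "bool"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(typ.String()) // (uint256,bool)[2]

	// Per getTypeSize above, the static (uint256,bool) tuple itself occupies
	// 2*32 = 64 bytes and needs no offset word when nested in another tuple.
}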

View File

@ -33,71 +33,74 @@ type typeWithoutStringer Type
func TestTypeRegexp(t *testing.T) {
tests := []struct {
blob string
components []ArgumentMarshaling
kind Type
}{
{"bool", Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}},
{"bool[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}},
{"bool[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}},
{"bool[2][]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
{"bool[][]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
{"bool[][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
{"bool[2][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
{"bool[2][][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
{"bool[2][2][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
{"bool[][][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
{"bool[][2][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
{"int8", Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}},
{"int16", Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}},
{"int32", Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}},
{"int64", Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}},
{"int256", Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}},
{"int8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
{"int8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
{"int16[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
{"int16[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
{"int32[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
{"int32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
{"int64[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
{"int64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
{"int256[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
{"int256[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
{"uint8", Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}},
{"uint16", Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}},
{"uint32", Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}},
{"uint64", Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}},
{"uint256", Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}},
{"uint8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
{"uint8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
{"uint16[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
{"uint16[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
{"uint32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
{"uint32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
{"uint64[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
{"uint64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
{"uint256[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
{"uint256[2]", Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
{"bytes32", Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
{"bytes[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
{"bytes[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
{"bytes32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
{"bytes32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
{"string", Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
{"string[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
{"string[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
{"address", Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}},
{"address[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
{"address[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
{"bool", nil, Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}},
{"bool[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}},
{"bool[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}},
{"bool[2][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
{"bool[][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
{"bool[][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
{"bool[2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
{"bool[2][][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
{"bool[2][2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
{"bool[][][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
{"bool[][2][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
{"int8", nil, Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}},
{"int16", nil, Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}},
{"int32", nil, Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}},
{"int64", nil, Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}},
{"int256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}},
{"int8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
{"int8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
{"int16[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
{"int16[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
{"int32[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
{"int32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
{"int64[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
{"int64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
{"int256[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
{"int256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
{"uint8", nil, Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}},
{"uint16", nil, Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}},
{"uint32", nil, Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}},
{"uint64", nil, Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}},
{"uint256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}},
{"uint8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
{"uint8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
{"uint16[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
{"uint16[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
{"uint32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
{"uint32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
{"uint64[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
{"uint64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
{"uint256[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
{"uint256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
{"bytes32", nil, Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
{"bytes[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
{"bytes[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
{"bytes32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
{"bytes32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
{"string", nil, Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
{"string[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
{"string[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
{"address", nil, Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}},
{"address[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
{"address[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
// TODO when fixed types are implemented properly
// {"fixed", Type{}},
// {"fixed128x128", Type{}},
// {"fixed[]", Type{}},
// {"fixed[2]", Type{}},
// {"fixed128x128[]", Type{}},
// {"fixed128x128[2]", Type{}},
// {"fixed", nil, Type{}},
// {"fixed128x128", nil, Type{}},
// {"fixed[]", nil, Type{}},
// {"fixed[2]", nil, Type{}},
// {"fixed128x128[]", nil, Type{}},
// {"fixed128x128[2]", nil, Type{}},
{"tuple", []ArgumentMarshaling{{Name: "a", Type: "int64"}}, Type{Kind: reflect.Struct, T: TupleTy, Type: reflect.TypeOf(struct{ A int64 }{}), stringKind: "(int64)",
TupleElems: []*Type{{Kind: reflect.Int64, T: IntTy, Type: reflect.TypeOf(int64(0)), Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"a"}}},
}
for _, tt := range tests {
typ, err := NewType(tt.blob)
typ, err := NewType(tt.blob, tt.components)
if err != nil {
t.Errorf("type %q: failed to parse type string: %v", tt.blob, err)
}
@ -110,153 +113,169 @@ func TestTypeRegexp(t *testing.T) {
func TestTypeCheck(t *testing.T) {
for i, test := range []struct {
typ string
components []ArgumentMarshaling
input interface{}
err string
}{
{"uint", big.NewInt(1), "unsupported arg type: uint"},
{"int", big.NewInt(1), "unsupported arg type: int"},
{"uint256", big.NewInt(1), ""},
{"uint256[][3][]", [][3][]*big.Int{{{}}}, ""},
{"uint256[][][3]", [3][][]*big.Int{{{}}}, ""},
{"uint256[3][][]", [][][3]*big.Int{{{}}}, ""},
{"uint256[3][3][3]", [3][3][3]*big.Int{{{}}}, ""},
{"uint8[][]", [][]uint8{}, ""},
{"int256", big.NewInt(1), ""},
{"uint8", uint8(1), ""},
{"uint16", uint16(1), ""},
{"uint32", uint32(1), ""},
{"uint64", uint64(1), ""},
{"int8", int8(1), ""},
{"int16", int16(1), ""},
{"int32", int32(1), ""},
{"int64", int64(1), ""},
{"uint24", big.NewInt(1), ""},
{"uint40", big.NewInt(1), ""},
{"uint48", big.NewInt(1), ""},
{"uint56", big.NewInt(1), ""},
{"uint72", big.NewInt(1), ""},
{"uint80", big.NewInt(1), ""},
{"uint88", big.NewInt(1), ""},
{"uint96", big.NewInt(1), ""},
{"uint104", big.NewInt(1), ""},
{"uint112", big.NewInt(1), ""},
{"uint120", big.NewInt(1), ""},
{"uint128", big.NewInt(1), ""},
{"uint136", big.NewInt(1), ""},
{"uint144", big.NewInt(1), ""},
{"uint152", big.NewInt(1), ""},
{"uint160", big.NewInt(1), ""},
{"uint168", big.NewInt(1), ""},
{"uint176", big.NewInt(1), ""},
{"uint184", big.NewInt(1), ""},
{"uint192", big.NewInt(1), ""},
{"uint200", big.NewInt(1), ""},
{"uint208", big.NewInt(1), ""},
{"uint216", big.NewInt(1), ""},
{"uint224", big.NewInt(1), ""},
{"uint232", big.NewInt(1), ""},
{"uint240", big.NewInt(1), ""},
{"uint248", big.NewInt(1), ""},
{"int24", big.NewInt(1), ""},
{"int40", big.NewInt(1), ""},
{"int48", big.NewInt(1), ""},
{"int56", big.NewInt(1), ""},
{"int72", big.NewInt(1), ""},
{"int80", big.NewInt(1), ""},
{"int88", big.NewInt(1), ""},
{"int96", big.NewInt(1), ""},
{"int104", big.NewInt(1), ""},
{"int112", big.NewInt(1), ""},
{"int120", big.NewInt(1), ""},
{"int128", big.NewInt(1), ""},
{"int136", big.NewInt(1), ""},
{"int144", big.NewInt(1), ""},
{"int152", big.NewInt(1), ""},
{"int160", big.NewInt(1), ""},
{"int168", big.NewInt(1), ""},
{"int176", big.NewInt(1), ""},
{"int184", big.NewInt(1), ""},
{"int192", big.NewInt(1), ""},
{"int200", big.NewInt(1), ""},
{"int208", big.NewInt(1), ""},
{"int216", big.NewInt(1), ""},
{"int224", big.NewInt(1), ""},
{"int232", big.NewInt(1), ""},
{"int240", big.NewInt(1), ""},
{"int248", big.NewInt(1), ""},
{"uint30", uint8(1), "abi: cannot use uint8 as type ptr as argument"},
{"uint8", uint16(1), "abi: cannot use uint16 as type uint8 as argument"},
{"uint8", uint32(1), "abi: cannot use uint32 as type uint8 as argument"},
{"uint8", uint64(1), "abi: cannot use uint64 as type uint8 as argument"},
{"uint8", int8(1), "abi: cannot use int8 as type uint8 as argument"},
{"uint8", int16(1), "abi: cannot use int16 as type uint8 as argument"},
{"uint8", int32(1), "abi: cannot use int32 as type uint8 as argument"},
{"uint8", int64(1), "abi: cannot use int64 as type uint8 as argument"},
{"uint16", uint16(1), ""},
{"uint16", uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
{"uint16[]", []uint16{1, 2, 3}, ""},
{"uint16[]", [3]uint16{1, 2, 3}, ""},
{"uint16[]", []uint32{1, 2, 3}, "abi: cannot use []uint32 as type [0]uint16 as argument"},
{"uint16[3]", [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
{"uint16[3]", [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
{"uint16[3]", []uint16{1, 2, 3}, ""},
{"uint16[3]", []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
{"address[]", []common.Address{{1}}, ""},
{"address[1]", []common.Address{{1}}, ""},
{"address[1]", [1]common.Address{{1}}, ""},
{"address[2]", [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
{"bytes32", [32]byte{}, ""},
{"bytes31", [31]byte{}, ""},
{"bytes30", [30]byte{}, ""},
{"bytes29", [29]byte{}, ""},
{"bytes28", [28]byte{}, ""},
{"bytes27", [27]byte{}, ""},
{"bytes26", [26]byte{}, ""},
{"bytes25", [25]byte{}, ""},
{"bytes24", [24]byte{}, ""},
{"bytes23", [23]byte{}, ""},
{"bytes22", [22]byte{}, ""},
{"bytes21", [21]byte{}, ""},
{"bytes20", [20]byte{}, ""},
{"bytes19", [19]byte{}, ""},
{"bytes18", [18]byte{}, ""},
{"bytes17", [17]byte{}, ""},
{"bytes16", [16]byte{}, ""},
{"bytes15", [15]byte{}, ""},
{"bytes14", [14]byte{}, ""},
{"bytes13", [13]byte{}, ""},
{"bytes12", [12]byte{}, ""},
{"bytes11", [11]byte{}, ""},
{"bytes10", [10]byte{}, ""},
{"bytes9", [9]byte{}, ""},
{"bytes8", [8]byte{}, ""},
{"bytes7", [7]byte{}, ""},
{"bytes6", [6]byte{}, ""},
{"bytes5", [5]byte{}, ""},
{"bytes4", [4]byte{}, ""},
{"bytes3", [3]byte{}, ""},
{"bytes2", [2]byte{}, ""},
{"bytes1", [1]byte{}, ""},
{"bytes32", [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
{"bytes32", common.Hash{1}, ""},
{"bytes31", common.Hash{1}, "abi: cannot use common.Hash as type [31]uint8 as argument"},
{"bytes31", [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
{"bytes", []byte{0, 1}, ""},
{"bytes", [2]byte{0, 1}, "abi: cannot use array as type slice as argument"},
{"bytes", common.Hash{1}, "abi: cannot use array as type slice as argument"},
{"string", "hello world", ""},
{"string", string(""), ""},
{"string", []byte{}, "abi: cannot use slice as type string as argument"},
{"bytes32[]", [][32]byte{{}}, ""},
{"function", [24]byte{}, ""},
{"bytes20", common.Address{}, ""},
{"address", [20]byte{}, ""},
{"address", common.Address{}, ""},
{"bytes32[]]", "", "invalid arg type in abi"},
{"invalidType", "", "unsupported arg type: invalidType"},
{"invalidSlice[]", "", "unsupported arg type: invalidSlice"},
{"uint", nil, big.NewInt(1), "unsupported arg type: uint"},
{"int", nil, big.NewInt(1), "unsupported arg type: int"},
{"uint256", nil, big.NewInt(1), ""},
{"uint256[][3][]", nil, [][3][]*big.Int{{{}}}, ""},
{"uint256[][][3]", nil, [3][][]*big.Int{{{}}}, ""},
{"uint256[3][][]", nil, [][][3]*big.Int{{{}}}, ""},
{"uint256[3][3][3]", nil, [3][3][3]*big.Int{{{}}}, ""},
{"uint8[][]", nil, [][]uint8{}, ""},
{"int256", nil, big.NewInt(1), ""},
{"uint8", nil, uint8(1), ""},
{"uint16", nil, uint16(1), ""},
{"uint32", nil, uint32(1), ""},
{"uint64", nil, uint64(1), ""},
{"int8", nil, int8(1), ""},
{"int16", nil, int16(1), ""},
{"int32", nil, int32(1), ""},
{"int64", nil, int64(1), ""},
{"uint24", nil, big.NewInt(1), ""},
{"uint40", nil, big.NewInt(1), ""},
{"uint48", nil, big.NewInt(1), ""},
{"uint56", nil, big.NewInt(1), ""},
{"uint72", nil, big.NewInt(1), ""},
{"uint80", nil, big.NewInt(1), ""},
{"uint88", nil, big.NewInt(1), ""},
{"uint96", nil, big.NewInt(1), ""},
{"uint104", nil, big.NewInt(1), ""},
{"uint112", nil, big.NewInt(1), ""},
{"uint120", nil, big.NewInt(1), ""},
{"uint128", nil, big.NewInt(1), ""},
{"uint136", nil, big.NewInt(1), ""},
{"uint144", nil, big.NewInt(1), ""},
{"uint152", nil, big.NewInt(1), ""},
{"uint160", nil, big.NewInt(1), ""},
{"uint168", nil, big.NewInt(1), ""},
{"uint176", nil, big.NewInt(1), ""},
{"uint184", nil, big.NewInt(1), ""},
{"uint192", nil, big.NewInt(1), ""},
{"uint200", nil, big.NewInt(1), ""},
{"uint208", nil, big.NewInt(1), ""},
{"uint216", nil, big.NewInt(1), ""},
{"uint224", nil, big.NewInt(1), ""},
{"uint232", nil, big.NewInt(1), ""},
{"uint240", nil, big.NewInt(1), ""},
{"uint248", nil, big.NewInt(1), ""},
{"int24", nil, big.NewInt(1), ""},
{"int40", nil, big.NewInt(1), ""},
{"int48", nil, big.NewInt(1), ""},
{"int56", nil, big.NewInt(1), ""},
{"int72", nil, big.NewInt(1), ""},
{"int80", nil, big.NewInt(1), ""},
{"int88", nil, big.NewInt(1), ""},
{"int96", nil, big.NewInt(1), ""},
{"int104", nil, big.NewInt(1), ""},
{"int112", nil, big.NewInt(1), ""},
{"int120", nil, big.NewInt(1), ""},
{"int128", nil, big.NewInt(1), ""},
{"int136", nil, big.NewInt(1), ""},
{"int144", nil, big.NewInt(1), ""},
{"int152", nil, big.NewInt(1), ""},
{"int160", nil, big.NewInt(1), ""},
{"int168", nil, big.NewInt(1), ""},
{"int176", nil, big.NewInt(1), ""},
{"int184", nil, big.NewInt(1), ""},
{"int192", nil, big.NewInt(1), ""},
{"int200", nil, big.NewInt(1), ""},
{"int208", nil, big.NewInt(1), ""},
{"int216", nil, big.NewInt(1), ""},
{"int224", nil, big.NewInt(1), ""},
{"int232", nil, big.NewInt(1), ""},
{"int240", nil, big.NewInt(1), ""},
{"int248", nil, big.NewInt(1), ""},
{"uint30", nil, uint8(1), "abi: cannot use uint8 as type ptr as argument"},
{"uint8", nil, uint16(1), "abi: cannot use uint16 as type uint8 as argument"},
{"uint8", nil, uint32(1), "abi: cannot use uint32 as type uint8 as argument"},
{"uint8", nil, uint64(1), "abi: cannot use uint64 as type uint8 as argument"},
{"uint8", nil, int8(1), "abi: cannot use int8 as type uint8 as argument"},
{"uint8", nil, int16(1), "abi: cannot use int16 as type uint8 as argument"},
{"uint8", nil, int32(1), "abi: cannot use int32 as type uint8 as argument"},
{"uint8", nil, int64(1), "abi: cannot use int64 as type uint8 as argument"},
{"uint16", nil, uint16(1), ""},
{"uint16", nil, uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
{"uint16[]", nil, []uint16{1, 2, 3}, ""},
{"uint16[]", nil, [3]uint16{1, 2, 3}, ""},
{"uint16[]", nil, []uint32{1, 2, 3}, "abi: cannot use []uint32 as type [0]uint16 as argument"},
{"uint16[3]", nil, [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
{"uint16[3]", nil, [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
{"uint16[3]", nil, []uint16{1, 2, 3}, ""},
{"uint16[3]", nil, []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
{"address[]", nil, []common.Address{{1}}, ""},
{"address[1]", nil, []common.Address{{1}}, ""},
{"address[1]", nil, [1]common.Address{{1}}, ""},
{"address[2]", nil, [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
{"bytes32", nil, [32]byte{}, ""},
{"bytes31", nil, [31]byte{}, ""},
{"bytes30", nil, [30]byte{}, ""},
{"bytes29", nil, [29]byte{}, ""},
{"bytes28", nil, [28]byte{}, ""},
{"bytes27", nil, [27]byte{}, ""},
{"bytes26", nil, [26]byte{}, ""},
{"bytes25", nil, [25]byte{}, ""},
{"bytes24", nil, [24]byte{}, ""},
{"bytes23", nil, [23]byte{}, ""},
{"bytes22", nil, [22]byte{}, ""},
{"bytes21", nil, [21]byte{}, ""},
{"bytes20", nil, [20]byte{}, ""},
{"bytes19", nil, [19]byte{}, ""},
{"bytes18", nil, [18]byte{}, ""},
{"bytes17", nil, [17]byte{}, ""},
{"bytes16", nil, [16]byte{}, ""},
{"bytes15", nil, [15]byte{}, ""},
{"bytes14", nil, [14]byte{}, ""},
{"bytes13", nil, [13]byte{}, ""},
{"bytes12", nil, [12]byte{}, ""},
{"bytes11", nil, [11]byte{}, ""},
{"bytes10", nil, [10]byte{}, ""},
{"bytes9", nil, [9]byte{}, ""},
{"bytes8", nil, [8]byte{}, ""},
{"bytes7", nil, [7]byte{}, ""},
{"bytes6", nil, [6]byte{}, ""},
{"bytes5", nil, [5]byte{}, ""},
{"bytes4", nil, [4]byte{}, ""},
{"bytes3", nil, [3]byte{}, ""},
{"bytes2", nil, [2]byte{}, ""},
{"bytes1", nil, [1]byte{}, ""},
{"bytes32", nil, [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
{"bytes32", nil, common.Hash{1}, ""},
{"bytes31", nil, common.Hash{1}, "abi: cannot use common.Hash as type [31]uint8 as argument"},
{"bytes31", nil, [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
{"bytes", nil, []byte{0, 1}, ""},
{"bytes", nil, [2]byte{0, 1}, "abi: cannot use array as type slice as argument"},
{"bytes", nil, common.Hash{1}, "abi: cannot use array as type slice as argument"},
{"string", nil, "hello world", ""},
{"string", nil, string(""), ""},
{"string", nil, []byte{}, "abi: cannot use slice as type string as argument"},
{"bytes32[]", nil, [][32]byte{{}}, ""},
{"function", nil, [24]byte{}, ""},
{"bytes20", nil, common.Address{}, ""},
{"address", nil, [20]byte{}, ""},
{"address", nil, common.Address{}, ""},
{"bytes32[]]", nil, "", "invalid arg type in abi"},
{"invalidType", nil, "", "unsupported arg type: invalidType"},
{"invalidSlice[]", nil, "", "unsupported arg type: invalidSlice"},
// simple tuple
{"tuple", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, struct {
A *big.Int
B *big.Int
}{}, ""},
// tuple slice
{"tuple[]", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, []struct {
A *big.Int
B *big.Int
}{}, ""},
// tuple array
{"tuple[2]", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, []struct {
A *big.Int
B *big.Int
}{{big.NewInt(0), big.NewInt(0)}, {big.NewInt(0), big.NewInt(0)}}, ""},
} {
typ, err := NewType(test.typ)
typ, err := NewType(test.typ, test.components)
if err != nil && len(test.err) == 0 {
t.Fatal("unexpected parse error:", err)
} else if err != nil && len(test.err) != 0 {

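These component descriptors are what an ABI JSON definition carries for tuple parameters, so the same path can be exercised through abi.JSON and Pack. A hedged sketch, assuming components are wired through argument unmarshalling as this change suggests; the method name, argument names and struct are hypothetical:

package main

import (
	"encoding/hex"
	"fmt"
	"log"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

const def = `[{"type":"function","name":"setPoint","inputs":[
	{"name":"p","type":"tuple","components":[
		{"name":"x","type":"uint256"},
		{"name":"y","type":"uint256"}]}],
	"outputs":[]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		log.Fatal(err)
	}
	// The component names "x"/"y" are camel-cased onto the struct fields X/Y.
	data, err := parsed.Pack("setPoint", struct{ X, Y *big.Int }{big.NewInt(1), big.NewInt(2)})
	if err != nil {
		log.Fatal(err)
	}
	// 4-byte selector of "setPoint((uint256,uint256))" followed by two words.
	fmt.Println(hex.EncodeToString(data))
}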
View File

@ -115,17 +115,6 @@ func readFixedBytes(t Type, word []byte) (interface{}, error) {
}
func getFullElemSize(elem *Type) int {
//all other should be counted as 32 (slices have pointers to respective elements)
size := 32
//arrays wrap it, each element being the same size
for elem.T == ArrayTy {
size *= elem.Size
elem = elem.Elem
}
return size
}
// iteratively unpack elements
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
if size < 0 {
@ -150,13 +139,9 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
// Arrays have packed elements, resulting in longer unpack steps.
// Slices have just 32 bytes per element (pointing to the contents).
elemSize := 32
if t.T == ArrayTy {
elemSize = getFullElemSize(t.Elem)
}
elemSize := getTypeSize(*t.Elem)
for i, j := start, 0; j < size; i, j = i+elemSize, j+1 {
inter, err := toGoType(i, *t.Elem, output)
if err != nil {
return nil, err
@ -170,6 +155,36 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
return refSlice.Interface(), nil
}
func forTupleUnpack(t Type, output []byte) (interface{}, error) {
retval := reflect.New(t.Type).Elem()
virtualArgs := 0
for index, elem := range t.TupleElems {
marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output)
if elem.T == ArrayTy && !isDynamicType(*elem) {
// If we have a static array, like [3]uint256, it is encoded inline,
// just like uint256,uint256,uint256.
// This means that we need to add two 'virtual' arguments when
// we count the index from now on.
//
// Array values nested multiple levels deep are also encoded inline:
// [2][3]uint256: uint256,uint256,uint256,uint256,uint256,uint256
//
// Calculate the full array size to get the correct offset for the next argument.
// Decrement it by 1, as the normal index increment is still applied.
virtualArgs += getTypeSize(*elem)/32 - 1
} else if elem.T == TupleTy && !isDynamicType(*elem) {
// If we have a static tuple, like (uint256, bool, uint256), it is encoded
// inline, just like uint256,bool,uint256.
virtualArgs += getTypeSize(*elem)/32 - 1
}
if err != nil {
return nil, err
}
retval.Field(index).Set(reflect.ValueOf(marshalledValue))
}
return retval.Interface(), nil
}
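
The virtualArgs bookkeeping above only matters for members that are encoded inline. A standalone sketch of the word arithmetic for a hypothetical static tuple (uint256, uint256[3], bool):

package main

import "fmt"

func main() {
	// Every member of a static tuple is encoded inline, so the [3]uint256
	// consumes three words and contributes two "virtual" arguments to the
	// index bookkeeping above.
	words := []int{1, 3, 1} // words occupied by each member
	offset := 0
	for i, w := range words {
		fmt.Printf("member %d starts at word %d (byte offset %d)\n", i, offset, offset*32)
		offset += w // the next member starts after all inline words so far
	}
}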
// toGoType parses the output bytes and recursively assigns the value of these bytes
// into a go type with accordance with the ABI spec.
func toGoType(index int, t Type, output []byte) (interface{}, error) {
@ -179,13 +194,13 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
var (
returnOutput []byte
begin, end int
begin, length int
err error
)
// if we require a length prefix, find the beginning word and size returned.
if t.requiresLengthPrefix() {
begin, end, err = lengthPrefixPointsTo(index, output)
begin, length, err = lengthPrefixPointsTo(index, output)
if err != nil {
return nil, err
}
@ -194,12 +209,26 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
}
switch t.T {
case TupleTy:
if isDynamicType(t) {
begin, err := tuplePointsTo(index, output)
if err != nil {
return nil, err
}
return forTupleUnpack(t, output[begin:])
} else {
return forTupleUnpack(t, output[index:])
}
case SliceTy:
return forEachUnpack(t, output, begin, end)
return forEachUnpack(t, output[begin:], 0, length)
case ArrayTy:
return forEachUnpack(t, output, index, t.Size)
if isDynamicType(*t.Elem) {
offset := int64(binary.BigEndian.Uint64(returnOutput[len(returnOutput)-8:]))
return forEachUnpack(t, output[offset:], 0, t.Size)
}
return forEachUnpack(t, output[index:], 0, t.Size)
case StringTy: // variable arrays are written at the end of the return bytes
return string(output[begin : begin+end]), nil
return string(output[begin : begin+length]), nil
case IntTy, UintTy:
return readInteger(t.T, t.Kind, returnOutput), nil
case BoolTy:
@ -209,7 +238,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
case HashTy:
return common.BytesToHash(returnOutput), nil
case BytesTy:
return output[begin : begin+end], nil
return output[begin : begin+length], nil
case FixedBytesTy:
return readFixedBytes(t, returnOutput)
case FunctionTy:
@ -250,3 +279,17 @@ func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err
length = int(lengthBig.Uint64())
return
}
// tuplePointsTo resolves the location reference for a dynamic tuple.
func tuplePointsTo(index int, output []byte) (start int, err error) {
offset := big.NewInt(0).SetBytes(output[index : index+32])
outputLen := big.NewInt(int64(len(output)))
if offset.Cmp(outputLen) > 0 {
return 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", offset, outputLen)
}
if offset.BitLen() > 63 {
return 0, fmt.Errorf("abi offset larger than int64: %v", offset)
}
return int(offset.Uint64()), nil
}
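
For a dynamic tuple the head word holds a byte offset into the output, which is exactly what tuplePointsTo recovers before forTupleUnpack walks the body. A minimal sketch of that layout for a hypothetical (uint256,string) return value, with the blob built by hand:

package main

import (
	"encoding/hex"
	"fmt"
	"log"
	"math/big"
	"strings"
)

func main() {
	word := func(v int64) string { return fmt.Sprintf("%064x", v) } // one 32-byte word in hex

	// Hand-built ABI output for a single dynamic (uint256,string) tuple value.
	blob := word(0x20) + // head: byte offset of the tuple body within the output
		word(1) + // body word 0: the uint256 component, value 1
		word(0x40) + // body word 1: offset of the string within the tuple body
		word(2) + // string length
		"6869" + strings.Repeat("0", 60) // "hi", right-padded to a full word

	out, err := hex.DecodeString(blob)
	if err != nil {
		log.Fatal(err)
	}
	offset := new(big.Int).SetBytes(out[:32])
	fmt.Println("tuple body starts at byte", offset) // 32
}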

View File

@ -173,9 +173,14 @@ var unpackTests = []unpackTest{
// multi dimensional, if these pass, all types that don't require length prefix should pass
{
def: `[{"type": "uint8[][]"}]`,
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000E0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
want: [][]uint8{{1, 2}, {1, 2}},
},
{
def: `[{"type": "uint8[][]"}]`,
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
want: [][]uint8{{1, 2}, {1, 2, 3}},
},
{
def: `[{"type": "uint8[2][2]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@ -183,7 +188,7 @@ var unpackTests = []unpackTest{
},
{
def: `[{"type": "uint8[][2]"}]`,
enc: "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
want: [2][]uint8{{1}, {1}},
},
{
@ -191,6 +196,11 @@ var unpackTests = []unpackTest{
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
want: [][2]uint8{{1, 2}},
},
{
def: `[{"type": "uint8[2][]"}]`,
enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
want: [][2]uint8{{1, 2}, {1, 2}},
},
{
def: `[{"type": "uint16[]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@ -236,6 +246,26 @@ var unpackTests = []unpackTest{
enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
},
{
def: `[{"type": "string[4]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000000548656c6c6f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005576f726c64000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b476f2d657468657265756d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000",
want: [4]string{"Hello", "World", "Go-ethereum", "Ethereum"},
},
{
def: `[{"type": "string[]"}]`,
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b676f2d657468657265756d000000000000000000000000000000000000000000",
want: []string{"Ethereum", "go-ethereum"},
},
{
def: `[{"type": "bytes[]"}]`,
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000003f0f0f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003f0f0f00000000000000000000000000000000000000000000000000000000000",
want: [][]byte{{0xf0, 0xf0, 0xf0}, {0xf0, 0xf0, 0xf0}},
},
{
def: `[{"type": "uint256[2][][]"}]`,
enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e80000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e8",
want: [][][2]*big.Int{{{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}, {{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}},
},
{
def: `[{"type": "int8[]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@ -295,6 +325,53 @@ var unpackTests = []unpackTest{
Int2 *big.Int
}{big.NewInt(1), big.NewInt(2)},
},
{
def: `[{"name":"int_one","type":"int256"}]`,
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
want: struct {
IntOne *big.Int
}{big.NewInt(1)},
},
{
def: `[{"name":"int__one","type":"int256"}]`,
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
want: struct {
IntOne *big.Int
}{big.NewInt(1)},
},
{
def: `[{"name":"int_one_","type":"int256"}]`,
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
want: struct {
IntOne *big.Int
}{big.NewInt(1)},
},
{
def: `[{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}]`,
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
want: struct {
IntOne *big.Int
Intone *big.Int
}{big.NewInt(1), big.NewInt(2)},
},
{
def: `[{"name":"___","type":"int256"}]`,
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
want: struct {
IntOne *big.Int
Intone *big.Int
}{},
err: "abi: purely underscored output cannot unpack to struct",
},
{
def: `[{"name":"int_one","type":"int256"},{"name":"IntOne","type":"int256"}]`,
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
want: struct {
Int1 *big.Int
Int2 *big.Int
}{},
err: "abi: multiple outputs mapping to the same struct field 'IntOne'",
},
{
def: `[{"name":"int","type":"int256"},{"name":"Int","type":"int256"}]`,
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@ -359,6 +436,55 @@ func TestUnpack(t *testing.T) {
}
}
func TestUnpackSetDynamicArrayOutput(t *testing.T) {
abi, err := JSON(strings.NewReader(`[{"constant":true,"inputs":[],"name":"testDynamicFixedBytes15","outputs":[{"name":"","type":"bytes15[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"testDynamicFixedBytes32","outputs":[{"name":"","type":"bytes32[]"}],"payable":false,"stateMutability":"view","type":"function"}]`))
if err != nil {
t.Fatal(err)
}
var (
marshalledReturn32 = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000230783132333435363738393000000000000000000000000000000000000000003078303938373635343332310000000000000000000000000000000000000000")
marshalledReturn15 = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000230783031323334350000000000000000000000000000000000000000000000003078393837363534000000000000000000000000000000000000000000000000")
out32 [][32]byte
out15 [][15]byte
)
// test 32
err = abi.Unpack(&out32, "testDynamicFixedBytes32", marshalledReturn32)
if err != nil {
t.Fatal(err)
}
if len(out32) != 2 {
t.Fatalf("expected array with 2 values, got %d", len(out32))
}
expected := common.Hex2Bytes("3078313233343536373839300000000000000000000000000000000000000000")
if !bytes.Equal(out32[0][:], expected) {
t.Errorf("expected %x, got %x\n", expected, out32[0])
}
expected = common.Hex2Bytes("3078303938373635343332310000000000000000000000000000000000000000")
if !bytes.Equal(out32[1][:], expected) {
t.Errorf("expected %x, got %x\n", expected, out32[1])
}
// test 15
err = abi.Unpack(&out15, "testDynamicFixedBytes15", marshalledReturn15)
if err != nil {
t.Fatal(err)
}
if len(out15) != 2 {
t.Fatalf("expected array with 2 values, got %d", len(out15))
}
expected = common.Hex2Bytes("307830313233343500000000000000")
if !bytes.Equal(out15[0][:], expected) {
t.Errorf("expected %x, got %x\n", expected, out15[0])
}
expected = common.Hex2Bytes("307839383736353400000000000000")
if !bytes.Equal(out15[1][:], expected) {
t.Errorf("expected %x, got %x\n", expected, out15[1])
}
}
type methodMultiOutput struct {
Int *big.Int
String string
@ -462,6 +588,68 @@ func TestMultiReturnWithArray(t *testing.T) {
}
}
func TestMultiReturnWithStringArray(t *testing.T) {
const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]`
abi, err := JSON(strings.NewReader(definition))
if err != nil {
t.Fatal(err)
}
buff := new(bytes.Buffer)
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000005c1b78ea0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000001a055690d9db80000000000000000000000000000ab1257528b3782fb40d7ed5f72e624b744dffb2f00000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001048656c6c6f2c20457468657265756d2100000000000000000000000000000000"))
temp, _ := big.NewInt(0).SetString("30000000000000000000", 10)
ret1, ret1Exp := new([3]*big.Int), [3]*big.Int{big.NewInt(1545304298), big.NewInt(6), temp}
ret2, ret2Exp := new(common.Address), common.HexToAddress("ab1257528b3782fb40d7ed5f72e624b744dffb2f")
ret3, ret3Exp := new([2]string), [2]string{"Ethereum", "Hello, Ethereum!"}
ret4, ret4Exp := new(bool), false
if err := abi.Unpack(&[]interface{}{ret1, ret2, ret3, ret4}, "multi", buff.Bytes()); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(*ret1, ret1Exp) {
t.Error("big.Int array result", *ret1, "!= Expected", ret1Exp)
}
if !reflect.DeepEqual(*ret2, ret2Exp) {
t.Error("address result", *ret2, "!= Expected", ret2Exp)
}
if !reflect.DeepEqual(*ret3, ret3Exp) {
t.Error("string array result", *ret3, "!= Expected", ret3Exp)
}
if !reflect.DeepEqual(*ret4, ret4Exp) {
t.Error("bool result", *ret4, "!= Expected", ret4Exp)
}
}
func TestMultiReturnWithStringSlice(t *testing.T) {
const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]`
abi, err := JSON(strings.NewReader(definition))
if err != nil {
t.Fatal(err)
}
buff := new(bytes.Buffer)
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // output[0] offset
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000120")) // output[1] offset
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // output[0] length
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // output[0][0] offset
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // output[0][1] offset
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000008")) // output[0][0] length
buff.Write(common.Hex2Bytes("657468657265756d000000000000000000000000000000000000000000000000")) // output[0][0] value
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000b")) // output[0][1] length
buff.Write(common.Hex2Bytes("676f2d657468657265756d000000000000000000000000000000000000000000")) // output[0][1] value
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // output[1] length
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000064")) // output[1][0] value
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000065")) // output[1][1] value
ret1, ret1Exp := new([]string), []string{"ethereum", "go-ethereum"}
ret2, ret2Exp := new([]*big.Int), []*big.Int{big.NewInt(100), big.NewInt(101)}
if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(*ret1, ret1Exp) {
t.Error("string slice result", *ret1, "!= Expected", ret1Exp)
}
if !reflect.DeepEqual(*ret2, ret2Exp) {
t.Error("uint256 slice result", *ret2, "!= Expected", ret2Exp)
}
}
func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
// Similar to TestMultiReturnWithArray, but with a special case in mind:
// values of nested static arrays count towards the size as well, and any element following
@ -751,6 +939,108 @@ func TestUnmarshal(t *testing.T) {
}
}
func TestUnpackTuple(t *testing.T) {
const simpleTuple = `[{"name":"tuple","constant":false,"outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]`
abi, err := JSON(strings.NewReader(simpleTuple))
if err != nil {
t.Fatal(err)
}
buff := new(bytes.Buffer)
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // ret[a] = 1
buff.Write(common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) // ret[b] = -1
v := struct {
Ret struct {
A *big.Int
B *big.Int
}
}{Ret: struct {
A *big.Int
B *big.Int
}{new(big.Int), new(big.Int)}}
err = abi.Unpack(&v, "tuple", buff.Bytes())
if err != nil {
t.Error(err)
} else {
if v.Ret.A.Cmp(big.NewInt(1)) != 0 {
t.Errorf("unexpected value unpacked: want %x, got %x", 1, v.Ret.A)
}
if v.Ret.B.Cmp(big.NewInt(-1)) != 0 {
t.Errorf("unexpected value unpacked: want %x, got %x", v.Ret.B, -1)
}
}
// Test nested tuple
const nestedTuple = `[{"name":"tuple","constant":false,"outputs":[
{"type":"tuple","name":"s","components":[{"type":"uint256","name":"a"},{"type":"uint256[]","name":"b"},{"type":"tuple[]","name":"c","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]}]},
{"type":"tuple","name":"t","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]},
{"type":"uint256","name":"a"}
]}]`
abi, err = JSON(strings.NewReader(nestedTuple))
if err != nil {
t.Fatal(err)
}
buff.Reset()
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // s offset
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")) // t.X = 0
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // t.Y = 1
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // a = 1
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.A = 1
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000060")) // s.B offset
buff.Write(common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0")) // s.C offset
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.B length
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.B[0] = 1
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.B[1] = 2
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C length
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.C[0].X
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C[0].Y
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C[1].X
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.C[1].Y
type T struct {
X *big.Int `abi:"x"`
Z *big.Int `abi:"y"` // Test whether the abi tag works.
}
type S struct {
A *big.Int
B []*big.Int
C []T
}
type Ret struct {
FieldS S `abi:"s"`
FieldT T `abi:"t"`
A *big.Int
}
var ret Ret
var expected = Ret{
FieldS: S{
A: big.NewInt(1),
B: []*big.Int{big.NewInt(1), big.NewInt(2)},
C: []T{
{big.NewInt(1), big.NewInt(2)},
{big.NewInt(2), big.NewInt(1)},
},
},
FieldT: T{
big.NewInt(0), big.NewInt(1),
},
A: big.NewInt(1),
}
err = abi.Unpack(&ret, "tuple", buff.Bytes())
if err != nil {
t.Error(err)
}
if reflect.DeepEqual(ret, expected) {
t.Error("unexpected unpack value")
}
}
func TestOOMMaliciousInput(t *testing.T) {
oomTests := []unpackTest{
{

View File

@ -52,8 +52,8 @@ func (w *keystoreWallet) Status() (string, error) {
// is no connection or decryption step necessary to access the list of accounts.
func (w *keystoreWallet) Open(passphrase string) error { return nil }
// Close implements accounts.Wallet, but is a noop for plain wallets since is no
// meaningful open operation.
// Close implements accounts.Wallet, but is a noop for plain wallets since there
// is no meaningful open operation.
func (w *keystoreWallet) Close() error { return nil }
// Accounts implements accounts.Wallet, returning an account list consisting of
@ -84,10 +84,7 @@ func (w *keystoreWallet) SelfDerive(base accounts.DerivationPath, chain ethereum
// able to sign via our shared keystore backend).
func (w *keystoreWallet) SignHash(account accounts.Account, hash []byte) ([]byte, error) {
// Make sure the requested account is contained within
if account.Address != w.account.Address {
return nil, accounts.ErrUnknownAccount
}
if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
@ -100,10 +97,7 @@ func (w *keystoreWallet) SignHash(account accounts.Account, hash []byte) ([]byte
// be able to sign via our shared keystore backend).
func (w *keystoreWallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
// Make sure the requested account is contained within
if account.Address != w.account.Address {
return nil, accounts.ErrUnknownAccount
}
if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
@ -114,10 +108,7 @@ func (w *keystoreWallet) SignTx(account accounts.Account, tx *types.Transaction,
// given hash with the given account using passphrase as extra authentication.
func (w *keystoreWallet) SignHashWithPassphrase(account accounts.Account, passphrase string, hash []byte) ([]byte, error) {
// Make sure the requested account is contained within
if account.Address != w.account.Address {
return nil, accounts.ErrUnknownAccount
}
if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
@ -128,10 +119,7 @@ func (w *keystoreWallet) SignHashWithPassphrase(account accounts.Account, passph
// transaction with the given account using passphrase as extra authentication.
func (w *keystoreWallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
// Make sure the requested account is contained within
if account.Address != w.account.Address {
return nil, accounts.ErrUnknownAccount
}
if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
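
The four signing paths above now funnel their ownership check through a single Contains call. A minimal sketch of what that containment test amounts to, assuming it simply folds together the address-and-optional-URL comparison being removed:

// Sketch only: Contains reports whether this wallet manages the given account.
// The URL is compared only when the caller actually supplied one, mirroring the
// two-step check deleted above.
func (w *keystoreWallet) Contains(account accounts.Account) bool {
	return account.Address == w.account.Address &&
		(account.URL == (accounts.URL{}) || account.URL == w.account.URL)
}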

View File

@ -257,7 +257,9 @@ func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, er
// Decode the hex string into an Ethereum address and return
var address common.Address
hex.Decode(address[:], hexstr)
if _, err = hex.Decode(address[:], hexstr); err != nil {
return common.Address{}, err
}
return address, nil
}

View File

@ -23,8 +23,8 @@ environment:
install:
- git submodule update --init
- rmdir C:\go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.2.windows-%GETH_ARCH%.zip
- 7z x go1.11.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.4.windows-%GETH_ARCH%.zip
- 7z x go1.11.4.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version

View File

@ -1,3 +1,19 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build none
/*

View File

@ -20,7 +20,6 @@ import (
"bufio"
"errors"
"fmt"
"io"
"math/big"
"os"
"reflect"
@ -198,7 +197,17 @@ func dumpConfig(ctx *cli.Context) error {
if err != nil {
return err
}
io.WriteString(os.Stdout, comment)
os.Stdout.Write(out)
dump := os.Stdout
if ctx.NArg() > 0 {
dump, err = os.OpenFile(ctx.Args().Get(0), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return err
}
defer dump.Close()
}
dump.WriteString(comment)
dump.Write(out)
return nil
}

View File

@ -174,7 +174,11 @@ func (spec *alethGenesisSpec) setPrecompile(address byte, data *alethGenesisSpec
if spec.Accounts == nil {
spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
}
spec.Accounts[common.UnprefixedAddress(common.BytesToAddress([]byte{address}))].Precompiled = data
addr := common.UnprefixedAddress(common.BytesToAddress([]byte{address}))
if _, exist := spec.Accounts[addr]; !exist {
spec.Accounts[addr] = &alethGenesisSpecAccount{}
}
spec.Accounts[addr].Precompiled = data
}
func (spec *alethGenesisSpec) setAccount(address common.Address, account core.GenesisAccount) {

View File

@ -33,11 +33,11 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
swarmapi "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/testutil"
"golang.org/x/crypto/sha3"
)
const (
@ -598,7 +598,7 @@ func TestKeypairSanity(t *testing.T) {
t.Fatal(err)
}
hasher := sha3.NewKeccak256()
hasher := sha3.NewLegacyKeccak256()
hasher.Write(salt)
shared, err := hex.DecodeString(sharedSecret)
if err != nil {

View File

@ -164,10 +164,6 @@ var (
Name: "topic",
Usage: "User-defined topic this feed is tracking, hex encoded. Limited to 64 hexadecimal characters",
}
SwarmFeedDataOnCreateFlag = cli.StringFlag{
Name: "data",
Usage: "Initializes the feed with the given hex-encoded data. Data must be prefixed by 0x",
}
SwarmFeedManifestFlag = cli.StringFlag{
Name: "manifest",
Usage: "Refers to the feed through a manifest",

View File

@ -27,7 +27,7 @@ import (
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto/sha3"
"golang.org/x/crypto/sha3"
)
// Lengths of hashes and addresses in bytes.
@ -196,7 +196,7 @@ func (a Address) Hash() Hash { return BytesToHash(a[:]) }
// Hex returns an EIP55-compliant hex string representation of the address.
func (a Address) Hex() string {
unchecksummed := hex.EncodeToString(a[:])
sha := sha3.NewKeccak256()
sha := sha3.NewLegacyKeccak256()
sha.Write([]byte(unchecksummed))
hash := sha.Sum(nil)
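
For context, the EIP-55 rule this method implements is: Keccak-256-hash the lowercase hex form of the address and uppercase every hex letter whose corresponding hash nibble is 8 or higher. A self-contained sketch of that rule against the external sha3 package this commit switches to (illustrative only, not the library code itself):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/crypto/sha3"
)

// checksumHex applies the EIP-55 mixed-case checksum to a plain hex address.
func checksumHex(addr string) string {
	unchecksummed := strings.ToLower(strings.TrimPrefix(addr, "0x"))
	sha := sha3.NewLegacyKeccak256()
	sha.Write([]byte(unchecksummed))
	hash := sha.Sum(nil)

	result := []byte(unchecksummed)
	for i := 0; i < len(result); i++ {
		nibble := hash[i/2]
		if i%2 == 0 {
			nibble >>= 4
		} else {
			nibble &= 0xf
		}
		if result[i] > '9' && nibble > 7 {
			result[i] -= 32 // lowercase hex letter -> uppercase
		}
	}
	return "0x" + string(result)
}

func main() {
	// Prints the canonical EIP-55 example: 0x5aAeb6053F3E94C9b9A09f33669435E7Ef1BeAed
	fmt.Println(checksumHex("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed"))
}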

View File

@ -33,13 +33,13 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
lru "github.com/hashicorp/golang-lru"
"golang.org/x/crypto/sha3"
)
const (
@ -148,7 +148,7 @@ type SignerFn func(accounts.Account, []byte) ([]byte, error)
// panics. This is done to avoid accidentally using both forms (signature present
// or not), which could be abused to produce different hashes for the same header.
func sigHash(header *types.Header) (hash common.Hash) {
hasher := sha3.NewKeccak256()
hasher := sha3.NewLegacyKeccak256()
rlp.Encode(hasher, []interface{}{
header.ParentHash,

View File

@ -30,8 +30,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"golang.org/x/crypto/sha3"
)
const (
@ -123,7 +123,7 @@ func seedHash(block uint64) []byte {
if block < epochLength {
return seed
}
keccak256 := makeHasher(sha3.NewKeccak256())
keccak256 := makeHasher(sha3.NewLegacyKeccak256())
for i := 0; i < int(block/epochLength); i++ {
keccak256(seed, seed)
}
@ -177,7 +177,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
}
}()
// Create a hasher to reuse between invocations
keccak512 := makeHasher(sha3.NewKeccak512())
keccak512 := makeHasher(sha3.NewLegacyKeccak512())
// Sequentially produce the initial dataset
keccak512(cache, seed)
@ -301,7 +301,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
defer pend.Done()
// Create a hasher to reuse between invocations
keccak512 := makeHasher(sha3.NewKeccak512())
keccak512 := makeHasher(sha3.NewLegacyKeccak512())
// Calculate the data segment this thread should generate
batch := uint32((size + hashBytes*uint64(threads) - 1) / (hashBytes * uint64(threads)))
@ -375,7 +375,7 @@ func hashimoto(hash []byte, nonce uint64, size uint64, lookup func(index uint32)
// in-memory cache) in order to produce our final value for a particular header
// hash and nonce.
func hashimotoLight(size uint64, cache []uint32, hash []byte, nonce uint64) ([]byte, []byte) {
keccak512 := makeHasher(sha3.NewKeccak512())
keccak512 := makeHasher(sha3.NewLegacyKeccak512())
lookup := func(index uint32) []uint32 {
rawData := generateDatasetItem(cache, index, keccak512)

View File

@ -31,9 +31,9 @@ import (
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
// Ethash proof-of-work protocol constants.
@ -575,7 +575,7 @@ func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header
// SealHash returns the hash of a block prior to it being sealed.
func (ethash *Ethash) SealHash(header *types.Header) (hash common.Hash) {
hasher := sha3.NewKeccak256()
hasher := sha3.NewLegacyKeccak256()
rlp.Encode(hasher, []interface{}{
header.ParentHash,

View File

@ -65,7 +65,7 @@ const (
triesInMemory = 128
// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
BlockChainVersion = 3
BlockChainVersion uint64 = 3
)
// CacheConfig contains the configuration values for the trie caching/pruning

View File

@ -23,9 +23,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
// Tests block header storage and retrieval operations.
@ -47,7 +47,7 @@ func TestHeaderStorage(t *testing.T) {
if entry := ReadHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil {
t.Fatalf("Stored header RLP not found")
} else {
hasher := sha3.NewKeccak256()
hasher := sha3.NewLegacyKeccak256()
hasher.Write(entry)
if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
@ -68,7 +68,7 @@ func TestBodyStorage(t *testing.T) {
// Create a test body to move around the database and make sure it's really new
body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}
hasher := sha3.NewKeccak256()
hasher := sha3.NewLegacyKeccak256()
rlp.Encode(hasher, body)
hash := common.BytesToHash(hasher.Sum(nil))
@ -85,7 +85,7 @@ func TestBodyStorage(t *testing.T) {
if entry := ReadBodyRLP(db, hash, 0); entry == nil {
t.Fatalf("Stored body RLP not found")
} else {
hasher := sha3.NewKeccak256()
hasher := sha3.NewLegacyKeccak256()
hasher.Write(entry)
if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {

View File

@ -26,19 +26,27 @@ import (
)
// ReadDatabaseVersion retrieves the version number of the database.
func ReadDatabaseVersion(db DatabaseReader) int {
var version int
func ReadDatabaseVersion(db DatabaseReader) *uint64 {
var version uint64
enc, _ := db.Get(databaseVerisionKey)
rlp.DecodeBytes(enc, &version)
if len(enc) == 0 {
return nil
}
if err := rlp.DecodeBytes(enc, &version); err != nil {
return nil
}
return version
return &version
}
// WriteDatabaseVersion stores the version number of the database
func WriteDatabaseVersion(db DatabaseWriter, version int) {
enc, _ := rlp.EncodeToBytes(version)
if err := db.Put(databaseVerisionKey, enc); err != nil {
func WriteDatabaseVersion(db DatabaseWriter, version uint64) {
enc, err := rlp.EncodeToBytes(version)
if err != nil {
log.Crit("Failed to encode database version", "err", err)
}
if err = db.Put(databaseVerisionKey, enc); err != nil {
log.Crit("Failed to store the database version", "err", err)
}
}
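
A brief caller-side sketch (assumed consumer code, not part of this hunk; the rawdb and core names are taken from elsewhere in the diff) of how the new pointer return is meant to be read, with nil distinguishing "no version stored yet" from an actual version number:

if version := rawdb.ReadDatabaseVersion(chainDb); version == nil {
	// Fresh database: stamp it with the current version.
	rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion)
} else if *version != core.BlockChainVersion {
	// Incompatible database: the node must resync from scratch.
}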

View File

@ -468,9 +468,9 @@ func (self *StateDB) createObject(addr common.Address) (newobj, prev *stateObjec
//
// Carrying over the balance ensures that Ether doesn't disappear.
func (self *StateDB) CreateAccount(addr common.Address) {
new, prev := self.createObject(addr)
newObj, prev := self.createObject(addr)
if prev != nil {
new.setBalance(prev.data.Balance)
newObj.setBalance(prev.data.Balance)
}
}

View File

@ -22,7 +22,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
)
// senderCacher is a concurrent transaction sender recoverer anc cacher.
// senderCacher is a concurrent transaction sender recoverer and cacher.
var senderCacher = newTxSenderCacher(runtime.NumCPU())
// txSenderCacherRequest is a request for recovering transaction senders with a

View File

@ -172,6 +172,26 @@ func (config *TxPoolConfig) sanitize() TxPoolConfig {
log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
conf.PriceBump = DefaultTxPoolConfig.PriceBump
}
if conf.AccountSlots < 1 {
log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
}
if conf.GlobalSlots < 1 {
log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
}
if conf.AccountQueue < 1 {
log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
}
if conf.GlobalQueue < 1 {
log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
}
if conf.Lifetime < 1 {
log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
conf.Lifetime = DefaultTxPoolConfig.Lifetime
}
return conf
}

View File

@ -1095,7 +1095,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
config := testTxPoolConfig
config.GlobalSlots = 0
config.GlobalSlots = 1
pool := NewTxPool(config, params.TestChainConfig, blockchain)
defer pool.Stop()

View File

@ -28,8 +28,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
var (
@ -109,7 +109,7 @@ func (h *Header) Size() common.StorageSize {
}
func rlpHash(x interface{}) (h common.Hash) {
hw := sha3.NewKeccak256()
hw := sha3.NewLegacyKeccak256()
rlp.Encode(hw, x)
hw.Sum(h[:0])
return h

View File

@ -234,7 +234,7 @@ func (tx *Transaction) AsMessage(s Signer) (Message, error) {
}
// WithSignature returns a new transaction with the given signature.
// This signature needs to be formatted as described in the yellow paper (v+27).
// This signature needs to be in the [R || S || V] format where V is 0 or 1.
func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, error) {
r, s, v, err := signer.SignatureValues(tx, sig)
if err != nil {
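
The corrected comment describes the raw 65-byte signature layout. A hedged caller-side sketch of producing such a signature and attaching it, roughly what types.SignTx does internally (error handling abbreviated):

h := signer.Hash(tx)                      // signing hash defined by the chosen Signer
sig, err := crypto.Sign(h[:], privateKey) // 65 bytes: R || S || V, with V in {0, 1}
if err != nil {
	return nil, err
}
return tx.WithSignature(signer, sig)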

View File

@ -24,8 +24,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/params"
"golang.org/x/crypto/sha3"
)
var (
@ -387,7 +387,7 @@ func opSha3(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory
data := memory.Get(offset.Int64(), size.Int64())
if interpreter.hasher == nil {
interpreter.hasher = sha3.NewKeccak256().(keccakState)
interpreter.hasher = sha3.NewLegacyKeccak256().(keccakState)
} else {
interpreter.hasher.Reset()
}

View File

@ -30,8 +30,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
var (
@ -43,7 +43,7 @@ var errInvalidPubkey = errors.New("invalid secp256k1 public key")
// Keccak256 calculates and returns the Keccak256 hash of the input data.
func Keccak256(data ...[]byte) []byte {
d := sha3.NewKeccak256()
d := sha3.NewLegacyKeccak256()
for _, b := range data {
d.Write(b)
}
@ -53,7 +53,7 @@ func Keccak256(data ...[]byte) []byte {
// Keccak256Hash calculates and returns the Keccak256 hash of the input data,
// converting it to an internal Hash data structure.
func Keccak256Hash(data ...[]byte) (h common.Hash) {
d := sha3.NewKeccak256()
d := sha3.NewLegacyKeccak256()
for _, b := range data {
d.Write(b)
}
@ -63,7 +63,7 @@ func Keccak256Hash(data ...[]byte) (h common.Hash) {
// Keccak512 calculates and returns the Keccak512 hash of the input data.
func Keccak512(data ...[]byte) []byte {
d := sha3.NewKeccak512()
d := sha3.NewLegacyKeccak512()
for _, b := range data {
d.Write(b)
}

View File

@ -310,7 +310,7 @@ var theCurve = new(BitCurve)
func init() {
// See SEC 2 section 2.7.1
// curve parameters taken from:
// http://www.secg.org/collateral/sec2_final.pdf
// http://www.secg.org/sec2-v2.pdf
theCurve.P, _ = new(big.Int).SetString("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 0)
theCurve.N, _ = new(big.Int).SetString("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 0)
theCurve.B, _ = new(big.Int).SetString("0x0000000000000000000000000000000000000000000000000000000000000007", 0)

View File

@ -1,27 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

View File

@ -1,66 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sha3 implements the SHA-3 fixed-output-length hash functions and
// the SHAKE variable-output-length hash functions defined by FIPS-202.
//
// Both types of hash function use the "sponge" construction and the Keccak
// permutation. For a detailed specification see http://keccak.noekeon.org/
//
//
// Guidance
//
// If you aren't sure what function you need, use SHAKE256 with at least 64
// bytes of output. The SHAKE instances are faster than the SHA3 instances;
// the latter have to allocate memory to conform to the hash.Hash interface.
//
// If you need a secret-key MAC (message authentication code), prepend the
// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
// output.
//
//
// Security strengths
//
// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
// strength against preimage attacks of x bits. Since they only produce "x"
// bits of output, their collision-resistance is only "x/2" bits.
//
// The SHAKE-256 and -128 functions have a generic security strength of 256 and
// 128 bits against all attacks, provided that at least 2x bits of their output
// is used. Requesting more than 64 or 32 bytes of output, respectively, does
// not increase the collision-resistance of the SHAKE functions.
//
//
// The sponge construction
//
// A sponge builds a pseudo-random function from a public pseudo-random
// permutation, by applying the permutation to a state of "rate + capacity"
// bytes, but hiding "capacity" of the bytes.
//
// A sponge starts out with a zero state. To hash an input using a sponge, up
// to "rate" bytes of the input are XORed into the sponge's state. The sponge
// is then "full" and the permutation is applied to "empty" it. This process is
// repeated until all the input has been "absorbed". The input is then padded.
// The digest is "squeezed" from the sponge in the same way, except that output
// is copied out instead of input being XORed in.
//
// A sponge is parameterized by its generic security strength, which is equal
// to half its capacity; capacity + rate is equal to the permutation's width.
// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
//
//
// Recommendations
//
// The SHAKE functions are recommended for most new uses. They can produce
// output of arbitrary length. SHAKE256, with an output length of at least
// 64 bytes, provides 256-bit security against all attacks. The Keccak team
// recommends it for most applications upgrading from SHA2-512. (NIST chose a
// much stronger, but much slower, sponge instance for SHA3-512.)
//
// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
// They produce output of the same length, with the same security strengths
// against all attacks. This means, in particular, that SHA3-256 only has
// 128-bit collision resistance, because its output length is 32 bytes.
package sha3
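
This package documentation goes away with the rest of the vendored copy; its guidance (SHAKE256 with at least 64 bytes of output for general use) still applies to the external golang.org/x/crypto/sha3 package the commit adopts. A minimal sketch of that recommendation, assuming the standard x/crypto API:

import "golang.org/x/crypto/sha3"

// 64 bytes of SHAKE256 output, as the guidance above recommends.
var out [64]byte
sha3.ShakeSum256(out[:], []byte("input data"))

// Fixed-length, SHA-2-style drop-in: SHA3-256 of the same input.
sum := sha3.Sum256([]byte("input data"))
_ = sum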

View File

@ -1,71 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// This file provides functions for creating instances of the SHA-3
// and SHAKE hash functions, as well as utility functions for hashing
// bytes.
import (
"hash"
)
// NewKeccak256 creates a new Keccak-256 hash.
func NewKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
// NewKeccak512 creates a new Keccak-512 hash.
func NewKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }
// New224 creates a new SHA3-224 hash.
// Its generic security strength is 224 bits against preimage attacks,
// and 112 bits against collision attacks.
func New224() hash.Hash { return &state{rate: 144, outputLen: 28, dsbyte: 0x06} }
// New256 creates a new SHA3-256 hash.
// Its generic security strength is 256 bits against preimage attacks,
// and 128 bits against collision attacks.
func New256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x06} }
// New384 creates a new SHA3-384 hash.
// Its generic security strength is 384 bits against preimage attacks,
// and 192 bits against collision attacks.
func New384() hash.Hash { return &state{rate: 104, outputLen: 48, dsbyte: 0x06} }
// New512 creates a new SHA3-512 hash.
// Its generic security strength is 512 bits against preimage attacks,
// and 256 bits against collision attacks.
func New512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x06} }
// Sum224 returns the SHA3-224 digest of the data.
func Sum224(data []byte) (digest [28]byte) {
h := New224()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum256 returns the SHA3-256 digest of the data.
func Sum256(data []byte) (digest [32]byte) {
h := New256()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum384 returns the SHA3-384 digest of the data.
func Sum384(data []byte) (digest [48]byte) {
h := New384()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum512 returns the SHA3-512 digest of the data.
func Sum512(data []byte) (digest [64]byte) {
h := New512()
h.Write(data)
h.Sum(digest[:0])
return
}
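
These vendored constructors disappear because the commit moves to the external golang.org/x/crypto/sha3 package, where the pre-standard Keccak variants carry a "Legacy" prefix. A minimal sketch of the substitution applied throughout this diff:

import "golang.org/x/crypto/sha3"

// before (vendored copy, deleted here): hasher := sha3.NewKeccak256()
hasher := sha3.NewLegacyKeccak256() // original Keccak padding, dsbyte 0x01
hasher.Write(data)
digest := hasher.Sum(nil)

// The FIPS-202 constructors keep their names in the external package:
// sha3.New224, sha3.New256, sha3.New384, sha3.New512 and the Sum helpers.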

View File

@ -1,412 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine gccgo
package sha3
// rc stores the round constants for use in the ι step.
var rc = [24]uint64{
0x0000000000000001,
0x0000000000008082,
0x800000000000808A,
0x8000000080008000,
0x000000000000808B,
0x0000000080000001,
0x8000000080008081,
0x8000000000008009,
0x000000000000008A,
0x0000000000000088,
0x0000000080008009,
0x000000008000000A,
0x000000008000808B,
0x800000000000008B,
0x8000000000008089,
0x8000000000008003,
0x8000000000008002,
0x8000000000000080,
0x000000000000800A,
0x800000008000000A,
0x8000000080008081,
0x8000000000008080,
0x0000000080000001,
0x8000000080008008,
}
// keccakF1600 applies the Keccak permutation to a 1600b-wide
// state represented as a slice of 25 uint64s.
func keccakF1600(a *[25]uint64) {
// Implementation translated from Keccak-inplace.c
// in the keccak reference code.
var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
for i := 0; i < 24; i += 4 {
// Combines the 5 steps in each round into 2 steps.
// Unrolls 4 rounds per loop and spreads some steps across rounds.
// Round 1
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[6] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[12] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[18] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[24] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
a[6] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[16] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[22] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[3] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[10] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[1] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[7] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[19] ^ d4
bc3 = t<<8 | t>>(64-8)
a[20] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[11] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[23] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[4] ^ d4
bc0 = t<<27 | t>>(64-27)
a[5] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[2] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[8] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[14] ^ d4
bc2 = t<<39 | t>>(64-39)
a[15] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
// Round 2
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[16] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[7] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[23] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[14] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
a[16] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[11] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[2] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[18] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[20] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[6] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[22] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[4] ^ d4
bc3 = t<<8 | t>>(64-8)
a[15] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[1] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[8] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[24] ^ d4
bc0 = t<<27 | t>>(64-27)
a[10] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[12] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[3] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[19] ^ d4
bc2 = t<<39 | t>>(64-39)
a[5] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
// Round 3
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[11] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[22] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[8] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[19] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
a[11] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[1] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[12] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[23] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[15] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[16] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[2] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[24] ^ d4
bc3 = t<<8 | t>>(64-8)
a[5] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[6] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[3] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[14] ^ d4
bc0 = t<<27 | t>>(64-27)
a[20] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[7] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[18] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[4] ^ d4
bc2 = t<<39 | t>>(64-39)
a[10] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
// Round 4
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[1] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[2] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[3] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[4] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
a[1] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[6] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[7] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[8] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[5] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[11] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[12] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[14] ^ d4
bc3 = t<<8 | t>>(64-8)
a[10] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[16] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[18] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[19] ^ d4
bc0 = t<<27 | t>>(64-27)
a[15] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[22] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[23] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[24] ^ d4
bc2 = t<<39 | t>>(64-39)
a[20] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
}
}

View File

@ -1,13 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!appengine,!gccgo
package sha3
// This function is implemented in keccakf_amd64.s.
//go:noescape
func keccakF1600(state *[25]uint64)

View File

@ -1,390 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!appengine,!gccgo
// This code was translated into a form compatible with 6a from the public
// domain sources at https://github.com/gvanas/KeccakCodePackage
// Offsets in state
#define _ba (0*8)
#define _be (1*8)
#define _bi (2*8)
#define _bo (3*8)
#define _bu (4*8)
#define _ga (5*8)
#define _ge (6*8)
#define _gi (7*8)
#define _go (8*8)
#define _gu (9*8)
#define _ka (10*8)
#define _ke (11*8)
#define _ki (12*8)
#define _ko (13*8)
#define _ku (14*8)
#define _ma (15*8)
#define _me (16*8)
#define _mi (17*8)
#define _mo (18*8)
#define _mu (19*8)
#define _sa (20*8)
#define _se (21*8)
#define _si (22*8)
#define _so (23*8)
#define _su (24*8)
// Temporary registers
#define rT1 AX
// Round vars
#define rpState DI
#define rpStack SP
#define rDa BX
#define rDe CX
#define rDi DX
#define rDo R8
#define rDu R9
#define rBa R10
#define rBe R11
#define rBi R12
#define rBo R13
#define rBu R14
#define rCa SI
#define rCe BP
#define rCi rBi
#define rCo rBo
#define rCu R15
#define MOVQ_RBI_RCE MOVQ rBi, rCe
#define XORQ_RT1_RCA XORQ rT1, rCa
#define XORQ_RT1_RCE XORQ rT1, rCe
#define XORQ_RBA_RCU XORQ rBa, rCu
#define XORQ_RBE_RCU XORQ rBe, rCu
#define XORQ_RDU_RCU XORQ rDu, rCu
#define XORQ_RDA_RCA XORQ rDa, rCa
#define XORQ_RDE_RCE XORQ rDe, rCe
#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \
/* Prepare round */ \
MOVQ rCe, rDa; \
ROLQ $1, rDa; \
\
MOVQ _bi(iState), rCi; \
XORQ _gi(iState), rDi; \
XORQ rCu, rDa; \
XORQ _ki(iState), rCi; \
XORQ _mi(iState), rDi; \
XORQ rDi, rCi; \
\
MOVQ rCi, rDe; \
ROLQ $1, rDe; \
\
MOVQ _bo(iState), rCo; \
XORQ _go(iState), rDo; \
XORQ rCa, rDe; \
XORQ _ko(iState), rCo; \
XORQ _mo(iState), rDo; \
XORQ rDo, rCo; \
\
MOVQ rCo, rDi; \
ROLQ $1, rDi; \
\
MOVQ rCu, rDo; \
XORQ rCe, rDi; \
ROLQ $1, rDo; \
\
MOVQ rCa, rDu; \
XORQ rCi, rDo; \
ROLQ $1, rDu; \
\
/* Result b */ \
MOVQ _ba(iState), rBa; \
MOVQ _ge(iState), rBe; \
XORQ rCo, rDu; \
MOVQ _ki(iState), rBi; \
MOVQ _mo(iState), rBo; \
MOVQ _su(iState), rBu; \
XORQ rDe, rBe; \
ROLQ $44, rBe; \
XORQ rDi, rBi; \
XORQ rDa, rBa; \
ROLQ $43, rBi; \
\
MOVQ rBe, rCa; \
MOVQ rc, rT1; \
ORQ rBi, rCa; \
XORQ rBa, rT1; \
XORQ rT1, rCa; \
MOVQ rCa, _ba(oState); \
\
XORQ rDu, rBu; \
ROLQ $14, rBu; \
MOVQ rBa, rCu; \
ANDQ rBe, rCu; \
XORQ rBu, rCu; \
MOVQ rCu, _bu(oState); \
\
XORQ rDo, rBo; \
ROLQ $21, rBo; \
MOVQ rBo, rT1; \
ANDQ rBu, rT1; \
XORQ rBi, rT1; \
MOVQ rT1, _bi(oState); \
\
NOTQ rBi; \
ORQ rBa, rBu; \
ORQ rBo, rBi; \
XORQ rBo, rBu; \
XORQ rBe, rBi; \
MOVQ rBu, _bo(oState); \
MOVQ rBi, _be(oState); \
B_RBI_RCE; \
\
/* Result g */ \
MOVQ _gu(iState), rBe; \
XORQ rDu, rBe; \
MOVQ _ka(iState), rBi; \
ROLQ $20, rBe; \
XORQ rDa, rBi; \
ROLQ $3, rBi; \
MOVQ _bo(iState), rBa; \
MOVQ rBe, rT1; \
ORQ rBi, rT1; \
XORQ rDo, rBa; \
MOVQ _me(iState), rBo; \
MOVQ _si(iState), rBu; \
ROLQ $28, rBa; \
XORQ rBa, rT1; \
MOVQ rT1, _ga(oState); \
G_RT1_RCA; \
\
XORQ rDe, rBo; \
ROLQ $45, rBo; \
MOVQ rBi, rT1; \
ANDQ rBo, rT1; \
XORQ rBe, rT1; \
MOVQ rT1, _ge(oState); \
G_RT1_RCE; \
\
XORQ rDi, rBu; \
ROLQ $61, rBu; \
MOVQ rBu, rT1; \
ORQ rBa, rT1; \
XORQ rBo, rT1; \
MOVQ rT1, _go(oState); \
\
ANDQ rBe, rBa; \
XORQ rBu, rBa; \
MOVQ rBa, _gu(oState); \
NOTQ rBu; \
G_RBA_RCU; \
\
ORQ rBu, rBo; \
XORQ rBi, rBo; \
MOVQ rBo, _gi(oState); \
\
/* Result k */ \
MOVQ _be(iState), rBa; \
MOVQ _gi(iState), rBe; \
MOVQ _ko(iState), rBi; \
MOVQ _mu(iState), rBo; \
MOVQ _sa(iState), rBu; \
XORQ rDi, rBe; \
ROLQ $6, rBe; \
XORQ rDo, rBi; \
ROLQ $25, rBi; \
MOVQ rBe, rT1; \
ORQ rBi, rT1; \
XORQ rDe, rBa; \
ROLQ $1, rBa; \
XORQ rBa, rT1; \
MOVQ rT1, _ka(oState); \
K_RT1_RCA; \
\
XORQ rDu, rBo; \
ROLQ $8, rBo; \
MOVQ rBi, rT1; \
ANDQ rBo, rT1; \
XORQ rBe, rT1; \
MOVQ rT1, _ke(oState); \
K_RT1_RCE; \
\
XORQ rDa, rBu; \
ROLQ $18, rBu; \
NOTQ rBo; \
MOVQ rBo, rT1; \
ANDQ rBu, rT1; \
XORQ rBi, rT1; \
MOVQ rT1, _ki(oState); \
\
MOVQ rBu, rT1; \
ORQ rBa, rT1; \
XORQ rBo, rT1; \
MOVQ rT1, _ko(oState); \
\
ANDQ rBe, rBa; \
XORQ rBu, rBa; \
MOVQ rBa, _ku(oState); \
K_RBA_RCU; \
\
/* Result m */ \
MOVQ _ga(iState), rBe; \
XORQ rDa, rBe; \
MOVQ _ke(iState), rBi; \
ROLQ $36, rBe; \
XORQ rDe, rBi; \
MOVQ _bu(iState), rBa; \
ROLQ $10, rBi; \
MOVQ rBe, rT1; \
MOVQ _mi(iState), rBo; \
ANDQ rBi, rT1; \
XORQ rDu, rBa; \
MOVQ _so(iState), rBu; \
ROLQ $27, rBa; \
XORQ rBa, rT1; \
MOVQ rT1, _ma(oState); \
M_RT1_RCA; \
\
XORQ rDi, rBo; \
ROLQ $15, rBo; \
MOVQ rBi, rT1; \
ORQ rBo, rT1; \
XORQ rBe, rT1; \
MOVQ rT1, _me(oState); \
M_RT1_RCE; \
\
XORQ rDo, rBu; \
ROLQ $56, rBu; \
NOTQ rBo; \
MOVQ rBo, rT1; \
ORQ rBu, rT1; \
XORQ rBi, rT1; \
MOVQ rT1, _mi(oState); \
\
ORQ rBa, rBe; \
XORQ rBu, rBe; \
MOVQ rBe, _mu(oState); \
\
ANDQ rBa, rBu; \
XORQ rBo, rBu; \
MOVQ rBu, _mo(oState); \
M_RBE_RCU; \
\
/* Result s */ \
MOVQ _bi(iState), rBa; \
MOVQ _go(iState), rBe; \
MOVQ _ku(iState), rBi; \
XORQ rDi, rBa; \
MOVQ _ma(iState), rBo; \
ROLQ $62, rBa; \
XORQ rDo, rBe; \
MOVQ _se(iState), rBu; \
ROLQ $55, rBe; \
\
XORQ rDu, rBi; \
MOVQ rBa, rDu; \
XORQ rDe, rBu; \
ROLQ $2, rBu; \
ANDQ rBe, rDu; \
XORQ rBu, rDu; \
MOVQ rDu, _su(oState); \
\
ROLQ $39, rBi; \
S_RDU_RCU; \
NOTQ rBe; \
XORQ rDa, rBo; \
MOVQ rBe, rDa; \
ANDQ rBi, rDa; \
XORQ rBa, rDa; \
MOVQ rDa, _sa(oState); \
S_RDA_RCA; \
\
ROLQ $41, rBo; \
MOVQ rBi, rDe; \
ORQ rBo, rDe; \
XORQ rBe, rDe; \
MOVQ rDe, _se(oState); \
S_RDE_RCE; \
\
MOVQ rBo, rDi; \
MOVQ rBu, rDo; \
ANDQ rBu, rDi; \
ORQ rBa, rDo; \
XORQ rBi, rDi; \
XORQ rBo, rDo; \
MOVQ rDi, _si(oState); \
MOVQ rDo, _so(oState) \
// func keccakF1600(state *[25]uint64)
TEXT ·keccakF1600(SB), 0, $200-8
MOVQ state+0(FP), rpState
// Convert the user state into an internal state
NOTQ _be(rpState)
NOTQ _bi(rpState)
NOTQ _go(rpState)
NOTQ _ki(rpState)
NOTQ _mi(rpState)
NOTQ _sa(rpState)
// Execute the KeccakF permutation
MOVQ _ba(rpState), rCa
MOVQ _be(rpState), rCe
MOVQ _bu(rpState), rCu
XORQ _ga(rpState), rCa
XORQ _ge(rpState), rCe
XORQ _gu(rpState), rCu
XORQ _ka(rpState), rCa
XORQ _ke(rpState), rCe
XORQ _ku(rpState), rCu
XORQ _ma(rpState), rCa
XORQ _me(rpState), rCe
XORQ _mu(rpState), rCu
XORQ _sa(rpState), rCa
XORQ _se(rpState), rCe
MOVQ _si(rpState), rDi
MOVQ _so(rpState), rDo
XORQ _su(rpState), rCu
mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP)
// Revert the internal state to the user state
NOTQ _be(rpState)
NOTQ _bi(rpState)
NOTQ _go(rpState)
NOTQ _ki(rpState)
NOTQ _mi(rpState)
NOTQ _sa(rpState)
RET
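The TEXT block above strings together all 24 rounds of Keccak-f[1600] by alternating the on-stack copy and the caller's state as the input and output of mKeccakRound, passing each round its constant. The NOTQ conversions before and after the rounds read like the usual lane-complementing optimization, storing a handful of lanes inverted so the round code needs fewer NOT instructions; that reading is inferred from the "internal state" comments rather than stated here. A minimal sketch of how the sponge code drives the permutation from inside this package:

    // Apply one full 24-round permutation to the 25 64-bit lanes in place.
    var lanes [25]uint64
    lanes[0] ^= 0x01 // absorb a byte into the first lane (illustrative only)
    keccakF1600(&lanes)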

View File

@ -1,18 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.4
package sha3
import (
"crypto"
)
func init() {
crypto.RegisterHash(crypto.SHA3_224, New224)
crypto.RegisterHash(crypto.SHA3_256, New256)
crypto.RegisterHash(crypto.SHA3_384, New384)
crypto.RegisterHash(crypto.SHA3_512, New512)
}
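Because this init registers the constructors with the standard library, callers can build these hashes through crypto.Hash without importing the package directly. A minimal sketch against the pre-bump vendored import path (the blank import is only there to trigger the registration; golang.org/x/crypto/sha3 ships the same register file):

    package main

    import (
        "crypto"
        "fmt"

        _ "github.com/ethereum/go-ethereum/crypto/sha3" // pre-1.8.21 vendored path
    )

    func main() {
        h := crypto.SHA3_256.New() // available only because RegisterHash ran in init
        h.Write([]byte("abc"))
        fmt.Printf("%x\n", h.Sum(nil))
    }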

View File

@ -1,192 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// spongeDirection indicates the direction bytes are flowing through the sponge.
type spongeDirection int
const (
// spongeAbsorbing indicates that the sponge is absorbing input.
spongeAbsorbing spongeDirection = iota
// spongeSqueezing indicates that the sponge is being squeezed.
spongeSqueezing
)
const (
// maxRate is the maximum size of the internal buffer. SHAKE-128
// currently needs the largest buffer.
maxRate = 168
)
type state struct {
// Generic sponge components.
a [25]uint64 // main state of the hash
buf []byte // points into storage
rate int // the number of bytes of state to use
// dsbyte contains the "domain separation" bits and the first bit of
// the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
// SHA-3 and SHAKE functions by appending bitstrings to the message.
// Using a little-endian bit-ordering convention, these are "01" for SHA-3
// and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
// padding rule from section 5.1 is applied to pad the message to a multiple
// of the rate, which involves adding a "1" bit, zero or more "0" bits, and
// a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
// giving 00000110b (0x06) and 00011111b (0x1f).
// [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
// "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
// Extendable-Output Functions (May 2014)"
dsbyte byte
storage [maxRate]byte
// Specific to SHA-3 and SHAKE.
outputLen int // the default output size in bytes
state spongeDirection // whether the sponge is absorbing or squeezing
}
// BlockSize returns the rate of sponge underlying this hash function.
func (d *state) BlockSize() int { return d.rate }
// Size returns the output size of the hash function in bytes.
func (d *state) Size() int { return d.outputLen }
// Reset clears the internal state by zeroing the sponge state and
// the byte buffer, and setting Sponge.state to absorbing.
func (d *state) Reset() {
// Zero the permutation's state.
for i := range d.a {
d.a[i] = 0
}
d.state = spongeAbsorbing
d.buf = d.storage[:0]
}
func (d *state) clone() *state {
ret := *d
if ret.state == spongeAbsorbing {
ret.buf = ret.storage[:len(ret.buf)]
} else {
ret.buf = ret.storage[d.rate-cap(d.buf) : d.rate]
}
return &ret
}
// permute applies the KeccakF-1600 permutation. It handles
// any input-output buffering.
func (d *state) permute() {
switch d.state {
case spongeAbsorbing:
// If we're absorbing, we need to xor the input into the state
// before applying the permutation.
xorIn(d, d.buf)
d.buf = d.storage[:0]
keccakF1600(&d.a)
case spongeSqueezing:
// If we're squeezing, we need to apply the permutation before
// copying more output.
keccakF1600(&d.a)
d.buf = d.storage[:d.rate]
copyOut(d, d.buf)
}
}
// padAndPermute appends the domain separation bits in dsbyte, applies
// the multi-bitrate 10..1 padding rule, and permutes the state.
func (d *state) padAndPermute(dsbyte byte) {
if d.buf == nil {
d.buf = d.storage[:0]
}
// Pad with this instance's domain-separator bits. We know that there's
// at least one byte of space in d.buf because, if it were full,
// permute would have been called to empty it. dsbyte also contains the
// first one bit for the padding. See the comment in the state struct.
d.buf = append(d.buf, dsbyte)
zerosStart := len(d.buf)
d.buf = d.storage[:d.rate]
for i := zerosStart; i < d.rate; i++ {
d.buf[i] = 0
}
// This adds the final one bit for the padding. Because of the way that
// bits are numbered from the LSB upwards, the final bit is the MSB of
// the last byte.
d.buf[d.rate-1] ^= 0x80
// Apply the permutation
d.permute()
d.state = spongeSqueezing
d.buf = d.storage[:d.rate]
copyOut(d, d.buf)
}
// Write absorbs more data into the hash's state. It panics if more data
// is written to the ShakeHash after output has been read from it.
func (d *state) Write(p []byte) (written int, err error) {
if d.state != spongeAbsorbing {
panic("sha3: write to sponge after read")
}
if d.buf == nil {
d.buf = d.storage[:0]
}
written = len(p)
for len(p) > 0 {
if len(d.buf) == 0 && len(p) >= d.rate {
// The fast path; absorb a full "rate" bytes of input and apply the permutation.
xorIn(d, p[:d.rate])
p = p[d.rate:]
keccakF1600(&d.a)
} else {
// The slow path; buffer the input until we can fill the sponge, and then xor it in.
todo := d.rate - len(d.buf)
if todo > len(p) {
todo = len(p)
}
d.buf = append(d.buf, p[:todo]...)
p = p[todo:]
// If the sponge is full, apply the permutation.
if len(d.buf) == d.rate {
d.permute()
}
}
}
return
}
// Read squeezes an arbitrary number of bytes from the sponge.
func (d *state) Read(out []byte) (n int, err error) {
// If we're still absorbing, pad and apply the permutation.
if d.state == spongeAbsorbing {
d.padAndPermute(d.dsbyte)
}
n = len(out)
// Now, do the squeezing.
for len(out) > 0 {
n := copy(out, d.buf)
d.buf = d.buf[n:]
out = out[n:]
// Apply the permutation if we've squeezed the sponge dry.
if len(d.buf) == 0 {
d.permute()
}
}
return
}
// Sum applies padding to the hash state and then squeezes out the desired
// number of output bytes.
func (d *state) Sum(in []byte) []byte {
// Make a copy of the original hash so that caller can keep writing
// and summing.
dup := d.clone()
hash := make([]byte, dup.outputLen)
dup.Read(hash)
return append(in, hash...)
}
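The dsbyte field documented above is the only thing separating the fixed-output SHA-3 functions from the SHAKE XOFs at the sponge level; the rest is just the rate. An illustrative in-package sketch of the values the constructors would plug in, with the SHAKE entries matching the shake.go constructors further down in this diff and the SHA3-256 rate following from (1600 - 2*256)/8 = 136 bytes:

    sha3_256 := &state{rate: 136, outputLen: 32, dsbyte: 0x06} // "01" domain bits plus first padding bit
    shake128 := &state{rate: 168, dsbyte: 0x1f}                // "1111" domain bits plus first padding bit
    shake256 := &state{rate: 136, dsbyte: 0x1f}
    _, _, _ = sha3_256, shake128, shake256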

View File

@ -1,297 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// Tests include all the ShortMsgKATs provided by the Keccak team at
// https://github.com/gvanas/KeccakCodePackage
//
// They only include the zero-bit case of the bitwise testvectors
// published by NIST in the draft of FIPS-202.
import (
"bytes"
"compress/flate"
"encoding/hex"
"encoding/json"
"hash"
"os"
"strings"
"testing"
)
const (
testString = "brekeccakkeccak koax koax"
katFilename = "testdata/keccakKats.json.deflate"
)
// Internal-use instances of SHAKE used to test against KATs.
func newHashShake128() hash.Hash {
return &state{rate: 168, dsbyte: 0x1f, outputLen: 512}
}
func newHashShake256() hash.Hash {
return &state{rate: 136, dsbyte: 0x1f, outputLen: 512}
}
// testDigests contains functions returning hash.Hash instances
// with output-length equal to the KAT length for both SHA-3 and
// SHAKE instances.
var testDigests = map[string]func() hash.Hash{
"SHA3-224": New224,
"SHA3-256": New256,
"SHA3-384": New384,
"SHA3-512": New512,
"SHAKE128": newHashShake128,
"SHAKE256": newHashShake256,
}
// testShakes contains functions that return ShakeHash instances for
// testing the ShakeHash-specific interface.
var testShakes = map[string]func() ShakeHash{
"SHAKE128": NewShake128,
"SHAKE256": NewShake256,
}
// structs used to marshal JSON test-cases.
type KeccakKats struct {
Kats map[string][]struct {
Digest string `json:"digest"`
Length int64 `json:"length"`
Message string `json:"message"`
}
}
func testUnalignedAndGeneric(t *testing.T, testf func(impl string)) {
xorInOrig, copyOutOrig := xorIn, copyOut
xorIn, copyOut = xorInGeneric, copyOutGeneric
testf("generic")
if xorImplementationUnaligned != "generic" {
xorIn, copyOut = xorInUnaligned, copyOutUnaligned
testf("unaligned")
}
xorIn, copyOut = xorInOrig, copyOutOrig
}
// TestKeccakKats tests the SHA-3 and Shake implementations against all the
// ShortMsgKATs from https://github.com/gvanas/KeccakCodePackage
// (The testvectors are stored in keccakKats.json.deflate due to their length.)
func TestKeccakKats(t *testing.T) {
testUnalignedAndGeneric(t, func(impl string) {
// Read the KATs.
deflated, err := os.Open(katFilename)
if err != nil {
t.Errorf("error opening %s: %s", katFilename, err)
}
file := flate.NewReader(deflated)
dec := json.NewDecoder(file)
var katSet KeccakKats
err = dec.Decode(&katSet)
if err != nil {
t.Errorf("error decoding KATs: %s", err)
}
// Do the KATs.
for functionName, kats := range katSet.Kats {
d := testDigests[functionName]()
for _, kat := range kats {
d.Reset()
in, err := hex.DecodeString(kat.Message)
if err != nil {
t.Errorf("error decoding KAT: %s", err)
}
d.Write(in[:kat.Length/8])
got := strings.ToUpper(hex.EncodeToString(d.Sum(nil)))
if got != kat.Digest {
t.Errorf("function=%s, implementation=%s, length=%d\nmessage:\n %s\ngot:\n %s\nwanted:\n %s",
functionName, impl, kat.Length, kat.Message, got, kat.Digest)
t.Logf("wanted %+v", kat)
t.FailNow()
}
continue
}
}
})
}
// TestUnalignedWrite tests that writing data in an arbitrary pattern with
// small input buffers produces the same digest as a single aligned write.
func TestUnalignedWrite(t *testing.T) {
testUnalignedAndGeneric(t, func(impl string) {
buf := sequentialBytes(0x10000)
for alg, df := range testDigests {
d := df()
d.Reset()
d.Write(buf)
want := d.Sum(nil)
d.Reset()
for i := 0; i < len(buf); {
// Cycle through offsets which make a 137 byte sequence.
// Because 137 is prime this sequence should exercise all corner cases.
offsets := [17]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1}
for _, j := range offsets {
if v := len(buf) - i; v < j {
j = v
}
d.Write(buf[i : i+j])
i += j
}
}
got := d.Sum(nil)
if !bytes.Equal(got, want) {
t.Errorf("Unaligned writes, implementation=%s, alg=%s\ngot %q, want %q", impl, alg, got, want)
}
}
})
}
// TestAppend checks that appending works when reallocation is necessary.
func TestAppend(t *testing.T) {
testUnalignedAndGeneric(t, func(impl string) {
d := New224()
for capacity := 2; capacity <= 66; capacity += 64 {
// The first time around the loop, Sum will have to reallocate.
// The second time, it will not.
buf := make([]byte, 2, capacity)
d.Reset()
d.Write([]byte{0xcc})
buf = d.Sum(buf)
expected := "0000DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
t.Errorf("got %s, want %s", got, expected)
}
}
})
}
// TestAppendNoRealloc tests that appending works when no reallocation is necessary.
func TestAppendNoRealloc(t *testing.T) {
testUnalignedAndGeneric(t, func(impl string) {
buf := make([]byte, 1, 200)
d := New224()
d.Write([]byte{0xcc})
buf = d.Sum(buf)
expected := "00DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
t.Errorf("%s: got %s, want %s", impl, got, expected)
}
})
}
// TestSqueezing checks that squeezing the full output a single time produces
// the same output as repeatedly squeezing the instance.
func TestSqueezing(t *testing.T) {
testUnalignedAndGeneric(t, func(impl string) {
for functionName, newShakeHash := range testShakes {
d0 := newShakeHash()
d0.Write([]byte(testString))
ref := make([]byte, 32)
d0.Read(ref)
d1 := newShakeHash()
d1.Write([]byte(testString))
var multiple []byte
for range ref {
one := make([]byte, 1)
d1.Read(one)
multiple = append(multiple, one...)
}
if !bytes.Equal(ref, multiple) {
t.Errorf("%s (%s): squeezing %d bytes one at a time failed", functionName, impl, len(ref))
}
}
})
}
// sequentialBytes produces a buffer of size consecutive bytes 0x00, 0x01, ..., used for testing.
func sequentialBytes(size int) []byte {
result := make([]byte, size)
for i := range result {
result[i] = byte(i)
}
return result
}
// BenchmarkPermutationFunction measures the speed of the permutation function
// with no input data.
func BenchmarkPermutationFunction(b *testing.B) {
b.SetBytes(int64(200))
var lanes [25]uint64
for i := 0; i < b.N; i++ {
keccakF1600(&lanes)
}
}
// benchmarkHash tests the speed to hash num buffers of buflen each.
func benchmarkHash(b *testing.B, h hash.Hash, size, num int) {
b.StopTimer()
h.Reset()
data := sequentialBytes(size)
b.SetBytes(int64(size * num))
b.StartTimer()
var state []byte
for i := 0; i < b.N; i++ {
for j := 0; j < num; j++ {
h.Write(data)
}
state = h.Sum(state[:0])
}
b.StopTimer()
h.Reset()
}
// benchmarkShake is specialized to the Shake instances, which don't
// require a copy on reading output.
func benchmarkShake(b *testing.B, h ShakeHash, size, num int) {
b.StopTimer()
h.Reset()
data := sequentialBytes(size)
d := make([]byte, 32)
b.SetBytes(int64(size * num))
b.StartTimer()
for i := 0; i < b.N; i++ {
h.Reset()
for j := 0; j < num; j++ {
h.Write(data)
}
h.Read(d)
}
}
func BenchmarkSha3_512_MTU(b *testing.B) { benchmarkHash(b, New512(), 1350, 1) }
func BenchmarkSha3_384_MTU(b *testing.B) { benchmarkHash(b, New384(), 1350, 1) }
func BenchmarkSha3_256_MTU(b *testing.B) { benchmarkHash(b, New256(), 1350, 1) }
func BenchmarkSha3_224_MTU(b *testing.B) { benchmarkHash(b, New224(), 1350, 1) }
func BenchmarkShake128_MTU(b *testing.B) { benchmarkShake(b, NewShake128(), 1350, 1) }
func BenchmarkShake256_MTU(b *testing.B) { benchmarkShake(b, NewShake256(), 1350, 1) }
func BenchmarkShake256_16x(b *testing.B) { benchmarkShake(b, NewShake256(), 16, 1024) }
func BenchmarkShake256_1MiB(b *testing.B) { benchmarkShake(b, NewShake256(), 1024, 1024) }
func BenchmarkSha3_512_1MiB(b *testing.B) { benchmarkHash(b, New512(), 1024, 1024) }
func Example_sum() {
buf := []byte("some data to hash")
// A hash needs to be 64 bytes long to have 256-bit collision resistance.
h := make([]byte, 64)
// Compute a 64-byte hash of buf and put it in h.
ShakeSum256(h, buf)
}
func Example_mac() {
k := []byte("this is a secret key; you should generate a strong random key that's at least 32 bytes long")
buf := []byte("and this is some data to authenticate")
// A MAC with 32 bytes of output has 256-bit security strength -- if you use at least a 32-byte-long key.
h := make([]byte, 32)
d := NewShake256()
// Write the key into the hash.
d.Write(k)
// Now write the data.
d.Write(buf)
// Read 32 bytes of output from the hash into h.
d.Read(h)
}

View File

@ -1,60 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// This file defines the ShakeHash interface, and provides
// functions for creating SHAKE instances, as well as utility
// functions for hashing bytes to arbitrary-length output.
import (
"io"
)
// ShakeHash defines the interface to hash functions that
// support arbitrary-length output.
type ShakeHash interface {
// Write absorbs more data into the hash's state. It panics if input is
// written to it after output has been read from it.
io.Writer
// Read reads more output from the hash; reading affects the hash's
// state. (ShakeHash.Read is thus very different from Hash.Sum)
// It never returns an error.
io.Reader
// Clone returns a copy of the ShakeHash in its current state.
Clone() ShakeHash
// Reset resets the ShakeHash to its initial state.
Reset()
}
func (d *state) Clone() ShakeHash {
return d.clone()
}
// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
// Its generic security strength is 128 bits against all attacks if at
// least 32 bytes of its output are used.
func NewShake128() ShakeHash { return &state{rate: 168, dsbyte: 0x1f} }
// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
// Its generic security strength is 256 bits against all attacks if
// at least 64 bytes of its output are used.
func NewShake256() ShakeHash { return &state{rate: 136, dsbyte: 0x1f} }
// ShakeSum128 writes an arbitrary-length digest of data into hash.
func ShakeSum128(hash, data []byte) {
h := NewShake128()
h.Write(data)
h.Read(hash)
}
// ShakeSum256 writes an arbitrary-length digest of data into hash.
func ShakeSum256(hash, data []byte) {
h := NewShake256()
h.Write(data)
h.Read(hash)
}

View File

@ -1,16 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64,!386,!ppc64le appengine
package sha3
var (
xorIn = xorInGeneric
copyOut = copyOutGeneric
xorInUnaligned = xorInGeneric
copyOutUnaligned = copyOutGeneric
)
const xorImplementationUnaligned = "generic"

View File

@ -1,28 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
import "encoding/binary"
// xorInGeneric xors the bytes in buf into the state; it
// makes no non-portable assumptions about memory layout
// or alignment.
func xorInGeneric(d *state, buf []byte) {
n := len(buf) / 8
for i := 0; i < n; i++ {
a := binary.LittleEndian.Uint64(buf)
d.a[i] ^= a
buf = buf[8:]
}
}
// copyOutGeneric copies uint64s to a byte buffer.
func copyOutGeneric(d *state, b []byte) {
for i := 0; len(b) >= 8; i++ {
binary.LittleEndian.PutUint64(b, d.a[i])
b = b[8:]
}
}
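A small worked example of this generic path, starting from a zeroed state: each 8-byte group of buf is decoded little-endian and XORed into the next lane.

    d := &state{rate: 136}
    buf := []byte{0x01, 0, 0, 0, 0, 0, 0, 0, 0xff, 0, 0, 0, 0, 0, 0, 0}
    xorInGeneric(d, buf)
    // now d.a[0] == 0x0000000000000001 and d.a[1] == 0x00000000000000ff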

View File

@ -1,58 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64 386 ppc64le
// +build !appengine
package sha3
import "unsafe"
func xorInUnaligned(d *state, buf []byte) {
bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))
n := len(buf)
if n >= 72 {
d.a[0] ^= bw[0]
d.a[1] ^= bw[1]
d.a[2] ^= bw[2]
d.a[3] ^= bw[3]
d.a[4] ^= bw[4]
d.a[5] ^= bw[5]
d.a[6] ^= bw[6]
d.a[7] ^= bw[7]
d.a[8] ^= bw[8]
}
if n >= 104 {
d.a[9] ^= bw[9]
d.a[10] ^= bw[10]
d.a[11] ^= bw[11]
d.a[12] ^= bw[12]
}
if n >= 136 {
d.a[13] ^= bw[13]
d.a[14] ^= bw[14]
d.a[15] ^= bw[15]
d.a[16] ^= bw[16]
}
if n >= 144 {
d.a[17] ^= bw[17]
}
if n >= 168 {
d.a[18] ^= bw[18]
d.a[19] ^= bw[19]
d.a[20] ^= bw[20]
}
}
func copyOutUnaligned(d *state, buf []byte) {
ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
copy(buf, ab[:])
}
var (
xorIn = xorInUnaligned
copyOut = copyOutUnaligned
)
const xorImplementationUnaligned = "unaligned"
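The byte thresholds in xorInUnaligned line up with the sponge rates of the supported variants, so a call only touches the lanes a given rate actually absorbs. For reference, using rate = (1600 - capacity)/8 with capacity twice the output size:

    const (
        rateSHA3_512 = 72  // (1600 - 2*512) / 8
        rateSHA3_384 = 104 // (1600 - 2*384) / 8
        rateSHA3_256 = 136 // (1600 - 2*256) / 8, shared with SHAKE256
        rateSHA3_224 = 144 // (1600 - 2*224) / 8
        rateSHAKE128 = 168 // (1600 - 2*128) / 8, equal to maxRate
    )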

View File

@ -143,8 +143,10 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if !config.SkipBcVersionCheck {
bcVersion := rawdb.ReadDatabaseVersion(chainDb)
if bcVersion != core.BlockChainVersion && bcVersion != 0 {
return nil, fmt.Errorf("Blockchain DB version mismatch (%d / %d).\n", bcVersion, core.BlockChainVersion)
if bcVersion != nil && *bcVersion > core.BlockChainVersion {
return nil, fmt.Errorf("database version is v%d, Geth %s only supports v%d", *bcVersion, params.VersionWithMeta, core.BlockChainVersion)
} else if bcVersion != nil && *bcVersion < core.BlockChainVersion {
log.Warn("Upgrade blockchain database version", "from", *bcVersion, "to", core.BlockChainVersion)
}
rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion)
}

View File

@ -1488,7 +1488,15 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error {
blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
}
if index, err := d.blockchain.InsertChain(blocks); err != nil {
if index < len(results) {
log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
} else {
// The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
// when it needs to preprocess blocks to import a sidechain.
// The importer will put together a new list of blocks to import, which is a superset
// of the blocks delivered from the downloader, and the indexing will be off.
log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
}
return errInvalidChain
}
return nil

View File

@ -25,10 +25,10 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
"golang.org/x/crypto/sha3"
)
// stateReq represents a batch of state fetch requests grouped together into
@ -240,7 +240,7 @@ func newStateSync(d *Downloader, root common.Hash) *stateSync {
return &stateSync{
d: d,
sched: state.NewStateSync(root, d.stateDB),
keccak: sha3.NewKeccak256(),
keccak: sha3.NewLegacyKeccak256(),
tasks: make(map[common.Hash]*stateTask),
deliver: make(chan *stateReq),
cancel: make(chan struct{}),
@ -398,9 +398,8 @@ func (s *stateSync) fillTasks(n int, req *stateReq) {
// process iterates over a batch of delivered state data, injecting each item
// into a running state sync, re-queuing any items that were requested but not
// delivered.
// Returns whether the peer actually managed to deliver anything of value,
// and any error that occurred
// delivered. Returns whether the peer actually managed to deliver anything of
// value, and any error that occurred.
func (s *stateSync) process(req *stateReq) (int, error) {
// Collect processing stats and update progress if valid data was received
duplicate, unexpected, successful := 0, 0, 0
@ -412,14 +411,12 @@ func (s *stateSync) process(req *stateReq) (int, error) {
}(time.Now())
// Iterate over all the delivered data and inject one-by-one into the trie
progress := false
for _, blob := range req.response {
prog, hash, err := s.processNodeData(blob)
_, hash, err := s.processNodeData(blob)
switch err {
case nil:
s.numUncommitted++
s.bytesUncommitted += len(blob)
progress = progress || prog
successful++
case trie.ErrNotRequested:
unexpected++

File diff suppressed because one or more lines are too long

View File

@ -38,7 +38,7 @@
var op = log.op.toString();
}
// If a new contract is being created, add to the call stack
if (syscall && op == 'CREATE') {
if (syscall && (op == 'CREATE' || op == "CREATE2")) {
var inOff = log.stack.peek(1).valueOf();
var inEnd = inOff + log.stack.peek(2).valueOf();
@ -116,7 +116,7 @@
// Pop off the last call and get the execution results
var call = this.callstack.pop();
if (call.type == 'CREATE') {
if (call.type == 'CREATE' || call.type == "CREATE2") {
// If the call was a CREATE, retrieve the contract address and output code
call.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost - log.getGas()).toString(16);
delete call.gasIn; delete call.gasCost;

View File

@ -86,6 +86,14 @@
var from = log.contract.getAddress();
this.lookupAccount(toContract(from, db.getNonce(from)), db);
break;
case "CREATE2":
var from = log.contract.getAddress();
// stack: salt, size, offset, endowment
var offset = log.stack.peek(1).valueOf()
var size = log.stack.peek(2).valueOf()
var end = offset + size
this.lookupAccount(toContract2(from, log.stack.peek(3).toString(16), log.memory.slice(offset, end)), db);
break;
case "CALL": case "CALLCODE": case "DELEGATECALL": case "STATICCALL":
this.lookupAccount(toAddress(log.stack.peek(1).toString(16)), db);
break;

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:generate go-bindata -nometadata -o assets.go -pkg tracers -ignore ((tracers)|(assets)).go ./...
//go:generate go-bindata -nometadata -o assets.go -pkg tracers -ignore tracers.go -ignore assets.go ./...
//go:generate gofmt -s -w assets.go
// Package tracers contains the actual JavaScript tracer assets.

View File

@ -367,6 +367,28 @@ func New(code string) (*Tracer, error) {
copy(makeSlice(ctx.PushFixedBuffer(20), 20), contract[:])
return 1
})
tracer.vm.PushGlobalGoFunction("toContract2", func(ctx *duktape.Context) int {
var from common.Address
if ptr, size := ctx.GetBuffer(-3); ptr != nil {
from = common.BytesToAddress(makeSlice(ptr, size))
} else {
from = common.HexToAddress(ctx.GetString(-3))
}
// Retrieve salt hex string from js stack
salt := common.HexToHash(ctx.GetString(-2))
// Retrieve code slice from js stack
var code []byte
if ptr, size := ctx.GetBuffer(-1); ptr != nil {
code = common.CopyBytes(makeSlice(ptr, size))
} else {
code = common.FromHex(ctx.GetString(-1))
}
codeHash := crypto.Keccak256(code)
ctx.Pop3()
contract := crypto.CreateAddress2(from, salt, codeHash)
copy(makeSlice(ctx.PushFixedBuffer(20), 20), contract[:])
return 1
})
tracer.vm.PushGlobalGoFunction("isPrecompiled", func(ctx *duktape.Context) int {
_, ok := vm.PrecompiledContractsByzantium[common.BytesToAddress(popSlice(ctx))]
ctx.PushBoolean(ok)

View File

@ -17,6 +17,8 @@
package tracers
import (
"crypto/ecdsa"
"crypto/rand"
"encoding/json"
"io/ioutil"
"math/big"
@ -31,7 +33,9 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/tests"
)
@ -116,6 +120,83 @@ type callTracerTest struct {
Result *callTrace `json:"result"`
}
func TestPrestateTracerCreate2(t *testing.T) {
unsigned_tx := types.NewTransaction(1, common.HexToAddress("0x00000000000000000000000000000000deadbeef"),
new(big.Int), 5000000, big.NewInt(1), []byte{})
privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader)
if err != nil {
t.Fatalf("err %v", err)
}
signer := types.NewEIP155Signer(big.NewInt(1))
tx, err := types.SignTx(unsigned_tx, signer, privateKeyECDSA)
if err != nil {
t.Fatalf("err %v", err)
}
/**
This comes from one of the test vectors of the Skinny CREATE2 EIP (EIP-1014):
address 0x00000000000000000000000000000000deadbeef
salt 0x00000000000000000000000000000000000000000000000000000000cafebabe
init_code 0xdeadbeef
gas (assuming no mem expansion): 32006
result: 0x60f3f640a8508fC6a86d45DF051962668E1e8AC7
*/
origin, _ := signer.Sender(tx)
context := vm.Context{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
Origin: origin,
Coinbase: common.Address{},
BlockNumber: new(big.Int).SetUint64(8000000),
Time: new(big.Int).SetUint64(5),
Difficulty: big.NewInt(0x30000),
GasLimit: uint64(6000000),
GasPrice: big.NewInt(1),
}
alloc := core.GenesisAlloc{}
// The code pushes 'deadbeef' into memory, then the other params, and calls CREATE2, then returns
// the address
alloc[common.HexToAddress("0x00000000000000000000000000000000deadbeef")] = core.GenesisAccount{
Nonce: 1,
Code: hexutil.MustDecode("0x63deadbeef60005263cafebabe6004601c6000F560005260206000F3"),
Balance: big.NewInt(1),
}
alloc[origin] = core.GenesisAccount{
Nonce: 1,
Code: []byte{},
Balance: big.NewInt(500000000000000),
}
statedb := tests.MakePreState(ethdb.NewMemDatabase(), alloc)
// Create the tracer, the EVM environment and run it
tracer, err := New("prestateTracer")
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
evm := vm.NewEVM(context, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer})
msg, err := tx.AsMessage(signer)
if err != nil {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
}
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if _, _, _, err = st.TransitionDb(); err != nil {
t.Fatalf("failed to execute transaction: %v", err)
}
// Retrieve the trace result and compare against the etalon
res, err := tracer.GetResult()
if err != nil {
t.Fatalf("failed to retrieve trace result: %v", err)
}
ret := make(map[string]interface{})
if err := json.Unmarshal(res, &ret); err != nil {
t.Fatalf("failed to unmarshal trace result: %v", err)
}
if _, has := ret["0x60f3f640a8508fc6a86d45df051962668e1e8ac7"]; !has {
t.Fatalf("Expected 0x60f3f640a8508fc6a86d45df051962668e1e8ac7 in result")
}
}
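The address asserted above comes straight from the EIP-1014 rule that toContract2 and crypto.CreateAddress2 implement: keccak256(0xff ++ deployer ++ salt ++ keccak256(init_code))[12:]. A self-contained sketch reproducing the vector quoted in the comment above:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "golang.org/x/crypto/sha3"
    )

    func keccak256(chunks ...[]byte) []byte {
        h := sha3.NewLegacyKeccak256()
        for _, c := range chunks {
            h.Write(c)
        }
        return h.Sum(nil)
    }

    func main() {
        deployer := common.HexToAddress("0x00000000000000000000000000000000deadbeef")
        salt := common.HexToHash("0x00000000000000000000000000000000000000000000000000000000cafebabe")
        initCode := common.FromHex("0xdeadbeef")

        addr := keccak256([]byte{0xff}, deployer.Bytes(), salt.Bytes(), keccak256(initCode))[12:]
        fmt.Println(common.BytesToAddress(addr).Hex())
        // expected, per the EIP-1014 vector above: 0x60f3f640a8508fC6a86d45DF051962668E1e8AC7
    }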
// Iterates over all the input-output datasets in the tracer test harness and
// runs the JavaScript tracers against them.
func TestCallTracer(t *testing.T) {
@ -185,8 +266,9 @@ func TestCallTracer(t *testing.T) {
if err := json.Unmarshal(res, ret); err != nil {
t.Fatalf("failed to unmarshal trace result: %v", err)
}
if !reflect.DeepEqual(ret, test.Result) {
t.Fatalf("trace mismatch: have %+v, want %+v", ret, test.Result)
t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result)
}
})
}

View File

@ -1074,6 +1074,15 @@ func (s *PublicTransactionPoolAPI) GetRawTransactionByBlockHashAndIndex(ctx cont
// GetTransactionCount returns the number of transactions the given address has sent for the given block number
func (s *PublicTransactionPoolAPI) GetTransactionCount(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (*hexutil.Uint64, error) {
// Ask transaction pool for the nonce which includes pending transactions
if blockNr == rpc.PendingBlockNumber {
nonce, err := s.b.GetPoolNonce(ctx, address)
if err != nil {
return nil, err
}
return (*hexutil.Uint64)(&nonce), nil
}
// Resolve block number and use its state to ask for the nonce
state, _, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
if state == nil || err != nil {
return nil, err
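With this addition a nonce query against the "pending" block is answered from the transaction pool, while explicit block numbers keep resolving state exactly as before. A hedged client-side sketch (endpoint and address are placeholders):

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/ethclient"
    )

    func main() {
        client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
        if err != nil {
            log.Fatal(err)
        }
        addr := common.HexToAddress("0x00000000000000000000000000000000deadbeef")

        pending, err := client.PendingNonceAt(context.Background(), addr) // eth_getTransactionCount(addr, "pending")
        if err != nil {
            log.Fatal(err)
        }
        latest, err := client.NonceAt(context.Background(), addr, nil) // eth_getTransactionCount(addr, "latest")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("pending:", pending, "latest:", latest)
    }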

View File

@ -18,6 +18,7 @@
package web3ext
var Modules = map[string]string{
"accounting": Accounting_JS,
"admin": Admin_JS,
"chequebook": Chequebook_JS,
"clique": Clique_JS,
@ -704,3 +705,47 @@ web3._extend({
]
});
`
const Accounting_JS = `
web3._extend({
property: 'accounting',
methods: [
new web3._extend.Property({
name: 'balance',
getter: 'account_balance'
}),
new web3._extend.Property({
name: 'balanceCredit',
getter: 'account_balanceCredit'
}),
new web3._extend.Property({
name: 'balanceDebit',
getter: 'account_balanceDebit'
}),
new web3._extend.Property({
name: 'bytesCredit',
getter: 'account_bytesCredit'
}),
new web3._extend.Property({
name: 'bytesDebit',
getter: 'account_bytesDebit'
}),
new web3._extend.Property({
name: 'msgCredit',
getter: 'account_msgCredit'
}),
new web3._extend.Property({
name: 'msgDebit',
getter: 'account_msgDebit'
}),
new web3._extend.Property({
name: 'peerDrops',
getter: 'account_peerDrops'
}),
new web3._extend.Property({
name: 'selfDrops',
getter: 'account_selfDrops'
}),
]
});
`

View File

@ -27,10 +27,10 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
var (
@ -800,7 +800,7 @@ func (n *nodeNetGuts) startNextQuery(net *Network) {
func (q *findnodeQuery) start(net *Network) bool {
// Satisfy queries against the local node directly.
if q.remote == net.tab.self {
closest := net.tab.closest(crypto.Keccak256Hash(q.target[:]), bucketSize)
closest := net.tab.closest(q.target, bucketSize)
q.reply <- closest.entries
return true
}
@ -1234,7 +1234,7 @@ func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) {
}
func rlpHash(x interface{}) (h common.Hash) {
hw := sha3.NewKeccak256()
hw := sha3.NewLegacyKeccak256()
rlp.Encode(hw, x)
hw.Sum(h[:0])
return h
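This hunk shows the pattern repeated across the whole bump: imports of the vendored github.com/ethereum/go-ethereum/crypto/sha3 give way to golang.org/x/crypto/sha3, and NewKeccak256 becomes NewLegacyKeccak256, which keeps the original Keccak padding byte (0x01) rather than the NIST SHA-3 padding (0x06), so existing hashes are unchanged. A minimal sketch of the difference on empty input:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    func main() {
        legacy := sha3.NewLegacyKeccak256() // Ethereum-style Keccak-256
        nist := sha3.New256()               // FIPS-202 SHA3-256

        fmt.Printf("%x\n", legacy.Sum(nil)) // c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470
        fmt.Printf("%x\n", nist.Sum(nil))   // a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a
    }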

View File

@ -23,9 +23,9 @@ import (
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
// List of known secure identity schemes.
@ -48,7 +48,7 @@ func SignV4(r *enr.Record, privkey *ecdsa.PrivateKey) error {
cpy.Set(enr.ID("v4"))
cpy.Set(Secp256k1(privkey.PublicKey))
h := sha3.NewKeccak256()
h := sha3.NewLegacyKeccak256()
rlp.Encode(h, cpy.AppendElements(nil))
sig, err := crypto.Sign(h.Sum(nil), privkey)
if err != nil {
@ -69,7 +69,7 @@ func (V4ID) Verify(r *enr.Record, sig []byte) error {
return fmt.Errorf("invalid public key")
}
h := sha3.NewKeccak256()
h := sha3.NewLegacyKeccak256()
rlp.Encode(h, r.AppendElements(nil))
if !crypto.VerifySignature(entry, h.Sum(nil), sig) {
return enr.ErrInvalidSig

View File

@ -22,31 +22,33 @@ import (
"github.com/ethereum/go-ethereum/metrics"
)
//define some metrics
// define some metrics
var (
//All metrics are cumulative
// All metrics are cumulative
//total amount of units credited
// total amount of units credited
mBalanceCredit metrics.Counter
//total amount of units debited
// total amount of units debited
mBalanceDebit metrics.Counter
//total amount of bytes credited
// total amount of bytes credited
mBytesCredit metrics.Counter
//total amount of bytes debited
// total amount of bytes debited
mBytesDebit metrics.Counter
//total amount of credited messages
// total amount of credited messages
mMsgCredit metrics.Counter
//total amount of debited messages
// total amount of debited messages
mMsgDebit metrics.Counter
//how many times local node had to drop remote peers
// how many times local node had to drop remote peers
mPeerDrops metrics.Counter
//how many times local node overdrafted and dropped
// how many times local node overdrafted and dropped
mSelfDrops metrics.Counter
MetricsRegistry metrics.Registry
)
//Prices defines how prices are being passed on to the accounting instance
// Prices defines how prices are being passed on to the accounting instance
type Prices interface {
//Return the Price for a message
// Return the Price for a message
Price(interface{}) *Price
}
@ -57,20 +59,20 @@ const (
Receiver = Payer(false)
)
//Price represents the costs of a message
// Price represents the costs of a message
type Price struct {
Value uint64 //
PerByte bool //True if the price is per byte or for unit
Value uint64
PerByte bool // True if the price is per byte or for unit
Payer Payer
}
//For gives back the price for a message
//A protocol provides the message price in absolute value
//This method then returns the correct signed amount,
//depending on who pays, which is identified by the `payer` argument:
//`Send` will pass a `Sender` payer, `Receive` will pass the `Receiver` argument.
//Thus: If Sending and sender pays, amount positive, otherwise negative
//If Receiving, and receiver pays, amount positive, otherwise negative
// For gives back the price for a message
// A protocol provides the message price in absolute value
// This method then returns the correct signed amount,
// depending on who pays, which is identified by the `payer` argument:
// `Send` will pass a `Sender` payer, `Receive` will pass the `Receiver` argument.
// Thus: If Sending and sender pays, amount positive, otherwise negative
// If Receiving, and receiver pays, amount positive, otherwise negative
func (p *Price) For(payer Payer, size uint32) int64 {
price := p.Value
if p.PerByte {
@ -82,22 +84,22 @@ func (p *Price) For(payer Payer, size uint32) int64 {
return int64(price)
}
//Balance is the actual accounting instance
//Balance defines the operations needed for accounting
//Implementations internally maintain the balance for every peer
// Balance is the actual accounting instance
// Balance defines the operations needed for accounting
// Implementations internally maintain the balance for every peer
type Balance interface {
//Adds amount to the local balance with remote node `peer`;
//positive amount = credit local node
//negative amount = debit local node
// Adds amount to the local balance with remote node `peer`;
// positive amount = credit local node
// negative amount = debit local node
Add(amount int64, peer *Peer) error
}
//Accounting implements the Hook interface
//It interfaces to the balances through the Balance interface,
//while interfacing with protocols and its prices through the Prices interface
// Accounting implements the Hook interface
// It interfaces to the balances through the Balance interface,
// while interfacing with protocols and its prices through the Prices interface
type Accounting struct {
Balance //interface to accounting logic
Prices //interface to prices logic
Balance // interface to accounting logic
Prices // interface to prices logic
}
func NewAccounting(balance Balance, po Prices) *Accounting {
@ -108,70 +110,68 @@ func NewAccounting(balance Balance, po Prices) *Accounting {
return ah
}
//SetupAccountingMetrics creates a separate registry for p2p accounting metrics;
//this registry should be independent of any other metrics as it persists at different endpoints.
//It also instantiates the given metrics and starts the persisting go-routine which
//at the passed interval writes the metrics to a LevelDB
// SetupAccountingMetrics creates a separate registry for p2p accounting metrics;
// this registry should be independent of any other metrics as it persists at different endpoints.
// It also instantiates the given metrics and starts the persisting go-routine which
// at the passed interval writes the metrics to a LevelDB
func SetupAccountingMetrics(reportInterval time.Duration, path string) *AccountingMetrics {
//create an empty registry
registry := metrics.NewRegistry()
//instantiate the metrics
mBalanceCredit = metrics.NewRegisteredCounterForced("account.balance.credit", registry)
mBalanceDebit = metrics.NewRegisteredCounterForced("account.balance.debit", registry)
mBytesCredit = metrics.NewRegisteredCounterForced("account.bytes.credit", registry)
mBytesDebit = metrics.NewRegisteredCounterForced("account.bytes.debit", registry)
mMsgCredit = metrics.NewRegisteredCounterForced("account.msg.credit", registry)
mMsgDebit = metrics.NewRegisteredCounterForced("account.msg.debit", registry)
mPeerDrops = metrics.NewRegisteredCounterForced("account.peerdrops", registry)
mSelfDrops = metrics.NewRegisteredCounterForced("account.selfdrops", registry)
//create the DB and start persisting
return NewAccountingMetrics(registry, reportInterval, path)
// create an empty registry
MetricsRegistry = metrics.NewRegistry()
// instantiate the metrics
mBalanceCredit = metrics.NewRegisteredCounterForced("account.balance.credit", MetricsRegistry)
mBalanceDebit = metrics.NewRegisteredCounterForced("account.balance.debit", MetricsRegistry)
mBytesCredit = metrics.NewRegisteredCounterForced("account.bytes.credit", MetricsRegistry)
mBytesDebit = metrics.NewRegisteredCounterForced("account.bytes.debit", MetricsRegistry)
mMsgCredit = metrics.NewRegisteredCounterForced("account.msg.credit", MetricsRegistry)
mMsgDebit = metrics.NewRegisteredCounterForced("account.msg.debit", MetricsRegistry)
mPeerDrops = metrics.NewRegisteredCounterForced("account.peerdrops", MetricsRegistry)
mSelfDrops = metrics.NewRegisteredCounterForced("account.selfdrops", MetricsRegistry)
// create the DB and start persisting
return NewAccountingMetrics(MetricsRegistry, reportInterval, path)
}
//Implement Hook.Send
// Send takes a peer, a size and a msg and
// - calculates the cost for the local node sending a msg of size to peer using the Prices interface
// - credits/debits local node using balance interface
func (ah *Accounting) Send(peer *Peer, size uint32, msg interface{}) error {
//get the price for a message (through the protocol spec)
// get the price for a message (through the protocol spec)
price := ah.Price(msg)
//this message doesn't need accounting
// this message doesn't need accounting
if price == nil {
return nil
}
//evaluate the price for sending messages
// evaluate the price for sending messages
costToLocalNode := price.For(Sender, size)
//do the accounting
// do the accounting
err := ah.Add(costToLocalNode, peer)
//record metrics: just increase counters for user-facing metrics
// record metrics: just increase counters for user-facing metrics
ah.doMetrics(costToLocalNode, size, err)
return err
}
//Implement Hook.Receive
// Receive takes a peer, a size and a msg and
// - calculates the cost for the local node receiving a msg of size from peer using the Prices interface
// - credits/debits local node using balance interface
func (ah *Accounting) Receive(peer *Peer, size uint32, msg interface{}) error {
//get the price for a message (through the protocol spec)
// get the price for a message (through the protocol spec)
price := ah.Price(msg)
//this message doesn't need accounting
// this message doesn't need accounting
if price == nil {
return nil
}
//evaluate the price for receiving messages
// evaluate the price for receiving messages
costToLocalNode := price.For(Receiver, size)
//do the accounting
// do the accounting
err := ah.Add(costToLocalNode, peer)
//record metrics: just increase counters for user-facing metrics
// record metrics: just increase counters for user-facing metrics
ah.doMetrics(costToLocalNode, size, err)
return err
}
//record some metrics
//this is not an error handling. `err` is returned by both `Send` and `Receive`
//`err` will only be non-nil if a limit has been violated (overdraft), in which case the peer has been dropped.
//if the limit has been violated and `err` is thus not nil:
// record some metrics
// this is not an error handling. `err` is returned by both `Send` and `Receive`
// `err` will only be non-nil if a limit has been violated (overdraft), in which case the peer has been dropped.
// if the limit has been violated and `err` is thus not nil:
// * if the price is positive, local node has been credited; thus `err` implicitly signals the REMOTE has been dropped
// * if the price is negative, local node has been debited, thus `err` implicitly signals LOCAL node "overdraft"
func (ah *Accounting) doMetrics(price int64, size uint32, err error) {
@ -180,7 +180,7 @@ func (ah *Accounting) doMetrics(price int64, size uint32, err error) {
mBytesCredit.Inc(int64(size))
mMsgCredit.Inc(1)
if err != nil {
//increase the number of times a remote node has been dropped due to "overdraft"
// increase the number of times a remote node has been dropped due to "overdraft"
mPeerDrops.Inc(1)
}
} else {
@ -188,7 +188,7 @@ func (ah *Accounting) doMetrics(price int64, size uint32, err error) {
mBytesDebit.Inc(int64(size))
mMsgDebit.Inc(1)
if err != nil {
//increase the number of times the local node has done an "overdraft" in respect to other nodes
// increase the number of times the local node has done an "overdraft" in respect to other nodes
mSelfDrops.Inc(1)
}
}

View File

@ -0,0 +1,94 @@
package protocols
import (
"errors"
)
// Textual version number of accounting API
const AccountingVersion = "1.0"
var errNoAccountingMetrics = errors.New("accounting metrics not enabled")
// AccountingApi provides an API to access account related information
type AccountingApi struct {
metrics *AccountingMetrics
}
// NewAccountingApi creates a new AccountingApi
// m will be used to check if accounting metrics are enabled
func NewAccountingApi(m *AccountingMetrics) *AccountingApi {
return &AccountingApi{m}
}
// Balance returns local node balance (units credited - units debited)
func (self *AccountingApi) Balance() (int64, error) {
if self.metrics == nil {
return 0, errNoAccountingMetrics
}
balance := mBalanceCredit.Count() - mBalanceDebit.Count()
return balance, nil
}
// BalanceCredit returns total amount of units credited by local node
func (self *AccountingApi) BalanceCredit() (int64, error) {
if self.metrics == nil {
return 0, errNoAccountingMetrics
}
return mBalanceCredit.Count(), nil
}
// BalanceDebit returns total amount of units debited by local node
func (self *AccountingApi) BalanceDebit() (int64, error) {
if self.metrics == nil {
return 0, errNoAccountingMetrics
}
return mBalanceDebit.Count(), nil
}
// BytesCredit returns total amount of bytes credited by local node
func (self *AccountingApi) BytesCredit() (int64, error) {
if self.metrics == nil {
return 0, errNoAccountingMetrics
}
return mBytesCredit.Count(), nil
}
// BytesDebit returns total amount of bytes debited by local node
func (self *AccountingApi) BytesDebit() (int64, error) {
if self.metrics == nil {
return 0, errNoAccountingMetrics
}
return mBytesDebit.Count(), nil
}
// MsgCredit returns total amount of messages credited by local node
func (self *AccountingApi) MsgCredit() (int64, error) {
if self.metrics == nil {
return 0, errNoAccountingMetrics
}
return mMsgCredit.Count(), nil
}
// MsgDebit returns total amount of messages debited by local node
func (self *AccountingApi) MsgDebit() (int64, error) {
if self.metrics == nil {
return 0, errNoAccountingMetrics
}
return mMsgDebit.Count(), nil
}
// PeerDrops returns number of times when local node had to drop remote peers
func (self *AccountingApi) PeerDrops() (int64, error) {
if self.metrics == nil {
return 0, errNoAccountingMetrics
}
return mPeerDrops.Count(), nil
}
// SelfDrops returns number of times when local node was overdrafted and dropped
func (self *AccountingApi) SelfDrops() (int64, error) {
if self.metrics == nil {
return 0, errNoAccountingMetrics
}
return mSelfDrops.Count(), nil
}
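A hedged sketch of how this API could be surfaced over RPC under the "account" namespace that the console module earlier in this diff queries; the rpc.API shape is the standard go-ethereum one, and the interval, path and function name are placeholders (this wiring is not part of the patch):

    import (
        "time"

        "github.com/ethereum/go-ethereum/p2p/protocols"
        "github.com/ethereum/go-ethereum/rpc"
    )

    func accountingAPI(metricsPath string) rpc.API {
        m := protocols.SetupAccountingMetrics(10*time.Second, metricsPath) // placeholder interval and path
        return rpc.API{
            Namespace: "account", // matches the "account_..." getters in the console module
            Version:   protocols.AccountingVersion,
            Service:   protocols.NewAccountingApi(m),
            Public:    false,
        }
    }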

View File

@ -39,9 +39,9 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
"github.com/golang/snappy"
"golang.org/x/crypto/sha3"
)
const (
@ -253,10 +253,10 @@ func (h *encHandshake) secrets(auth, authResp []byte) (secrets, error) {
}
// setup sha3 instances for the MACs
mac1 := sha3.NewKeccak256()
mac1 := sha3.NewLegacyKeccak256()
mac1.Write(xor(s.MAC, h.respNonce))
mac1.Write(auth)
mac2 := sha3.NewKeccak256()
mac2 := sha3.NewLegacyKeccak256()
mac2.Write(xor(s.MAC, h.initNonce))
mac2.Write(authResp)
if h.initiator {

View File

@ -34,9 +34,9 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/p2p/simulations/pipes"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
func TestSharedSecret(t *testing.T) {
@ -334,8 +334,8 @@ func TestRLPXFrameRW(t *testing.T) {
s1 := secrets{
AES: aesSecret,
MAC: macSecret,
EgressMAC: sha3.NewKeccak256(),
IngressMAC: sha3.NewKeccak256(),
EgressMAC: sha3.NewLegacyKeccak256(),
IngressMAC: sha3.NewLegacyKeccak256(),
}
s1.EgressMAC.Write(egressMACinit)
s1.IngressMAC.Write(ingressMACinit)
@ -344,8 +344,8 @@ func TestRLPXFrameRW(t *testing.T) {
s2 := secrets{
AES: aesSecret,
MAC: macSecret,
EgressMAC: sha3.NewKeccak256(),
IngressMAC: sha3.NewKeccak256(),
EgressMAC: sha3.NewLegacyKeccak256(),
IngressMAC: sha3.NewLegacyKeccak256(),
}
s2.EgressMAC.Write(ingressMACinit)
s2.IngressMAC.Write(egressMACinit)

View File

@ -26,10 +26,10 @@ import (
"time"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"golang.org/x/crypto/sha3"
)
// func init() {
@ -48,8 +48,8 @@ func newTestTransport(rpub *ecdsa.PublicKey, fd net.Conn) transport {
wrapped.rw = newRLPXFrameRW(fd, secrets{
MAC: zero16,
AES: zero16,
IngressMAC: sha3.NewKeccak256(),
EgressMAC: sha3.NewKeccak256(),
IngressMAC: sha3.NewLegacyKeccak256(),
EgressMAC: sha3.NewLegacyKeccak256(),
})
return &testTransport{rpub: rpub, rlpx: wrapped}
}

View File

@ -46,7 +46,7 @@ import (
func init() {
// Register a reexec function to start a simulation node when the current binary is
// executed as "p2p-node" (rather than whataver the main() function would normally do).
// executed as "p2p-node" (rather than whatever the main() function would normally do).
reexec.Register("p2p-node", execP2PNode)
}

View File

@ -130,7 +130,7 @@ func (s *SimAdapter) Dial(dest *enode.Node) (conn net.Conn, err error) {
return nil, err
}
// this is simulated 'listening'
// asynchronously call the dialed destintion node's p2p server
// asynchronously call the dialed destination node's p2p server
// to set up connection on the 'listening' side
go srv.SetupConn(pipe1, 0, nil)
return pipe2, nil
@ -351,17 +351,3 @@ func (sn *SimNode) NodeInfo() *p2p.NodeInfo {
}
return server.NodeInfo()
}
func setSocketBuffer(conn net.Conn, socketReadBuffer int, socketWriteBuffer int) error {
if v, ok := conn.(*net.UnixConn); ok {
err := v.SetReadBuffer(socketReadBuffer)
if err != nil {
return err
}
err = v.SetWriteBuffer(socketWriteBuffer)
if err != nil {
return err
}
}
return nil
}

View File

@ -0,0 +1,132 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package simulations
import (
"errors"
"strings"
"github.com/ethereum/go-ethereum/p2p/enode"
)
var (
ErrNodeNotFound = errors.New("node not found")
)
// ConnectToLastNode connects the node with the provided NodeID
// to the last node that is up, avoiding a connection to itself.
// It is useful when constructing a chain network topology
// when Network adds and removes nodes dynamically.
func (net *Network) ConnectToLastNode(id enode.ID) (err error) {
ids := net.getUpNodeIDs()
l := len(ids)
if l < 2 {
return nil
}
last := ids[l-1]
if last == id {
last = ids[l-2]
}
return net.connect(last, id)
}
// ConnectToRandomNode connects the node with provided NodeID
// to a random node that is up.
func (net *Network) ConnectToRandomNode(id enode.ID) (err error) {
selected := net.GetRandomUpNode(id)
if selected == nil {
return ErrNodeNotFound
}
return net.connect(selected.ID(), id)
}
// ConnectNodesFull connects all nodes one to another.
// It provides complete connectivity in the network,
// which should rarely be needed.
func (net *Network) ConnectNodesFull(ids []enode.ID) (err error) {
if ids == nil {
ids = net.getUpNodeIDs()
}
for i, lid := range ids {
for _, rid := range ids[i+1:] {
if err = net.connect(lid, rid); err != nil {
return err
}
}
}
return nil
}
// ConnectNodesChain connects all nodes in a chain topology.
// If ids argument is nil, all nodes that are up will be connected.
func (net *Network) ConnectNodesChain(ids []enode.ID) (err error) {
if ids == nil {
ids = net.getUpNodeIDs()
}
l := len(ids)
for i := 0; i < l-1; i++ {
if err := net.connect(ids[i], ids[i+1]); err != nil {
return err
}
}
return nil
}
// ConnectNodesRing connects all nodes in a ring topology.
// If ids argument is nil, all nodes that are up will be connected.
func (net *Network) ConnectNodesRing(ids []enode.ID) (err error) {
if ids == nil {
ids = net.getUpNodeIDs()
}
l := len(ids)
if l < 2 {
return nil
}
if err := net.ConnectNodesChain(ids); err != nil {
return err
}
return net.connect(ids[l-1], ids[0])
}
// ConnectNodesStar connects all nodes into a star topology.
// If ids argument is nil, all nodes that are up will be connected.
func (net *Network) ConnectNodesStar(ids []enode.ID, center enode.ID) (err error) {
if ids == nil {
ids = net.getUpNodeIDs()
}
for _, id := range ids {
if center == id {
continue
}
if err := net.connect(center, id); err != nil {
return err
}
}
return nil
}
// connect connects two nodes but ignores already connected error.
func (net *Network) connect(oneID, otherID enode.ID) error {
return ignoreAlreadyConnectedErr(net.Connect(oneID, otherID))
}
func ignoreAlreadyConnectedErr(err error) error {
if err == nil || strings.Contains(err.Error(), "already connected") {
return nil
}
return err
}
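
A hedged usage sketch of the topology helpers above, mirroring the test setup later in this diff; ringExample and the "noopwoop" service name are illustrative, only the Connect* helpers come from the new file:

package simulations

import (
	"fmt"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)

// ringExample starts a few in-memory simulation nodes and wires them into a
// ring with ConnectNodesRing; already-connected errors are swallowed by the
// helper via ignoreAlreadyConnectedErr.
func ringExample(nodeCount int) error {
	adapter := adapters.NewSimAdapter(adapters.Services{
		"noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
			return NewNoopService(nil), nil
		},
	})
	net := NewNetwork(adapter, &NetworkConfig{DefaultService: "noopwoop"})
	defer net.Shutdown()

	ids := make([]enode.ID, 0, nodeCount)
	for i := 0; i < nodeCount; i++ {
		n, err := net.NewNodeWithConfig(adapters.RandomNodeConfig())
		if err != nil {
			return fmt.Errorf("create node: %v", err)
		}
		if err := net.Start(n.ID()); err != nil {
			return fmt.Errorf("start node: %v", err)
		}
		ids = append(ids, n.ID())
	}
	// Chain the nodes and close the loop.
	return net.ConnectNodesRing(ids)
}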

View File

@ -0,0 +1,172 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package simulations
import (
"testing"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)
func newTestNetwork(t *testing.T, nodeCount int) (*Network, []enode.ID) {
t.Helper()
adapter := adapters.NewSimAdapter(adapters.Services{
"noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
return NewNoopService(nil), nil
},
})
// create network
network := NewNetwork(adapter, &NetworkConfig{
DefaultService: "noopwoop",
})
// create and start nodes
ids := make([]enode.ID, nodeCount)
for i := range ids {
conf := adapters.RandomNodeConfig()
node, err := network.NewNodeWithConfig(conf)
if err != nil {
t.Fatalf("error creating node: %s", err)
}
if err := network.Start(node.ID()); err != nil {
t.Fatalf("error starting node: %s", err)
}
ids[i] = node.ID()
}
if len(network.Conns) > 0 {
t.Fatal("no connections should exist after just adding nodes")
}
return network, ids
}
func TestConnectToLastNode(t *testing.T) {
net, ids := newTestNetwork(t, 10)
defer net.Shutdown()
first := ids[0]
if err := net.ConnectToLastNode(first); err != nil {
t.Fatal(err)
}
last := ids[len(ids)-1]
for i, id := range ids {
if id == first || id == last {
continue
}
if net.GetConn(first, id) != nil {
t.Errorf("connection must not exist with node(ind: %v, id: %v)", i, id)
}
}
if net.GetConn(first, last) == nil {
t.Error("first and last node must be connected")
}
}
func TestConnectToRandomNode(t *testing.T) {
net, ids := newTestNetwork(t, 10)
defer net.Shutdown()
err := net.ConnectToRandomNode(ids[0])
if err != nil {
t.Fatal(err)
}
var cc int
for i, a := range ids {
for _, b := range ids[i:] {
if net.GetConn(a, b) != nil {
cc++
}
}
}
if cc != 1 {
t.Errorf("expected one connection, got %v", cc)
}
}
func TestConnectNodesFull(t *testing.T) {
tests := []struct {
name string
nodeCount int
}{
{name: "no node", nodeCount: 0},
{name: "single node", nodeCount: 1},
{name: "2 nodes", nodeCount: 2},
{name: "3 nodes", nodeCount: 3},
{name: "even number of nodes", nodeCount: 12},
{name: "odd number of nodes", nodeCount: 13},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
net, ids := newTestNetwork(t, test.nodeCount)
defer net.Shutdown()
err := net.ConnectNodesFull(ids)
if err != nil {
t.Fatal(err)
}
VerifyFull(t, net, ids)
})
}
}
func TestConnectNodesChain(t *testing.T) {
net, ids := newTestNetwork(t, 10)
defer net.Shutdown()
err := net.ConnectNodesChain(ids)
if err != nil {
t.Fatal(err)
}
VerifyChain(t, net, ids)
}
func TestConnectNodesRing(t *testing.T) {
net, ids := newTestNetwork(t, 10)
defer net.Shutdown()
err := net.ConnectNodesRing(ids)
if err != nil {
t.Fatal(err)
}
VerifyRing(t, net, ids)
}
func TestConnectNodesStar(t *testing.T) {
net, ids := newTestNetwork(t, 10)
defer net.Shutdown()
pivotIndex := 2
err := net.ConnectNodesStar(ids, ids[pivotIndex])
if err != nil {
t.Fatal(err)
}
VerifyStar(t, net, ids, pivotIndex)
}

View File

@ -35,7 +35,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/rpc"
colorable "github.com/mattn/go-colorable"
"github.com/mattn/go-colorable"
)
var (
@ -294,6 +294,7 @@ var testServices = adapters.Services{
}
func testHTTPServer(t *testing.T) (*Network, *httptest.Server) {
t.Helper()
adapter := adapters.NewSimAdapter(testServices)
network := NewNetwork(adapter, &NetworkConfig{
DefaultService: "test",

View File

@ -15,7 +15,7 @@
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package simulations simulates p2p networks.
// A mokcer simulates starting and stopping real nodes in a network.
// A mocker simulates starting and stopping real nodes in a network.
package simulations
import (
@ -135,13 +135,13 @@ func TestMocker(t *testing.T) {
wg.Wait()
//check there are nodeCount number of nodes in the network
nodes_info, err := client.GetNodes()
nodesInfo, err := client.GetNodes()
if err != nil {
t.Fatalf("Could not get nodes list: %s", err)
}
if len(nodes_info) != nodeCount {
t.Fatalf("Expected %d number of nodes, got: %d", nodeCount, len(nodes_info))
if len(nodesInfo) != nodeCount {
t.Fatalf("Expected %d number of nodes, got: %d", nodeCount, len(nodesInfo))
}
//stop the mocker
@ -160,12 +160,12 @@ func TestMocker(t *testing.T) {
}
//now the number of nodes in the network should be zero
nodes_info, err = client.GetNodes()
nodesInfo, err = client.GetNodes()
if err != nil {
t.Fatalf("Could not get nodes list: %s", err)
}
if len(nodes_info) != 0 {
t.Fatalf("Expected empty list of nodes, got: %d", len(nodes_info))
if len(nodesInfo) != 0 {
t.Fatalf("Expected empty list of nodes, got: %d", len(nodesInfo))
}
}

View File

@ -22,6 +22,7 @@ import (
"encoding/json"
"errors"
"fmt"
"math/rand"
"sync"
"time"
@ -370,23 +371,32 @@ func (net *Network) DidReceive(sender, receiver enode.ID, proto string, code uin
// GetNode gets the node with the given ID, returning nil if the node does not
// exist
func (net *Network) GetNode(id enode.ID) *Node {
net.lock.Lock()
defer net.lock.Unlock()
net.lock.RLock()
defer net.lock.RUnlock()
return net.getNode(id)
}
// GetNodeByName gets the node with the given name, returning nil if the node does
// not exist
func (net *Network) GetNodeByName(name string) *Node {
net.lock.Lock()
defer net.lock.Unlock()
net.lock.RLock()
defer net.lock.RUnlock()
return net.getNodeByName(name)
}
func (net *Network) getNodeByName(name string) *Node {
for _, node := range net.Nodes {
if node.Config.Name == name {
return node
}
}
return nil
}
// GetNodes returns the existing nodes
func (net *Network) GetNodes() (nodes []*Node) {
net.lock.Lock()
defer net.lock.Unlock()
net.lock.RLock()
defer net.lock.RUnlock()
nodes = append(nodes, net.Nodes...)
return nodes
@ -400,20 +410,67 @@ func (net *Network) getNode(id enode.ID) *Node {
return net.Nodes[i]
}
func (net *Network) getNodeByName(name string) *Node {
// GetRandomUpNode returns a random node on the network that is running.
func (net *Network) GetRandomUpNode(excludeIDs ...enode.ID) *Node {
net.lock.RLock()
defer net.lock.RUnlock()
return net.getRandomNode(net.getUpNodeIDs(), excludeIDs)
}
func (net *Network) getUpNodeIDs() (ids []enode.ID) {
for _, node := range net.Nodes {
if node.Config.Name == name {
return node
if node.Up {
ids = append(ids, node.ID())
}
}
return ids
}
// GetRandomDownNode returns a random node on the network that is stopped.
func (net *Network) GetRandomDownNode(excludeIDs ...enode.ID) *Node {
net.lock.RLock()
defer net.lock.RUnlock()
return net.getRandomNode(net.getDownNodeIDs(), excludeIDs)
}
func (net *Network) getDownNodeIDs() (ids []enode.ID) {
for _, node := range net.GetNodes() {
if !node.Up {
ids = append(ids, node.ID())
}
}
return ids
}
func (net *Network) getRandomNode(ids []enode.ID, excludeIDs []enode.ID) *Node {
filtered := filterIDs(ids, excludeIDs)
l := len(filtered)
if l == 0 {
return nil
}
return net.GetNode(filtered[rand.Intn(l)])
}
func filterIDs(ids []enode.ID, excludeIDs []enode.ID) []enode.ID {
exclude := make(map[enode.ID]bool)
for _, id := range excludeIDs {
exclude[id] = true
}
var filtered []enode.ID
for _, id := range ids {
if _, found := exclude[id]; !found {
filtered = append(filtered, id)
}
}
return filtered
}
// GetConn returns the connection which exists between "one" and "other"
// regardless of which node initiated the connection
func (net *Network) GetConn(oneID, otherID enode.ID) *Conn {
net.lock.Lock()
defer net.lock.Unlock()
net.lock.RLock()
defer net.lock.RUnlock()
return net.getConn(oneID, otherID)
}
@ -459,7 +516,7 @@ func (net *Network) getConn(oneID, otherID enode.ID) *Conn {
return net.Conns[i]
}
// InitConn(one, other) retrieves the connectiton model for the connection between
// InitConn(one, other) retrieves the connection model for the connection between
// peers one and other, or creates a new one if it does not exist
// the order of nodes does not matter, i.e., Conn(i,j) == Conn(j, i)
// it checks if the connection is already up, and if the nodes are running
@ -505,8 +562,8 @@ func (net *Network) Shutdown() {
close(net.quitc)
}
//Reset resets all network properties:
//emtpies the nodes and the connection list
// Reset resets all network properties:
// empties the nodes and the connection list
func (net *Network) Reset() {
net.lock.Lock()
defer net.lock.Unlock()
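
A short sketch of the new read-locked getters in use; connectToRandomPeer is a hypothetical name, and it is essentially what ConnectToRandomNode in connect.go wraps:

package simulations

import "github.com/ethereum/go-ethereum/p2p/enode"

// connectToRandomPeer picks a running node other than self and dials it.
// GetRandomUpNode only takes a read lock, so concurrent callers no longer
// serialize on the network mutex.
func connectToRandomPeer(net *Network, self enode.ID) error {
	peer := net.GetRandomUpNode(self) // self is excluded from the candidates
	if peer == nil {
		return ErrNodeNotFound
	}
	return net.Connect(self, peer.ID())
}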

View File

@ -18,14 +18,266 @@ package simulations
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"testing"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)
// Tests that a created snapshot with a minimal service only contains the expected connections
// and that a network when loaded with this snapshot only contains those same connections
func TestSnapshot(t *testing.T) {
// PART I
// create snapshot from ring network
// this is a minimal service, whose protocol will take exactly one message OR close of connection before quitting
adapter := adapters.NewSimAdapter(adapters.Services{
"noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
return NewNoopService(nil), nil
},
})
// create network
network := NewNetwork(adapter, &NetworkConfig{
DefaultService: "noopwoop",
})
// \todo consider making a member of network, set to true threadsafe when shutdown
runningOne := true
defer func() {
if runningOne {
network.Shutdown()
}
}()
// create and start nodes
nodeCount := 20
ids := make([]enode.ID, nodeCount)
for i := 0; i < nodeCount; i++ {
conf := adapters.RandomNodeConfig()
node, err := network.NewNodeWithConfig(conf)
if err != nil {
t.Fatalf("error creating node: %s", err)
}
if err := network.Start(node.ID()); err != nil {
t.Fatalf("error starting node: %s", err)
}
ids[i] = node.ID()
}
// subscribe to peer events
evC := make(chan *Event)
sub := network.Events().Subscribe(evC)
defer sub.Unsubscribe()
// connect nodes in a ring
// spawn separate thread to avoid deadlock in the event listeners
go func() {
for i, id := range ids {
peerID := ids[(i+1)%len(ids)]
if err := network.Connect(id, peerID); err != nil {
t.Fatal(err)
}
}
}()
// collect connection events up to expected number
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
defer cancel()
checkIds := make(map[enode.ID][]enode.ID)
connEventCount := nodeCount
OUTER:
for {
select {
case <-ctx.Done():
t.Fatal(ctx.Err())
case ev := <-evC:
if ev.Type == EventTypeConn && !ev.Control {
// fail on any disconnect
if !ev.Conn.Up {
t.Fatalf("unexpected disconnect: %v -> %v", ev.Conn.One, ev.Conn.Other)
}
checkIds[ev.Conn.One] = append(checkIds[ev.Conn.One], ev.Conn.Other)
checkIds[ev.Conn.Other] = append(checkIds[ev.Conn.Other], ev.Conn.One)
connEventCount--
log.Debug("ev", "count", connEventCount)
if connEventCount == 0 {
break OUTER
}
}
}
}
// create snapshot of current network
snap, err := network.Snapshot()
if err != nil {
t.Fatal(err)
}
j, err := json.Marshal(snap)
if err != nil {
t.Fatal(err)
}
log.Debug("snapshot taken", "nodes", len(snap.Nodes), "conns", len(snap.Conns), "json", string(j))
// verify that the snap element numbers check out
if len(checkIds) != len(snap.Conns) || len(checkIds) != len(snap.Nodes) {
t.Fatalf("snapshot wrong node,conn counts %d,%d != %d", len(snap.Nodes), len(snap.Conns), len(checkIds))
}
// shut down sim network
runningOne = false
sub.Unsubscribe()
network.Shutdown()
// check that we have all the expected connections in the snapshot
for nodid, nodConns := range checkIds {
for _, nodConn := range nodConns {
var match bool
for _, snapConn := range snap.Conns {
if snapConn.One == nodid && snapConn.Other == nodConn {
match = true
break
} else if snapConn.Other == nodid && snapConn.One == nodConn {
match = true
break
}
}
if !match {
t.Fatalf("snapshot missing conn %v -> %v", nodid, nodConn)
}
}
}
log.Info("snapshot checked")
// PART II
// load snapshot and verify that exactly same connections are formed
adapter = adapters.NewSimAdapter(adapters.Services{
"noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
return NewNoopService(nil), nil
},
})
network = NewNetwork(adapter, &NetworkConfig{
DefaultService: "noopwoop",
})
defer func() {
network.Shutdown()
}()
// subscribe to peer events
// every node up and conn up event will generate one additional control event
// therefore multiply the count by two
evC = make(chan *Event, (len(snap.Conns)*2)+(len(snap.Nodes)*2))
sub = network.Events().Subscribe(evC)
defer sub.Unsubscribe()
// load the snapshot
// spawn separate thread to avoid deadlock in the event listeners
err = network.Load(snap)
if err != nil {
t.Fatal(err)
}
// collect connection events up to expected number
ctx, cancel = context.WithTimeout(context.TODO(), time.Second*3)
defer cancel()
connEventCount = nodeCount
OUTER_TWO:
for {
select {
case <-ctx.Done():
t.Fatal(ctx.Err())
case ev := <-evC:
if ev.Type == EventTypeConn && !ev.Control {
// fail on any disconnect
if !ev.Conn.Up {
t.Fatalf("unexpected disconnect: %v -> %v", ev.Conn.One, ev.Conn.Other)
}
log.Debug("conn", "on", ev.Conn.One, "other", ev.Conn.Other)
checkIds[ev.Conn.One] = append(checkIds[ev.Conn.One], ev.Conn.Other)
checkIds[ev.Conn.Other] = append(checkIds[ev.Conn.Other], ev.Conn.One)
connEventCount--
log.Debug("ev", "count", connEventCount)
if connEventCount == 0 {
break OUTER_TWO
}
}
}
}
// check that we have all expected connections in the network
for _, snapConn := range snap.Conns {
var match bool
for nodid, nodConns := range checkIds {
for _, nodConn := range nodConns {
if snapConn.One == nodid && snapConn.Other == nodConn {
match = true
break
} else if snapConn.Other == nodid && snapConn.One == nodConn {
match = true
break
}
}
}
if !match {
t.Fatalf("network missing conn %v -> %v", snapConn.One, snapConn.Other)
}
}
// verify that network didn't generate any other additional connection events after the ones we have collected within a reasonable period of time
ctx, cancel = context.WithTimeout(context.TODO(), time.Second)
defer cancel()
select {
case <-ctx.Done():
case ev := <-evC:
if ev.Type == EventTypeConn {
t.Fatalf("Superfluous conn found %v -> %v", ev.Conn.One, ev.Conn.Other)
}
}
// This test validates if all connections from the snapshot
// are created in the network.
t.Run("conns after load", func(t *testing.T) {
// Create new network.
n := NewNetwork(
adapters.NewSimAdapter(adapters.Services{
"noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
return NewNoopService(nil), nil
},
}),
&NetworkConfig{
DefaultService: "noopwoop",
},
)
defer n.Shutdown()
// Load the same snapshot.
err := n.Load(snap)
if err != nil {
t.Fatal(err)
}
// Check every connection from the snapshot
// if it is in the network, too.
for _, c := range snap.Conns {
if n.GetConn(c.One, c.Other) == nil {
t.Errorf("missing connection: %s -> %s", c.One, c.Other)
}
}
})
}
// TestNetworkSimulation creates a multi-node simulation network with each node
// connected in a ring topology, checks that all nodes successfully handshake
// with each other and that a snapshot fully represents the desired topology
@ -158,3 +410,78 @@ func triggerChecks(ctx context.Context, ids []enode.ID, trigger chan enode.ID, i
}
}
}
// \todo: refactor to implement snapshots
// and connect configuration methods once these are moved from
// swarm/network/simulations/connect.go
func BenchmarkMinimalService(b *testing.B) {
b.Run("ring/32", benchmarkMinimalServiceTmp)
}
func benchmarkMinimalServiceTmp(b *testing.B) {
// stop timer to discard setup time pollution
args := strings.Split(b.Name(), "/")
nodeCount, err := strconv.ParseInt(args[2], 10, 16)
if err != nil {
b.Fatal(err)
}
for i := 0; i < b.N; i++ {
// this is a minimal service, whose protocol will close a channel upon run of protocol
// making it possible to bench the time it takes for the service to start and protocol actually to be run
protoCMap := make(map[enode.ID]map[enode.ID]chan struct{})
adapter := adapters.NewSimAdapter(adapters.Services{
"noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
protoCMap[ctx.Config.ID] = make(map[enode.ID]chan struct{})
svc := NewNoopService(protoCMap[ctx.Config.ID])
return svc, nil
},
})
// create network
network := NewNetwork(adapter, &NetworkConfig{
DefaultService: "noopwoop",
})
defer network.Shutdown()
// create and start nodes
ids := make([]enode.ID, nodeCount)
for i := 0; i < int(nodeCount); i++ {
conf := adapters.RandomNodeConfig()
node, err := network.NewNodeWithConfig(conf)
if err != nil {
b.Fatalf("error creating node: %s", err)
}
if err := network.Start(node.ID()); err != nil {
b.Fatalf("error starting node: %s", err)
}
ids[i] = node.ID()
}
// ready, set, go
b.ResetTimer()
// connect nodes in a ring
for i, id := range ids {
peerID := ids[(i+1)%len(ids)]
if err := network.Connect(id, peerID); err != nil {
b.Fatal(err)
}
}
// wait for all protocols to signal to close down
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
defer cancel()
for nodid, peers := range protoCMap {
for peerid, peerC := range peers {
log.Debug("getting ", "node", nodid, "peer", peerid)
select {
case <-ctx.Done():
b.Fatal(ctx.Err())
case <-peerC:
}
}
}
}
}
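
The Snapshot/Load pair exercised by the test above can be summarized in a small helper. A hedged sketch; cloneTopology and the "noopwoop" service name are assumptions carried over from the tests in this diff:

package simulations

import "github.com/ethereum/go-ethereum/p2p/simulations/adapters"

// cloneTopology captures the topology of src and replays it into a fresh
// network backed by the given adapter.
func cloneTopology(src *Network, adapter adapters.NodeAdapter) (*Network, error) {
	snap, err := src.Snapshot()
	if err != nil {
		return nil, err
	}
	dst := NewNetwork(adapter, &NetworkConfig{DefaultService: "noopwoop"})
	if err := dst.Load(snap); err != nil {
		dst.Shutdown()
		return nil, err
	}
	// Connections come up asynchronously; callers that need them established
	// should watch dst.Events(), as the test above does.
	return dst, nil
}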

View File

@ -0,0 +1,134 @@
package simulations
import (
"testing"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/rpc"
)
// NoopService is the service that does not do anything
// but implements node.Service interface.
type NoopService struct {
c map[enode.ID]chan struct{}
}
func NewNoopService(ackC map[enode.ID]chan struct{}) *NoopService {
return &NoopService{
c: ackC,
}
}
func (t *NoopService) Protocols() []p2p.Protocol {
return []p2p.Protocol{
{
Name: "noop",
Version: 666,
Length: 0,
Run: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
if t.c != nil {
t.c[peer.ID()] = make(chan struct{})
close(t.c[peer.ID()])
}
rw.ReadMsg()
return nil
},
NodeInfo: func() interface{} {
return struct{}{}
},
PeerInfo: func(id enode.ID) interface{} {
return struct{}{}
},
Attributes: []enr.Entry{},
},
}
}
func (t *NoopService) APIs() []rpc.API {
return []rpc.API{}
}
func (t *NoopService) Start(server *p2p.Server) error {
return nil
}
func (t *NoopService) Stop() error {
return nil
}
func VerifyRing(t *testing.T, net *Network, ids []enode.ID) {
t.Helper()
n := len(ids)
for i := 0; i < n; i++ {
for j := i + 1; j < n; j++ {
c := net.GetConn(ids[i], ids[j])
if i == j-1 || (i == 0 && j == n-1) {
if c == nil {
t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
}
} else {
if c != nil {
t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
}
}
}
}
}
func VerifyChain(t *testing.T, net *Network, ids []enode.ID) {
t.Helper()
n := len(ids)
for i := 0; i < n; i++ {
for j := i + 1; j < n; j++ {
c := net.GetConn(ids[i], ids[j])
if i == j-1 {
if c == nil {
t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
}
} else {
if c != nil {
t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
}
}
}
}
}
func VerifyFull(t *testing.T, net *Network, ids []enode.ID) {
t.Helper()
n := len(ids)
var connections int
for i, lid := range ids {
for _, rid := range ids[i+1:] {
if net.GetConn(lid, rid) != nil {
connections++
}
}
}
want := n * (n - 1) / 2
if connections != want {
t.Errorf("wrong number of connections, got: %v, want: %v", connections, want)
}
}
func VerifyStar(t *testing.T, net *Network, ids []enode.ID, centerIndex int) {
t.Helper()
n := len(ids)
for i := 0; i < n; i++ {
for j := i + 1; j < n; j++ {
c := net.GetConn(ids[i], ids[j])
if i == centerIndex || j == centerIndex {
if c == nil {
t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
}
} else {
if c != nil {
t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
}
}
}
}
}

View File

@ -42,7 +42,7 @@ var (
EIP155Block: big.NewInt(2675000),
EIP158Block: big.NewInt(2675000),
ByzantiumBlock: big.NewInt(4370000),
ConstantinopleBlock: big.NewInt(7080000),
ConstantinopleBlock: nil,
Ethash: new(EthashConfig),
}
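
With ConstantinopleBlock cleared on mainnet (the fork was postponed in this release), the fork check simply reports false until a new block number is scheduled. A minimal sketch, assuming the standard params API:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// A nil ConstantinopleBlock means IsConstantinople is false at every
	// height, so mainnet keeps Byzantium rules.
	cfg := params.MainnetChainConfig
	fmt.Println(cfg.IsConstantinople(big.NewInt(7080000))) // false
}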

View File

@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 8 // Minor version component of the current release
VersionPatch = 20 // Patch version component of the current release
VersionPatch = 21 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)

View File

@ -36,11 +36,15 @@ import (
)
const (
contentType = "application/json"
maxRequestContentLength = 1024 * 512
)
var nullAddr, _ = net.ResolveTCPAddr("tcp", "127.0.0.1:0")
var (
// https://www.jsonrpc.org/historical/json-rpc-over-http.html#id13
acceptedContentTypes = []string{"application/json", "application/json-rpc", "application/jsonrequest"}
contentType = acceptedContentTypes[0]
nullAddr, _ = net.ResolveTCPAddr("tcp", "127.0.0.1:0")
)
type httpConn struct {
client *http.Client
@ -263,12 +267,21 @@ func validateRequest(r *http.Request) (int, error) {
err := fmt.Errorf("content length too large (%d>%d)", r.ContentLength, maxRequestContentLength)
return http.StatusRequestEntityTooLarge, err
}
mt, _, err := mime.ParseMediaType(r.Header.Get("content-type"))
if r.Method != http.MethodOptions && (err != nil || mt != contentType) {
// Allow OPTIONS (regardless of content-type)
if r.Method == http.MethodOptions {
return 0, nil
}
// Check content-type
if mt, _, err := mime.ParseMediaType(r.Header.Get("content-type")); err == nil {
for _, accepted := range acceptedContentTypes {
if accepted == mt {
return 0, nil
}
}
}
// Invalid content-type
err := fmt.Errorf("invalid content type, only %s is supported", contentType)
return http.StatusUnsupportedMediaType, err
}
return 0, nil
}
func newCorsHandler(srv *Server, allowedOrigins []string) http.Handler {
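
The hunk above makes validateRequest accept the three content types allowed by the JSON-RPC-over-HTTP spec instead of only application/json. A hedged client-side sketch; the endpoint and method are assumptions about a locally running node with HTTP RPC enabled:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Any of application/json, application/json-rpc or application/jsonrequest
	// now passes validateRequest.
	body := []byte(`{"jsonrpc":"2.0","id":1,"method":"web3_clientVersion","params":[]}`)
	resp, err := http.Post("http://127.0.0.1:8545", "application/json-rpc", bytes.NewReader(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}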

View File

@ -20,13 +20,31 @@ package rpc
import (
"context"
"fmt"
"net"
"os"
"path/filepath"
"github.com/ethereum/go-ethereum/log"
)
/*
#include <sys/un.h>
int max_socket_path_size() {
struct sockaddr_un s;
return sizeof(s.sun_path);
}
*/
import "C"
// ipcListen will create a Unix socket on the given endpoint.
func ipcListen(endpoint string) (net.Listener, error) {
if len(endpoint) > int(C.max_socket_path_size()) {
log.Warn(fmt.Sprintf("The ipc endpoint is longer than %d characters. ", C.max_socket_path_size()),
"endpoint", endpoint)
}
// Ensure the IPC path exists and remove any previous leftover
if err := os.MkdirAll(filepath.Dir(endpoint), 0751); err != nil {
return nil, err
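
The cgo block above reads sizeof(sun_path) at build time so overly long IPC paths trigger a warning instead of a confusing bind failure. A rough pure-Go equivalent of the same check; warnIfEndpointTooLong and the 104-byte constant are illustrative assumptions:

package rpc

import (
	"fmt"

	"github.com/ethereum/go-ethereum/log"
)

// maxUnixSocketPath approximates sizeof(sun_path): 104 bytes on BSD/macOS,
// 108 on Linux. The cgo version above queries the real value instead.
const maxUnixSocketPath = 104

func warnIfEndpointTooLong(endpoint string) {
	if len(endpoint) > maxUnixSocketPath {
		log.Warn(fmt.Sprintf("The ipc endpoint is longer than %d characters.", maxUnixSocketPath),
			"endpoint", endpoint)
	}
}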

View File

@ -15,11 +15,11 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/sctx"
"github.com/ethereum/go-ethereum/swarm/storage"
"golang.org/x/crypto/scrypt"
"golang.org/x/crypto/sha3"
cli "gopkg.in/urfave/cli.v1"
)
@ -336,7 +336,7 @@ func (a *API) doDecrypt(ctx context.Context, credentials string, pk *ecdsa.Priva
}
func (a *API) getACTDecryptionKey(ctx context.Context, actManifestAddress storage.Address, sessionKey []byte) (found bool, ciphertext, decryptionKey []byte, err error) {
hasher := sha3.NewKeccak256()
hasher := sha3.NewLegacyKeccak256()
hasher.Write(append(sessionKey, 0))
lookupKey := hasher.Sum(nil)
hasher.Reset()
@ -462,7 +462,7 @@ func DoACT(ctx *cli.Context, privateKey *ecdsa.PrivateKey, salt []byte, grantees
return nil, nil, nil, err
}
hasher := sha3.NewKeccak256()
hasher := sha3.NewLegacyKeccak256()
hasher.Write(append(sessionKey, 0))
lookupKey := hasher.Sum(nil)
@ -484,7 +484,7 @@ func DoACT(ctx *cli.Context, privateKey *ecdsa.PrivateKey, salt []byte, grantees
if err != nil {
return nil, nil, nil, err
}
hasher := sha3.NewKeccak256()
hasher := sha3.NewLegacyKeccak256()
hasher.Write(append(sessionKey, 0))
lookupKey := hasher.Sum(nil)
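
All three hunks in this file derive the manifest lookup key the same way. A small sketch of that pattern with the new constructor; sessionLookupKey is an illustrative name, the real code inlines it:

package api

import "golang.org/x/crypto/sha3"

// sessionLookupKey hashes sessionKey||0x00 with legacy Keccak-256, exactly
// as the hunks above do before looking up the ACT manifest entry.
func sessionLookupKey(sessionKey []byte) []byte {
	hasher := sha3.NewLegacyKeccak256()
	hasher.Write(append(sessionKey, 0))
	return hasher.Sum(nil)
}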

View File

@ -50,10 +50,6 @@ import (
opentracing "github.com/opentracing/opentracing-go"
)
var (
ErrNotFound = errors.New("not found")
)
var (
apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil)
apiResolveFail = metrics.NewRegisteredCounter("api.resolve.fail", nil)
@ -136,13 +132,6 @@ func MultiResolverOptionWithResolver(r ResolveValidator, tld string) MultiResolv
}
}
// MultiResolverOptionWithNameHash is unused at the time of this writing
func MultiResolverOptionWithNameHash(nameHash func(string) common.Hash) MultiResolverOption {
return func(m *MultiResolver) {
m.nameHash = nameHash
}
}
// NewMultiResolver creates a new instance of MultiResolver.
func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) {
m = &MultiResolver{
@ -173,40 +162,6 @@ func (m *MultiResolver) Resolve(addr string) (h common.Hash, err error) {
return
}
// ValidateOwner checks the ENS to validate that the owner of the given domain is the given eth address
func (m *MultiResolver) ValidateOwner(name string, address common.Address) (bool, error) {
rs, err := m.getResolveValidator(name)
if err != nil {
return false, err
}
var addr common.Address
for _, r := range rs {
addr, err = r.Owner(m.nameHash(name))
// we hide the error if it is not for the last resolver we check
if err == nil {
return addr == address, nil
}
}
return false, err
}
// HeaderByNumber uses the validator of the given domainname and retrieves the header for the given block number
func (m *MultiResolver) HeaderByNumber(ctx context.Context, name string, blockNr *big.Int) (*types.Header, error) {
rs, err := m.getResolveValidator(name)
if err != nil {
return nil, err
}
for _, r := range rs {
var header *types.Header
header, err = r.HeaderByNumber(ctx, blockNr)
// we hide the error if it is not for the last resolver we check
if err == nil {
return header, nil
}
}
return nil, err
}
// getResolveValidator uses the hostname to retrieve the resolver associated with the top level domain
func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, error) {
rs := m.resolvers[""]
@ -224,11 +179,6 @@ func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, er
return rs, nil
}
// SetNameHash sets the hasher function that hashes the domain into a name hash that ENS uses
func (m *MultiResolver) SetNameHash(nameHash func(string) common.Hash) {
m.nameHash = nameHash
}
/*
API implements webserver/file system related content storage and retrieval
on top of the FileStore
@ -265,9 +215,6 @@ func (a *API) Store(ctx context.Context, data io.Reader, size int64, toEncrypt b
return a.fileStore.Store(ctx, data, size, toEncrypt)
}
// ErrResolve is returned when an URI cannot be resolved from ENS.
type ErrResolve error
// Resolve a name into a content-addressed hash
// where address could be an ENS name, or a content addressed hash
func (a *API) Resolve(ctx context.Context, address string) (storage.Address, error) {
@ -980,11 +927,6 @@ func (a *API) FeedsUpdate(ctx context.Context, request *feed.Request) (storage.A
return a.feed.Update(ctx, request)
}
// FeedsHashSize returned the size of the digest produced by Swarm feeds' hashing function
func (a *API) FeedsHashSize() int {
return a.feed.HashSize
}
// ErrCannotLoadFeedManifest is returned when looking up a feeds manifest fails
var ErrCannotLoadFeedManifest = errors.New("Cannot load feed manifest")

View File

@ -45,11 +45,6 @@ import (
"github.com/pborman/uuid"
)
var (
DefaultGateway = "http://localhost:8500"
DefaultClient = NewClient(DefaultGateway)
)
var (
ErrUnauthorized = errors.New("unauthorized")
)

View File

@ -20,8 +20,8 @@ import (
"encoding/binary"
"errors"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/swarm/storage/encryption"
"golang.org/x/crypto/sha3"
)
type RefEncryption struct {
@ -39,12 +39,12 @@ func NewRefEncryption(refSize int) *RefEncryption {
}
func (re *RefEncryption) Encrypt(ref []byte, key []byte) ([]byte, error) {
spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewKeccak256)
spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewLegacyKeccak256)
encryptedSpan, err := spanEncryption.Encrypt(re.span)
if err != nil {
return nil, err
}
dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewKeccak256)
dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewLegacyKeccak256)
encryptedData, err := dataEncryption.Encrypt(ref)
if err != nil {
return nil, err
@ -57,7 +57,7 @@ func (re *RefEncryption) Encrypt(ref []byte, key []byte) ([]byte, error) {
}
func (re *RefEncryption) Decrypt(ref []byte, key []byte) ([]byte, error) {
spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewKeccak256)
spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewLegacyKeccak256)
decryptedSpan, err := spanEncryption.Decrypt(ref[:8])
if err != nil {
return nil, err
@ -68,7 +68,7 @@ func (re *RefEncryption) Decrypt(ref []byte, key []byte) ([]byte, error) {
return nil, errors.New("invalid span in encrypted reference")
}
dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewKeccak256)
dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewLegacyKeccak256)
decryptedRef, err := dataEncryption.Decrypt(ref[8:])
if err != nil {
return nil, err
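
A hedged round-trip sketch of RefEncryption after the constructor swap; refRoundTrip is an illustrative helper assumed to live in the same package as the file above, and the 32-byte key/reference sizes used by callers elsewhere in swarm are assumed:

package api

// refRoundTrip encrypts a reference and decrypts it again with the same key.
// Encrypt prepends the 8-byte encrypted span that Decrypt later validates.
func refRoundTrip(ref, key []byte) ([]byte, error) {
	re := NewRefEncryption(len(ref))
	enc, err := re.Encrypt(ref, key)
	if err != nil {
		return nil, err
	}
	return re.Decrypt(enc, key)
}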

View File

@ -80,7 +80,7 @@ func InitLoggingResponseWriter(h http.Handler) http.Handler {
h.ServeHTTP(writer, r)
ts := time.Since(tn)
log.Info("request served", "ruid", GetRUID(r.Context()), "code", writer.statusCode, "time", ts*time.Millisecond)
log.Info("request served", "ruid", GetRUID(r.Context()), "code", writer.statusCode, "time", ts)
metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.time", r.Method), nil).Update(ts)
metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.%d.time", r.Method, writer.statusCode), nil).Update(ts)
})

View File

@ -83,23 +83,3 @@ func (s *Storage) Get(ctx context.Context, bzzpath string) (*Response, error) {
}
return &Response{mimeType, status, expsize, string(body[:size])}, err
}
// Modify(rootHash, basePath, contentHash, contentType) takes th e manifest trie rooted in rootHash,
// and merge on to it. creating an entry w conentType (mime)
//
// DEPRECATED: Use the HTTP API instead
func (s *Storage) Modify(ctx context.Context, rootHash, path, contentHash, contentType string) (newRootHash string, err error) {
uri, err := Parse("bzz:/" + rootHash)
if err != nil {
return "", err
}
addr, err := s.api.Resolve(ctx, uri.Addr)
if err != nil {
return "", err
}
addr, err = s.api.Modify(ctx, addr, path, contentHash, contentType)
if err != nil {
return "", err
}
return addr.Hex(), nil
}

View File

@ -29,18 +29,6 @@ func NewControl(api *API, hive *network.Hive) *Control {
return &Control{api, hive}
}
//func (self *Control) BlockNetworkRead(on bool) {
// self.hive.BlockNetworkRead(on)
//}
//
//func (self *Control) SyncEnabled(on bool) {
// self.hive.SyncEnabled(on)
//}
//
//func (self *Control) SwapEnabled(on bool) {
// self.hive.SwapEnabled(on)
//}
//
func (c *Control) Hive() string {
return c.hive.String()
}

Some files were not shown because too many files have changed in this diff.