diff --git a/Gopkg.lock b/Gopkg.lock
index 55fb47d9..edf16139 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -37,7 +37,7 @@
version = "v1.7.1"
[[projects]]
- digest = "1:a9c8210eb5d36a9a6e66953dc3d3cabd3afbbfb4f50baab0db1af1b723254b82"
+ digest = "1:3d26f660432345429f6b09595e4707ee12745547323bcd1dc91457125aefeedc"
name = "github.com/ethereum/go-ethereum"
packages = [
".",
@@ -56,7 +56,6 @@
"crypto",
"crypto/ecies",
"crypto/secp256k1",
- "crypto/sha3",
"ethclient",
"ethdb",
"event",
@@ -75,8 +74,8 @@
"trie",
]
pruneopts = ""
- revision = "24d727b6d6e2c0cde222fa12155c4a6db5caaf2e"
- version = "v1.8.20"
+ revision = "9dc5d1a915ac0e0bd8429d6ac41df50eec91de5f"
+ version = "v1.8.21"
[[projects]]
digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd"
@@ -416,15 +415,16 @@
[[projects]]
branch = "master"
- digest = "1:61a86f0be8b466d6e3fbdabb155aaa4006137cb5e3fd3b949329d103fa0ceb0f"
+ digest = "1:59b49c47c11a48f1054529207f65907c014ecf5f9a7c0d9c0f1616dec7b062ed"
name = "golang.org/x/crypto"
packages = [
"pbkdf2",
"scrypt",
+ "sha3",
"ssh/terminal",
]
pruneopts = ""
- revision = "0e37d006457bf46f9e6692014ba72ef82c33022c"
+ revision = "ff983b9c42bc9fbf91556e191cc8efb585c16908"
[[projects]]
branch = "master"
@@ -548,7 +548,6 @@
"github.com/ethereum/go-ethereum/crypto",
"github.com/ethereum/go-ethereum/ethclient",
"github.com/ethereum/go-ethereum/ethdb",
- "github.com/ethereum/go-ethereum/log",
"github.com/ethereum/go-ethereum/p2p",
"github.com/ethereum/go-ethereum/p2p/discv5",
"github.com/ethereum/go-ethereum/params",
diff --git a/Gopkg.toml b/Gopkg.toml
index 82a3ca70..b06a8f00 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -51,4 +51,4 @@
[[constraint]]
name = "github.com/ethereum/go-ethereum"
- version = "1.8.20"
+ version = "1.8.21"
diff --git a/vendor/github.com/ethereum/go-ethereum/.github/CODEOWNERS b/vendor/github.com/ethereum/go-ethereum/.github/CODEOWNERS
index 9a61d393..c03fa06c 100644
--- a/vendor/github.com/ethereum/go-ethereum/.github/CODEOWNERS
+++ b/vendor/github.com/ethereum/go-ethereum/.github/CODEOWNERS
@@ -2,6 +2,7 @@
# Each line is a file pattern followed by one or more owners.
accounts/usbwallet @karalabe
+accounts/abi @gballet
consensus @karalabe
core/ @karalabe @holiman
eth/ @karalabe
@@ -9,27 +10,4 @@ les/ @zsfelfoldi
light/ @zsfelfoldi
mobile/ @karalabe
p2p/ @fjl @zsfelfoldi
-p2p/simulations @lmars
-p2p/protocols @zelig
-swarm/api/http @justelad
-swarm/bmt @zelig
-swarm/dev @lmars
-swarm/fuse @jmozah @holisticode
-swarm/grafana_dashboards @nonsense
-swarm/metrics @nonsense @holisticode
-swarm/multihash @nolash
-swarm/network/bitvector @zelig @janos
-swarm/network/priorityqueue @zelig @janos
-swarm/network/simulations @zelig @janos
-swarm/network/stream @janos @zelig @holisticode @justelad
-swarm/network/stream/intervals @janos
-swarm/network/stream/testing @zelig
-swarm/pot @zelig
-swarm/pss @nolash @zelig @nonsense
-swarm/services @zelig
-swarm/state @justelad
-swarm/storage/encryption @zelig @nagydani
-swarm/storage/mock @janos
-swarm/storage/feed @nolash @jpeletier
-swarm/testutil @lmars
whisper/ @gballet @gluk256
diff --git a/vendor/github.com/ethereum/go-ethereum/.github/no-response.yml b/vendor/github.com/ethereum/go-ethereum/.github/no-response.yml
index b6e96efd..903d4ce8 100644
--- a/vendor/github.com/ethereum/go-ethereum/.github/no-response.yml
+++ b/vendor/github.com/ethereum/go-ethereum/.github/no-response.yml
@@ -1,7 +1,7 @@
# Number of days of inactivity before an Issue is closed for lack of response
daysUntilClose: 30
# Label requiring a response
-responseRequiredLabel: more-information-needed
+responseRequiredLabel: "need:more-information"
# Comment to post when closing an Issue for lack of response. Set to `false` to disable
closeComment: >
This issue has been automatically closed because there has been no response
diff --git a/vendor/github.com/ethereum/go-ethereum/.github/stale.yml b/vendor/github.com/ethereum/go-ethereum/.github/stale.yml
index c621939c..6d921cc7 100644
--- a/vendor/github.com/ethereum/go-ethereum/.github/stale.yml
+++ b/vendor/github.com/ethereum/go-ethereum/.github/stale.yml
@@ -7,7 +7,7 @@ exemptLabels:
- pinned
- security
# Label to use when marking an issue as stale
-staleLabel: stale
+staleLabel: "status:inactive"
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
diff --git a/vendor/github.com/ethereum/go-ethereum/.travis.yml b/vendor/github.com/ethereum/go-ethereum/.travis.yml
index 33a4f894..b4127708 100644
--- a/vendor/github.com/ethereum/go-ethereum/.travis.yml
+++ b/vendor/github.com/ethereum/go-ethereum/.travis.yml
@@ -156,7 +156,7 @@ matrix:
git:
submodules: false # avoid cloning ethereum/tests
before_install:
- - curl https://storage.googleapis.com/golang/go1.11.2.linux-amd64.tar.gz | tar -xz
+ - curl https://storage.googleapis.com/golang/go1.11.4.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/abi.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/abi.go
index 535e5d78..08d5db97 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/abi.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/abi.go
@@ -58,13 +58,11 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
return nil, err
}
return arguments, nil
-
}
method, exist := abi.Methods[name]
if !exist {
return nil, fmt.Errorf("method '%s' not found", name)
}
-
arguments, err := method.Inputs.Pack(args...)
if err != nil {
return nil, err
@@ -82,7 +80,7 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) (err error) {
// we need to decide whether we're calling a method or an event
if method, ok := abi.Methods[name]; ok {
if len(output)%32 != 0 {
- return fmt.Errorf("abi: improperly formatted output")
+ return fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(output), output)
}
return method.Outputs.Unpack(v, output)
} else if event, ok := abi.Events[name]; ok {
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/abi_test.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/abi_test.go
index 59ba79cb..b9444f9f 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/abi_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/abi_test.go
@@ -22,11 +22,10 @@ import (
"fmt"
"log"
"math/big"
+ "reflect"
"strings"
"testing"
- "reflect"
-
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
@@ -52,11 +51,14 @@ const jsondata2 = `
{ "type" : "function", "name" : "slice", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
{ "type" : "function", "name" : "slice256", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] },
{ "type" : "function", "name" : "sliceAddress", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] },
- { "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] }
+ { "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] },
+ { "type" : "function", "name" : "nestedArray", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint256[2][2]" }, { "name" : "b", "type" : "address[]" } ] },
+ { "type" : "function", "name" : "nestedArray2", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][2]" } ] },
+ { "type" : "function", "name" : "nestedSlice", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] }
]`
func TestReader(t *testing.T) {
- Uint256, _ := NewType("uint256")
+ Uint256, _ := NewType("uint256", nil)
exp := ABI{
Methods: map[string]Method{
"balance": {
@@ -177,7 +179,7 @@ func TestTestSlice(t *testing.T) {
}
func TestMethodSignature(t *testing.T) {
- String, _ := NewType("string")
+ String, _ := NewType("string", nil)
m := Method{"foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil}
exp := "foo(string,string)"
if m.Sig() != exp {
@@ -189,12 +191,31 @@ func TestMethodSignature(t *testing.T) {
t.Errorf("expected ids to match %x != %x", m.Id(), idexp)
}
- uintt, _ := NewType("uint256")
+ uintt, _ := NewType("uint256", nil)
m = Method{"foo", false, []Argument{{"bar", uintt, false}}, nil}
exp = "foo(uint256)"
if m.Sig() != exp {
t.Error("signature mismatch", exp, "!=", m.Sig())
}
+
+ // Method with tuple arguments
+ s, _ := NewType("tuple", []ArgumentMarshaling{
+ {Name: "a", Type: "int256"},
+ {Name: "b", Type: "int256[]"},
+ {Name: "c", Type: "tuple[]", Components: []ArgumentMarshaling{
+ {Name: "x", Type: "int256"},
+ {Name: "y", Type: "int256"},
+ }},
+ {Name: "d", Type: "tuple[2]", Components: []ArgumentMarshaling{
+ {Name: "x", Type: "int256"},
+ {Name: "y", Type: "int256"},
+ }},
+ })
+ m = Method{"foo", false, []Argument{{"s", s, false}, {"bar", String, false}}, nil}
+ exp = "foo((int256,int256[],(int256,int256)[],(int256,int256)[2]),string)"
+ if m.Sig() != exp {
+ t.Error("signature mismatch", exp, "!=", m.Sig())
+ }
}
func TestMultiPack(t *testing.T) {
@@ -564,11 +585,13 @@ func TestBareEvents(t *testing.T) {
const definition = `[
{ "type" : "event", "name" : "balance" },
{ "type" : "event", "name" : "anon", "anonymous" : true},
- { "type" : "event", "name" : "args", "inputs" : [{ "indexed":false, "name":"arg0", "type":"uint256" }, { "indexed":true, "name":"arg1", "type":"address" }] }
+ { "type" : "event", "name" : "args", "inputs" : [{ "indexed":false, "name":"arg0", "type":"uint256" }, { "indexed":true, "name":"arg1", "type":"address" }] },
+ { "type" : "event", "name" : "tuple", "inputs" : [{ "indexed":false, "name":"t", "type":"tuple", "components":[{"name":"a", "type":"uint256"}] }, { "indexed":true, "name":"arg1", "type":"address" }] }
]`
- arg0, _ := NewType("uint256")
- arg1, _ := NewType("address")
+ arg0, _ := NewType("uint256", nil)
+ arg1, _ := NewType("address", nil)
+ tuple, _ := NewType("tuple", []ArgumentMarshaling{{Name: "a", Type: "uint256"}})
expectedEvents := map[string]struct {
Anonymous bool
@@ -580,6 +603,10 @@ func TestBareEvents(t *testing.T) {
{Name: "arg0", Type: arg0, Indexed: false},
{Name: "arg1", Type: arg1, Indexed: true},
}},
+ "tuple": {false, []Argument{
+ {Name: "t", Type: tuple, Indexed: false},
+ {Name: "arg1", Type: arg1, Indexed: true},
+ }},
}
abi, err := JSON(strings.NewReader(definition))
@@ -646,28 +673,24 @@ func TestUnpackEvent(t *testing.T) {
}
type ReceivedEvent struct {
- Address common.Address
- Amount *big.Int
- Memo []byte
+ Sender common.Address
+ Amount *big.Int
+ Memo []byte
}
var ev ReceivedEvent
err = abi.Unpack(&ev, "received", data)
if err != nil {
t.Error(err)
- } else {
- t.Logf("len(data): %d; received event: %+v", len(data), ev)
}
type ReceivedAddrEvent struct {
- Address common.Address
+ Sender common.Address
}
var receivedAddrEv ReceivedAddrEvent
err = abi.Unpack(&receivedAddrEv, "receivedAddr", data)
if err != nil {
t.Error(err)
- } else {
- t.Logf("len(data): %d; received event: %+v", len(data), receivedAddrEv)
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/argument.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/argument.go
index f544c80d..d0a6b035 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/argument.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/argument.go
@@ -33,24 +33,27 @@ type Argument struct {
type Arguments []Argument
+type ArgumentMarshaling struct {
+ Name string
+ Type string
+ Components []ArgumentMarshaling
+ Indexed bool
+}
+
// UnmarshalJSON implements json.Unmarshaler interface
func (argument *Argument) UnmarshalJSON(data []byte) error {
- var extarg struct {
- Name string
- Type string
- Indexed bool
- }
- err := json.Unmarshal(data, &extarg)
+ var arg ArgumentMarshaling
+ err := json.Unmarshal(data, &arg)
if err != nil {
return fmt.Errorf("argument json err: %v", err)
}
- argument.Type, err = NewType(extarg.Type)
+ argument.Type, err = NewType(arg.Type, arg.Components)
if err != nil {
return err
}
- argument.Name = extarg.Name
- argument.Indexed = extarg.Indexed
+ argument.Name = arg.Name
+ argument.Indexed = arg.Indexed
return nil
}
@@ -85,7 +88,6 @@ func (arguments Arguments) isTuple() bool {
// Unpack performs the operation hexdata -> Go format
func (arguments Arguments) Unpack(v interface{}, data []byte) error {
-
// make sure the passed value is arguments pointer
if reflect.Ptr != reflect.ValueOf(v).Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
@@ -97,52 +99,134 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
if arguments.isTuple() {
return arguments.unpackTuple(v, marshalledValues)
}
- return arguments.unpackAtomic(v, marshalledValues)
+ return arguments.unpackAtomic(v, marshalledValues[0])
}
-func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
+// unpack sets the unmarshalled value to go format.
+// Note the dst here must be settable.
+func unpack(t *Type, dst interface{}, src interface{}) error {
+ var (
+ dstVal = reflect.ValueOf(dst).Elem()
+ srcVal = reflect.ValueOf(src)
+ )
+ if t.T != TupleTy && !((t.T == SliceTy || t.T == ArrayTy) && t.Elem.T == TupleTy) {
+ return set(dstVal, srcVal)
+ }
+
+ switch t.T {
+ case TupleTy:
+ if dstVal.Kind() != reflect.Struct {
+ return fmt.Errorf("abi: invalid dst value for unpack, want struct, got %s", dstVal.Kind())
+ }
+ fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, dstVal)
+ if err != nil {
+ return err
+ }
+ for i, elem := range t.TupleElems {
+ fname := fieldmap[t.TupleRawNames[i]]
+ field := dstVal.FieldByName(fname)
+ if !field.IsValid() {
+ return fmt.Errorf("abi: field %s can't found in the given value", t.TupleRawNames[i])
+ }
+ if err := unpack(elem, field.Addr().Interface(), srcVal.Field(i).Interface()); err != nil {
+ return err
+ }
+ }
+ return nil
+ case SliceTy:
+ if dstVal.Kind() != reflect.Slice {
+ return fmt.Errorf("abi: invalid dst value for unpack, want slice, got %s", dstVal.Kind())
+ }
+ slice := reflect.MakeSlice(dstVal.Type(), srcVal.Len(), srcVal.Len())
+ for i := 0; i < slice.Len(); i++ {
+ if err := unpack(t.Elem, slice.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ dstVal.Set(slice)
+ case ArrayTy:
+ if dstVal.Kind() != reflect.Array {
+ return fmt.Errorf("abi: invalid dst value for unpack, want array, got %s", dstVal.Kind())
+ }
+ array := reflect.New(dstVal.Type()).Elem()
+ for i := 0; i < array.Len(); i++ {
+ if err := unpack(t.Elem, array.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ dstVal.Set(array)
+ }
+ return nil
+}
+
+// unpackAtomic unpacks ( hexdata -> go ) a single value
+func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues interface{}) error {
+ if arguments.LengthNonIndexed() == 0 {
+ return nil
+ }
+ argument := arguments.NonIndexed()[0]
+ elem := reflect.ValueOf(v).Elem()
+
+ if elem.Kind() == reflect.Struct {
+ fieldmap, err := mapArgNamesToStructFields([]string{argument.Name}, elem)
+ if err != nil {
+ return err
+ }
+ field := elem.FieldByName(fieldmap[argument.Name])
+ if !field.IsValid() {
+ return fmt.Errorf("abi: field %s can't be found in the given value", argument.Name)
+ }
+ return unpack(&argument.Type, field.Addr().Interface(), marshalledValues)
+ }
+ return unpack(&argument.Type, elem.Addr().Interface(), marshalledValues)
+}
+
+// unpackTuple unpacks ( hexdata -> go ) a batch of values.
+func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
var (
value = reflect.ValueOf(v).Elem()
typ = value.Type()
kind = value.Kind()
)
-
if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
return err
}
// If the interface is a struct, get of abi->struct_field mapping
-
var abi2struct map[string]string
if kind == reflect.Struct {
- var err error
- abi2struct, err = mapAbiToStructFields(arguments, value)
+ var (
+ argNames []string
+ err error
+ )
+ for _, arg := range arguments.NonIndexed() {
+ argNames = append(argNames, arg.Name)
+ }
+ abi2struct, err = mapArgNamesToStructFields(argNames, value)
if err != nil {
return err
}
}
for i, arg := range arguments.NonIndexed() {
-
- reflectValue := reflect.ValueOf(marshalledValues[i])
-
switch kind {
case reflect.Struct:
- if structField, ok := abi2struct[arg.Name]; ok {
- if err := set(value.FieldByName(structField), reflectValue, arg); err != nil {
- return err
- }
+ field := value.FieldByName(abi2struct[arg.Name])
+ if !field.IsValid() {
+ return fmt.Errorf("abi: field %s can't be found in the given value", arg.Name)
+ }
+ if err := unpack(&arg.Type, field.Addr().Interface(), marshalledValues[i]); err != nil {
+ return err
}
case reflect.Slice, reflect.Array:
if value.Len() < i {
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(arguments), value.Len())
}
v := value.Index(i)
- if err := requireAssignable(v, reflectValue); err != nil {
+ if err := requireAssignable(v, reflect.ValueOf(marshalledValues[i])); err != nil {
return err
}
-
- if err := set(v.Elem(), reflectValue, arg); err != nil {
+ if err := unpack(&arg.Type, v.Addr().Interface(), marshalledValues[i]); err != nil {
return err
}
default:
@@ -150,48 +234,7 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa
}
}
return nil
-}
-// unpackAtomic unpacks ( hexdata -> go ) a single value
-func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues []interface{}) error {
- if len(marshalledValues) != 1 {
- return fmt.Errorf("abi: wrong length, expected single value, got %d", len(marshalledValues))
- }
-
- elem := reflect.ValueOf(v).Elem()
- kind := elem.Kind()
- reflectValue := reflect.ValueOf(marshalledValues[0])
-
- var abi2struct map[string]string
- if kind == reflect.Struct {
- var err error
- if abi2struct, err = mapAbiToStructFields(arguments, elem); err != nil {
- return err
- }
- arg := arguments.NonIndexed()[0]
- if structField, ok := abi2struct[arg.Name]; ok {
- return set(elem.FieldByName(structField), reflectValue, arg)
- }
- return nil
- }
-
- return set(elem, reflectValue, arguments.NonIndexed()[0])
-
-}
-
-// Computes the full size of an array;
-// i.e. counting nested arrays, which count towards size for unpacking.
-func getArraySize(arr *Type) int {
- size := arr.Size
- // Arrays can be nested, with each element being the same size
- arr = arr.Elem
- for arr.T == ArrayTy {
- // Keep multiplying by elem.Size while the elem is an array.
- size *= arr.Size
- arr = arr.Elem
- }
- // Now we have the full array size, including its children.
- return size
}
// UnpackValues can be used to unpack ABI-encoded hexdata according to the ABI-specification,
@@ -202,7 +245,7 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
virtualArgs := 0
for index, arg := range arguments.NonIndexed() {
marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
- if arg.Type.T == ArrayTy {
+ if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) {
// If we have a static array, like [3]uint256, these are coded as
// just like uint256,uint256,uint256.
// This means that we need to add two 'virtual' arguments when
@@ -213,7 +256,11 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
//
// Calculate the full array size to get the correct offset for the next argument.
// Decrement it by 1, as the normal index increment is still applied.
- virtualArgs += getArraySize(&arg.Type) - 1
+ virtualArgs += getTypeSize(arg.Type)/32 - 1
+ } else if arg.Type.T == TupleTy && !isDynamicType(arg.Type) {
+ // If we have a static tuple, like (uint256, bool, uint256), these are
+ // coded as just like uint256,bool,uint256
+ virtualArgs += getTypeSize(arg.Type)/32 - 1
}
if err != nil {
return nil, err
@@ -243,7 +290,7 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
// input offset is the bytes offset for packed output
inputOffset := 0
for _, abiArg := range abiArgs {
- inputOffset += getDynamicTypeOffset(abiArg.Type)
+ inputOffset += getTypeSize(abiArg.Type)
}
var ret []byte
for i, a := range args {
@@ -272,14 +319,13 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
return ret, nil
}
-// capitalise makes the first character of a string upper case, also removing any
-// prefixing underscores from the variable names.
-func capitalise(input string) string {
- for len(input) > 0 && input[0] == '_' {
- input = input[1:]
+// ToCamelCase converts an under-score string to a camel-case string
+func ToCamelCase(input string) string {
+ parts := strings.Split(input, "_")
+ for i, s := range parts {
+ if len(s) > 0 {
+ parts[i] = strings.ToUpper(s[:1]) + s[1:]
+ }
}
- if len(input) == 0 {
- return ""
- }
- return strings.ToUpper(input[:1]) + input[1:]
+ return strings.Join(parts, "")
}
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/base.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/base.go
index 83ad1c8a..c37bdf11 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/base.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/base.go
@@ -36,10 +36,10 @@ type SignerFn func(types.Signer, common.Address, *types.Transaction) (*types.Tra
// CallOpts is the collection of options to fine tune a contract call request.
type CallOpts struct {
- Pending bool // Whether to operate on the pending state or the last known one
- From common.Address // Optional the sender address, otherwise the first account is used
-
- Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
+ Pending bool // Whether to operate on the pending state or the last known one
+ From common.Address // Optional the sender address, otherwise the first account is used
+ BlockNumber *big.Int // Optional the block number on which the call should be performed
+ Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}
// TransactOpts is the collection of authorization data required to create a
@@ -148,10 +148,10 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string,
}
}
} else {
- output, err = c.caller.CallContract(ctx, msg, nil)
+ output, err = c.caller.CallContract(ctx, msg, opts.BlockNumber)
if err == nil && len(output) == 0 {
// Make sure we have a contract to operate on, and bail out otherwise.
- if code, err = c.caller.CodeAt(ctx, c.address, nil); err != nil {
+ if code, err = c.caller.CodeAt(ctx, c.address, opts.BlockNumber); err != nil {
return err
} else if len(code) == 0 {
return ErrNoCode
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/base_test.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/base_test.go
new file mode 100644
index 00000000..8adff8b5
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/base_test.go
@@ -0,0 +1,64 @@
+package bind_test
+
+import (
+ "context"
+ "math/big"
+ "testing"
+
+ ethereum "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+type mockCaller struct {
+ codeAtBlockNumber *big.Int
+ callContractBlockNumber *big.Int
+}
+
+func (mc *mockCaller) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
+ mc.codeAtBlockNumber = blockNumber
+ return []byte{1, 2, 3}, nil
+}
+
+func (mc *mockCaller) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
+ mc.callContractBlockNumber = blockNumber
+ return nil, nil
+}
+
+func TestPassingBlockNumber(t *testing.T) {
+
+ mc := &mockCaller{}
+
+ bc := bind.NewBoundContract(common.HexToAddress("0x0"), abi.ABI{
+ Methods: map[string]abi.Method{
+ "something": {
+ Name: "something",
+ Outputs: abi.Arguments{},
+ },
+ },
+ }, mc, nil, nil)
+ var ret string
+
+ blockNumber := big.NewInt(42)
+
+ bc.Call(&bind.CallOpts{BlockNumber: blockNumber}, &ret, "something")
+
+ if mc.callContractBlockNumber != blockNumber {
+ t.Fatalf("CallContract() was not passed the block number")
+ }
+
+ if mc.codeAtBlockNumber != blockNumber {
+ t.Fatalf("CodeAt() was not passed the block number")
+ }
+
+ bc.Call(&bind.CallOpts{}, &ret, "something")
+
+ if mc.callContractBlockNumber != nil {
+ t.Fatalf("CallContract() was passed a block number when it should not have been")
+ }
+
+ if mc.codeAtBlockNumber != nil {
+ t.Fatalf("CodeAt() was passed a block number when it should not have been")
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/bind.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/bind.go
index 4dca4b4e..5ee30d02 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/bind.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/bind.go
@@ -381,54 +381,23 @@ func namedTypeJava(javaKind string, solKind abi.Type) string {
// methodNormalizer is a name transformer that modifies Solidity method names to
// conform to target language naming concentions.
var methodNormalizer = map[Lang]func(string) string{
- LangGo: capitalise,
+ LangGo: abi.ToCamelCase,
LangJava: decapitalise,
}
// capitalise makes a camel-case string which starts with an upper case character.
func capitalise(input string) string {
- for len(input) > 0 && input[0] == '_' {
- input = input[1:]
- }
- if len(input) == 0 {
- return ""
- }
- return toCamelCase(strings.ToUpper(input[:1]) + input[1:])
+ return abi.ToCamelCase(input)
}
// decapitalise makes a camel-case string which starts with a lower case character.
func decapitalise(input string) string {
- for len(input) > 0 && input[0] == '_' {
- input = input[1:]
- }
if len(input) == 0 {
- return ""
+ return input
}
- return toCamelCase(strings.ToLower(input[:1]) + input[1:])
-}
-// toCamelCase converts an under-score string to a camel-case string
-func toCamelCase(input string) string {
- toupper := false
-
- result := ""
- for k, v := range input {
- switch {
- case k == 0:
- result = strings.ToUpper(string(input[0]))
-
- case toupper:
- result += strings.ToUpper(string(v))
- toupper = false
-
- case v == '_':
- toupper = true
-
- default:
- result += string(v)
- }
- }
- return result
+ goForm := abi.ToCamelCase(input)
+ return strings.ToLower(goForm[:1]) + goForm[1:]
}
// structured checks whether a list of ABI data types has enough information to
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/event.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/event.go
index a3f6be97..9392c199 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/event.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/event.go
@@ -36,12 +36,12 @@ type Event struct {
func (e Event) String() string {
inputs := make([]string, len(e.Inputs))
for i, input := range e.Inputs {
- inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
+ inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
if input.Indexed {
- inputs[i] = fmt.Sprintf("%v indexed %v", input.Name, input.Type)
+ inputs[i] = fmt.Sprintf("%v indexed %v", input.Type, input.Name)
}
}
- return fmt.Sprintf("e %v(%v)", e.Name, strings.Join(inputs, ", "))
+ return fmt.Sprintf("event %v(%v)", e.Name, strings.Join(inputs, ", "))
}
// Id returns the canonical representation of the event's signature used by the
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/event_test.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/event_test.go
index 3bfdd7c0..e735cceb 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/event_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/event_test.go
@@ -87,12 +87,12 @@ func TestEventId(t *testing.T) {
}{
{
definition: `[
- { "type" : "event", "name" : "balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
- { "type" : "event", "name" : "check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
+ { "type" : "event", "name" : "Balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
+ { "type" : "event", "name" : "Check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
]`,
expectations: map[string]common.Hash{
- "balance": crypto.Keccak256Hash([]byte("balance(uint256)")),
- "check": crypto.Keccak256Hash([]byte("check(address,uint256)")),
+ "Balance": crypto.Keccak256Hash([]byte("Balance(uint256)")),
+ "Check": crypto.Keccak256Hash([]byte("Check(address,uint256)")),
},
},
}
@@ -111,6 +111,39 @@ func TestEventId(t *testing.T) {
}
}
+func TestEventString(t *testing.T) {
+ var table = []struct {
+ definition string
+ expectations map[string]string
+ }{
+ {
+ definition: `[
+ { "type" : "event", "name" : "Balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
+ { "type" : "event", "name" : "Check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] },
+ { "type" : "event", "name" : "Transfer", "inputs": [{ "name": "from", "type": "address", "indexed": true }, { "name": "to", "type": "address", "indexed": true }, { "name": "value", "type": "uint256" }] }
+ ]`,
+ expectations: map[string]string{
+ "Balance": "event Balance(uint256 in)",
+ "Check": "event Check(address t, uint256 b)",
+ "Transfer": "event Transfer(address indexed from, address indexed to, uint256 value)",
+ },
+ },
+ }
+
+ for _, test := range table {
+ abi, err := JSON(strings.NewReader(test.definition))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for name, event := range abi.Events {
+ if event.String() != test.expectations[name] {
+ t.Errorf("expected string to be %s, got %s", test.expectations[name], event.String())
+ }
+ }
+ }
+}
+
// TestEventMultiValueWithArrayUnpack verifies that array fields will be counted after parsing array.
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/method.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/method.go
index 58310576..2d8d3d65 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/method.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/method.go
@@ -56,14 +56,14 @@ func (method Method) Sig() string {
func (method Method) String() string {
inputs := make([]string, len(method.Inputs))
for i, input := range method.Inputs {
- inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
+ inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
}
outputs := make([]string, len(method.Outputs))
for i, output := range method.Outputs {
+ outputs[i] = output.Type.String()
if len(output.Name) > 0 {
- outputs[i] = fmt.Sprintf("%v ", output.Name)
+ outputs[i] += fmt.Sprintf(" %v", output.Name)
}
- outputs[i] += output.Type.String()
}
constant := ""
if method.Const {
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/method_test.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/method_test.go
new file mode 100644
index 00000000..a98f1cd3
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/method_test.go
@@ -0,0 +1,61 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package abi
+
+import (
+ "strings"
+ "testing"
+)
+
+const methoddata = `
+[
+ { "type" : "function", "name" : "balance", "constant" : true },
+ { "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
+ { "type" : "function", "name" : "transfer", "constant" : false, "inputs" : [ { "name" : "from", "type" : "address" }, { "name" : "to", "type" : "address" }, { "name" : "value", "type" : "uint256" } ], "outputs" : [ { "name" : "success", "type" : "bool" } ] }
+]`
+
+func TestMethodString(t *testing.T) {
+ var table = []struct {
+ method string
+ expectation string
+ }{
+ {
+ method: "balance",
+ expectation: "function balance() constant returns()",
+ },
+ {
+ method: "send",
+ expectation: "function send(uint256 amount) returns()",
+ },
+ {
+ method: "transfer",
+ expectation: "function transfer(address from, address to, uint256 value) returns(bool success)",
+ },
+ }
+
+ abi, err := JSON(strings.NewReader(methoddata))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, test := range table {
+ got := abi.Methods[test.method].String()
+ if got != test.expectation {
+ t.Errorf("expected string to be %s, got %s", test.expectation, got)
+ }
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/pack_test.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/pack_test.go
index ddd2b736..10cd3a39 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/pack_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/pack_test.go
@@ -29,303 +29,356 @@ import (
func TestPack(t *testing.T) {
for i, test := range []struct {
- typ string
-
- input interface{}
- output []byte
+ typ string
+ components []ArgumentMarshaling
+ input interface{}
+ output []byte
}{
{
"uint8",
+ nil,
uint8(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint8[]",
+ nil,
[]uint8{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint16",
+ nil,
uint16(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint16[]",
+ nil,
[]uint16{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint32",
+ nil,
uint32(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint32[]",
+ nil,
[]uint32{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint64",
+ nil,
uint64(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint64[]",
+ nil,
[]uint64{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint256",
+ nil,
big.NewInt(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint256[]",
+ nil,
[]*big.Int{big.NewInt(1), big.NewInt(2)},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int8",
+ nil,
int8(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int8[]",
+ nil,
[]int8{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int16",
+ nil,
int16(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int16[]",
+ nil,
[]int16{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int32",
+ nil,
int32(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int32[]",
+ nil,
[]int32{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int64",
+ nil,
int64(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int64[]",
+ nil,
[]int64{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int256",
+ nil,
big.NewInt(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int256[]",
+ nil,
[]*big.Int{big.NewInt(1), big.NewInt(2)},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"bytes1",
+ nil,
[1]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes2",
+ nil,
[2]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes3",
+ nil,
[3]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes4",
+ nil,
[4]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes5",
+ nil,
[5]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes6",
+ nil,
[6]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes7",
+ nil,
[7]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes8",
+ nil,
[8]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes9",
+ nil,
[9]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes10",
+ nil,
[10]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes11",
+ nil,
[11]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes12",
+ nil,
[12]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes13",
+ nil,
[13]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes14",
+ nil,
[14]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes15",
+ nil,
[15]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes16",
+ nil,
[16]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes17",
+ nil,
[17]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes18",
+ nil,
[18]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes19",
+ nil,
[19]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes20",
+ nil,
[20]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes21",
+ nil,
[21]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes22",
+ nil,
[22]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes23",
+ nil,
[23]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes24",
- [24]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes24",
+ nil,
[24]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes25",
+ nil,
[25]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes26",
+ nil,
[26]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes27",
+ nil,
[27]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes28",
+ nil,
[28]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes29",
+ nil,
[29]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes30",
+ nil,
[30]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes31",
+ nil,
[31]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes32",
+ nil,
[32]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"uint32[2][3][4]",
+ nil,
[4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018"),
},
{
"address[]",
+ nil,
[]common.Address{{1}, {2}},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000"),
},
{
"bytes32[]",
+ nil,
[]common.Hash{{1}, {2}},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000201000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000"),
},
{
"function",
+ nil,
[24]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"string",
+ nil,
"foobar",
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"),
},
{
"string[]",
+ nil,
[]string{"hello", "foobar"},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
@@ -337,6 +390,7 @@ func TestPack(t *testing.T) {
},
{
"string[2]",
+ nil,
[]string{"hello", "foobar"},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0
"0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1
@@ -347,6 +401,7 @@ func TestPack(t *testing.T) {
},
{
"bytes32[][]",
+ nil,
[][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
@@ -362,6 +417,7 @@ func TestPack(t *testing.T) {
{
"bytes32[][2]",
+ nil,
[][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
@@ -376,6 +432,7 @@ func TestPack(t *testing.T) {
{
"bytes32[3][2]",
+ nil,
[][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
@@ -384,12 +441,182 @@ func TestPack(t *testing.T) {
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
},
+ {
+ // static tuple
+ "tuple",
+ []ArgumentMarshaling{
+ {Name: "a", Type: "int64"},
+ {Name: "b", Type: "int256"},
+ {Name: "c", Type: "int256"},
+ {Name: "d", Type: "bool"},
+ {Name: "e", Type: "bytes32[3][2]"},
+ },
+ struct {
+ A int64
+ B *big.Int
+ C *big.Int
+ D bool
+ E [][]common.Hash
+ }{1, big.NewInt(1), big.NewInt(-1), true, [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}}},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001" + // struct[a]
+ "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c]
+ "0000000000000000000000000000000000000000000000000000000000000001" + // struct[d]
+ "0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0]
+ "0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1]
+ "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][2]
+ "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][0]
+ "0400000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][1]
+ "0500000000000000000000000000000000000000000000000000000000000000"), // struct[e] array[1][2]
+ },
+ {
+ // dynamic tuple
+ "tuple",
+ []ArgumentMarshaling{
+ {Name: "a", Type: "string"},
+ {Name: "b", Type: "int64"},
+ {Name: "c", Type: "bytes"},
+ {Name: "d", Type: "string[]"},
+ {Name: "e", Type: "int256[]"},
+ {Name: "f", Type: "address[]"},
+ },
+ struct {
+ FieldA string `abi:"a"` // Test whether abi tag works
+ FieldB int64 `abi:"b"`
+ C []byte
+ D []string
+ E []*big.Int
+ F []common.Address
+ }{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}},
+ common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset
+ "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
+ "0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset
+ "0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset
+ "0000000000000000000000000000000000000000000000000000000000000220" + // struct[e] offset
+ "0000000000000000000000000000000000000000000000000000000000000280" + // struct[f] offset
+ "0000000000000000000000000000000000000000000000000000000000000006" + // struct[a] length
+ "666f6f6261720000000000000000000000000000000000000000000000000000" + // struct[a] "foobar"
+ "0000000000000000000000000000000000000000000000000000000000000001" + // struct[c] length
+ "0100000000000000000000000000000000000000000000000000000000000000" + // []byte{1}
+ "0000000000000000000000000000000000000000000000000000000000000002" + // struct[d] length
+ "0000000000000000000000000000000000000000000000000000000000000040" + // foo offset
+ "0000000000000000000000000000000000000000000000000000000000000080" + // bar offset
+ "0000000000000000000000000000000000000000000000000000000000000003" + // foo length
+ "666f6f0000000000000000000000000000000000000000000000000000000000" + // foo
+ "0000000000000000000000000000000000000000000000000000000000000003" + // bar length
+ "6261720000000000000000000000000000000000000000000000000000000000" + // bar
+ "0000000000000000000000000000000000000000000000000000000000000002" + // struct[e] length
+ "0000000000000000000000000000000000000000000000000000000000000001" + // 1
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // -1
+ "0000000000000000000000000000000000000000000000000000000000000002" + // struct[f] length
+ "0000000000000000000000000100000000000000000000000000000000000000" + // common.Address{1}
+ "0000000000000000000000000200000000000000000000000000000000000000"), // common.Address{2}
+ },
+ {
+ // nested tuple
+ "tuple",
+ []ArgumentMarshaling{
+ {Name: "a", Type: "tuple", Components: []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256[]"}}},
+ {Name: "b", Type: "int256[]"},
+ },
+ struct {
+ A struct {
+ FieldA *big.Int `abi:"a"`
+ B []*big.Int
+ }
+ B []*big.Int
+ }{
+ A: struct {
+ FieldA *big.Int `abi:"a"` // Test whether abi tag works for nested tuple
+ B []*big.Int
+ }{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
+ B: []*big.Int{big.NewInt(1), big.NewInt(0)}},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // a offset
+ "00000000000000000000000000000000000000000000000000000000000000e0" + // b offset
+ "0000000000000000000000000000000000000000000000000000000000000001" + // a.a value
+ "0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset
+ "0000000000000000000000000000000000000000000000000000000000000002" + // a.b length
+ "0000000000000000000000000000000000000000000000000000000000000001" + // a.b[0] value
+ "0000000000000000000000000000000000000000000000000000000000000000" + // a.b[1] value
+ "0000000000000000000000000000000000000000000000000000000000000002" + // b length
+ "0000000000000000000000000000000000000000000000000000000000000001" + // b[0] value
+ "0000000000000000000000000000000000000000000000000000000000000000"), // b[1] value
+ },
+ {
+ // tuple slice
+ "tuple[]",
+ []ArgumentMarshaling{
+ {Name: "a", Type: "int256"},
+ {Name: "b", Type: "int256[]"},
+ },
+ []struct {
+ A *big.Int
+ B []*big.Int
+ }{
+ {big.NewInt(-1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
+ {big.NewInt(1), []*big.Int{big.NewInt(2), big.NewInt(-1)}},
+ },
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // tuple length
+ "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
+ "00000000000000000000000000000000000000000000000000000000000000e0" + // tuple[1] offset
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A
+ "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0].B offset
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].B length
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].B[0] value
+ "0000000000000000000000000000000000000000000000000000000000000000" + // tuple[0].B[1] value
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A
+ "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[1].B offset
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B length
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B[0] value
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].B[1] value
+ },
+ {
+ // static tuple array
+ "tuple[2]",
+ []ArgumentMarshaling{
+ {Name: "a", Type: "int256"},
+ {Name: "b", Type: "int256"},
+ },
+ [2]struct {
+ A *big.Int
+ B *big.Int
+ }{
+ {big.NewInt(-1), big.NewInt(1)},
+ {big.NewInt(1), big.NewInt(-1)},
+ },
+ common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].a
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].b
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].a
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].b
+ },
+ {
+ // dynamic tuple array
+ "tuple[2]",
+ []ArgumentMarshaling{
+ {Name: "a", Type: "int256[]"},
+ },
+ [2]struct {
+ A []*big.Int
+ }{
+ {[]*big.Int{big.NewInt(-1), big.NewInt(1)}},
+ {[]*big.Int{big.NewInt(1), big.NewInt(-1)}},
+ },
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
+ "00000000000000000000000000000000000000000000000000000000000000c0" + // tuple[1] offset
+ "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[0].A offset
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].A length
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A[0]
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].A[1]
+ "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[1].A offset
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].A length
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A[0]
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].A[1]
+ },
} {
- typ, err := NewType(test.typ)
+ typ, err := NewType(test.typ, test.components)
if err != nil {
t.Fatalf("%v failed. Unexpected parse error: %v", i, err)
}
-
output, err := typ.pack(reflect.ValueOf(test.input))
if err != nil {
t.Fatalf("%v failed. Unexpected pack error: %v", i, err)
@@ -466,6 +693,59 @@ func TestMethodPack(t *testing.T) {
if !bytes.Equal(packed, sig) {
t.Errorf("expected %x got %x", sig, packed)
}
+
+ a := [2][2]*big.Int{{big.NewInt(1), big.NewInt(1)}, {big.NewInt(2), big.NewInt(0)}}
+ sig = abi.Methods["nestedArray"].Id()
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{0}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{0xa0}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+ sig = append(sig, common.LeftPadBytes(addrC[:], 32)...)
+ sig = append(sig, common.LeftPadBytes(addrD[:], 32)...)
+ packed, err = abi.Pack("nestedArray", a, []common.Address{addrC, addrD})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(packed, sig) {
+ t.Errorf("expected %x got %x", sig, packed)
+ }
+
+ sig = abi.Methods["nestedArray2"].Id()
+ sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{0x80}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ packed, err = abi.Pack("nestedArray2", [2][]uint8{{1}, {1}})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(packed, sig) {
+ t.Errorf("expected %x got %x", sig, packed)
+ }
+
+ sig = abi.Methods["nestedSlice"].Id()
+ sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{0x02}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{0xa0}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+ packed, err = abi.Pack("nestedSlice", [][]uint8{{1, 2}, {1, 2}})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(packed, sig) {
+ t.Errorf("expected %x got %x", sig, packed)
+ }
}
func TestPackNumber(t *testing.T) {
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/reflect.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/reflect.go
index 0193517a..1b0bb004 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/reflect.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/reflect.go
@@ -71,22 +71,36 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
//
// set is a bit more lenient when it comes to assignment and doesn't force an as
// strict ruleset as bare `reflect` does.
-func set(dst, src reflect.Value, output Argument) error {
- dstType := dst.Type()
- srcType := src.Type()
+func set(dst, src reflect.Value) error {
+ dstType, srcType := dst.Type(), src.Type()
switch {
- case dstType.AssignableTo(srcType):
- dst.Set(src)
case dstType.Kind() == reflect.Interface:
+ return set(dst.Elem(), src)
+ case dstType.Kind() == reflect.Ptr && dstType.Elem() != derefbigT:
+ return set(dst.Elem(), src)
+ case srcType.AssignableTo(dstType) && dst.CanSet():
dst.Set(src)
- case dstType.Kind() == reflect.Ptr:
- return set(dst.Elem(), src, output)
+ case dstType.Kind() == reflect.Slice && srcType.Kind() == reflect.Slice:
+ return setSlice(dst, src)
default:
return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
}
return nil
}
+// setSlice attempts to assign src to dst when slices are not assignable by default
+// e.g. src: [][]byte -> dst: [][15]byte
+func setSlice(dst, src reflect.Value) error {
+ slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len())
+ for i := 0; i < src.Len(); i++ {
+ v := src.Index(i)
+ reflect.Copy(slice.Index(i), v)
+ }
+
+ dst.Set(slice)
+ return nil
+}
+
// requireAssignable assures that `dest` is a pointer and it's not an interface.
func requireAssignable(dst, src reflect.Value) error {
if dst.Kind() != reflect.Ptr && dst.Kind() != reflect.Interface {
@@ -112,14 +126,14 @@ func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
return nil
}
-// mapAbiToStringField maps abi to struct fields.
+// mapArgNamesToStructFields maps a slice of argument names to struct fields.
// first round: for each Exportable field that contains a `abi:""` tag
-// and this field name exists in the arguments, pair them together.
-// second round: for each argument field that has not been already linked,
+// and this field name exists in the given argument name list, pair them together.
+// second round: for each argument name that has not been already linked,
// find what variable is expected to be mapped into, if it exists and has not been
// used, pair them.
-func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]string, error) {
-
+// Note this function assumes the given value is a struct value.
+func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) {
typ := value.Type()
abi2struct := make(map[string]string)
@@ -133,45 +147,39 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
if structFieldName[:1] != strings.ToUpper(structFieldName[:1]) {
continue
}
-
// skip fields that have no abi:"" tag.
var ok bool
var tagName string
if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok {
continue
}
-
// check if tag is empty.
if tagName == "" {
return nil, fmt.Errorf("struct: abi tag in '%s' is empty", structFieldName)
}
-
// check which argument field matches with the abi tag.
found := false
- for _, abiField := range args.NonIndexed() {
- if abiField.Name == tagName {
- if abi2struct[abiField.Name] != "" {
+ for _, arg := range argNames {
+ if arg == tagName {
+ if abi2struct[arg] != "" {
return nil, fmt.Errorf("struct: abi tag in '%s' already mapped", structFieldName)
}
// pair them
- abi2struct[abiField.Name] = structFieldName
- struct2abi[structFieldName] = abiField.Name
+ abi2struct[arg] = structFieldName
+ struct2abi[structFieldName] = arg
found = true
}
}
-
// check if this tag has been mapped.
if !found {
return nil, fmt.Errorf("struct: abi tag '%s' defined but not found in abi", tagName)
}
-
}
// second round ~~~
- for _, arg := range args {
+ for _, argName := range argNames {
- abiFieldName := arg.Name
- structFieldName := capitalise(abiFieldName)
+ structFieldName := ToCamelCase(argName)
if structFieldName == "" {
return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
@@ -181,11 +189,11 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
// struct field with the same field name. If so, raise an error:
// abi: [ { "name": "value" } ]
// struct { Value *big.Int , Value1 *big.Int `abi:"value"`}
- if abi2struct[abiFieldName] != "" {
- if abi2struct[abiFieldName] != structFieldName &&
+ if abi2struct[argName] != "" {
+ if abi2struct[argName] != structFieldName &&
struct2abi[structFieldName] == "" &&
value.FieldByName(structFieldName).IsValid() {
- return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", abiFieldName)
+ return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", argName)
}
continue
}
@@ -197,16 +205,14 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
if value.FieldByName(structFieldName).IsValid() {
// pair them
- abi2struct[abiFieldName] = structFieldName
- struct2abi[structFieldName] = abiFieldName
+ abi2struct[argName] = structFieldName
+ struct2abi[structFieldName] = argName
} else {
// not paired, but annotate as used, to detect cases like
// abi : [ { "name": "value" }, { "name": "_value" } ]
// struct { Value *big.Int }
- struct2abi[structFieldName] = abiFieldName
+ struct2abi[structFieldName] = argName
}
-
}
-
return abi2struct, nil
}
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/reflect_test.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/reflect_test.go
new file mode 100644
index 00000000..c425e6e5
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/reflect_test.go
@@ -0,0 +1,191 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package abi
+
+import (
+ "reflect"
+ "testing"
+)
+
+type reflectTest struct {
+ name string
+ args []string
+ struc interface{}
+ want map[string]string
+ err string
+}
+
+var reflectTests = []reflectTest{
+ {
+ name: "OneToOneCorrespondance",
+ args: []string{"fieldA"},
+ struc: struct {
+ FieldA int `abi:"fieldA"`
+ }{},
+ want: map[string]string{
+ "fieldA": "FieldA",
+ },
+ },
+ {
+ name: "MissingFieldsInStruct",
+ args: []string{"fieldA", "fieldB"},
+ struc: struct {
+ FieldA int `abi:"fieldA"`
+ }{},
+ want: map[string]string{
+ "fieldA": "FieldA",
+ },
+ },
+ {
+ name: "MoreFieldsInStructThanArgs",
+ args: []string{"fieldA"},
+ struc: struct {
+ FieldA int `abi:"fieldA"`
+ FieldB int
+ }{},
+ want: map[string]string{
+ "fieldA": "FieldA",
+ },
+ },
+ {
+ name: "MissingFieldInArgs",
+ args: []string{"fieldA"},
+ struc: struct {
+ FieldA int `abi:"fieldA"`
+ FieldB int `abi:"fieldB"`
+ }{},
+ err: "struct: abi tag 'fieldB' defined but not found in abi",
+ },
+ {
+ name: "NoAbiDescriptor",
+ args: []string{"fieldA"},
+ struc: struct {
+ FieldA int
+ }{},
+ want: map[string]string{
+ "fieldA": "FieldA",
+ },
+ },
+ {
+ name: "NoArgs",
+ args: []string{},
+ struc: struct {
+ FieldA int `abi:"fieldA"`
+ }{},
+ err: "struct: abi tag 'fieldA' defined but not found in abi",
+ },
+ {
+ name: "DifferentName",
+ args: []string{"fieldB"},
+ struc: struct {
+ FieldA int `abi:"fieldB"`
+ }{},
+ want: map[string]string{
+ "fieldB": "FieldA",
+ },
+ },
+ {
+ name: "DifferentName",
+ args: []string{"fieldB"},
+ struc: struct {
+ FieldA int `abi:"fieldB"`
+ }{},
+ want: map[string]string{
+ "fieldB": "FieldA",
+ },
+ },
+ {
+ name: "MultipleFields",
+ args: []string{"fieldA", "fieldB"},
+ struc: struct {
+ FieldA int `abi:"fieldA"`
+ FieldB int `abi:"fieldB"`
+ }{},
+ want: map[string]string{
+ "fieldA": "FieldA",
+ "fieldB": "FieldB",
+ },
+ },
+ {
+ name: "MultipleFieldsABIMissing",
+ args: []string{"fieldA", "fieldB"},
+ struc: struct {
+ FieldA int `abi:"fieldA"`
+ FieldB int
+ }{},
+ want: map[string]string{
+ "fieldA": "FieldA",
+ "fieldB": "FieldB",
+ },
+ },
+ {
+ name: "NameConflict",
+ args: []string{"fieldB"},
+ struc: struct {
+ FieldA int `abi:"fieldB"`
+ FieldB int
+ }{},
+ err: "abi: multiple variables maps to the same abi field 'fieldB'",
+ },
+ {
+ name: "Underscored",
+ args: []string{"_"},
+ struc: struct {
+ FieldA int
+ }{},
+ err: "abi: purely underscored output cannot unpack to struct",
+ },
+ {
+ name: "DoubleMapping",
+ args: []string{"fieldB", "fieldC", "fieldA"},
+ struc: struct {
+ FieldA int `abi:"fieldC"`
+ FieldB int
+ }{},
+ err: "abi: multiple outputs mapping to the same struct field 'FieldA'",
+ },
+ {
+ name: "AlreadyMapped",
+ args: []string{"fieldB", "fieldB"},
+ struc: struct {
+ FieldB int `abi:"fieldB"`
+ }{},
+ err: "struct: abi tag in 'FieldB' already mapped",
+ },
+}
+
+func TestReflectNameToStruct(t *testing.T) {
+ for _, test := range reflectTests {
+ t.Run(test.name, func(t *testing.T) {
+ m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))
+ if len(test.err) > 0 {
+ if err == nil || err.Error() != test.err {
+ t.Fatalf("Invalid error: expected %v, got %v", test.err, err)
+ }
+ } else {
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ for fname := range test.want {
+ if m[fname] != test.want[fname] {
+ t.Fatalf("Incorrect value for field %s: expected %v, got %v", fname, test.want[fname], m[fname])
+ }
+ }
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go
index 6bfaabf5..26151dbd 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go
@@ -17,6 +17,7 @@
package abi
import (
+ "errors"
"fmt"
"reflect"
"regexp"
@@ -32,6 +33,7 @@ const (
StringTy
SliceTy
ArrayTy
+ TupleTy
AddressTy
FixedBytesTy
BytesTy
@@ -43,13 +45,16 @@ const (
// Type is the reflection of the supported argument type
type Type struct {
Elem *Type
-
Kind reflect.Kind
Type reflect.Type
Size int
T byte // Our own type checking
stringKind string // holds the unparsed string for deriving signatures
+
+ // Tuple relative fields
+ TupleElems []*Type // Type information of all tuple fields
+ TupleRawNames []string // Raw field name of all tuple fields
}
var (
@@ -58,7 +63,7 @@ var (
)
// NewType creates a new reflection type of abi type given in t.
-func NewType(t string) (typ Type, err error) {
+func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
// check that array brackets are equal if they exist
if strings.Count(t, "[") != strings.Count(t, "]") {
return Type{}, fmt.Errorf("invalid arg type in abi")
@@ -71,7 +76,7 @@ func NewType(t string) (typ Type, err error) {
if strings.Count(t, "[") != 0 {
i := strings.LastIndex(t, "[")
// recursively embed the type
- embeddedType, err := NewType(t[:i])
+ embeddedType, err := NewType(t[:i], components)
if err != nil {
return Type{}, err
}
@@ -87,6 +92,9 @@ func NewType(t string) (typ Type, err error) {
typ.Kind = reflect.Slice
typ.Elem = &embeddedType
typ.Type = reflect.SliceOf(embeddedType.Type)
+ if embeddedType.T == TupleTy {
+ typ.stringKind = embeddedType.stringKind + sliced
+ }
} else if len(intz) == 1 {
// is a array
typ.T = ArrayTy
@@ -97,6 +105,9 @@ func NewType(t string) (typ Type, err error) {
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
}
typ.Type = reflect.ArrayOf(typ.Size, embeddedType.Type)
+ if embeddedType.T == TupleTy {
+ typ.stringKind = embeddedType.stringKind + sliced
+ }
} else {
return Type{}, fmt.Errorf("invalid formatting of array type")
}
@@ -158,6 +169,40 @@ func NewType(t string) (typ Type, err error) {
typ.Size = varSize
typ.Type = reflect.ArrayOf(varSize, reflect.TypeOf(byte(0)))
}
+ case "tuple":
+ var (
+ fields []reflect.StructField
+ elems []*Type
+ names []string
+ expression string // canonical parameter expression
+ )
+ expression += "("
+ for idx, c := range components {
+ cType, err := NewType(c.Type, c.Components)
+ if err != nil {
+ return Type{}, err
+ }
+ if ToCamelCase(c.Name) == "" {
+ return Type{}, errors.New("abi: purely anonymous or underscored field is not supported")
+ }
+ fields = append(fields, reflect.StructField{
+ Name: ToCamelCase(c.Name), // reflect.StructOf will panic for any exported field.
+ Type: cType.Type,
+ })
+ elems = append(elems, &cType)
+ names = append(names, c.Name)
+ expression += cType.stringKind
+ if idx != len(components)-1 {
+ expression += ","
+ }
+ }
+ expression += ")"
+ typ.Kind = reflect.Struct
+ typ.Type = reflect.StructOf(fields)
+ typ.TupleElems = elems
+ typ.TupleRawNames = names
+ typ.T = TupleTy
+ typ.stringKind = expression
case "function":
typ.Kind = reflect.Array
typ.T = FunctionTy
@@ -178,7 +223,6 @@ func (t Type) String() (out string) {
func (t Type) pack(v reflect.Value) ([]byte, error) {
// dereference pointer first if it's a pointer
v = indirect(v)
-
if err := typeCheck(t, v); err != nil {
return nil, err
}
@@ -196,7 +240,7 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
offset := 0
offsetReq := isDynamicType(*t.Elem)
if offsetReq {
- offset = getDynamicTypeOffset(*t.Elem) * v.Len()
+ offset = getTypeSize(*t.Elem) * v.Len()
}
var tail []byte
for i := 0; i < v.Len(); i++ {
@@ -213,6 +257,45 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
tail = append(tail, val...)
}
return append(ret, tail...), nil
+ case TupleTy:
+ // (T1,...,Tk) for k >= 0 and any types T1, …, Tk
+ // enc(X) = head(X(1)) ... head(X(k)) tail(X(1)) ... tail(X(k))
+ // where X = (X(1), ..., X(k)) and head and tail are defined for Ti being a static
+ // type as
+ // head(X(i)) = enc(X(i)) and tail(X(i)) = "" (the empty string)
+ // and as
+ // head(X(i)) = enc(len(head(X(1)) ... head(X(k)) tail(X(1)) ... tail(X(i-1))))
+ // tail(X(i)) = enc(X(i))
+ // otherwise, i.e. if Ti is a dynamic type.
+ fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, v)
+ if err != nil {
+ return nil, err
+ }
+ // Calculate prefix occupied size.
+ offset := 0
+ for _, elem := range t.TupleElems {
+ offset += getTypeSize(*elem)
+ }
+ var ret, tail []byte
+ for i, elem := range t.TupleElems {
+ field := v.FieldByName(fieldmap[t.TupleRawNames[i]])
+ if !field.IsValid() {
+ return nil, fmt.Errorf("field %s for tuple not found in the given struct", t.TupleRawNames[i])
+ }
+ val, err := elem.pack(field)
+ if err != nil {
+ return nil, err
+ }
+ if isDynamicType(*elem) {
+ ret = append(ret, packNum(reflect.ValueOf(offset))...)
+ tail = append(tail, val...)
+ offset += len(val)
+ } else {
+ ret = append(ret, val...)
+ }
+ }
+ return append(ret, tail...), nil
+
default:
return packElement(t, v), nil
}
@@ -225,25 +308,45 @@ func (t Type) requiresLengthPrefix() bool {
}
// isDynamicType returns true if the type is dynamic.
-// StringTy, BytesTy, and SliceTy(irrespective of slice element type) are dynamic types
-// ArrayTy is considered dynamic if and only if the Array element is a dynamic type.
-// This function recursively checks the type for slice and array elements.
+// The following types are called “dynamic”:
+// * bytes
+// * string
+// * T[] for any T
+// * T[k] for any dynamic T and any k >= 0
+// * (T1,...,Tk) if Ti is dynamic for some 1 <= i <= k
func isDynamicType(t Type) bool {
- // dynamic types
- // array is also a dynamic type if the array type is dynamic
+ if t.T == TupleTy {
+ for _, elem := range t.TupleElems {
+ if isDynamicType(*elem) {
+ return true
+ }
+ }
+ return false
+ }
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy || (t.T == ArrayTy && isDynamicType(*t.Elem))
}
-// getDynamicTypeOffset returns the offset for the type.
-// See `isDynamicType` to know which types are considered dynamic.
-// If the type t is an array and element type is not a dynamic type, then we consider it a static type and
-// return 32 * size of array since length prefix is not required.
-// If t is a dynamic type or element type(for slices and arrays) is dynamic, then we simply return 32 as offset.
-func getDynamicTypeOffset(t Type) int {
- // if it is an array and there are no dynamic types
- // then the array is static type
+// getTypeSize returns the size that this type needs to occupy.
+// We distinguish static and dynamic types. Static types are encoded in-place
+// and dynamic types are encoded at a separately allocated location after the
+// current block.
+// So for a static variable, the size returned represents the size that the
+// variable actually occupies.
+// For a dynamic variable, the returned size is fixed 32 bytes, which is used
+// to store the location reference for actual value storage.
+func getTypeSize(t Type) int {
if t.T == ArrayTy && !isDynamicType(*t.Elem) {
- return 32 * t.Size
+ // Recursively calculate type size if it is a nested array
+ if t.Elem.T == ArrayTy {
+ return t.Size * getTypeSize(*t.Elem)
+ }
+ return t.Size * 32
+ } else if t.T == TupleTy && !isDynamicType(t) {
+ total := 0
+ for _, elem := range t.TupleElems {
+ total += getTypeSize(*elem)
+ }
+ return total
}
return 32
}
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/type_test.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/type_test.go
index f6b36f18..7ef47330 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/type_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/type_test.go
@@ -32,72 +32,75 @@ type typeWithoutStringer Type
// Tests that all allowed types get recognized by the type parser.
func TestTypeRegexp(t *testing.T) {
tests := []struct {
- blob string
- kind Type
+ blob string
+ components []ArgumentMarshaling
+ kind Type
}{
- {"bool", Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}},
- {"bool[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}},
- {"bool[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}},
- {"bool[2][]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
- {"bool[][]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
- {"bool[][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
- {"bool[2][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
- {"bool[2][][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
- {"bool[2][2][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
- {"bool[][][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
- {"bool[][2][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
- {"int8", Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}},
- {"int16", Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}},
- {"int32", Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}},
- {"int64", Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}},
- {"int256", Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}},
- {"int8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
- {"int8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
- {"int16[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
- {"int16[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
- {"int32[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
- {"int32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
- {"int64[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
- {"int64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
- {"int256[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
- {"int256[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
- {"uint8", Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}},
- {"uint16", Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}},
- {"uint32", Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}},
- {"uint64", Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}},
- {"uint256", Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}},
- {"uint8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
- {"uint8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
- {"uint16[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
- {"uint16[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
- {"uint32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
- {"uint32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
- {"uint64[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
- {"uint64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
- {"uint256[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
- {"uint256[2]", Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
- {"bytes32", Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
- {"bytes[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
- {"bytes[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
- {"bytes32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
- {"bytes32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
- {"string", Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
- {"string[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
- {"string[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
- {"address", Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}},
- {"address[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
- {"address[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
+ {"bool", nil, Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}},
+ {"bool[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}},
+ {"bool[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}},
+ {"bool[2][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
+ {"bool[][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
+ {"bool[][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
+ {"bool[2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
+ {"bool[2][][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
+ {"bool[2][2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
+ {"bool[][][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
+ {"bool[][2][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
+ {"int8", nil, Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}},
+ {"int16", nil, Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}},
+ {"int32", nil, Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}},
+ {"int64", nil, Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}},
+ {"int256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}},
+ {"int8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
+ {"int8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
+ {"int16[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
+ {"int16[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
+ {"int32[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
+ {"int32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
+ {"int64[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
+ {"int64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
+ {"int256[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
+ {"int256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
+ {"uint8", nil, Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}},
+ {"uint16", nil, Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}},
+ {"uint32", nil, Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}},
+ {"uint64", nil, Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}},
+ {"uint256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}},
+ {"uint8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
+ {"uint8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
+ {"uint16[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
+ {"uint16[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
+ {"uint32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
+ {"uint32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
+ {"uint64[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
+ {"uint64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
+ {"uint256[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
+ {"uint256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
+ {"bytes32", nil, Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
+ {"bytes[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
+ {"bytes[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
+ {"bytes32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
+ {"bytes32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
+ {"string", nil, Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
+ {"string[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
+ {"string[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
+ {"address", nil, Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}},
+ {"address[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
+ {"address[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
// TODO when fixed types are implemented properly
- // {"fixed", Type{}},
- // {"fixed128x128", Type{}},
- // {"fixed[]", Type{}},
- // {"fixed[2]", Type{}},
- // {"fixed128x128[]", Type{}},
- // {"fixed128x128[2]", Type{}},
+ // {"fixed", nil, Type{}},
+ // {"fixed128x128", nil, Type{}},
+ // {"fixed[]", nil, Type{}},
+ // {"fixed[2]", nil, Type{}},
+ // {"fixed128x128[]", nil, Type{}},
+ // {"fixed128x128[2]", nil, Type{}},
+ {"tuple", []ArgumentMarshaling{{Name: "a", Type: "int64"}}, Type{Kind: reflect.Struct, T: TupleTy, Type: reflect.TypeOf(struct{ A int64 }{}), stringKind: "(int64)",
+ TupleElems: []*Type{{Kind: reflect.Int64, T: IntTy, Type: reflect.TypeOf(int64(0)), Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"a"}}},
}
for _, tt := range tests {
- typ, err := NewType(tt.blob)
+ typ, err := NewType(tt.blob, tt.components)
if err != nil {
t.Errorf("type %q: failed to parse type string: %v", tt.blob, err)
}
@@ -109,154 +112,170 @@ func TestTypeRegexp(t *testing.T) {
func TestTypeCheck(t *testing.T) {
for i, test := range []struct {
- typ string
- input interface{}
- err string
+ typ string
+ components []ArgumentMarshaling
+ input interface{}
+ err string
}{
- {"uint", big.NewInt(1), "unsupported arg type: uint"},
- {"int", big.NewInt(1), "unsupported arg type: int"},
- {"uint256", big.NewInt(1), ""},
- {"uint256[][3][]", [][3][]*big.Int{{{}}}, ""},
- {"uint256[][][3]", [3][][]*big.Int{{{}}}, ""},
- {"uint256[3][][]", [][][3]*big.Int{{{}}}, ""},
- {"uint256[3][3][3]", [3][3][3]*big.Int{{{}}}, ""},
- {"uint8[][]", [][]uint8{}, ""},
- {"int256", big.NewInt(1), ""},
- {"uint8", uint8(1), ""},
- {"uint16", uint16(1), ""},
- {"uint32", uint32(1), ""},
- {"uint64", uint64(1), ""},
- {"int8", int8(1), ""},
- {"int16", int16(1), ""},
- {"int32", int32(1), ""},
- {"int64", int64(1), ""},
- {"uint24", big.NewInt(1), ""},
- {"uint40", big.NewInt(1), ""},
- {"uint48", big.NewInt(1), ""},
- {"uint56", big.NewInt(1), ""},
- {"uint72", big.NewInt(1), ""},
- {"uint80", big.NewInt(1), ""},
- {"uint88", big.NewInt(1), ""},
- {"uint96", big.NewInt(1), ""},
- {"uint104", big.NewInt(1), ""},
- {"uint112", big.NewInt(1), ""},
- {"uint120", big.NewInt(1), ""},
- {"uint128", big.NewInt(1), ""},
- {"uint136", big.NewInt(1), ""},
- {"uint144", big.NewInt(1), ""},
- {"uint152", big.NewInt(1), ""},
- {"uint160", big.NewInt(1), ""},
- {"uint168", big.NewInt(1), ""},
- {"uint176", big.NewInt(1), ""},
- {"uint184", big.NewInt(1), ""},
- {"uint192", big.NewInt(1), ""},
- {"uint200", big.NewInt(1), ""},
- {"uint208", big.NewInt(1), ""},
- {"uint216", big.NewInt(1), ""},
- {"uint224", big.NewInt(1), ""},
- {"uint232", big.NewInt(1), ""},
- {"uint240", big.NewInt(1), ""},
- {"uint248", big.NewInt(1), ""},
- {"int24", big.NewInt(1), ""},
- {"int40", big.NewInt(1), ""},
- {"int48", big.NewInt(1), ""},
- {"int56", big.NewInt(1), ""},
- {"int72", big.NewInt(1), ""},
- {"int80", big.NewInt(1), ""},
- {"int88", big.NewInt(1), ""},
- {"int96", big.NewInt(1), ""},
- {"int104", big.NewInt(1), ""},
- {"int112", big.NewInt(1), ""},
- {"int120", big.NewInt(1), ""},
- {"int128", big.NewInt(1), ""},
- {"int136", big.NewInt(1), ""},
- {"int144", big.NewInt(1), ""},
- {"int152", big.NewInt(1), ""},
- {"int160", big.NewInt(1), ""},
- {"int168", big.NewInt(1), ""},
- {"int176", big.NewInt(1), ""},
- {"int184", big.NewInt(1), ""},
- {"int192", big.NewInt(1), ""},
- {"int200", big.NewInt(1), ""},
- {"int208", big.NewInt(1), ""},
- {"int216", big.NewInt(1), ""},
- {"int224", big.NewInt(1), ""},
- {"int232", big.NewInt(1), ""},
- {"int240", big.NewInt(1), ""},
- {"int248", big.NewInt(1), ""},
- {"uint30", uint8(1), "abi: cannot use uint8 as type ptr as argument"},
- {"uint8", uint16(1), "abi: cannot use uint16 as type uint8 as argument"},
- {"uint8", uint32(1), "abi: cannot use uint32 as type uint8 as argument"},
- {"uint8", uint64(1), "abi: cannot use uint64 as type uint8 as argument"},
- {"uint8", int8(1), "abi: cannot use int8 as type uint8 as argument"},
- {"uint8", int16(1), "abi: cannot use int16 as type uint8 as argument"},
- {"uint8", int32(1), "abi: cannot use int32 as type uint8 as argument"},
- {"uint8", int64(1), "abi: cannot use int64 as type uint8 as argument"},
- {"uint16", uint16(1), ""},
- {"uint16", uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
- {"uint16[]", []uint16{1, 2, 3}, ""},
- {"uint16[]", [3]uint16{1, 2, 3}, ""},
- {"uint16[]", []uint32{1, 2, 3}, "abi: cannot use []uint32 as type [0]uint16 as argument"},
- {"uint16[3]", [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
- {"uint16[3]", [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
- {"uint16[3]", []uint16{1, 2, 3}, ""},
- {"uint16[3]", []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
- {"address[]", []common.Address{{1}}, ""},
- {"address[1]", []common.Address{{1}}, ""},
- {"address[1]", [1]common.Address{{1}}, ""},
- {"address[2]", [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
- {"bytes32", [32]byte{}, ""},
- {"bytes31", [31]byte{}, ""},
- {"bytes30", [30]byte{}, ""},
- {"bytes29", [29]byte{}, ""},
- {"bytes28", [28]byte{}, ""},
- {"bytes27", [27]byte{}, ""},
- {"bytes26", [26]byte{}, ""},
- {"bytes25", [25]byte{}, ""},
- {"bytes24", [24]byte{}, ""},
- {"bytes23", [23]byte{}, ""},
- {"bytes22", [22]byte{}, ""},
- {"bytes21", [21]byte{}, ""},
- {"bytes20", [20]byte{}, ""},
- {"bytes19", [19]byte{}, ""},
- {"bytes18", [18]byte{}, ""},
- {"bytes17", [17]byte{}, ""},
- {"bytes16", [16]byte{}, ""},
- {"bytes15", [15]byte{}, ""},
- {"bytes14", [14]byte{}, ""},
- {"bytes13", [13]byte{}, ""},
- {"bytes12", [12]byte{}, ""},
- {"bytes11", [11]byte{}, ""},
- {"bytes10", [10]byte{}, ""},
- {"bytes9", [9]byte{}, ""},
- {"bytes8", [8]byte{}, ""},
- {"bytes7", [7]byte{}, ""},
- {"bytes6", [6]byte{}, ""},
- {"bytes5", [5]byte{}, ""},
- {"bytes4", [4]byte{}, ""},
- {"bytes3", [3]byte{}, ""},
- {"bytes2", [2]byte{}, ""},
- {"bytes1", [1]byte{}, ""},
- {"bytes32", [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
- {"bytes32", common.Hash{1}, ""},
- {"bytes31", common.Hash{1}, "abi: cannot use common.Hash as type [31]uint8 as argument"},
- {"bytes31", [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
- {"bytes", []byte{0, 1}, ""},
- {"bytes", [2]byte{0, 1}, "abi: cannot use array as type slice as argument"},
- {"bytes", common.Hash{1}, "abi: cannot use array as type slice as argument"},
- {"string", "hello world", ""},
- {"string", string(""), ""},
- {"string", []byte{}, "abi: cannot use slice as type string as argument"},
- {"bytes32[]", [][32]byte{{}}, ""},
- {"function", [24]byte{}, ""},
- {"bytes20", common.Address{}, ""},
- {"address", [20]byte{}, ""},
- {"address", common.Address{}, ""},
- {"bytes32[]]", "", "invalid arg type in abi"},
- {"invalidType", "", "unsupported arg type: invalidType"},
- {"invalidSlice[]", "", "unsupported arg type: invalidSlice"},
+ {"uint", nil, big.NewInt(1), "unsupported arg type: uint"},
+ {"int", nil, big.NewInt(1), "unsupported arg type: int"},
+ {"uint256", nil, big.NewInt(1), ""},
+ {"uint256[][3][]", nil, [][3][]*big.Int{{{}}}, ""},
+ {"uint256[][][3]", nil, [3][][]*big.Int{{{}}}, ""},
+ {"uint256[3][][]", nil, [][][3]*big.Int{{{}}}, ""},
+ {"uint256[3][3][3]", nil, [3][3][3]*big.Int{{{}}}, ""},
+ {"uint8[][]", nil, [][]uint8{}, ""},
+ {"int256", nil, big.NewInt(1), ""},
+ {"uint8", nil, uint8(1), ""},
+ {"uint16", nil, uint16(1), ""},
+ {"uint32", nil, uint32(1), ""},
+ {"uint64", nil, uint64(1), ""},
+ {"int8", nil, int8(1), ""},
+ {"int16", nil, int16(1), ""},
+ {"int32", nil, int32(1), ""},
+ {"int64", nil, int64(1), ""},
+ {"uint24", nil, big.NewInt(1), ""},
+ {"uint40", nil, big.NewInt(1), ""},
+ {"uint48", nil, big.NewInt(1), ""},
+ {"uint56", nil, big.NewInt(1), ""},
+ {"uint72", nil, big.NewInt(1), ""},
+ {"uint80", nil, big.NewInt(1), ""},
+ {"uint88", nil, big.NewInt(1), ""},
+ {"uint96", nil, big.NewInt(1), ""},
+ {"uint104", nil, big.NewInt(1), ""},
+ {"uint112", nil, big.NewInt(1), ""},
+ {"uint120", nil, big.NewInt(1), ""},
+ {"uint128", nil, big.NewInt(1), ""},
+ {"uint136", nil, big.NewInt(1), ""},
+ {"uint144", nil, big.NewInt(1), ""},
+ {"uint152", nil, big.NewInt(1), ""},
+ {"uint160", nil, big.NewInt(1), ""},
+ {"uint168", nil, big.NewInt(1), ""},
+ {"uint176", nil, big.NewInt(1), ""},
+ {"uint184", nil, big.NewInt(1), ""},
+ {"uint192", nil, big.NewInt(1), ""},
+ {"uint200", nil, big.NewInt(1), ""},
+ {"uint208", nil, big.NewInt(1), ""},
+ {"uint216", nil, big.NewInt(1), ""},
+ {"uint224", nil, big.NewInt(1), ""},
+ {"uint232", nil, big.NewInt(1), ""},
+ {"uint240", nil, big.NewInt(1), ""},
+ {"uint248", nil, big.NewInt(1), ""},
+ {"int24", nil, big.NewInt(1), ""},
+ {"int40", nil, big.NewInt(1), ""},
+ {"int48", nil, big.NewInt(1), ""},
+ {"int56", nil, big.NewInt(1), ""},
+ {"int72", nil, big.NewInt(1), ""},
+ {"int80", nil, big.NewInt(1), ""},
+ {"int88", nil, big.NewInt(1), ""},
+ {"int96", nil, big.NewInt(1), ""},
+ {"int104", nil, big.NewInt(1), ""},
+ {"int112", nil, big.NewInt(1), ""},
+ {"int120", nil, big.NewInt(1), ""},
+ {"int128", nil, big.NewInt(1), ""},
+ {"int136", nil, big.NewInt(1), ""},
+ {"int144", nil, big.NewInt(1), ""},
+ {"int152", nil, big.NewInt(1), ""},
+ {"int160", nil, big.NewInt(1), ""},
+ {"int168", nil, big.NewInt(1), ""},
+ {"int176", nil, big.NewInt(1), ""},
+ {"int184", nil, big.NewInt(1), ""},
+ {"int192", nil, big.NewInt(1), ""},
+ {"int200", nil, big.NewInt(1), ""},
+ {"int208", nil, big.NewInt(1), ""},
+ {"int216", nil, big.NewInt(1), ""},
+ {"int224", nil, big.NewInt(1), ""},
+ {"int232", nil, big.NewInt(1), ""},
+ {"int240", nil, big.NewInt(1), ""},
+ {"int248", nil, big.NewInt(1), ""},
+ {"uint30", nil, uint8(1), "abi: cannot use uint8 as type ptr as argument"},
+ {"uint8", nil, uint16(1), "abi: cannot use uint16 as type uint8 as argument"},
+ {"uint8", nil, uint32(1), "abi: cannot use uint32 as type uint8 as argument"},
+ {"uint8", nil, uint64(1), "abi: cannot use uint64 as type uint8 as argument"},
+ {"uint8", nil, int8(1), "abi: cannot use int8 as type uint8 as argument"},
+ {"uint8", nil, int16(1), "abi: cannot use int16 as type uint8 as argument"},
+ {"uint8", nil, int32(1), "abi: cannot use int32 as type uint8 as argument"},
+ {"uint8", nil, int64(1), "abi: cannot use int64 as type uint8 as argument"},
+ {"uint16", nil, uint16(1), ""},
+ {"uint16", nil, uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
+ {"uint16[]", nil, []uint16{1, 2, 3}, ""},
+ {"uint16[]", nil, [3]uint16{1, 2, 3}, ""},
+ {"uint16[]", nil, []uint32{1, 2, 3}, "abi: cannot use []uint32 as type [0]uint16 as argument"},
+ {"uint16[3]", nil, [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
+ {"uint16[3]", nil, [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
+ {"uint16[3]", nil, []uint16{1, 2, 3}, ""},
+ {"uint16[3]", nil, []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
+ {"address[]", nil, []common.Address{{1}}, ""},
+ {"address[1]", nil, []common.Address{{1}}, ""},
+ {"address[1]", nil, [1]common.Address{{1}}, ""},
+ {"address[2]", nil, [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
+ {"bytes32", nil, [32]byte{}, ""},
+ {"bytes31", nil, [31]byte{}, ""},
+ {"bytes30", nil, [30]byte{}, ""},
+ {"bytes29", nil, [29]byte{}, ""},
+ {"bytes28", nil, [28]byte{}, ""},
+ {"bytes27", nil, [27]byte{}, ""},
+ {"bytes26", nil, [26]byte{}, ""},
+ {"bytes25", nil, [25]byte{}, ""},
+ {"bytes24", nil, [24]byte{}, ""},
+ {"bytes23", nil, [23]byte{}, ""},
+ {"bytes22", nil, [22]byte{}, ""},
+ {"bytes21", nil, [21]byte{}, ""},
+ {"bytes20", nil, [20]byte{}, ""},
+ {"bytes19", nil, [19]byte{}, ""},
+ {"bytes18", nil, [18]byte{}, ""},
+ {"bytes17", nil, [17]byte{}, ""},
+ {"bytes16", nil, [16]byte{}, ""},
+ {"bytes15", nil, [15]byte{}, ""},
+ {"bytes14", nil, [14]byte{}, ""},
+ {"bytes13", nil, [13]byte{}, ""},
+ {"bytes12", nil, [12]byte{}, ""},
+ {"bytes11", nil, [11]byte{}, ""},
+ {"bytes10", nil, [10]byte{}, ""},
+ {"bytes9", nil, [9]byte{}, ""},
+ {"bytes8", nil, [8]byte{}, ""},
+ {"bytes7", nil, [7]byte{}, ""},
+ {"bytes6", nil, [6]byte{}, ""},
+ {"bytes5", nil, [5]byte{}, ""},
+ {"bytes4", nil, [4]byte{}, ""},
+ {"bytes3", nil, [3]byte{}, ""},
+ {"bytes2", nil, [2]byte{}, ""},
+ {"bytes1", nil, [1]byte{}, ""},
+ {"bytes32", nil, [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
+ {"bytes32", nil, common.Hash{1}, ""},
+ {"bytes31", nil, common.Hash{1}, "abi: cannot use common.Hash as type [31]uint8 as argument"},
+ {"bytes31", nil, [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
+ {"bytes", nil, []byte{0, 1}, ""},
+ {"bytes", nil, [2]byte{0, 1}, "abi: cannot use array as type slice as argument"},
+ {"bytes", nil, common.Hash{1}, "abi: cannot use array as type slice as argument"},
+ {"string", nil, "hello world", ""},
+ {"string", nil, string(""), ""},
+ {"string", nil, []byte{}, "abi: cannot use slice as type string as argument"},
+ {"bytes32[]", nil, [][32]byte{{}}, ""},
+ {"function", nil, [24]byte{}, ""},
+ {"bytes20", nil, common.Address{}, ""},
+ {"address", nil, [20]byte{}, ""},
+ {"address", nil, common.Address{}, ""},
+ {"bytes32[]]", nil, "", "invalid arg type in abi"},
+ {"invalidType", nil, "", "unsupported arg type: invalidType"},
+ {"invalidSlice[]", nil, "", "unsupported arg type: invalidSlice"},
+ // simple tuple
+ {"tuple", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, struct {
+ A *big.Int
+ B *big.Int
+ }{}, ""},
+ // tuple slice
+ {"tuple[]", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, []struct {
+ A *big.Int
+ B *big.Int
+ }{}, ""},
+ // tuple array
+ {"tuple[2]", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, []struct {
+ A *big.Int
+ B *big.Int
+ }{{big.NewInt(0), big.NewInt(0)}, {big.NewInt(0), big.NewInt(0)}}, ""},
} {
- typ, err := NewType(test.typ)
+ typ, err := NewType(test.typ, test.components)
if err != nil && len(test.err) == 0 {
t.Fatal("unexpected parse error:", err)
} else if err != nil && len(test.err) != 0 {
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/unpack.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/unpack.go
index d5875140..8406b09c 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/unpack.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/unpack.go
@@ -115,17 +115,6 @@ func readFixedBytes(t Type, word []byte) (interface{}, error) {
}
-func getFullElemSize(elem *Type) int {
- //all other should be counted as 32 (slices have pointers to respective elements)
- size := 32
- //arrays wrap it, each element being the same size
- for elem.T == ArrayTy {
- size *= elem.Size
- elem = elem.Elem
- }
- return size
-}
-
// iteratively unpack elements
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
if size < 0 {
@@ -150,13 +139,9 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
// Arrays have packed elements, resulting in longer unpack steps.
// Slices have just 32 bytes per element (pointing to the contents).
- elemSize := 32
- if t.T == ArrayTy {
- elemSize = getFullElemSize(t.Elem)
- }
+ elemSize := getTypeSize(*t.Elem)
for i, j := start, 0; j < size; i, j = i+elemSize, j+1 {
-
inter, err := toGoType(i, *t.Elem, output)
if err != nil {
return nil, err
@@ -170,6 +155,36 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
return refSlice.Interface(), nil
}
+func forTupleUnpack(t Type, output []byte) (interface{}, error) {
+ retval := reflect.New(t.Type).Elem()
+ virtualArgs := 0
+ for index, elem := range t.TupleElems {
+ marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output)
+ if elem.T == ArrayTy && !isDynamicType(*elem) {
+ // If we have a static array, like [3]uint256, these are coded as
+ // just like uint256,uint256,uint256.
+ // This means that we need to add two 'virtual' arguments when
+ // we count the index from now on.
+ //
+ // Array values nested multiple levels deep are also encoded inline:
+ // [2][3]uint256: uint256,uint256,uint256,uint256,uint256,uint256
+ //
+ // Calculate the full array size to get the correct offset for the next argument.
+ // Decrement it by 1, as the normal index increment is still applied.
+ virtualArgs += getTypeSize(*elem)/32 - 1
+ } else if elem.T == TupleTy && !isDynamicType(*elem) {
+ // If we have a static tuple, like (uint256, bool, uint256), these are
+ // coded as just like uint256,bool,uint256
+ virtualArgs += getTypeSize(*elem)/32 - 1
+ }
+ if err != nil {
+ return nil, err
+ }
+ retval.Field(index).Set(reflect.ValueOf(marshalledValue))
+ }
+ return retval.Interface(), nil
+}
+
// toGoType parses the output bytes and recursively assigns the value of these bytes
// into a go type with accordance with the ABI spec.
func toGoType(index int, t Type, output []byte) (interface{}, error) {
@@ -178,14 +193,14 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
}
var (
- returnOutput []byte
- begin, end int
- err error
+ returnOutput []byte
+ begin, length int
+ err error
)
// if we require a length prefix, find the beginning word and size returned.
if t.requiresLengthPrefix() {
- begin, end, err = lengthPrefixPointsTo(index, output)
+ begin, length, err = lengthPrefixPointsTo(index, output)
if err != nil {
return nil, err
}
@@ -194,12 +209,26 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
}
switch t.T {
+ case TupleTy:
+ if isDynamicType(t) {
+ begin, err := tuplePointsTo(index, output)
+ if err != nil {
+ return nil, err
+ }
+ return forTupleUnpack(t, output[begin:])
+ } else {
+ return forTupleUnpack(t, output[index:])
+ }
case SliceTy:
- return forEachUnpack(t, output, begin, end)
+ return forEachUnpack(t, output[begin:], 0, length)
case ArrayTy:
- return forEachUnpack(t, output, index, t.Size)
+ if isDynamicType(*t.Elem) {
+ offset := int64(binary.BigEndian.Uint64(returnOutput[len(returnOutput)-8:]))
+ return forEachUnpack(t, output[offset:], 0, t.Size)
+ }
+ return forEachUnpack(t, output[index:], 0, t.Size)
case StringTy: // variable arrays are written at the end of the return bytes
- return string(output[begin : begin+end]), nil
+ return string(output[begin : begin+length]), nil
case IntTy, UintTy:
return readInteger(t.T, t.Kind, returnOutput), nil
case BoolTy:
@@ -209,7 +238,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
case HashTy:
return common.BytesToHash(returnOutput), nil
case BytesTy:
- return output[begin : begin+end], nil
+ return output[begin : begin+length], nil
case FixedBytesTy:
return readFixedBytes(t, returnOutput)
case FunctionTy:
@@ -250,3 +279,17 @@ func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err
length = int(lengthBig.Uint64())
return
}
+
+// tuplePointsTo resolves the location reference for dynamic tuple.
+func tuplePointsTo(index int, output []byte) (start int, err error) {
+ offset := big.NewInt(0).SetBytes(output[index : index+32])
+ outputLen := big.NewInt(int64(len(output)))
+
+ if offset.Cmp(big.NewInt(int64(len(output)))) > 0 {
+ return 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", offset, outputLen)
+ }
+ if offset.BitLen() > 63 {
+ return 0, fmt.Errorf("abi offset larger than int64: %v", offset)
+ }
+ return int(offset.Uint64()), nil
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/unpack_test.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/unpack_test.go
index 97552b90..ff88be3d 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/unpack_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/unpack_test.go
@@ -173,9 +173,14 @@ var unpackTests = []unpackTest{
// multi dimensional, if these pass, all types that don't require length prefix should pass
{
def: `[{"type": "uint8[][]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000E0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
want: [][]uint8{{1, 2}, {1, 2}},
},
+ {
+ def: `[{"type": "uint8[][]"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
+ want: [][]uint8{{1, 2}, {1, 2, 3}},
+ },
{
def: `[{"type": "uint8[2][2]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@@ -183,7 +188,7 @@ var unpackTests = []unpackTest{
},
{
def: `[{"type": "uint8[][2]"}]`,
- enc: "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
+ enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
want: [2][]uint8{{1}, {1}},
},
{
@@ -191,6 +196,11 @@ var unpackTests = []unpackTest{
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
want: [][2]uint8{{1, 2}},
},
+ {
+ def: `[{"type": "uint8[2][]"}]`,
+ enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: [][2]uint8{{1, 2}, {1, 2}},
+ },
{
def: `[{"type": "uint16[]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@@ -236,6 +246,26 @@ var unpackTests = []unpackTest{
enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
},
+ {
+ def: `[{"type": "string[4]"}]`,
+ enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000000548656c6c6f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005576f726c64000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b476f2d657468657265756d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000",
+ want: [4]string{"Hello", "World", "Go-ethereum", "Ethereum"},
+ },
+ {
+ def: `[{"type": "string[]"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b676f2d657468657265756d000000000000000000000000000000000000000000",
+ want: []string{"Ethereum", "go-ethereum"},
+ },
+ {
+ def: `[{"type": "bytes[]"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000003f0f0f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003f0f0f00000000000000000000000000000000000000000000000000000000000",
+ want: [][]byte{{0xf0, 0xf0, 0xf0}, {0xf0, 0xf0, 0xf0}},
+ },
+ {
+ def: `[{"type": "uint256[2][][]"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e80000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e8",
+ want: [][][2]*big.Int{{{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}, {{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}},
+ },
{
def: `[{"type": "int8[]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@@ -295,6 +325,53 @@ var unpackTests = []unpackTest{
Int2 *big.Int
}{big.NewInt(1), big.NewInt(2)},
},
+ {
+ def: `[{"name":"int_one","type":"int256"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: struct {
+ IntOne *big.Int
+ }{big.NewInt(1)},
+ },
+ {
+ def: `[{"name":"int__one","type":"int256"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: struct {
+ IntOne *big.Int
+ }{big.NewInt(1)},
+ },
+ {
+ def: `[{"name":"int_one_","type":"int256"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: struct {
+ IntOne *big.Int
+ }{big.NewInt(1)},
+ },
+ {
+ def: `[{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: struct {
+ IntOne *big.Int
+ Intone *big.Int
+ }{big.NewInt(1), big.NewInt(2)},
+ },
+ {
+ def: `[{"name":"___","type":"int256"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: struct {
+ IntOne *big.Int
+ Intone *big.Int
+ }{},
+ err: "abi: purely underscored output cannot unpack to struct",
+ },
+ {
+ def: `[{"name":"int_one","type":"int256"},{"name":"IntOne","type":"int256"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: struct {
+ Int1 *big.Int
+ Int2 *big.Int
+ }{},
+ err: "abi: multiple outputs mapping to the same struct field 'IntOne'",
+ },
{
def: `[{"name":"int","type":"int256"},{"name":"Int","type":"int256"}]`,
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@@ -359,6 +436,55 @@ func TestUnpack(t *testing.T) {
}
}
+func TestUnpackSetDynamicArrayOutput(t *testing.T) {
+ abi, err := JSON(strings.NewReader(`[{"constant":true,"inputs":[],"name":"testDynamicFixedBytes15","outputs":[{"name":"","type":"bytes15[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"testDynamicFixedBytes32","outputs":[{"name":"","type":"bytes32[]"}],"payable":false,"stateMutability":"view","type":"function"}]`))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var (
+ marshalledReturn32 = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000230783132333435363738393000000000000000000000000000000000000000003078303938373635343332310000000000000000000000000000000000000000")
+ marshalledReturn15 = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000230783031323334350000000000000000000000000000000000000000000000003078393837363534000000000000000000000000000000000000000000000000")
+
+ out32 [][32]byte
+ out15 [][15]byte
+ )
+
+ // test 32
+ err = abi.Unpack(&out32, "testDynamicFixedBytes32", marshalledReturn32)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(out32) != 2 {
+ t.Fatalf("expected array with 2 values, got %d", len(out32))
+ }
+ expected := common.Hex2Bytes("3078313233343536373839300000000000000000000000000000000000000000")
+ if !bytes.Equal(out32[0][:], expected) {
+ t.Errorf("expected %x, got %x\n", expected, out32[0])
+ }
+ expected = common.Hex2Bytes("3078303938373635343332310000000000000000000000000000000000000000")
+ if !bytes.Equal(out32[1][:], expected) {
+ t.Errorf("expected %x, got %x\n", expected, out32[1])
+ }
+
+ // test 15
+ err = abi.Unpack(&out15, "testDynamicFixedBytes32", marshalledReturn15)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(out15) != 2 {
+ t.Fatalf("expected array with 2 values, got %d", len(out15))
+ }
+ expected = common.Hex2Bytes("307830313233343500000000000000")
+ if !bytes.Equal(out15[0][:], expected) {
+ t.Errorf("expected %x, got %x\n", expected, out15[0])
+ }
+ expected = common.Hex2Bytes("307839383736353400000000000000")
+ if !bytes.Equal(out15[1][:], expected) {
+ t.Errorf("expected %x, got %x\n", expected, out15[1])
+ }
+}
+
type methodMultiOutput struct {
Int *big.Int
String string
@@ -462,6 +588,68 @@ func TestMultiReturnWithArray(t *testing.T) {
}
}
+func TestMultiReturnWithStringArray(t *testing.T) {
+ const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]`
+ abi, err := JSON(strings.NewReader(definition))
+ if err != nil {
+ t.Fatal(err)
+ }
+ buff := new(bytes.Buffer)
+ buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000005c1b78ea0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000001a055690d9db80000000000000000000000000000ab1257528b3782fb40d7ed5f72e624b744dffb2f00000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001048656c6c6f2c20457468657265756d2100000000000000000000000000000000"))
+ temp, _ := big.NewInt(0).SetString("30000000000000000000", 10)
+ ret1, ret1Exp := new([3]*big.Int), [3]*big.Int{big.NewInt(1545304298), big.NewInt(6), temp}
+ ret2, ret2Exp := new(common.Address), common.HexToAddress("ab1257528b3782fb40d7ed5f72e624b744dffb2f")
+ ret3, ret3Exp := new([2]string), [2]string{"Ethereum", "Hello, Ethereum!"}
+ ret4, ret4Exp := new(bool), false
+ if err := abi.Unpack(&[]interface{}{ret1, ret2, ret3, ret4}, "multi", buff.Bytes()); err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(*ret1, ret1Exp) {
+ t.Error("big.Int array result", *ret1, "!= Expected", ret1Exp)
+ }
+ if !reflect.DeepEqual(*ret2, ret2Exp) {
+ t.Error("address result", *ret2, "!= Expected", ret2Exp)
+ }
+ if !reflect.DeepEqual(*ret3, ret3Exp) {
+ t.Error("string array result", *ret3, "!= Expected", ret3Exp)
+ }
+ if !reflect.DeepEqual(*ret4, ret4Exp) {
+ t.Error("bool result", *ret4, "!= Expected", ret4Exp)
+ }
+}
+
+func TestMultiReturnWithStringSlice(t *testing.T) {
+ const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]`
+ abi, err := JSON(strings.NewReader(definition))
+ if err != nil {
+ t.Fatal(err)
+ }
+ buff := new(bytes.Buffer)
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // output[0] offset
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000120")) // output[1] offset
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // output[0] length
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // output[0][0] offset
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // output[0][1] offset
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000008")) // output[0][0] length
+ buff.Write(common.Hex2Bytes("657468657265756d000000000000000000000000000000000000000000000000")) // output[0][0] value
+ buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000b")) // output[0][1] length
+ buff.Write(common.Hex2Bytes("676f2d657468657265756d000000000000000000000000000000000000000000")) // output[0][1] value
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // output[1] length
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000064")) // output[1][0] value
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000065")) // output[1][1] value
+ ret1, ret1Exp := new([]string), []string{"ethereum", "go-ethereum"}
+ ret2, ret2Exp := new([]*big.Int), []*big.Int{big.NewInt(100), big.NewInt(101)}
+ if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(*ret1, ret1Exp) {
+ t.Error("string slice result", *ret1, "!= Expected", ret1Exp)
+ }
+ if !reflect.DeepEqual(*ret2, ret2Exp) {
+ t.Error("uint256 slice result", *ret2, "!= Expected", ret2Exp)
+ }
+}
+
func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
// Similar to TestMultiReturnWithArray, but with a special case in mind:
// values of nested static arrays count towards the size as well, and any element following
@@ -751,6 +939,108 @@ func TestUnmarshal(t *testing.T) {
}
}
+func TestUnpackTuple(t *testing.T) {
+ const simpleTuple = `[{"name":"tuple","constant":false,"outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]`
+ abi, err := JSON(strings.NewReader(simpleTuple))
+ if err != nil {
+ t.Fatal(err)
+ }
+ buff := new(bytes.Buffer)
+
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // ret[a] = 1
+ buff.Write(common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) // ret[b] = -1
+
+ v := struct {
+ Ret struct {
+ A *big.Int
+ B *big.Int
+ }
+ }{Ret: struct {
+ A *big.Int
+ B *big.Int
+ }{new(big.Int), new(big.Int)}}
+
+ err = abi.Unpack(&v, "tuple", buff.Bytes())
+ if err != nil {
+ t.Error(err)
+ } else {
+ if v.Ret.A.Cmp(big.NewInt(1)) != 0 {
+ t.Errorf("unexpected value unpacked: want %x, got %x", 1, v.Ret.A)
+ }
+ if v.Ret.B.Cmp(big.NewInt(-1)) != 0 {
+ t.Errorf("unexpected value unpacked: want %x, got %x", v.Ret.B, -1)
+ }
+ }
+
+ // Test nested tuple
+ const nestedTuple = `[{"name":"tuple","constant":false,"outputs":[
+ {"type":"tuple","name":"s","components":[{"type":"uint256","name":"a"},{"type":"uint256[]","name":"b"},{"type":"tuple[]","name":"c","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]}]},
+ {"type":"tuple","name":"t","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]},
+ {"type":"uint256","name":"a"}
+ ]}]`
+
+ abi, err = JSON(strings.NewReader(nestedTuple))
+ if err != nil {
+ t.Fatal(err)
+ }
+ buff.Reset()
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // s offset
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")) // t.X = 0
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // t.Y = 1
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // a = 1
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.A = 1
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000060")) // s.B offset
+ buff.Write(common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0")) // s.C offset
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.B length
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.B[0] = 1
+	buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.B[1] = 2
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C length
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.C[0].X
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C[0].Y
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C[1].X
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.C[1].Y
+
+ type T struct {
+ X *big.Int `abi:"x"`
+ Z *big.Int `abi:"y"` // Test whether the abi tag works.
+ }
+
+ type S struct {
+ A *big.Int
+ B []*big.Int
+ C []T
+ }
+
+ type Ret struct {
+ FieldS S `abi:"s"`
+ FieldT T `abi:"t"`
+ A *big.Int
+ }
+ var ret Ret
+ var expected = Ret{
+ FieldS: S{
+ A: big.NewInt(1),
+ B: []*big.Int{big.NewInt(1), big.NewInt(2)},
+ C: []T{
+ {big.NewInt(1), big.NewInt(2)},
+ {big.NewInt(2), big.NewInt(1)},
+ },
+ },
+ FieldT: T{
+ big.NewInt(0), big.NewInt(1),
+ },
+ A: big.NewInt(1),
+ }
+
+ err = abi.Unpack(&ret, "tuple", buff.Bytes())
+ if err != nil {
+ t.Error(err)
+ }
+ if reflect.DeepEqual(ret, expected) {
+ t.Error("unexpected unpack value")
+ }
+}
+
func TestOOMMaliciousInput(t *testing.T) {
oomTests := []unpackTest{
{
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/wallet.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/wallet.go
index 758fdfe3..2f774cc9 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/wallet.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/wallet.go
@@ -52,8 +52,8 @@ func (w *keystoreWallet) Status() (string, error) {
// is no connection or decryption step necessary to access the list of accounts.
func (w *keystoreWallet) Open(passphrase string) error { return nil }
-// Close implements accounts.Wallet, but is a noop for plain wallets since is no
-// meaningful open operation.
+// Close implements accounts.Wallet, but is a noop for plain wallets since there
+// is no meaningful open operation.
func (w *keystoreWallet) Close() error { return nil }
// Accounts implements accounts.Wallet, returning an account list consisting of
@@ -84,10 +84,7 @@ func (w *keystoreWallet) SelfDerive(base accounts.DerivationPath, chain ethereum
// able to sign via our shared keystore backend).
func (w *keystoreWallet) SignHash(account accounts.Account, hash []byte) ([]byte, error) {
// Make sure the requested account is contained within
- if account.Address != w.account.Address {
- return nil, accounts.ErrUnknownAccount
- }
- if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
+ if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
@@ -100,10 +97,7 @@ func (w *keystoreWallet) SignHash(account accounts.Account, hash []byte) ([]byte
// be able to sign via our shared keystore backend).
func (w *keystoreWallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
// Make sure the requested account is contained within
- if account.Address != w.account.Address {
- return nil, accounts.ErrUnknownAccount
- }
- if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
+ if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
@@ -114,10 +108,7 @@ func (w *keystoreWallet) SignTx(account accounts.Account, tx *types.Transaction,
// given hash with the given account using passphrase as extra authentication.
func (w *keystoreWallet) SignHashWithPassphrase(account accounts.Account, passphrase string, hash []byte) ([]byte, error) {
// Make sure the requested account is contained within
- if account.Address != w.account.Address {
- return nil, accounts.ErrUnknownAccount
- }
- if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
+ if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
@@ -128,10 +119,7 @@ func (w *keystoreWallet) SignHashWithPassphrase(account accounts.Account, passph
// transaction with the given account using passphrase as extra authentication.
func (w *keystoreWallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
// Make sure the requested account is contained within
- if account.Address != w.account.Address {
- return nil, accounts.ErrUnknownAccount
- }
- if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
+ if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/ledger.go b/vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/ledger.go
index 7d5f6790..c30903b5 100644
--- a/vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/ledger.go
+++ b/vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/ledger.go
@@ -257,7 +257,9 @@ func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, er
// Decode the hex sting into an Ethereum address and return
var address common.Address
- hex.Decode(address[:], hexstr)
+ if _, err = hex.Decode(address[:], hexstr); err != nil {
+ return common.Address{}, err
+ }
return address, nil
}
diff --git a/vendor/github.com/ethereum/go-ethereum/appveyor.yml b/vendor/github.com/ethereum/go-ethereum/appveyor.yml
index e5126b25..defad29c 100644
--- a/vendor/github.com/ethereum/go-ethereum/appveyor.yml
+++ b/vendor/github.com/ethereum/go-ethereum/appveyor.yml
@@ -23,8 +23,8 @@ environment:
install:
- git submodule update --init
- rmdir C:\go /s /q
- - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.2.windows-%GETH_ARCH%.zip
- - 7z x go1.11.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+ - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.4.windows-%GETH_ARCH%.zip
+ - 7z x go1.11.4.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version
diff --git a/vendor/github.com/ethereum/go-ethereum/build/update-license.go b/vendor/github.com/ethereum/go-ethereum/build/update-license.go
index 22e40334..e3e00d4c 100644
--- a/vendor/github.com/ethereum/go-ethereum/build/update-license.go
+++ b/vendor/github.com/ethereum/go-ethereum/build/update-license.go
@@ -1,3 +1,19 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
// +build none
/*
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go b/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go
index 59f759f0..f1e28119 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go
@@ -20,7 +20,6 @@ import (
"bufio"
"errors"
"fmt"
- "io"
"math/big"
"os"
"reflect"
@@ -198,7 +197,17 @@ func dumpConfig(ctx *cli.Context) error {
if err != nil {
return err
}
- io.WriteString(os.Stdout, comment)
- os.Stdout.Write(out)
+
+ dump := os.Stdout
+ if ctx.NArg() > 0 {
+ dump, err = os.OpenFile(ctx.Args().Get(0), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ return err
+ }
+ defer dump.Close()
+ }
+ dump.WriteString(comment)
+ dump.Write(out)
+
return nil
}
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis.go
index 1025dfe8..c95c81a6 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis.go
@@ -174,7 +174,11 @@ func (spec *alethGenesisSpec) setPrecompile(address byte, data *alethGenesisSpec
if spec.Accounts == nil {
spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
}
- spec.Accounts[common.UnprefixedAddress(common.BytesToAddress([]byte{address}))].Precompiled = data
+ addr := common.UnprefixedAddress(common.BytesToAddress([]byte{address}))
+ if _, exist := spec.Accounts[addr]; !exist {
+ spec.Accounts[addr] = &alethGenesisSpecAccount{}
+ }
+ spec.Accounts[addr].Precompiled = data
}
func (spec *alethGenesisSpec) setAccount(address common.Address, account core.GenesisAccount) {
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access_test.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access_test.go
index 9357c577..967ef274 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access_test.go
@@ -33,11 +33,11 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
swarmapi "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/testutil"
+ "golang.org/x/crypto/sha3"
)
const (
@@ -598,7 +598,7 @@ func TestKeypairSanity(t *testing.T) {
t.Fatal(err)
}
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
hasher.Write(salt)
shared, err := hex.DecodeString(sharedSecret)
if err != nil {
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/flags.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/flags.go
index 0dedca67..12edc8cc 100644
--- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/flags.go
+++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/flags.go
@@ -164,10 +164,6 @@ var (
Name: "topic",
Usage: "User-defined topic this feed is tracking, hex encoded. Limited to 64 hexadecimal characters",
}
- SwarmFeedDataOnCreateFlag = cli.StringFlag{
- Name: "data",
- Usage: "Initializes the feed with the given hex-encoded data. Data must be prefixed by 0x",
- }
SwarmFeedManifestFlag = cli.StringFlag{
Name: "manifest",
Usage: "Refers to the feed through a manifest",
diff --git a/vendor/github.com/ethereum/go-ethereum/common/types.go b/vendor/github.com/ethereum/go-ethereum/common/types.go
index a4b99952..0f4892d2 100644
--- a/vendor/github.com/ethereum/go-ethereum/common/types.go
+++ b/vendor/github.com/ethereum/go-ethereum/common/types.go
@@ -27,7 +27,7 @@ import (
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/crypto/sha3"
+ "golang.org/x/crypto/sha3"
)
// Lengths of hashes and addresses in bytes.
@@ -196,7 +196,7 @@ func (a Address) Hash() Hash { return BytesToHash(a[:]) }
// Hex returns an EIP55-compliant hex string representation of the address.
func (a Address) Hex() string {
unchecksummed := hex.EncodeToString(a[:])
- sha := sha3.NewKeccak256()
+ sha := sha3.NewLegacyKeccak256()
sha.Write([]byte(unchecksummed))
hash := sha.Sum(nil)
diff --git a/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go b/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go
index 0cb72c35..c79c30ca 100644
--- a/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go
+++ b/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go
@@ -33,13 +33,13 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
lru "github.com/hashicorp/golang-lru"
+ "golang.org/x/crypto/sha3"
)
const (
@@ -148,7 +148,7 @@ type SignerFn func(accounts.Account, []byte) ([]byte, error)
// panics. This is done to avoid accidentally using both forms (signature present
// or not), which could be abused to produce different hashes for the same header.
func sigHash(header *types.Header) (hash common.Hash) {
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
rlp.Encode(hasher, []interface{}{
header.ParentHash,
diff --git a/vendor/github.com/ethereum/go-ethereum/consensus/ethash/algorithm.go b/vendor/github.com/ethereum/go-ethereum/consensus/ethash/algorithm.go
index f252a7f3..d6c87109 100644
--- a/vendor/github.com/ethereum/go-ethereum/consensus/ethash/algorithm.go
+++ b/vendor/github.com/ethereum/go-ethereum/consensus/ethash/algorithm.go
@@ -30,8 +30,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
+ "golang.org/x/crypto/sha3"
)
const (
@@ -123,7 +123,7 @@ func seedHash(block uint64) []byte {
if block < epochLength {
return seed
}
- keccak256 := makeHasher(sha3.NewKeccak256())
+ keccak256 := makeHasher(sha3.NewLegacyKeccak256())
for i := 0; i < int(block/epochLength); i++ {
keccak256(seed, seed)
}
@@ -177,7 +177,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
}
}()
// Create a hasher to reuse between invocations
- keccak512 := makeHasher(sha3.NewKeccak512())
+ keccak512 := makeHasher(sha3.NewLegacyKeccak512())
// Sequentially produce the initial dataset
keccak512(cache, seed)
@@ -301,7 +301,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
defer pend.Done()
// Create a hasher to reuse between invocations
- keccak512 := makeHasher(sha3.NewKeccak512())
+ keccak512 := makeHasher(sha3.NewLegacyKeccak512())
// Calculate the data segment this thread should generate
batch := uint32((size + hashBytes*uint64(threads) - 1) / (hashBytes * uint64(threads)))
@@ -375,7 +375,7 @@ func hashimoto(hash []byte, nonce uint64, size uint64, lookup func(index uint32)
// in-memory cache) in order to produce our final value for a particular header
// hash and nonce.
func hashimotoLight(size uint64, cache []uint32, hash []byte, nonce uint64) ([]byte, []byte) {
- keccak512 := makeHasher(sha3.NewKeccak512())
+ keccak512 := makeHasher(sha3.NewLegacyKeccak512())
lookup := func(index uint32) []uint32 {
rawData := generateDatasetItem(cache, index, keccak512)
diff --git a/vendor/github.com/ethereum/go-ethereum/consensus/ethash/consensus.go b/vendor/github.com/ethereum/go-ethereum/consensus/ethash/consensus.go
index 548c57cd..62e3f8fc 100644
--- a/vendor/github.com/ethereum/go-ethereum/consensus/ethash/consensus.go
+++ b/vendor/github.com/ethereum/go-ethereum/consensus/ethash/consensus.go
@@ -31,9 +31,9 @@ import (
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
// Ethash proof-of-work protocol constants.
@@ -575,7 +575,7 @@ func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header
// SealHash returns the hash of a block prior to it being sealed.
func (ethash *Ethash) SealHash(header *types.Header) (hash common.Hash) {
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
rlp.Encode(hasher, []interface{}{
header.ParentHash,
diff --git a/vendor/github.com/ethereum/go-ethereum/core/blockchain.go b/vendor/github.com/ethereum/go-ethereum/core/blockchain.go
index c29063a7..49aedf66 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/blockchain.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/blockchain.go
@@ -65,7 +65,7 @@ const (
triesInMemory = 128
// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
- BlockChainVersion = 3
+ BlockChainVersion uint64 = 3
)
// CacheConfig contains the configuration values for the trie caching/pruning
diff --git a/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_chain_test.go b/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_chain_test.go
index 9ddae6e2..fcc36dc2 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_chain_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_chain_test.go
@@ -23,9 +23,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
// Tests block header storage and retrieval operations.
@@ -47,7 +47,7 @@ func TestHeaderStorage(t *testing.T) {
if entry := ReadHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil {
t.Fatalf("Stored header RLP not found")
} else {
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
hasher.Write(entry)
if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
@@ -68,7 +68,7 @@ func TestBodyStorage(t *testing.T) {
// Create a test body to move around the database and make sure it's really new
body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
rlp.Encode(hasher, body)
hash := common.BytesToHash(hasher.Sum(nil))
@@ -85,7 +85,7 @@ func TestBodyStorage(t *testing.T) {
if entry := ReadBodyRLP(db, hash, 0); entry == nil {
t.Fatalf("Stored body RLP not found")
} else {
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
hasher.Write(entry)
if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {
diff --git a/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_metadata.go b/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_metadata.go
index 3b6e6548..82e4bf04 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_metadata.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_metadata.go
@@ -26,19 +26,27 @@ import (
)
// ReadDatabaseVersion retrieves the version number of the database.
-func ReadDatabaseVersion(db DatabaseReader) int {
- var version int
+func ReadDatabaseVersion(db DatabaseReader) *uint64 {
+ var version uint64
enc, _ := db.Get(databaseVerisionKey)
- rlp.DecodeBytes(enc, &version)
+ if len(enc) == 0 {
+ return nil
+ }
+ if err := rlp.DecodeBytes(enc, &version); err != nil {
+ return nil
+ }
- return version
+ return &version
}
// WriteDatabaseVersion stores the version number of the database
-func WriteDatabaseVersion(db DatabaseWriter, version int) {
- enc, _ := rlp.EncodeToBytes(version)
- if err := db.Put(databaseVerisionKey, enc); err != nil {
+func WriteDatabaseVersion(db DatabaseWriter, version uint64) {
+ enc, err := rlp.EncodeToBytes(version)
+ if err != nil {
+ log.Crit("Failed to encode database version", "err", err)
+ }
+ if err = db.Put(databaseVerisionKey, enc); err != nil {
log.Crit("Failed to store the database version", "err", err)
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/core/state/statedb.go b/vendor/github.com/ethereum/go-ethereum/core/state/statedb.go
index 76e67d83..2230b10e 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/state/statedb.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/state/statedb.go
@@ -468,9 +468,9 @@ func (self *StateDB) createObject(addr common.Address) (newobj, prev *stateObjec
//
// Carrying over the balance ensures that Ether doesn't disappear.
func (self *StateDB) CreateAccount(addr common.Address) {
- new, prev := self.createObject(addr)
+ newObj, prev := self.createObject(addr)
if prev != nil {
- new.setBalance(prev.data.Balance)
+ newObj.setBalance(prev.data.Balance)
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/core/tx_cacher.go b/vendor/github.com/ethereum/go-ethereum/core/tx_cacher.go
index bcaa5ead..b1e5d676 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/tx_cacher.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/tx_cacher.go
@@ -22,7 +22,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
)
-// senderCacher is a concurrent transaction sender recoverer anc cacher.
+// senderCacher is a concurrent transaction sender recoverer and cacher.
var senderCacher = newTxSenderCacher(runtime.NumCPU())
// txSenderCacherRequest is a request for recovering transaction senders with a
diff --git a/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go b/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go
index fc35d1f2..552d3692 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go
@@ -172,6 +172,26 @@ func (config *TxPoolConfig) sanitize() TxPoolConfig {
log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
conf.PriceBump = DefaultTxPoolConfig.PriceBump
}
+ if conf.AccountSlots < 1 {
+ log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
+ conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
+ }
+ if conf.GlobalSlots < 1 {
+ log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
+ conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
+ }
+ if conf.AccountQueue < 1 {
+ log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
+ conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
+ }
+ if conf.GlobalQueue < 1 {
+ log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
+ conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
+ }
+ if conf.Lifetime < 1 {
+ log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
+ conf.Lifetime = DefaultTxPoolConfig.Lifetime
+ }
return conf
}
diff --git a/vendor/github.com/ethereum/go-ethereum/core/tx_pool_test.go b/vendor/github.com/ethereum/go-ethereum/core/tx_pool_test.go
index 5a592054..6d3bd7a5 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/tx_pool_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/tx_pool_test.go
@@ -1095,7 +1095,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
config := testTxPoolConfig
- config.GlobalSlots = 0
+ config.GlobalSlots = 1
pool := NewTxPool(config, params.TestChainConfig, blockchain)
defer pool.Stop()
diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/block.go b/vendor/github.com/ethereum/go-ethereum/core/types/block.go
index 9d11f60d..57905d8c 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/types/block.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/types/block.go
@@ -28,8 +28,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
var (
@@ -109,7 +109,7 @@ func (h *Header) Size() common.StorageSize {
}
func rlpHash(x interface{}) (h common.Hash) {
- hw := sha3.NewKeccak256()
+ hw := sha3.NewLegacyKeccak256()
rlp.Encode(hw, x)
hw.Sum(h[:0])
return h
diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/transaction.go b/vendor/github.com/ethereum/go-ethereum/core/types/transaction.go
index 7b53cac2..ba3d5de9 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/types/transaction.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/types/transaction.go
@@ -234,7 +234,7 @@ func (tx *Transaction) AsMessage(s Signer) (Message, error) {
}
// WithSignature returns a new transaction with the given signature.
-// This signature needs to be formatted as described in the yellow paper (v+27).
+// This signature needs to be in the [R || S || V] format where V is 0 or 1.
func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, error) {
r, s, v, err := signer.SignatureValues(tx, sig)
if err != nil {
diff --git a/vendor/github.com/ethereum/go-ethereum/core/vm/instructions.go b/vendor/github.com/ethereum/go-ethereum/core/vm/instructions.go
index 6696c6e3..5195e716 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/vm/instructions.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/vm/instructions.go
@@ -24,8 +24,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/params"
+ "golang.org/x/crypto/sha3"
)
var (
@@ -387,7 +387,7 @@ func opSha3(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory
data := memory.Get(offset.Int64(), size.Int64())
if interpreter.hasher == nil {
- interpreter.hasher = sha3.NewKeccak256().(keccakState)
+ interpreter.hasher = sha3.NewLegacyKeccak256().(keccakState)
} else {
interpreter.hasher.Reset()
}
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/crypto.go b/vendor/github.com/ethereum/go-ethereum/crypto/crypto.go
index 9b3e76d4..4567fafc 100644
--- a/vendor/github.com/ethereum/go-ethereum/crypto/crypto.go
+++ b/vendor/github.com/ethereum/go-ethereum/crypto/crypto.go
@@ -30,8 +30,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
var (
@@ -43,7 +43,7 @@ var errInvalidPubkey = errors.New("invalid secp256k1 public key")
// Keccak256 calculates and returns the Keccak256 hash of the input data.
func Keccak256(data ...[]byte) []byte {
- d := sha3.NewKeccak256()
+ d := sha3.NewLegacyKeccak256()
for _, b := range data {
d.Write(b)
}
@@ -53,7 +53,7 @@ func Keccak256(data ...[]byte) []byte {
// Keccak256Hash calculates and returns the Keccak256 hash of the input data,
// converting it to an internal Hash data structure.
func Keccak256Hash(data ...[]byte) (h common.Hash) {
- d := sha3.NewKeccak256()
+ d := sha3.NewLegacyKeccak256()
for _, b := range data {
d.Write(b)
}
@@ -63,7 +63,7 @@ func Keccak256Hash(data ...[]byte) (h common.Hash) {
// Keccak512 calculates and returns the Keccak512 hash of the input data.
func Keccak512(data ...[]byte) []byte {
- d := sha3.NewKeccak512()
+ d := sha3.NewLegacyKeccak512()
for _, b := range data {
d.Write(b)
}
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/secp256k1/curve.go b/vendor/github.com/ethereum/go-ethereum/crypto/secp256k1/curve.go
index 56be235b..5409ee1d 100644
--- a/vendor/github.com/ethereum/go-ethereum/crypto/secp256k1/curve.go
+++ b/vendor/github.com/ethereum/go-ethereum/crypto/secp256k1/curve.go
@@ -310,7 +310,7 @@ var theCurve = new(BitCurve)
func init() {
// See SEC 2 section 2.7.1
// curve parameters taken from:
- // http://www.secg.org/collateral/sec2_final.pdf
+ // http://www.secg.org/sec2-v2.pdf
theCurve.P, _ = new(big.Int).SetString("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 0)
theCurve.N, _ = new(big.Int).SetString("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 0)
theCurve.B, _ = new(big.Int).SetString("0x0000000000000000000000000000000000000000000000000000000000000007", 0)
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/LICENSE b/vendor/github.com/ethereum/go-ethereum/crypto/sha3/LICENSE
deleted file mode 100644
index 6a66aea5..00000000
--- a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/PATENTS b/vendor/github.com/ethereum/go-ethereum/crypto/sha3/PATENTS
deleted file mode 100644
index 73309904..00000000
--- a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/doc.go b/vendor/github.com/ethereum/go-ethereum/crypto/sha3/doc.go
deleted file mode 100644
index 3dab530f..00000000
--- a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/doc.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package sha3 implements the SHA-3 fixed-output-length hash functions and
-// the SHAKE variable-output-length hash functions defined by FIPS-202.
-//
-// Both types of hash function use the "sponge" construction and the Keccak
-// permutation. For a detailed specification see http://keccak.noekeon.org/
-//
-//
-// Guidance
-//
-// If you aren't sure what function you need, use SHAKE256 with at least 64
-// bytes of output. The SHAKE instances are faster than the SHA3 instances;
-// the latter have to allocate memory to conform to the hash.Hash interface.
-//
-// If you need a secret-key MAC (message authentication code), prepend the
-// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
-// output.
-//
-//
-// Security strengths
-//
-// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
-// strength against preimage attacks of x bits. Since they only produce "x"
-// bits of output, their collision-resistance is only "x/2" bits.
-//
-// The SHAKE-256 and -128 functions have a generic security strength of 256 and
-// 128 bits against all attacks, provided that at least 2x bits of their output
-// is used. Requesting more than 64 or 32 bytes of output, respectively, does
-// not increase the collision-resistance of the SHAKE functions.
-//
-//
-// The sponge construction
-//
-// A sponge builds a pseudo-random function from a public pseudo-random
-// permutation, by applying the permutation to a state of "rate + capacity"
-// bytes, but hiding "capacity" of the bytes.
-//
-// A sponge starts out with a zero state. To hash an input using a sponge, up
-// to "rate" bytes of the input are XORed into the sponge's state. The sponge
-// is then "full" and the permutation is applied to "empty" it. This process is
-// repeated until all the input has been "absorbed". The input is then padded.
-// The digest is "squeezed" from the sponge in the same way, except that output
-// output is copied out instead of input being XORed in.
-//
-// A sponge is parameterized by its generic security strength, which is equal
-// to half its capacity; capacity + rate is equal to the permutation's width.
-// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
-// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
-//
-//
-// Recommendations
-//
-// The SHAKE functions are recommended for most new uses. They can produce
-// output of arbitrary length. SHAKE256, with an output length of at least
-// 64 bytes, provides 256-bit security against all attacks. The Keccak team
-// recommends it for most applications upgrading from SHA2-512. (NIST chose a
-// much stronger, but much slower, sponge instance for SHA3-512.)
-//
-// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
-// They produce output of the same length, with the same security strengths
-// against all attacks. This means, in particular, that SHA3-256 only has
-// 128-bit collision resistance, because its output length is 32 bytes.
-package sha3
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/hashes.go b/vendor/github.com/ethereum/go-ethereum/crypto/sha3/hashes.go
deleted file mode 100644
index fa0d7b43..00000000
--- a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/hashes.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// This file provides functions for creating instances of the SHA-3
-// and SHAKE hash functions, as well as utility functions for hashing
-// bytes.
-
-import (
- "hash"
-)
-
-// NewKeccak256 creates a new Keccak-256 hash.
-func NewKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
-
-// NewKeccak512 creates a new Keccak-512 hash.
-func NewKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }
-
-// New224 creates a new SHA3-224 hash.
-// Its generic security strength is 224 bits against preimage attacks,
-// and 112 bits against collision attacks.
-func New224() hash.Hash { return &state{rate: 144, outputLen: 28, dsbyte: 0x06} }
-
-// New256 creates a new SHA3-256 hash.
-// Its generic security strength is 256 bits against preimage attacks,
-// and 128 bits against collision attacks.
-func New256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x06} }
-
-// New384 creates a new SHA3-384 hash.
-// Its generic security strength is 384 bits against preimage attacks,
-// and 192 bits against collision attacks.
-func New384() hash.Hash { return &state{rate: 104, outputLen: 48, dsbyte: 0x06} }
-
-// New512 creates a new SHA3-512 hash.
-// Its generic security strength is 512 bits against preimage attacks,
-// and 256 bits against collision attacks.
-func New512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x06} }
-
-// Sum224 returns the SHA3-224 digest of the data.
-func Sum224(data []byte) (digest [28]byte) {
- h := New224()
- h.Write(data)
- h.Sum(digest[:0])
- return
-}
-
-// Sum256 returns the SHA3-256 digest of the data.
-func Sum256(data []byte) (digest [32]byte) {
- h := New256()
- h.Write(data)
- h.Sum(digest[:0])
- return
-}
-
-// Sum384 returns the SHA3-384 digest of the data.
-func Sum384(data []byte) (digest [48]byte) {
- h := New384()
- h.Write(data)
- h.Sum(digest[:0])
- return
-}
-
-// Sum512 returns the SHA3-512 digest of the data.
-func Sum512(data []byte) (digest [64]byte) {
- h := New512()
- h.Write(data)
- h.Sum(digest[:0])
- return
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/keccakf.go b/vendor/github.com/ethereum/go-ethereum/crypto/sha3/keccakf.go
deleted file mode 100644
index 46d03ed3..00000000
--- a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/keccakf.go
+++ /dev/null
@@ -1,412 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !amd64 appengine gccgo
-
-package sha3
-
-// rc stores the round constants for use in the ι step.
-var rc = [24]uint64{
- 0x0000000000000001,
- 0x0000000000008082,
- 0x800000000000808A,
- 0x8000000080008000,
- 0x000000000000808B,
- 0x0000000080000001,
- 0x8000000080008081,
- 0x8000000000008009,
- 0x000000000000008A,
- 0x0000000000000088,
- 0x0000000080008009,
- 0x000000008000000A,
- 0x000000008000808B,
- 0x800000000000008B,
- 0x8000000000008089,
- 0x8000000000008003,
- 0x8000000000008002,
- 0x8000000000000080,
- 0x000000000000800A,
- 0x800000008000000A,
- 0x8000000080008081,
- 0x8000000000008080,
- 0x0000000080000001,
- 0x8000000080008008,
-}
-
-// keccakF1600 applies the Keccak permutation to a 1600b-wide
-// state represented as a slice of 25 uint64s.
-func keccakF1600(a *[25]uint64) {
- // Implementation translated from Keccak-inplace.c
- // in the keccak reference code.
- var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
-
- for i := 0; i < 24; i += 4 {
- // Combines the 5 steps in each round into 2 steps.
- // Unrolls 4 rounds per loop and spreads some steps across rounds.
-
- // Round 1
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[6] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[12] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[18] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[24] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[16] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[22] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[3] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[1] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[7] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[19] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[11] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[23] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[4] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[2] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[8] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[14] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- // Round 2
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[16] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[7] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[23] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[14] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[11] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[2] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[18] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[6] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[22] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[4] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[1] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[8] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[24] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[12] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[3] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[19] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- // Round 3
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[11] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[22] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[8] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[19] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[1] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[12] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[23] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[16] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[2] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[24] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[6] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[3] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[14] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[7] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[18] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[4] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- // Round 4
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[1] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[2] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[3] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[4] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[6] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[7] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[8] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[11] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[12] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[14] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[16] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[18] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[19] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[22] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[23] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[24] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
- }
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/keccakf_amd64.go b/vendor/github.com/ethereum/go-ethereum/crypto/sha3/keccakf_amd64.go
deleted file mode 100644
index de035c55..00000000
--- a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/keccakf_amd64.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build amd64,!appengine,!gccgo
-
-package sha3
-
-// This function is implemented in keccakf_amd64.s.
-
-//go:noescape
-
-func keccakF1600(state *[25]uint64)
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/keccakf_amd64.s b/vendor/github.com/ethereum/go-ethereum/crypto/sha3/keccakf_amd64.s
deleted file mode 100644
index f88533ac..00000000
--- a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/keccakf_amd64.s
+++ /dev/null
@@ -1,390 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build amd64,!appengine,!gccgo
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources at https://github.com/gvanas/KeccakCodePackage
-
-// Offsets in state
-#define _ba (0*8)
-#define _be (1*8)
-#define _bi (2*8)
-#define _bo (3*8)
-#define _bu (4*8)
-#define _ga (5*8)
-#define _ge (6*8)
-#define _gi (7*8)
-#define _go (8*8)
-#define _gu (9*8)
-#define _ka (10*8)
-#define _ke (11*8)
-#define _ki (12*8)
-#define _ko (13*8)
-#define _ku (14*8)
-#define _ma (15*8)
-#define _me (16*8)
-#define _mi (17*8)
-#define _mo (18*8)
-#define _mu (19*8)
-#define _sa (20*8)
-#define _se (21*8)
-#define _si (22*8)
-#define _so (23*8)
-#define _su (24*8)
-
-// Temporary registers
-#define rT1 AX
-
-// Round vars
-#define rpState DI
-#define rpStack SP
-
-#define rDa BX
-#define rDe CX
-#define rDi DX
-#define rDo R8
-#define rDu R9
-
-#define rBa R10
-#define rBe R11
-#define rBi R12
-#define rBo R13
-#define rBu R14
-
-#define rCa SI
-#define rCe BP
-#define rCi rBi
-#define rCo rBo
-#define rCu R15
-
-#define MOVQ_RBI_RCE MOVQ rBi, rCe
-#define XORQ_RT1_RCA XORQ rT1, rCa
-#define XORQ_RT1_RCE XORQ rT1, rCe
-#define XORQ_RBA_RCU XORQ rBa, rCu
-#define XORQ_RBE_RCU XORQ rBe, rCu
-#define XORQ_RDU_RCU XORQ rDu, rCu
-#define XORQ_RDA_RCA XORQ rDa, rCa
-#define XORQ_RDE_RCE XORQ rDe, rCe
-
-#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \
- /* Prepare round */ \
- MOVQ rCe, rDa; \
- ROLQ $1, rDa; \
- \
- MOVQ _bi(iState), rCi; \
- XORQ _gi(iState), rDi; \
- XORQ rCu, rDa; \
- XORQ _ki(iState), rCi; \
- XORQ _mi(iState), rDi; \
- XORQ rDi, rCi; \
- \
- MOVQ rCi, rDe; \
- ROLQ $1, rDe; \
- \
- MOVQ _bo(iState), rCo; \
- XORQ _go(iState), rDo; \
- XORQ rCa, rDe; \
- XORQ _ko(iState), rCo; \
- XORQ _mo(iState), rDo; \
- XORQ rDo, rCo; \
- \
- MOVQ rCo, rDi; \
- ROLQ $1, rDi; \
- \
- MOVQ rCu, rDo; \
- XORQ rCe, rDi; \
- ROLQ $1, rDo; \
- \
- MOVQ rCa, rDu; \
- XORQ rCi, rDo; \
- ROLQ $1, rDu; \
- \
- /* Result b */ \
- MOVQ _ba(iState), rBa; \
- MOVQ _ge(iState), rBe; \
- XORQ rCo, rDu; \
- MOVQ _ki(iState), rBi; \
- MOVQ _mo(iState), rBo; \
- MOVQ _su(iState), rBu; \
- XORQ rDe, rBe; \
- ROLQ $44, rBe; \
- XORQ rDi, rBi; \
- XORQ rDa, rBa; \
- ROLQ $43, rBi; \
- \
- MOVQ rBe, rCa; \
- MOVQ rc, rT1; \
- ORQ rBi, rCa; \
- XORQ rBa, rT1; \
- XORQ rT1, rCa; \
- MOVQ rCa, _ba(oState); \
- \
- XORQ rDu, rBu; \
- ROLQ $14, rBu; \
- MOVQ rBa, rCu; \
- ANDQ rBe, rCu; \
- XORQ rBu, rCu; \
- MOVQ rCu, _bu(oState); \
- \
- XORQ rDo, rBo; \
- ROLQ $21, rBo; \
- MOVQ rBo, rT1; \
- ANDQ rBu, rT1; \
- XORQ rBi, rT1; \
- MOVQ rT1, _bi(oState); \
- \
- NOTQ rBi; \
- ORQ rBa, rBu; \
- ORQ rBo, rBi; \
- XORQ rBo, rBu; \
- XORQ rBe, rBi; \
- MOVQ rBu, _bo(oState); \
- MOVQ rBi, _be(oState); \
- B_RBI_RCE; \
- \
- /* Result g */ \
- MOVQ _gu(iState), rBe; \
- XORQ rDu, rBe; \
- MOVQ _ka(iState), rBi; \
- ROLQ $20, rBe; \
- XORQ rDa, rBi; \
- ROLQ $3, rBi; \
- MOVQ _bo(iState), rBa; \
- MOVQ rBe, rT1; \
- ORQ rBi, rT1; \
- XORQ rDo, rBa; \
- MOVQ _me(iState), rBo; \
- MOVQ _si(iState), rBu; \
- ROLQ $28, rBa; \
- XORQ rBa, rT1; \
- MOVQ rT1, _ga(oState); \
- G_RT1_RCA; \
- \
- XORQ rDe, rBo; \
- ROLQ $45, rBo; \
- MOVQ rBi, rT1; \
- ANDQ rBo, rT1; \
- XORQ rBe, rT1; \
- MOVQ rT1, _ge(oState); \
- G_RT1_RCE; \
- \
- XORQ rDi, rBu; \
- ROLQ $61, rBu; \
- MOVQ rBu, rT1; \
- ORQ rBa, rT1; \
- XORQ rBo, rT1; \
- MOVQ rT1, _go(oState); \
- \
- ANDQ rBe, rBa; \
- XORQ rBu, rBa; \
- MOVQ rBa, _gu(oState); \
- NOTQ rBu; \
- G_RBA_RCU; \
- \
- ORQ rBu, rBo; \
- XORQ rBi, rBo; \
- MOVQ rBo, _gi(oState); \
- \
- /* Result k */ \
- MOVQ _be(iState), rBa; \
- MOVQ _gi(iState), rBe; \
- MOVQ _ko(iState), rBi; \
- MOVQ _mu(iState), rBo; \
- MOVQ _sa(iState), rBu; \
- XORQ rDi, rBe; \
- ROLQ $6, rBe; \
- XORQ rDo, rBi; \
- ROLQ $25, rBi; \
- MOVQ rBe, rT1; \
- ORQ rBi, rT1; \
- XORQ rDe, rBa; \
- ROLQ $1, rBa; \
- XORQ rBa, rT1; \
- MOVQ rT1, _ka(oState); \
- K_RT1_RCA; \
- \
- XORQ rDu, rBo; \
- ROLQ $8, rBo; \
- MOVQ rBi, rT1; \
- ANDQ rBo, rT1; \
- XORQ rBe, rT1; \
- MOVQ rT1, _ke(oState); \
- K_RT1_RCE; \
- \
- XORQ rDa, rBu; \
- ROLQ $18, rBu; \
- NOTQ rBo; \
- MOVQ rBo, rT1; \
- ANDQ rBu, rT1; \
- XORQ rBi, rT1; \
- MOVQ rT1, _ki(oState); \
- \
- MOVQ rBu, rT1; \
- ORQ rBa, rT1; \
- XORQ rBo, rT1; \
- MOVQ rT1, _ko(oState); \
- \
- ANDQ rBe, rBa; \
- XORQ rBu, rBa; \
- MOVQ rBa, _ku(oState); \
- K_RBA_RCU; \
- \
- /* Result m */ \
- MOVQ _ga(iState), rBe; \
- XORQ rDa, rBe; \
- MOVQ _ke(iState), rBi; \
- ROLQ $36, rBe; \
- XORQ rDe, rBi; \
- MOVQ _bu(iState), rBa; \
- ROLQ $10, rBi; \
- MOVQ rBe, rT1; \
- MOVQ _mi(iState), rBo; \
- ANDQ rBi, rT1; \
- XORQ rDu, rBa; \
- MOVQ _so(iState), rBu; \
- ROLQ $27, rBa; \
- XORQ rBa, rT1; \
- MOVQ rT1, _ma(oState); \
- M_RT1_RCA; \
- \
- XORQ rDi, rBo; \
- ROLQ $15, rBo; \
- MOVQ rBi, rT1; \
- ORQ rBo, rT1; \
- XORQ rBe, rT1; \
- MOVQ rT1, _me(oState); \
- M_RT1_RCE; \
- \
- XORQ rDo, rBu; \
- ROLQ $56, rBu; \
- NOTQ rBo; \
- MOVQ rBo, rT1; \
- ORQ rBu, rT1; \
- XORQ rBi, rT1; \
- MOVQ rT1, _mi(oState); \
- \
- ORQ rBa, rBe; \
- XORQ rBu, rBe; \
- MOVQ rBe, _mu(oState); \
- \
- ANDQ rBa, rBu; \
- XORQ rBo, rBu; \
- MOVQ rBu, _mo(oState); \
- M_RBE_RCU; \
- \
- /* Result s */ \
- MOVQ _bi(iState), rBa; \
- MOVQ _go(iState), rBe; \
- MOVQ _ku(iState), rBi; \
- XORQ rDi, rBa; \
- MOVQ _ma(iState), rBo; \
- ROLQ $62, rBa; \
- XORQ rDo, rBe; \
- MOVQ _se(iState), rBu; \
- ROLQ $55, rBe; \
- \
- XORQ rDu, rBi; \
- MOVQ rBa, rDu; \
- XORQ rDe, rBu; \
- ROLQ $2, rBu; \
- ANDQ rBe, rDu; \
- XORQ rBu, rDu; \
- MOVQ rDu, _su(oState); \
- \
- ROLQ $39, rBi; \
- S_RDU_RCU; \
- NOTQ rBe; \
- XORQ rDa, rBo; \
- MOVQ rBe, rDa; \
- ANDQ rBi, rDa; \
- XORQ rBa, rDa; \
- MOVQ rDa, _sa(oState); \
- S_RDA_RCA; \
- \
- ROLQ $41, rBo; \
- MOVQ rBi, rDe; \
- ORQ rBo, rDe; \
- XORQ rBe, rDe; \
- MOVQ rDe, _se(oState); \
- S_RDE_RCE; \
- \
- MOVQ rBo, rDi; \
- MOVQ rBu, rDo; \
- ANDQ rBu, rDi; \
- ORQ rBa, rDo; \
- XORQ rBi, rDi; \
- XORQ rBo, rDo; \
- MOVQ rDi, _si(oState); \
- MOVQ rDo, _so(oState) \
-
-// func keccakF1600(state *[25]uint64)
-TEXT ·keccakF1600(SB), 0, $200-8
- MOVQ state+0(FP), rpState
-
- // Convert the user state into an internal state
- NOTQ _be(rpState)
- NOTQ _bi(rpState)
- NOTQ _go(rpState)
- NOTQ _ki(rpState)
- NOTQ _mi(rpState)
- NOTQ _sa(rpState)
-
- // Execute the KeccakF permutation
- MOVQ _ba(rpState), rCa
- MOVQ _be(rpState), rCe
- MOVQ _bu(rpState), rCu
-
- XORQ _ga(rpState), rCa
- XORQ _ge(rpState), rCe
- XORQ _gu(rpState), rCu
-
- XORQ _ka(rpState), rCa
- XORQ _ke(rpState), rCe
- XORQ _ku(rpState), rCu
-
- XORQ _ma(rpState), rCa
- XORQ _me(rpState), rCe
- XORQ _mu(rpState), rCu
-
- XORQ _sa(rpState), rCa
- XORQ _se(rpState), rCe
- MOVQ _si(rpState), rDi
- MOVQ _so(rpState), rDo
- XORQ _su(rpState), rCu
-
- mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP)
-
- // Revert the internal state to the user state
- NOTQ _be(rpState)
- NOTQ _bi(rpState)
- NOTQ _go(rpState)
- NOTQ _ki(rpState)
- NOTQ _mi(rpState)
- NOTQ _sa(rpState)
-
- RET
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/register.go b/vendor/github.com/ethereum/go-ethereum/crypto/sha3/register.go
deleted file mode 100644
index 3cf6a22e..00000000
--- a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/register.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.4
-
-package sha3
-
-import (
- "crypto"
-)
-
-func init() {
- crypto.RegisterHash(crypto.SHA3_224, New224)
- crypto.RegisterHash(crypto.SHA3_256, New256)
- crypto.RegisterHash(crypto.SHA3_384, New384)
- crypto.RegisterHash(crypto.SHA3_512, New512)
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/sha3.go b/vendor/github.com/ethereum/go-ethereum/crypto/sha3/sha3.go
deleted file mode 100644
index b12a35c8..00000000
--- a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/sha3.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// spongeDirection indicates the direction bytes are flowing through the sponge.
-type spongeDirection int
-
-const (
- // spongeAbsorbing indicates that the sponge is absorbing input.
- spongeAbsorbing spongeDirection = iota
- // spongeSqueezing indicates that the sponge is being squeezed.
- spongeSqueezing
-)
-
-const (
- // maxRate is the maximum size of the internal buffer. SHAKE-256
- // currently needs the largest buffer.
- maxRate = 168
-)
-
-type state struct {
- // Generic sponge components.
- a [25]uint64 // main state of the hash
- buf []byte // points into storage
- rate int // the number of bytes of state to use
-
- // dsbyte contains the "domain separation" bits and the first bit of
- // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
- // SHA-3 and SHAKE functions by appending bitstrings to the message.
- // Using a little-endian bit-ordering convention, these are "01" for SHA-3
- // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
- // padding rule from section 5.1 is applied to pad the message to a multiple
- // of the rate, which involves adding a "1" bit, zero or more "0" bits, and
- // a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
- // giving 00000110b (0x06) and 00011111b (0x1f).
- // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
- // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
- // Extendable-Output Functions (May 2014)"
- dsbyte byte
- storage [maxRate]byte
-
- // Specific to SHA-3 and SHAKE.
- outputLen int // the default output size in bytes
- state spongeDirection // whether the sponge is absorbing or squeezing
-}
-
-// BlockSize returns the rate of sponge underlying this hash function.
-func (d *state) BlockSize() int { return d.rate }
-
-// Size returns the output size of the hash function in bytes.
-func (d *state) Size() int { return d.outputLen }
-
-// Reset clears the internal state by zeroing the sponge state and
-// the byte buffer, and setting Sponge.state to absorbing.
-func (d *state) Reset() {
- // Zero the permutation's state.
- for i := range d.a {
- d.a[i] = 0
- }
- d.state = spongeAbsorbing
- d.buf = d.storage[:0]
-}
-
-func (d *state) clone() *state {
- ret := *d
- if ret.state == spongeAbsorbing {
- ret.buf = ret.storage[:len(ret.buf)]
- } else {
- ret.buf = ret.storage[d.rate-cap(d.buf) : d.rate]
- }
-
- return &ret
-}
-
-// permute applies the KeccakF-1600 permutation. It handles
-// any input-output buffering.
-func (d *state) permute() {
- switch d.state {
- case spongeAbsorbing:
- // If we're absorbing, we need to xor the input into the state
- // before applying the permutation.
- xorIn(d, d.buf)
- d.buf = d.storage[:0]
- keccakF1600(&d.a)
- case spongeSqueezing:
- // If we're squeezing, we need to apply the permutatin before
- // copying more output.
- keccakF1600(&d.a)
- d.buf = d.storage[:d.rate]
- copyOut(d, d.buf)
- }
-}
-
-// pads appends the domain separation bits in dsbyte, applies
-// the multi-bitrate 10..1 padding rule, and permutes the state.
-func (d *state) padAndPermute(dsbyte byte) {
- if d.buf == nil {
- d.buf = d.storage[:0]
- }
- // Pad with this instance's domain-separator bits. We know that there's
- // at least one byte of space in d.buf because, if it were full,
- // permute would have been called to empty it. dsbyte also contains the
- // first one bit for the padding. See the comment in the state struct.
- d.buf = append(d.buf, dsbyte)
- zerosStart := len(d.buf)
- d.buf = d.storage[:d.rate]
- for i := zerosStart; i < d.rate; i++ {
- d.buf[i] = 0
- }
- // This adds the final one bit for the padding. Because of the way that
- // bits are numbered from the LSB upwards, the final bit is the MSB of
- // the last byte.
- d.buf[d.rate-1] ^= 0x80
- // Apply the permutation
- d.permute()
- d.state = spongeSqueezing
- d.buf = d.storage[:d.rate]
- copyOut(d, d.buf)
-}
-
-// Write absorbs more data into the hash's state. It produces an error
-// if more data is written to the ShakeHash after writing
-func (d *state) Write(p []byte) (written int, err error) {
- if d.state != spongeAbsorbing {
- panic("sha3: write to sponge after read")
- }
- if d.buf == nil {
- d.buf = d.storage[:0]
- }
- written = len(p)
-
- for len(p) > 0 {
- if len(d.buf) == 0 && len(p) >= d.rate {
- // The fast path; absorb a full "rate" bytes of input and apply the permutation.
- xorIn(d, p[:d.rate])
- p = p[d.rate:]
- keccakF1600(&d.a)
- } else {
- // The slow path; buffer the input until we can fill the sponge, and then xor it in.
- todo := d.rate - len(d.buf)
- if todo > len(p) {
- todo = len(p)
- }
- d.buf = append(d.buf, p[:todo]...)
- p = p[todo:]
-
- // If the sponge is full, apply the permutation.
- if len(d.buf) == d.rate {
- d.permute()
- }
- }
- }
-
- return
-}
-
-// Read squeezes an arbitrary number of bytes from the sponge.
-func (d *state) Read(out []byte) (n int, err error) {
- // If we're still absorbing, pad and apply the permutation.
- if d.state == spongeAbsorbing {
- d.padAndPermute(d.dsbyte)
- }
-
- n = len(out)
-
- // Now, do the squeezing.
- for len(out) > 0 {
- n := copy(out, d.buf)
- d.buf = d.buf[n:]
- out = out[n:]
-
- // Apply the permutation if we've squeezed the sponge dry.
- if len(d.buf) == 0 {
- d.permute()
- }
- }
-
- return
-}
-
-// Sum applies padding to the hash state and then squeezes out the desired
-// number of output bytes.
-func (d *state) Sum(in []byte) []byte {
- // Make a copy of the original hash so that caller can keep writing
- // and summing.
- dup := d.clone()
- hash := make([]byte, dup.outputLen)
- dup.Read(hash)
- return append(in, hash...)
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/sha3_test.go b/vendor/github.com/ethereum/go-ethereum/crypto/sha3/sha3_test.go
deleted file mode 100644
index 0e33676c..00000000
--- a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/sha3_test.go
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// Tests include all the ShortMsgKATs provided by the Keccak team at
-// https://github.com/gvanas/KeccakCodePackage
-//
-// They only include the zero-bit case of the bitwise testvectors
-// published by NIST in the draft of FIPS-202.
-
-import (
- "bytes"
- "compress/flate"
- "encoding/hex"
- "encoding/json"
- "hash"
- "os"
- "strings"
- "testing"
-)
-
-const (
- testString = "brekeccakkeccak koax koax"
- katFilename = "testdata/keccakKats.json.deflate"
-)
-
-// Internal-use instances of SHAKE used to test against KATs.
-func newHashShake128() hash.Hash {
- return &state{rate: 168, dsbyte: 0x1f, outputLen: 512}
-}
-func newHashShake256() hash.Hash {
- return &state{rate: 136, dsbyte: 0x1f, outputLen: 512}
-}
-
-// testDigests contains functions returning hash.Hash instances
-// with output-length equal to the KAT length for both SHA-3 and
-// SHAKE instances.
-var testDigests = map[string]func() hash.Hash{
- "SHA3-224": New224,
- "SHA3-256": New256,
- "SHA3-384": New384,
- "SHA3-512": New512,
- "SHAKE128": newHashShake128,
- "SHAKE256": newHashShake256,
-}
-
-// testShakes contains functions that return ShakeHash instances for
-// testing the ShakeHash-specific interface.
-var testShakes = map[string]func() ShakeHash{
- "SHAKE128": NewShake128,
- "SHAKE256": NewShake256,
-}
-
-// structs used to marshal JSON test-cases.
-type KeccakKats struct {
- Kats map[string][]struct {
- Digest string `json:"digest"`
- Length int64 `json:"length"`
- Message string `json:"message"`
- }
-}
-
-func testUnalignedAndGeneric(t *testing.T, testf func(impl string)) {
- xorInOrig, copyOutOrig := xorIn, copyOut
- xorIn, copyOut = xorInGeneric, copyOutGeneric
- testf("generic")
- if xorImplementationUnaligned != "generic" {
- xorIn, copyOut = xorInUnaligned, copyOutUnaligned
- testf("unaligned")
- }
- xorIn, copyOut = xorInOrig, copyOutOrig
-}
-
-// TestKeccakKats tests the SHA-3 and Shake implementations against all the
-// ShortMsgKATs from https://github.com/gvanas/KeccakCodePackage
-// (The testvectors are stored in keccakKats.json.deflate due to their length.)
-func TestKeccakKats(t *testing.T) {
- testUnalignedAndGeneric(t, func(impl string) {
- // Read the KATs.
- deflated, err := os.Open(katFilename)
- if err != nil {
- t.Errorf("error opening %s: %s", katFilename, err)
- }
- file := flate.NewReader(deflated)
- dec := json.NewDecoder(file)
- var katSet KeccakKats
- err = dec.Decode(&katSet)
- if err != nil {
- t.Errorf("error decoding KATs: %s", err)
- }
-
- // Do the KATs.
- for functionName, kats := range katSet.Kats {
- d := testDigests[functionName]()
- for _, kat := range kats {
- d.Reset()
- in, err := hex.DecodeString(kat.Message)
- if err != nil {
- t.Errorf("error decoding KAT: %s", err)
- }
- d.Write(in[:kat.Length/8])
- got := strings.ToUpper(hex.EncodeToString(d.Sum(nil)))
- if got != kat.Digest {
- t.Errorf("function=%s, implementation=%s, length=%d\nmessage:\n %s\ngot:\n %s\nwanted:\n %s",
- functionName, impl, kat.Length, kat.Message, got, kat.Digest)
- t.Logf("wanted %+v", kat)
- t.FailNow()
- }
- continue
- }
- }
- })
-}
-
-// TestUnalignedWrite tests that writing data in an arbitrary pattern with
-// small input buffers.
-func TestUnalignedWrite(t *testing.T) {
- testUnalignedAndGeneric(t, func(impl string) {
- buf := sequentialBytes(0x10000)
- for alg, df := range testDigests {
- d := df()
- d.Reset()
- d.Write(buf)
- want := d.Sum(nil)
- d.Reset()
- for i := 0; i < len(buf); {
- // Cycle through offsets which make a 137 byte sequence.
- // Because 137 is prime this sequence should exercise all corner cases.
- offsets := [17]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1}
- for _, j := range offsets {
- if v := len(buf) - i; v < j {
- j = v
- }
- d.Write(buf[i : i+j])
- i += j
- }
- }
- got := d.Sum(nil)
- if !bytes.Equal(got, want) {
- t.Errorf("Unaligned writes, implementation=%s, alg=%s\ngot %q, want %q", impl, alg, got, want)
- }
- }
- })
-}
-
-// TestAppend checks that appending works when reallocation is necessary.
-func TestAppend(t *testing.T) {
- testUnalignedAndGeneric(t, func(impl string) {
- d := New224()
-
- for capacity := 2; capacity <= 66; capacity += 64 {
- // The first time around the loop, Sum will have to reallocate.
- // The second time, it will not.
- buf := make([]byte, 2, capacity)
- d.Reset()
- d.Write([]byte{0xcc})
- buf = d.Sum(buf)
- expected := "0000DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
- if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
- t.Errorf("got %s, want %s", got, expected)
- }
- }
- })
-}
-
-// TestAppendNoRealloc tests that appending works when no reallocation is necessary.
-func TestAppendNoRealloc(t *testing.T) {
- testUnalignedAndGeneric(t, func(impl string) {
- buf := make([]byte, 1, 200)
- d := New224()
- d.Write([]byte{0xcc})
- buf = d.Sum(buf)
- expected := "00DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
- if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
- t.Errorf("%s: got %s, want %s", impl, got, expected)
- }
- })
-}
-
-// TestSqueezing checks that squeezing the full output a single time produces
-// the same output as repeatedly squeezing the instance.
-func TestSqueezing(t *testing.T) {
- testUnalignedAndGeneric(t, func(impl string) {
- for functionName, newShakeHash := range testShakes {
- d0 := newShakeHash()
- d0.Write([]byte(testString))
- ref := make([]byte, 32)
- d0.Read(ref)
-
- d1 := newShakeHash()
- d1.Write([]byte(testString))
- var multiple []byte
- for range ref {
- one := make([]byte, 1)
- d1.Read(one)
- multiple = append(multiple, one...)
- }
- if !bytes.Equal(ref, multiple) {
- t.Errorf("%s (%s): squeezing %d bytes one at a time failed", functionName, impl, len(ref))
- }
- }
- })
-}
-
-// sequentialBytes produces a buffer of size consecutive bytes 0x00, 0x01, ..., used for testing.
-func sequentialBytes(size int) []byte {
- result := make([]byte, size)
- for i := range result {
- result[i] = byte(i)
- }
- return result
-}
-
-// BenchmarkPermutationFunction measures the speed of the permutation function
-// with no input data.
-func BenchmarkPermutationFunction(b *testing.B) {
- b.SetBytes(int64(200))
- var lanes [25]uint64
- for i := 0; i < b.N; i++ {
- keccakF1600(&lanes)
- }
-}
-
-// benchmarkHash tests the speed to hash num buffers of buflen each.
-func benchmarkHash(b *testing.B, h hash.Hash, size, num int) {
- b.StopTimer()
- h.Reset()
- data := sequentialBytes(size)
- b.SetBytes(int64(size * num))
- b.StartTimer()
-
- var state []byte
- for i := 0; i < b.N; i++ {
- for j := 0; j < num; j++ {
- h.Write(data)
- }
- state = h.Sum(state[:0])
- }
- b.StopTimer()
- h.Reset()
-}
-
-// benchmarkShake is specialized to the Shake instances, which don't
-// require a copy on reading output.
-func benchmarkShake(b *testing.B, h ShakeHash, size, num int) {
- b.StopTimer()
- h.Reset()
- data := sequentialBytes(size)
- d := make([]byte, 32)
-
- b.SetBytes(int64(size * num))
- b.StartTimer()
-
- for i := 0; i < b.N; i++ {
- h.Reset()
- for j := 0; j < num; j++ {
- h.Write(data)
- }
- h.Read(d)
- }
-}
-
-func BenchmarkSha3_512_MTU(b *testing.B) { benchmarkHash(b, New512(), 1350, 1) }
-func BenchmarkSha3_384_MTU(b *testing.B) { benchmarkHash(b, New384(), 1350, 1) }
-func BenchmarkSha3_256_MTU(b *testing.B) { benchmarkHash(b, New256(), 1350, 1) }
-func BenchmarkSha3_224_MTU(b *testing.B) { benchmarkHash(b, New224(), 1350, 1) }
-
-func BenchmarkShake128_MTU(b *testing.B) { benchmarkShake(b, NewShake128(), 1350, 1) }
-func BenchmarkShake256_MTU(b *testing.B) { benchmarkShake(b, NewShake256(), 1350, 1) }
-func BenchmarkShake256_16x(b *testing.B) { benchmarkShake(b, NewShake256(), 16, 1024) }
-func BenchmarkShake256_1MiB(b *testing.B) { benchmarkShake(b, NewShake256(), 1024, 1024) }
-
-func BenchmarkSha3_512_1MiB(b *testing.B) { benchmarkHash(b, New512(), 1024, 1024) }
-
-func Example_sum() {
- buf := []byte("some data to hash")
- // A hash needs to be 64 bytes long to have 256-bit collision resistance.
- h := make([]byte, 64)
- // Compute a 64-byte hash of buf and put it in h.
- ShakeSum256(h, buf)
-}
-
-func Example_mac() {
- k := []byte("this is a secret key; you should generate a strong random key that's at least 32 bytes long")
- buf := []byte("and this is some data to authenticate")
- // A MAC with 32 bytes of output has 256-bit security strength -- if you use at least a 32-byte-long key.
- h := make([]byte, 32)
- d := NewShake256()
- // Write the key into the hash.
- d.Write(k)
- // Now write the data.
- d.Write(buf)
- // Read 32 bytes of output from the hash into h.
- d.Read(h)
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/shake.go b/vendor/github.com/ethereum/go-ethereum/crypto/sha3/shake.go
deleted file mode 100644
index 841f9860..00000000
--- a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/shake.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// This file defines the ShakeHash interface, and provides
-// functions for creating SHAKE instances, as well as utility
-// functions for hashing bytes to arbitrary-length output.
-
-import (
- "io"
-)
-
-// ShakeHash defines the interface to hash functions that
-// support arbitrary-length output.
-type ShakeHash interface {
- // Write absorbs more data into the hash's state. It panics if input is
- // written to it after output has been read from it.
- io.Writer
-
- // Read reads more output from the hash; reading affects the hash's
- // state. (ShakeHash.Read is thus very different from Hash.Sum)
- // It never returns an error.
- io.Reader
-
- // Clone returns a copy of the ShakeHash in its current state.
- Clone() ShakeHash
-
- // Reset resets the ShakeHash to its initial state.
- Reset()
-}
-
-func (d *state) Clone() ShakeHash {
- return d.clone()
-}
-
-// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
-// Its generic security strength is 128 bits against all attacks if at
-// least 32 bytes of its output are used.
-func NewShake128() ShakeHash { return &state{rate: 168, dsbyte: 0x1f} }
-
-// NewShake256 creates a new SHAKE128 variable-output-length ShakeHash.
-// Its generic security strength is 256 bits against all attacks if
-// at least 64 bytes of its output are used.
-func NewShake256() ShakeHash { return &state{rate: 136, dsbyte: 0x1f} }
-
-// ShakeSum128 writes an arbitrary-length digest of data into hash.
-func ShakeSum128(hash, data []byte) {
- h := NewShake128()
- h.Write(data)
- h.Read(hash)
-}
-
-// ShakeSum256 writes an arbitrary-length digest of data into hash.
-func ShakeSum256(hash, data []byte) {
- h := NewShake256()
- h.Write(data)
- h.Read(hash)
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/testdata/keccakKats.json.deflate b/vendor/github.com/ethereum/go-ethereum/crypto/sha3/testdata/keccakKats.json.deflate
deleted file mode 100644
index 62e85ae2..00000000
Binary files a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/testdata/keccakKats.json.deflate and /dev/null differ
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/xor.go b/vendor/github.com/ethereum/go-ethereum/crypto/sha3/xor.go
deleted file mode 100644
index 46a0d63a..00000000
--- a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/xor.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !amd64,!386,!ppc64le appengine
-
-package sha3
-
-var (
- xorIn = xorInGeneric
- copyOut = copyOutGeneric
- xorInUnaligned = xorInGeneric
- copyOutUnaligned = copyOutGeneric
-)
-
-const xorImplementationUnaligned = "generic"
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/xor_generic.go b/vendor/github.com/ethereum/go-ethereum/crypto/sha3/xor_generic.go
deleted file mode 100644
index fd35f02e..00000000
--- a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/xor_generic.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-import "encoding/binary"
-
-// xorInGeneric xors the bytes in buf into the state; it
-// makes no non-portable assumptions about memory layout
-// or alignment.
-func xorInGeneric(d *state, buf []byte) {
- n := len(buf) / 8
-
- for i := 0; i < n; i++ {
- a := binary.LittleEndian.Uint64(buf)
- d.a[i] ^= a
- buf = buf[8:]
- }
-}
-
-// copyOutGeneric copies ulint64s to a byte buffer.
-func copyOutGeneric(d *state, b []byte) {
- for i := 0; len(b) >= 8; i++ {
- binary.LittleEndian.PutUint64(b, d.a[i])
- b = b[8:]
- }
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/xor_unaligned.go b/vendor/github.com/ethereum/go-ethereum/crypto/sha3/xor_unaligned.go
deleted file mode 100644
index 929a486a..00000000
--- a/vendor/github.com/ethereum/go-ethereum/crypto/sha3/xor_unaligned.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build amd64 386 ppc64le
-// +build !appengine
-
-package sha3
-
-import "unsafe"
-
-func xorInUnaligned(d *state, buf []byte) {
- bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))
- n := len(buf)
- if n >= 72 {
- d.a[0] ^= bw[0]
- d.a[1] ^= bw[1]
- d.a[2] ^= bw[2]
- d.a[3] ^= bw[3]
- d.a[4] ^= bw[4]
- d.a[5] ^= bw[5]
- d.a[6] ^= bw[6]
- d.a[7] ^= bw[7]
- d.a[8] ^= bw[8]
- }
- if n >= 104 {
- d.a[9] ^= bw[9]
- d.a[10] ^= bw[10]
- d.a[11] ^= bw[11]
- d.a[12] ^= bw[12]
- }
- if n >= 136 {
- d.a[13] ^= bw[13]
- d.a[14] ^= bw[14]
- d.a[15] ^= bw[15]
- d.a[16] ^= bw[16]
- }
- if n >= 144 {
- d.a[17] ^= bw[17]
- }
- if n >= 168 {
- d.a[18] ^= bw[18]
- d.a[19] ^= bw[19]
- d.a[20] ^= bw[20]
- }
-}
-
-func copyOutUnaligned(d *state, buf []byte) {
- ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
- copy(buf, ab[:])
-}
-
-var (
- xorIn = xorInUnaligned
- copyOut = copyOutUnaligned
-)
-
-const xorImplementationUnaligned = "unaligned"
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/backend.go b/vendor/github.com/ethereum/go-ethereum/eth/backend.go
index 354fc17d..2a9d56c5 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/backend.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/backend.go
@@ -143,8 +143,10 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if !config.SkipBcVersionCheck {
bcVersion := rawdb.ReadDatabaseVersion(chainDb)
- if bcVersion != core.BlockChainVersion && bcVersion != 0 {
- return nil, fmt.Errorf("Blockchain DB version mismatch (%d / %d).\n", bcVersion, core.BlockChainVersion)
+ if bcVersion != nil && *bcVersion > core.BlockChainVersion {
+ return nil, fmt.Errorf("database version is v%d, Geth %s only supports v%d", *bcVersion, params.VersionWithMeta, core.BlockChainVersion)
+ } else if bcVersion != nil && *bcVersion < core.BlockChainVersion {
+ log.Warn("Upgrade blockchain database version", "from", *bcVersion, "to", core.BlockChainVersion)
}
rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion)
}
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader.go b/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader.go
index 3a177ab9..4db689f7 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader.go
@@ -1488,7 +1488,15 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error {
blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
}
if index, err := d.blockchain.InsertChain(blocks); err != nil {
- log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
+ if index < len(results) {
+ log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
+ } else {
+ // The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
+ // when it needs to preprocess blocks to import a sidechain.
+ // The importer will put together a new list of blocks to import, which is a superset
+ // of the blocks delivered from the downloader, and the indexing will be off.
+ log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
+ }
return errInvalidChain
}
return nil
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/downloader/statesync.go b/vendor/github.com/ethereum/go-ethereum/eth/downloader/statesync.go
index 29d5ee4d..0675a91c 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/downloader/statesync.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/downloader/statesync.go
@@ -25,10 +25,10 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
+ "golang.org/x/crypto/sha3"
)
// stateReq represents a batch of state fetch requests grouped together into
@@ -152,7 +152,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
finished = append(finished, req)
delete(active, pack.PeerId())
- // Handle dropped peer connections:
+ // Handle dropped peer connections:
case p := <-peerDrop:
// Skip if no request is currently pending
req := active[p.id]
@@ -240,7 +240,7 @@ func newStateSync(d *Downloader, root common.Hash) *stateSync {
return &stateSync{
d: d,
sched: state.NewStateSync(root, d.stateDB),
- keccak: sha3.NewKeccak256(),
+ keccak: sha3.NewLegacyKeccak256(),
tasks: make(map[common.Hash]*stateTask),
deliver: make(chan *stateReq),
cancel: make(chan struct{}),
@@ -398,9 +398,8 @@ func (s *stateSync) fillTasks(n int, req *stateReq) {
// process iterates over a batch of delivered state data, injecting each item
// into a running state sync, re-queuing any items that were requested but not
-// delivered.
-// Returns whether the peer actually managed to deliver anything of value,
-// and any error that occurred
+// delivered. Returns whether the peer actually managed to deliver anything of
+// value, and any error that occurred.
func (s *stateSync) process(req *stateReq) (int, error) {
// Collect processing stats and update progress if valid data was received
duplicate, unexpected, successful := 0, 0, 0
@@ -412,14 +411,12 @@ func (s *stateSync) process(req *stateReq) (int, error) {
}(time.Now())
// Iterate over all the delivered data and inject one-by-one into the trie
- progress := false
for _, blob := range req.response {
- prog, hash, err := s.processNodeData(blob)
+ _, hash, err := s.processNodeData(blob)
switch err {
case nil:
s.numUncommitted++
s.bytesUncommitted += len(blob)
- progress = progress || prog
successful++
case trie.ErrNotRequested:
unexpected++
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/assets.go b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/assets.go
index addd3288..d0a0bf7c 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/assets.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/assets.go
@@ -2,11 +2,11 @@
// sources:
// 4byte_tracer.js (2.933kB)
// bigram_tracer.js (1.712kB)
-// call_tracer.js (8.596kB)
+// call_tracer.js (8.643kB)
// evmdis_tracer.js (4.194kB)
// noop_tracer.js (1.271kB)
// opcount_tracer.js (1.372kB)
-// prestate_tracer.js (3.892kB)
+// prestate_tracer.js (4.234kB)
// trigram_tracer.js (1.788kB)
// unigram_tracer.js (1.51kB)
@@ -117,7 +117,7 @@ func bigram_tracerJs() (*asset, error) {
return a, nil
}
-var _call_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x59\xdf\x6f\x1b\x37\xf2\x7f\x96\xfe\x8a\x49\x1e\x6a\x09\x51\x24\x27\xe9\xb7\x5f\xc0\xae\x7a\xd0\x39\x4a\x6a\xc0\x8d\x03\x5b\x69\x10\x04\x79\xa0\x76\x67\x25\xd6\x5c\x72\x4b\x72\x2d\xef\xa5\xfe\xdf\x0f\x33\xe4\xae\x56\x3f\xec\xe8\x7a\xb8\x43\xef\x45\xd0\x2e\x67\x86\xc3\x99\xcf\xfc\xe2\x8e\x46\x70\x66\x8a\xca\xca\xc5\xd2\xc3\xcb\xe3\x17\xff\x0f\xb3\x25\xc2\xc2\x3c\x47\xbf\x44\x8b\x65\x0e\x93\xd2\x2f\x8d\x75\xdd\xd1\x08\x66\x4b\xe9\x20\x93\x0a\x41\x3a\x28\x84\xf5\x60\x32\xf0\x5b\xf4\x4a\xce\xad\xb0\xd5\xb0\x3b\x1a\x05\x9e\xbd\xcb\x24\x21\xb3\x88\xe0\x4c\xe6\x57\xc2\xe2\x09\x54\xa6\x84\x44\x68\xb0\x98\x4a\xe7\xad\x9c\x97\x1e\x41\x7a\x10\x3a\x1d\x19\x0b\xb9\x49\x65\x56\x91\x48\xe9\xa1\xd4\x29\x5a\xde\xda\xa3\xcd\x5d\xad\xc7\xdb\x77\x1f\xe0\x02\x9d\x43\x0b\x6f\x51\xa3\x15\x0a\xde\x97\x73\x25\x13\xb8\x90\x09\x6a\x87\x20\x1c\x14\xf4\xc6\x2d\x31\x85\x39\x8b\x23\xc6\x37\xa4\xca\x75\x54\x05\xde\x98\x52\xa7\xc2\x4b\xa3\x07\x80\x92\x34\x87\x5b\xb4\x4e\x1a\x0d\xaf\xea\xad\xa2\xc0\x01\x18\x4b\x42\x7a\xc2\xd3\x01\x2c\x98\x82\xf8\xfa\x20\x74\x05\x4a\xf8\x35\xeb\x01\x06\x59\x9f\x3b\x05\xa9\x79\x9b\xa5\x29\x10\xfc\x52\x78\x3a\xf5\x4a\x2a\x05\x73\x84\xd2\x61\x56\xaa\x01\x49\x9b\x97\x1e\x3e\x9e\xcf\x7e\xbe\xfc\x30\x83\xc9\xbb\x4f\xf0\x71\x72\x75\x35\x79\x37\xfb\x74\x0a\x2b\xe9\x97\xa6\xf4\x80\xb7\x18\x44\xc9\xbc\x50\x12\x53\x58\x09\x6b\x85\xf6\x15\x98\x8c\x24\xfc\x32\xbd\x3a\xfb\x79\xf2\x6e\x36\xf9\xfb\xf9\xc5\xf9\xec\x13\x18\x0b\x6f\xce\x67\xef\xa6\xd7\xd7\xf0\xe6\xf2\x0a\x26\xf0\x7e\x72\x35\x3b\x3f\xfb\x70\x31\xb9\x82\xf7\x1f\xae\xde\x5f\x5e\x4f\x87\x70\x8d\xa4\x15\x12\xff\xb7\x6d\x9e\xb1\xf7\x2c\x42\x8a\x5e\x48\xe5\x6a\x4b\x7c\x32\x25\xb8\xa5\x29\x55\x0a\x4b\x71\x8b\x60\x31\x41\x79\x8b\x29\x08\x48\x4c\x51\x1d\xec\x54\x92\x25\x94\xd1\x0b\x3e\xf3\x83\x80\x84\xf3\x0c\xb4\xf1\x03\x70\x88\xf0\xe3\xd2\xfb\xe2\x64\x34\x5a\xad\x56\xc3\x85\x2e\x87\xc6\x2e\x46\x2a\x88\x73\xa3\x9f\x86\x5d\x92\x99\x08\xa5\x66\x56\x24\x68\x
c9\x39\x02\xb2\x92\xcc\xaf\xcc\x4a\x83\xb7\x42\x3b\x91\x90\xab\xe9\x7f\xc2\x60\x14\x1e\xf0\x8e\x9e\xbc\x23\xd0\x82\xc5\xc2\x58\xfa\xaf\x54\x8d\x33\xa9\x3d\x5a\x2d\x14\xcb\x76\x90\x8b\x14\x61\x5e\x81\x68\x0b\x1c\xb4\x0f\x43\x30\x0a\xee\x06\xa9\x33\x63\x73\x86\xe5\xb0\xfb\xb5\xdb\x89\x1a\x3a\x2f\x92\x1b\x52\x90\xe4\x27\xa5\xb5\xa8\x3d\x99\xb2\xb4\x4e\xde\x22\x93\x40\xa0\x89\xf6\x9c\xfe\xfa\x0b\xe0\x1d\x26\x65\x90\xd4\x69\x84\x9c\xc0\xe7\xaf\xf7\x5f\x06\x5d\x16\x9d\xa2\x4b\x50\xa7\x98\xf2\xf9\x6e\x1c\xac\x96\x6c\x51\x58\xe1\xd1\x2d\xc2\x6f\xa5\xf3\x2d\x9a\xcc\x9a\x1c\x84\x06\x53\x12\xe2\xdb\xd6\x91\xda\x1b\x16\x28\xe8\xbf\x46\xcb\x1a\x0d\xbb\x9d\x86\xf9\x04\x32\xa1\x1c\xc6\x7d\x9d\xc7\x82\x4e\x23\xf5\xad\xb9\x21\xc9\xc6\x12\x84\x6d\x05\xa6\x48\x4c\x1a\x83\x81\xce\xd1\x1c\x03\xdd\xb0\xdb\x21\xbe\x13\xc8\x4a\xcd\xdb\xf6\x94\x59\x0c\x20\x9d\xf7\xe1\x6b\xb7\x43\x62\xcf\x44\xe1\x4b\x8b\x6c\x4f\xb4\xd6\x58\x07\x32\xcf\x31\x95\xc2\xa3\xaa\xba\x9d\xce\xad\xb0\x61\x01\xc6\xa0\xcc\x62\xb8\x40\x3f\xa5\xc7\x5e\xff\xb4\xdb\xe9\xc8\x0c\x7a\x61\xf5\xc9\x78\xcc\xd9\x27\x93\x1a\xd3\x20\xbe\xe3\x97\xd2\x0d\x33\x51\x2a\xdf\xec\x4b\x4c\x1d\x8b\xbe\xb4\x9a\xfe\xde\x07\x2d\x3e\x22\x18\xad\x2a\x48\x28\xcb\x88\x39\x85\xa7\xab\x9c\xc7\x3c\x1e\xce\x0d\x20\x13\x8e\x4c\x28\x33\x58\x21\x14\x16\x9f\x27\x4b\x24\xdf\xe9\x04\xa3\x96\xae\x72\xec\xd4\x31\xd0\x6e\x43\x53\x0c\xbd\x79\x57\xe6\x73\xb4\xbd\x3e\x7c\x07\xc7\x77\xd9\x71\x1f\xc6\x63\xfe\x53\xeb\x1e\x79\xa2\xbe\x24\xc5\x14\xf1\xa0\xcc\x7f\xed\xad\xd4\x8b\x70\xd6\xa8\xeb\x79\x06\x02\x34\xae\x20\x31\x9a\x41\x4d\x5e\x99\xa3\xd4\x0b\x48\x2c\x0a\x8f\xe9\x00\x44\x9a\x82\x37\x01\x79\x0d\xce\x36\xb7\x84\xef\xbe\xe3\xbd\xc6\x70\x74\x76\x35\x9d\xcc\xa6\x47\x2d\x25\xa4\xbe\xcc\xb2\xa8\x07\xf3\x0e\x0b\xc4\x9b\xde\x8b\xfe\xf0\x56\xa8\x12\x2f\xb3\xa0\x51\xa4\x9d\xea\x14\xc6\x91\xe7\xd9\x36\xcf\xcb\x0d\x1e\x62\x1a\x8d\x60\xe2\x1c\xe6\x73\x85\xbb\xb1\x17\x83\x93\xe3\xd4\x79\x4a\x4e\x04\xb4\xc4\xe4\x85\x42\x02\x50\xbd\x6b\xb4\x34\x6b\xdc\xf1\x55\x81\x27\x00\x00\x
a6\x18\xf0\x0b\x82\x3d\xbf\xf0\xe6\x67\xbc\x63\x77\xd4\xd6\x22\x00\x4d\xd2\xd4\xa2\x73\xbd\x7e\x3f\x90\x4b\x5d\x94\xfe\x64\x83\x3c\xc7\xdc\xd8\x6a\xe8\x28\xf7\xf4\xf8\x68\x83\x70\xd2\x9a\x67\x21\xdc\xb9\x26\x9e\x08\xca\xb7\xc2\xf5\xd6\x4b\x67\xc6\xf9\x93\x7a\x89\x1e\xea\x35\xb6\x05\xb1\x1d\x1d\xdf\x1d\xed\x5a\xeb\xb8\xbf\x76\xfa\x8b\x1f\xfa\xc4\x72\x7f\xda\x40\xb9\xc9\x08\xc3\xa2\x74\xcb\x1e\x23\x67\xbd\xba\x8e\xfa\x31\x78\x5b\xe2\x5e\xa4\x33\x7a\x76\x91\xe3\x50\x65\x94\x36\xbc\x2d\x13\x46\xd0\x42\x70\x52\xe1\xa0\x16\x94\x64\x5d\x39\x67\x9b\x7b\x63\x1e\x04\xd2\xf5\xf4\xe2\xcd\xeb\xe9\xf5\xec\xea\xc3\xd9\xac\x0d\x27\x85\x99\x27\xa5\x36\xcf\xa0\x50\x2f\xfc\x92\xf5\x27\x71\x9b\xab\x9f\x89\xe7\xf9\x8b\x2f\xe1\x0d\x8c\xf7\x44\x77\xe7\x71\x0e\xf8\xfc\x85\x65\xdf\xef\x9a\x6f\x93\x34\x18\xf3\x6b\x00\x91\x29\xee\xdb\x39\x62\x4f\xd8\xe5\xe8\x97\x26\xe5\x3c\x98\x88\x90\x4a\x6b\x2b\xa6\x46\xe3\xc1\xc1\xd7\xab\xa3\x6f\x72\x71\x71\x04\x7f\xfc\x01\xad\xe7\xb3\xcb\xd7\xd3\xf6\xbb\xd7\xd3\x8b\xe9\xdb\xc9\x6c\xba\x4d\x7b\x3d\x9b\xcc\xce\xcf\xf8\x6d\x3f\x5a\x65\x34\x82\xeb\x1b\x59\x70\x42\xe5\x34\x65\xf2\x82\x3b\xc3\x46\x5f\x37\x00\xbf\x34\xd4\x73\xd9\x58\x2f\x32\xa1\x93\x3a\x8f\xbb\xda\x69\xde\x90\xcb\x4c\x1d\x2b\xbb\xa9\xa0\x0d\xd4\x7e\xe3\x46\xe9\xde\x5b\x8c\x9b\xa6\x3d\x6f\x6a\xbd\xd6\x06\x0d\x1e\xe1\x5c\xc7\x49\xa6\x77\xf8\x21\xe1\x6f\x70\x0c\x27\xf0\x22\x66\x92\x47\x52\xd5\x4b\x78\x46\xe2\xff\x44\xc2\x7a\xb5\x87\xf3\xaf\x99\xb6\xbc\x61\xe2\x9a\xdc\x9b\xff\x7e\x3a\x33\xa5\xbf\xcc\xb2\x13\xd8\x36\xe2\xf7\x3b\x46\x6c\xe8\x2f\x50\xef\xd2\xff\xdf\x0e\xfd\x3a\xf5\x11\xaa\x4c\x01\x4f\x76\x20\x12\x12\xcf\x93\xad\x38\x88\xc6\xe5\x6e\x86\xa5\xc1\xf8\x81\x64\xfb\x72\x13\xc3\x0f\x65\x8b\x7f\x2b\xd9\xee\xed\xca\xa8\xf7\xda\xec\xbb\x06\x60\xd1\x5b\x89\xb7\x34\x59\x1d\x39\x16\x49\xfd\xa9\x59\x09\x9d\xe0\x10\x3e\x62\x90\xa8\x11\x39\xb9\xc4\x7e\x96\xda\x11\x6e\xf1\xa8\x27\x8d\x93\x09\x43\x4c\x70\xdb\x69\x11\x72\x51\xd1\x64\x92\x95\xfa\xa6\x82\x85\x70\x90\x56\x5a\xe4\x32\x71\x41\x1e\xf7\xb2\x16\x
17\xc2\xb2\x58\x8b\xbf\x97\xe8\x68\xcc\x21\x20\x8b\xc4\x97\x42\xa9\x0a\x16\x92\x66\x15\xe2\xee\xbd\x7c\x75\x7c\x0c\xce\xcb\x02\x75\x3a\x80\x1f\x5e\x8d\x7e\xf8\x1e\x6c\xa9\xb0\x3f\xec\xb6\xd2\x78\x73\xd4\xe8\x0d\x5a\x88\xe8\x79\x8d\x85\x5f\xf6\xfa\xf0\xd3\x03\xf5\xe0\x81\xe4\xbe\x97\x16\x9e\xc3\x8b\x2f\x43\xd2\x6b\xbc\x81\xdb\xe0\x49\x40\xe5\x30\x4a\xa3\xf9\xee\xf2\xf5\x65\xef\x46\x58\xa1\xc4\x1c\xfb\x27\x3c\xef\xb1\xad\x56\x22\x36\xfc\xe4\x14\x28\x94\x90\x1a\x44\x92\x98\x52\x7b\x32\x7c\xdd\xbb\xab\x8a\xf2\xfb\x91\xaf\xe5\xf1\x68\x24\x92\x04\x9d\xab\xd3\x3d\x7b\x8d\xd4\x11\x39\x71\x83\xd4\x4e\xa6\xd8\xf2\x0a\x65\x07\xc3\xa9\x39\x52\xd0\xe4\x58\x0b\xcc\x8d\xa3\x4d\xe6\x08\x2b\x4b\x73\x86\x93\x3a\xe1\x41\x3b\x45\xb2\xb6\x03\xa3\x41\x80\x32\x3c\xdd\x73\x8c\x83\xb0\x0b\x37\x0c\xf9\x9e\xb6\xa5\x9c\xa3\xcd\x6a\xb8\x09\xe4\x36\x54\xb9\xa3\xdf\x6a\x07\x34\xe0\x9d\x74\x9e\x1b\x48\xd2\x52\x3a\x08\x48\x96\x7a\x31\x80\xc2\x14\x9c\xa7\x0f\xec\x25\xaf\xa6\xbf\x4e\xaf\x9a\xe2\x7f\xb8\x13\xeb\x16\xff\x69\x33\x01\x81\xa5\xf1\xc2\x63\xfa\x74\x4f\xcf\xbe\x07\x50\xe3\x07\x00\x45\xf2\xd7\xb5\xf1\x7d\xeb\x38\x4a\x38\xbf\x76\xcc\x02\xc3\xf8\xd2\x56\xc0\x95\xca\xbb\xad\xdc\xbd\x9d\x1c\x4c\x51\x57\x08\x52\x8a\xd3\x0e\x25\xf6\x3d\x9d\x75\x34\xb8\x6f\x03\x4f\x40\xa0\x69\x25\x00\x5e\xaf\x3b\x34\x11\x72\x3e\x6b\x68\x4a\x4f\x4e\xa7\x2a\xbd\x4e\x71\x0b\xe1\x3e\x38\xf6\x6d\x4c\x72\x73\xb9\x38\xd7\xbe\x57\x2f\x9e\x6b\x78\x0e\xf5\x03\xa5\x6e\x78\xbe\x11\x2b\x7b\x72\x60\x27\x45\x85\x1e\x61\x2d\xe2\x14\xb6\x5e\x91\xa0\x70\x68\x36\x8d\x45\xbf\x5b\x82\x8f\xa3\x34\x32\xcb\x13\x8b\x7e\x88\xbf\x97\x42\xb9\xde\x71\xd3\x12\x84\x13\x78\xc3\x45\x6c\xdc\x94\xb1\xba\xce\x11\xcf\x46\x93\x11\x05\x06\xb6\x68\x8d\x9a\x2d\x9d\x87\xda\x94\xe2\xa3\x12\xa2\x88\x98\x1c\x1a\x8f\x45\xf8\xed\xeb\x32\x3b\x6d\x02\x78\xda\x94\xfd\x4c\x48\x55\x5a\x7c\x7a\x0a\x7b\x92\x8b\x2b\x6d\x26\x12\xf6\xa5\x43\xe0\x11\xd4\x81\x33\x39\x2e\xcd\x2a\x28\xb0\x2f\x45\xed\x82\xa3\xc1\xc1\x56\x91\xe0\xbb\x14\xe1\xa0\x74\x62\x81\x2d\x70\x34\x06\xaf\x
1d\xb5\x77\x2e\xfe\xd3\xd0\x79\xd6\x3c\x7e\x03\x45\x61\x97\x6f\x42\xe3\x31\x6c\xec\xf5\xf2\x4e\x2f\x53\x13\x71\x47\xd3\x7a\xa8\x55\x0d\x0d\x47\x83\x9c\x7f\xc5\xef\xff\x19\xc7\x07\xcf\xc7\xdf\x43\x03\x6d\x9b\x36\x9c\x71\x93\x38\x9c\x74\xdd\xc4\x7c\x1b\x05\xcd\xea\x43\x00\x78\xa8\x3f\x22\xa8\xea\xdf\x30\xf1\x6b\xb8\x72\x4b\x43\x4f\x85\xc5\x5b\x69\x4a\xaa\x56\xf8\xbf\x34\xff\x35\xfd\xdd\x7d\xb7\x73\x1f\xef\xbc\xd8\x7d\xed\x4b\xaf\xd5\x32\xde\xd9\x86\xd6\xa8\x55\x2b\x0c\x17\xd2\x78\x15\x96\x85\xdb\xd4\x0e\xf3\x3f\x72\xf9\x15\xe3\xdd\x9b\x82\x6a\x7f\x2c\x45\xca\xa2\x48\xab\xa6\xfa\x0d\x42\xd7\x01\x4b\xa1\xd3\x38\x79\x88\x34\x95\x24\x8f\xb1\x48\x1a\x8a\x85\x90\xba\xbb\xd7\x8c\xdf\x2c\xb9\xfb\x90\xb1\xd3\xc8\xb6\xab\x66\x9c\x18\x69\xbc\x63\x8d\xbb\x07\x54\xc7\xad\x58\xda\xbe\xc7\x8b\x57\x81\x46\xbb\x32\xe7\xb6\x17\xc4\xad\x90\x4a\xd0\xa8\xc5\xed\x94\x4e\x21\x51\x28\x74\xb8\xbd\xc7\xcc\x9b\x5b\xb4\xae\x7b\x00\xc8\xff\x0c\xc6\xb7\x92\x63\xfd\x18\xcd\x71\x78\xcc\x1e\x1a\xb1\xe1\xf8\x6f\x94\xf0\x3e\xc2\xab\x65\xde\x10\x59\xd2\xf3\x87\x1d\xd4\xbe\x7b\x58\x48\x71\x83\x44\x34\x3f\xc1\x71\xab\x09\xff\xab\x04\xd9\x2e\xc4\x2e\x9a\x66\x2c\x1e\xde\x1b\x33\x00\x85\x82\x47\xa2\xfa\xb3\x4b\xdd\x7c\x3e\x36\xa1\xd5\xd1\x1b\xda\xb7\x9d\xf0\xe5\x4b\xac\x25\xd6\xd7\x1d\xa1\x8f\x9f\x23\x6a\x90\x1e\xad\xa0\xe1\x87\xd0\x15\xbf\x14\x90\x96\x8e\xc5\xb1\x5f\x24\x05\x5d\x14\x1c\xaf\xed\xa9\x3e\x4b\xbd\x18\x76\x3b\xe1\x7d\x2b\xde\x13\x7f\xb7\x8e\xf7\x50\x0c\x99\x33\x5e\x00\x34\xf3\x7f\xe2\xef\xb8\x67\xe4\x19\x79\xeb\x12\x80\xd6\xe8\x55\x18\xa0\xb7\x46\x7e\x66\x8c\x63\xff\xf6\xcd\x22\xad\xf1\xbb\x0d\x80\x33\xe9\x42\xb8\x20\x66\x2b\x24\xfc\xdd\x6e\x44\xd4\x0c\x14\x0c\x27\xfb\x19\x68\x69\x0f\xd3\xd6\x35\x04\x11\xf3\xab\xb0\x1a\x0a\xfb\x49\x7b\x35\xbc\x8a\x07\x95\x79\xcb\x36\x32\x67\xdb\xdc\x9f\xee\x4f\x72\xc7\x35\x1e\xf7\x27\x33\xb2\x79\x03\xd8\x07\x58\xdb\x83\xc5\x2e\xc9\x63\xa9\x92\xa5\xd7\x99\xed\x01\x56\x96\xde\x6a\x3d\xfc\xdd\xe1\x22\x1b\xe2\xb6\x8a\x1b\x34\xfb\x84\xc4\x3c\x13\xe9\x82\x65\x6b\x01\x
01\xd5\x41\x57\x46\xb4\xfc\x07\x46\x89\xed\xf8\xa9\x97\xc0\x62\xf8\xb0\xc0\x0d\x29\x85\x8f\x99\x73\xf1\x2f\x1d\xcd\x8c\xeb\xb8\x48\xd1\x49\x8b\x29\x64\x12\x55\x0a\x26\x45\xcb\x13\xe9\x6f\xce\xe8\xf0\x09\x09\xad\x24\x89\xe1\x53\x59\xf8\x6a\xcd\x1f\xf0\xb4\x4c\xd0\x57\x90\xa1\xe0\x6f\x41\xde\x40\x21\x9c\x83\x1c\x05\xcd\xa0\x59\xa9\x54\x05\xc6\xa6\x48\xc2\x9b\xa1\x8c\x42\xd2\x40\xe9\xd0\x3a\x58\x2d\x4d\x2c\x93\xdc\xa5\x15\xd4\x74\x4a\x3f\x88\xf7\x2e\xd2\x15\x4a\x54\x20\x3d\x95\xe4\x78\xa8\x76\x94\x36\x1f\x60\xf8\x2b\x8e\xa1\xaa\xbb\x1b\xa2\xf5\x5c\xb7\x19\xa3\xfc\x9a\x9e\x36\xa3\x33\xce\x35\x9b\x71\xb9\xbe\x91\xda\x0c\xc2\xba\x6c\x6c\x46\x5a\xbb\x08\x6d\x86\x13\xaf\xf0\xd3\x66\x20\xb5\xfa\x65\x5e\x60\x70\x34\x0c\xfc\xb4\x15\x5a\xac\x65\x8c\xad\xf0\xb9\xb1\x21\xe7\xa7\x41\x04\x0c\x79\xb1\x47\xc6\xb9\xc1\x8a\x32\x71\xb0\x51\xab\xac\x84\x17\x9f\x6f\xb0\xfa\xb2\xbf\x8a\x44\x38\xb6\xe8\x9a\xb2\x51\x43\x3a\xac\x3d\x12\xc8\x8d\x16\x72\x7c\x7c\x0a\xf2\xc7\x36\x43\x5d\xf9\x40\x3e\x7b\x56\xef\xd9\x5e\xff\x2c\xbf\xd4\xd1\xd9\x20\x7e\x6b\xbd\xbf\xa1\x51\x8c\x91\x40\x43\x41\xd1\xbd\xef\xfe\x33\x00\x00\xff\xff\xb5\x25\x8b\x4d\x94\x21\x00\x00")
+var _call_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x59\x5f\x6f\x1b\xb7\xb2\x7f\x96\x3e\xc5\x24\x0f\xb5\x84\x28\x92\x93\xf4\xf6\x02\x76\xd5\x0b\x5d\x47\x49\x0d\xb8\x71\x60\x2b\x0d\x82\x20\x0f\xd4\xee\xac\xc4\x9a\x4b\x6e\x49\xae\xe4\x3d\xa9\xbf\xfb\xc1\x0c\xb9\xab\xd5\x1f\x3b\x6e\x0f\xce\x41\xcf\x8b\xa0\x5d\xce\x0c\x87\x33\xbf\xf9\xc7\x1d\x8d\xe0\xcc\x14\x95\x95\x8b\xa5\x87\x97\xc7\x2f\xfe\x17\x66\x4b\x84\x85\x79\x8e\x7e\x89\x16\xcb\x1c\x26\xa5\x5f\x1a\xeb\xba\xa3\x11\xcc\x96\xd2\x41\x26\x15\x82\x74\x50\x08\xeb\xc1\x64\xe0\x77\xe8\x95\x9c\x5b\x61\xab\x61\x77\x34\x0a\x3c\x07\x97\x49\x42\x66\x11\xc1\x99\xcc\xaf\x85\xc5\x13\xa8\x4c\x09\x89\xd0\x60\x31\x95\xce\x5b\x39\x2f\x3d\x82\xf4\x20\x74\x3a\x32\x16\x72\x93\xca\xac\x22\x91\xd2\x43\xa9\x53\xb4\xbc\xb5\x47\x9b\xbb\x5a\x8f\xb7\xef\x3e\xc0\x05\x3a\x87\x16\xde\xa2\x46\x2b\x14\xbc\x2f\xe7\x4a\x26\x70\x21\x13\xd4\x0e\x41\x38\x28\xe8\x8d\x5b\x62\x0a\x73\x16\x47\x8c\x6f\x48\x95\xeb\xa8\x0a\xbc\x31\xa5\x4e\x85\x97\x46\x0f\x00\x25\x69\x0e\x2b\xb4\x4e\x1a\x0d\xaf\xea\xad\xa2\xc0\x01\x18\x4b\x42\x7a\xc2\xd3\x01\x2c\x98\x82\xf8\xfa\x20\x74\x05\x4a\xf8\x0d\xeb\x23\x0c\xb2\x39\x77\x0a\x52\xf3\x36\x4b\x53\x20\xf8\xa5\xf0\x74\xea\xb5\x54\x0a\xe6\x08\xa5\xc3\xac\x54\x03\x92\x36\x2f\x3d\x7c\x3c\x9f\xfd\x7c\xf9\x61\x06\x93\x77\x9f\xe0\xe3\xe4\xea\x6a\xf2\x6e\xf6\xe9\x14\xd6\xd2\x2f\x4d\xe9\x01\x57\x18\x44\xc9\xbc\x50\x12\x53\x58\x0b\x6b\x85\xf6\x15\x98\x8c\x24\xfc\x32\xbd\x3a\xfb\x79\xf2\x6e\x36\xf9\xff\xf3\x8b\xf3\xd9\x27\x30\x16\xde\x9c\xcf\xde\x4d\xaf\xaf\xe1\xcd\xe5\x15\x4c\xe0\xfd\xe4\x6a\x76\x7e\xf6\xe1\x62\x72\x05\xef\x3f\x5c\xbd\xbf\xbc\x9e\x0e\xe1\x1a\x49\x2b\x24\xfe\x6f\xdb\x3c\x63\xef\x59\x84\x14\xbd\x90\xca\xd5\x96\xf8\x64\x4a\x70\x4b\x53\xaa\x14\x96\x62\x85\x60\x31\x41\xb9\xc2\x14\x04\x24\xa6\xa8\x1e\xed\x54\x92\x25\x94\xd1\x0b\x3e\xf3\xbd\x80\x84\xf3\x0c\xb4\xf1\x03\x70\x88\xf0\xe3\xd2\xfb\xe2\x64\x34\x5a\xaf\xd7\xc3\x85\x2e\x87\xc6\x2e\x46\x2a\x88\x73\xa3\x9f\x86\x5d\x92\x99\x08\xa5\x66\x56\x24\x68\x
c9\x39\x02\xb2\x92\xcc\xaf\xcc\x5a\x83\xb7\x42\x3b\x91\x90\xab\xe9\x7f\xc2\x60\x14\x1e\xf0\x96\x9e\xbc\x23\xd0\x82\xc5\xc2\x58\xfa\xaf\x54\x8d\x33\xa9\x3d\x5a\x2d\x14\xcb\x76\x90\x8b\x14\x61\x5e\x81\x68\x0b\x1c\xb4\x0f\x43\x30\x0a\xee\x06\xa9\x33\x63\x73\x86\xe5\xb0\xfb\xb5\xdb\x89\x1a\x3a\x2f\x92\x1b\x52\x90\xe4\x27\xa5\xb5\xa8\x3d\x99\xb2\xb4\x4e\xae\x90\x49\x20\xd0\x44\x7b\x4e\x7f\xfd\x05\xf0\x16\x93\x32\x48\xea\x34\x42\x4e\xe0\xf3\xd7\xbb\x2f\x83\x2e\x8b\x4e\xd1\x25\xa8\x53\x4c\xf9\x7c\x37\x0e\xd6\x4b\xb6\x28\xac\xf1\x68\x85\xf0\x5b\xe9\x7c\x8b\x26\xb3\x26\x07\xa1\xc1\x94\x84\xf8\xb6\x75\xa4\xf6\x86\x05\x0a\xfa\xaf\xd1\xb2\x46\xc3\x6e\xa7\x61\x3e\x81\x4c\x28\x87\x71\x5f\xe7\xb1\xa0\xd3\x48\xbd\x32\x37\x24\xd9\x58\x82\xb0\xad\xc0\x14\x89\x49\x63\x30\xd0\x39\x9a\x63\xa0\x1b\x76\x3b\xc4\x77\x02\x59\xa9\x79\xdb\x9e\x32\x8b\x01\xa4\xf3\x3e\x7c\xed\x76\x48\xec\x99\x28\x7c\x69\x91\xed\x89\xd6\x1a\xeb\x40\xe6\x39\xa6\x52\x78\x54\x55\xb7\xd3\x59\x09\x1b\x16\x60\x0c\xca\x2c\x86\x0b\xf4\x53\x7a\xec\xf5\x4f\xbb\x9d\x8e\xcc\xa0\x17\x56\x9f\x8c\xc7\x9c\x7d\x32\xa9\x31\x0d\xe2\x3b\x7e\x29\xdd\x30\x13\xa5\xf2\xcd\xbe\xc4\xd4\xb1\xe8\x4b\xab\xe9\xef\x5d\xd0\xe2\x23\x82\xd1\xaa\x82\x84\xb2\x8c\x98\x53\x78\xba\xca\x79\xcc\xe3\xe1\xdc\x00\x32\xe1\xc8\x84\x32\x83\x35\x42\x61\xf1\x79\xb2\x44\xf2\x9d\x4e\x30\x6a\xe9\x2a\xc7\x4e\x1d\x03\xed\x36\x34\xc5\xd0\x9b\x77\x65\x3e\x47\xdb\xeb\xc3\x77\x70\x7c\x9b\x1d\xf7\x61\x3c\xe6\x3f\xb5\xee\x91\x27\xea\x4b\x52\x4c\x11\x0f\xca\xfc\xd7\xde\x4a\xbd\x08\x67\x8d\xba\x9e\x67\x20\x40\xe3\x1a\x12\xa3\x19\xd4\xe4\x95\x39\x4a\xbd\x80\xc4\xa2\xf0\x98\x0e\x40\xa4\x29\x78\x13\x90\xd7\xe0\x6c\x7b\x4b\xf8\xee\x3b\xe8\xd1\x66\x63\x38\x3a\xbb\x9a\x4e\x66\xd3\x23\xf8\xe3\x0f\x08\x6f\x9e\x86\x37\x2f\x9f\xf6\x5b\x9a\x49\x7d\x99\x65\x51\x39\x16\x38\x2c\x10\x6f\x7a\x2f\xfa\xc3\x95\x50\x25\x5e\x66\x41\xcd\x48\x3b\xd5\x29\x8c\x23\xcf\xb3\x5d\x9e\x97\x5b\x3c\xc4\x34\x1a\xc1\xc4\x39\xcc\xe7\x0a\xf7\x03\x32\x46\x2c\x07\xaf\xf3\x94\xb1\x08\x7d\x89\xc9\x0b\x85\x84\xaa\x
7a\xd7\x68\x7e\xd6\xb8\xe3\xab\x02\x4f\x00\x00\x4c\x31\xe0\x17\x14\x0b\xfc\xc2\x9b\x9f\xf1\x96\x7d\x54\x9b\x90\x50\x35\x49\x53\x8b\xce\xf5\xfa\xfd\x40\x2e\x75\x51\xfa\x93\x2d\xf2\x1c\x73\x63\xab\xa1\xa3\x84\xd4\xe3\xa3\x0d\xc2\x49\x6b\x9e\x85\x70\xe7\x9a\x78\x22\x52\xdf\x0a\xd7\xdb\x2c\x9d\x19\xe7\x4f\xea\x25\x7a\xa8\xd7\xd8\x16\xc4\x76\x74\x7c\x7b\xb4\x6f\xad\xe3\xfe\x06\x09\x2f\x7e\xe8\x13\xcb\xdd\x69\x83\xef\x26\x4d\x0c\x8b\xd2\x2d\x7b\x0c\xa7\xcd\xea\x26\x15\x8c\xc1\xdb\x12\x0f\xc2\x9f\x21\xb5\x0f\x27\x87\x2a\xa3\x5c\xe2\x6d\x99\x30\xac\x16\x82\x33\x0d\x47\xba\xa0\xcc\xeb\xca\x39\xdb\xdc\x1b\xb3\x8f\xae\x08\xae\xeb\xe9\xc5\x9b\xd7\xd3\xeb\xd9\xd5\x87\xb3\xd9\x51\x0b\x4e\x0a\x33\x4f\x4a\x6d\x9f\x41\xa1\x5e\xf8\x25\xeb\x4f\xe2\xb6\x57\x3f\x13\xcf\xf3\x17\x5f\xc2\x1b\x18\x1f\x08\xf9\xce\xc3\x1c\xf0\xf9\x0b\xcb\xbe\xdb\x37\xdf\x36\x69\x30\xe6\xd7\x00\x22\x53\xdc\xb5\x13\xc7\x81\x58\xcc\xd1\x2f\x4d\xca\xc9\x31\x11\x21\xbf\xd6\x56\x4c\x8d\xc6\x3f\x1f\x91\x93\x8b\x8b\x56\x3c\xf2\xf3\xd9\xe5\xeb\x76\x8c\x1e\xbd\x9e\x5e\x4c\xdf\x4e\x66\xd3\x5d\xda\xeb\xd9\x64\x76\x7e\xc6\x6f\xeb\xf0\x1d\x8d\xe0\xfa\x46\x16\x9c\x65\x39\x77\x99\xbc\xe0\x76\xb1\xd1\xd7\x0d\xc0\x2f\x0d\x35\x62\x36\x16\x91\x4c\xe8\xa4\x4e\xee\xae\x76\x9a\x37\xe4\x32\x53\xc7\xca\x7e\x2a\x68\x03\xb5\xdf\xb8\x51\xba\xf7\x16\xe3\xa6\x69\xcf\x9b\x5a\xaf\x8d\x41\x83\x47\x38\x01\x72\x92\xe9\x3d\xfe\x90\xf0\x7f\x70\x0c\x27\xf0\x22\x66\x92\x07\x52\xd5\x4b\x78\x46\xe2\xff\x42\xc2\x7a\x75\x80\xf3\xef\x99\xb6\xbc\x61\xe2\x9a\xdc\x9b\xff\x7c\x3a\x33\xa5\xbf\xcc\xb2\x13\xd8\x35\xe2\xf7\x7b\x46\x6c\xe8\x2f\x50\xef\xd3\xff\xcf\x1e\xfd\x26\xf5\x11\xaa\x4c\x01\x4f\xf6\x20\x12\x12\xcf\x93\x9d\x38\x88\xc6\xe5\x16\x87\xa5\xc1\xf8\x9e\x64\xfb\x72\x1b\xc3\xf7\x65\x8b\x7f\x29\xd9\x1e\x6c\xd5\xa8\x21\xdb\x6e\xc6\x06\x60\xd1\x5b\x89\x2b\x1a\xb7\x8e\x1c\x8b\xa4\xa6\xd5\xac\x85\x4e\x70\x08\x1f\x31\x48\xd4\x88\x9c\x5c\x62\x93\x4b\x3d\x0a\xf7\x7d\xd4\xa8\xc6\x71\x85\x21\x26\xb8\x17\xb5\x08\xb9\xa8\x68\x5c\xc9\x4a\x7d\x53\xc1\x42\x38\x48\x
2b\x2d\x72\x99\xb8\x20\x8f\x1b\x5c\x8b\x0b\x61\x59\xac\xc5\xdf\x4b\x74\x34\xfb\x10\x90\x45\xe2\x4b\xa1\x54\x05\x0b\x49\x03\x0c\x71\xf7\x5e\xbe\x3a\x3e\x06\xe7\x65\x81\x3a\x1d\xc0\x0f\xaf\x46\x3f\x7c\x0f\xb6\x54\xd8\x1f\x76\x5b\x69\xbc\x39\x6a\xf4\x06\x2d\x44\xf4\xbc\xc6\xc2\x2f\x7b\x7d\xf8\xe9\x9e\x7a\x70\x4f\x72\x3f\x48\x0b\xcf\xe1\xc5\x97\x21\xe9\x35\xde\xc2\x6d\xf0\x24\xa0\x72\x18\xa5\xd1\xd0\x77\xf9\xfa\xb2\x77\x23\xac\x50\x62\x8e\xfd\x13\x1e\x02\xd9\x56\x6b\x11\xa7\x00\x72\x0a\x14\x4a\x48\x0d\x22\x49\x4c\xa9\x3d\x19\xbe\x6e\xe8\x55\x45\xf9\xfd\xc8\xd7\xf2\x78\x5e\x12\x49\x82\xce\xd5\xe9\x9e\xbd\x46\xea\x88\x9c\xb8\x41\x6a\x27\x53\x6c\x79\x85\xb2\x83\xe1\xd4\x1c\x29\x68\x9c\xac\x05\xe6\xc6\xd1\x26\x73\x84\xb5\xa5\xe1\xc3\x49\x9d\xf0\xf4\x9d\x22\x59\xdb\x81\xd1\x20\x40\x19\x1e\xf9\x39\xc6\x41\xd8\x85\x1b\x86\x7c\x4f\xdb\x52\xce\xd1\x66\x3d\xdc\x06\x72\x1b\xaa\xdc\xe6\xef\xb4\x03\x1a\xf0\x56\x3a\xcf\x5d\x25\x69\x29\x1d\x04\x24\x4b\xbd\x18\x40\x61\x0a\xce\xd3\xdf\x2a\x67\x31\x59\x5f\x4d\x7f\x9d\x5e\x35\xc5\xff\xf1\x4e\xac\xfb\xfe\xa7\xcd\x58\x04\x96\x66\x0e\x8f\xe9\xd3\x03\x8d\xfc\x01\x40\x8d\xef\x01\x14\xc9\xdf\xd4\xc6\xf7\xad\xe3\x28\xe1\xfc\xc6\x31\x0b\x0c\x33\x4d\x5b\x01\x57\x2a\xef\x76\x72\xf7\x6e\x72\x30\x45\x5d\x21\x48\x29\x4e\x3b\x94\xd8\x77\xbb\xed\xad\x85\x4d\xd3\xbd\xc1\xe7\x79\xcb\xc6\x6b\x6e\xb9\x02\x51\x2b\x35\xf0\x7a\xdd\xbb\x89\x50\x0d\x58\x77\x53\x7a\x82\x03\xd5\xef\x4d\xf2\x5b\x08\xf7\xc1\xb1\xd7\x63\xfa\x9b\xcb\xc5\xb9\xf6\xbd\x7a\xf1\x5c\xc3\x73\xa8\x1f\x28\xa9\xc3\xf3\xad\x28\x3a\x90\x1d\x3b\x29\x2a\xf4\x08\x1b\x11\xa7\xb0\xf3\x8a\x04\x05\x73\xb0\xd1\x2c\xfa\xfd\xe2\x7c\x1c\xa5\x91\xc1\x9e\x58\xf4\x43\xfc\xbd\x14\xca\xf5\x8e\x9b\x66\x21\x9c\xc0\x1b\x2e\x6f\xe3\xa6\xc0\xd5\x15\x90\x78\xb6\xda\x8f\x28\x30\xb0\x45\x6b\xd4\x6c\xe9\x3c\x54\xad\x14\x1f\x94\x10\x45\xc4\xb4\xd1\xf8\x32\x02\xf3\x50\xff\xd9\x69\x13\xc0\xd3\xa6\x21\xc8\x84\x54\xa5\xc5\xa7\xa7\x70\x20\xed\xb8\xd2\x66\x22\x61\x5f\x3a\x04\x9e\x58\x1d\x38\x93\xe3\xd2\xac\x83\x02\x87\x92\xd7\x
3e\x38\x1a\x1c\xec\x94\x0f\xbe\x7a\x11\x0e\x4a\x27\x16\xd8\x02\x47\x63\xf0\xda\x51\x07\xc7\xe8\xbf\x0c\x9d\x67\xcd\xe3\x37\x50\x14\x76\xf9\x26\x34\x1e\xc2\xc6\x41\x2f\xef\x75\x39\x35\x11\xf7\x3a\xad\x87\x5a\xd5\xd0\x8a\x34\xc8\xf9\x33\x7e\xff\xf7\x38\x3e\x78\x3e\xfe\x3e\x36\xd0\x76\x69\xc3\x19\xb7\x89\xc3\x49\x37\xed\xcd\xb7\x51\xd0\xac\xde\x07\x80\xfb\x3a\x27\x82\xaa\xfe\x0d\x13\xbf\x81\x2b\x37\x3b\xf4\x54\x58\x5c\x49\x53\x52\x1d\xc3\xff\xa6\xc9\xb0\xe9\xfc\xee\xba\x9d\xbb\x78\x45\xc6\xee\x6b\xdf\x91\xad\x97\xf1\x8a\x37\x34\x4d\xad\x2a\x62\xb8\xc4\xc6\x9b\xb3\x2c\x5c\xbe\x76\x98\xff\x81\xbb\xb2\x18\xef\xde\x14\xd4\x15\xc4\x22\xa5\x2c\x8a\xb4\x6a\xea\xe2\x20\xf4\x23\xb0\x14\x3a\x8d\x33\x89\x48\x53\x49\xf2\x18\x8b\xa4\xa1\x58\x08\xa9\xbb\x07\xcd\xf8\xcd\x62\x7c\x08\x19\x7b\x2d\x6e\xbb\x9e\xc6\x59\x92\x06\x3f\xd6\xb8\xfb\x88\xba\xb9\x13\x4b\xbb\xd7\x7e\xf1\xe6\xd0\x68\x57\xe6\xdc\x10\x83\x58\x09\xa9\x04\x0d\x61\xdc\x68\xe9\x14\x12\x85\x42\x87\xcb\x7e\xcc\xbc\x59\xa1\x75\xdd\x47\x80\xfc\xaf\x60\x7c\x27\x39\xd6\x8f\xd1\x1c\x8f\x8f\xd9\xc7\x46\x6c\x38\xfe\x1b\x25\xbc\x8f\xf0\x6a\x99\x37\x44\x96\xf4\xfc\x1d\x08\xb5\xef\x3e\x2e\xa4\xb8\x75\x22\x9a\x9f\xe0\xb8\xd5\x9e\xff\x5d\x82\x6c\x1f\x62\x17\x4d\x9b\x16\x0f\xef\x8d\x19\x80\x42\xc1\xc3\x52\xfd\x95\xa6\x6e\x4b\x1f\x9a\xdd\xea\xe8\x0d\x8d\xdd\x5e\xf8\xf2\xf5\xd6\x12\xeb\x8b\x90\xd0\xe1\xcf\x11\x35\x48\x8f\x56\xd0\x58\x44\xe8\x8a\x1f\x16\x48\x4b\xc7\xe2\xd8\x2f\x92\x82\x2e\x0a\x8e\xb7\xfc\x54\x9f\xa5\x5e\x0c\xbb\x9d\xf0\xbe\x15\xef\x89\xbf\xdd\xc4\x7b\x28\x86\xcc\x19\xaf\x06\x9a\x9b\x81\xc4\xdf\x72\xd3\xc8\xd3\xf3\xce\xf5\x00\xad\xd1\xab\x30\x5a\xef\x5c\x06\x30\x63\xbc\x10\xd8\xbd\x73\xa4\x35\x7e\xb7\x05\x70\x26\x5d\x08\x17\xc4\xec\x84\x84\xbf\xdd\x8f\x88\x9a\x81\x82\xe1\xe4\x30\x03\x2d\x1d\x60\xda\xb9\xa0\x20\x62\x7e\x15\x56\x43\x61\x3f\x69\xaf\x86\x57\xf1\xa0\x32\x6f\xd9\x46\xe6\x6c\x9b\xbb\xd3\xc3\x49\xee\xb8\xc6\xe3\xe1\x64\x46\x36\x6f\x00\x7b\x0f\x6b\x7b\xe4\xd8\x27\x79\x28\x55\xb2\xf4\x3a\xb3\xdd\xc3\xca\xd2\x5b\xad\x
87\xbf\x7d\xbc\xc8\x86\xb8\xad\xe2\x16\xcd\x21\x21\x31\xcf\x44\xba\x60\xd9\x5a\x40\x40\x75\xd0\x95\x11\x2d\xff\x81\x51\x62\x3b\x7e\xea\x25\xb0\x18\xbe\x43\x70\x43\x4a\xe1\x63\xe6\x5c\xfc\x4b\x47\xd3\xe4\x26\x2e\x52\x74\xd2\x62\x0a\x99\x44\x95\x82\x49\xd1\xf2\xac\xfa\x9b\x33\x3a\x7c\x71\x42\x2b\x49\x62\xf8\xb2\x16\x3e\x72\xf3\xf7\x3e\x2d\x13\xf4\x15\x64\x28\xf8\xd3\x91\x37\x50\x08\xe7\x20\x47\x41\xd3\x69\x56\x2a\x55\x81\xb1\x29\x92\xf0\x66\x5c\xa3\x90\x34\x50\x3a\xb4\x0e\xd6\x4b\x13\xcb\x24\x77\x69\x05\x35\x9d\xd2\x0f\xe2\x8d\x8c\x74\x85\x12\x15\x48\x4f\x25\x39\x1e\xaa\x1d\xa5\xcd\xf7\x1a\xfe\xe8\x63\xa8\xea\xee\x87\x68\x3d\xd8\x6d\xc7\x28\xbf\xa6\xa7\xed\xe8\x8c\x73\xcd\x76\x5c\x6e\xee\xaa\xb6\x83\xb0\x2e\x1b\xdb\x91\xd6\x2e\x42\xdb\xe1\xc4\x2b\xfc\xb4\x1d\x48\xad\x7e\x99\x17\x18\x1c\x0d\x03\x3f\xed\x84\x16\x6b\x19\x63\x2b\x7c\x9d\x6c\xc8\xf9\x69\x10\x01\x43\x5e\xec\x91\x71\x6e\xb0\xa2\x4c\x1c\x6c\xd4\x2a\x2b\xe1\xc5\xe7\x1b\xac\xbe\x1c\xae\x22\x11\x8e\x2d\xba\xa6\x6c\xd4\x90\x0e\x6b\x0f\x04\x72\xa3\x85\x1c\x1f\x9f\x82\xfc\xb1\xcd\x50\x57\x3e\x90\xcf\x9e\xd5\x7b\xb6\xd7\x3f\xcb\x2f\x75\x74\x36\x88\xdf\x59\xef\x6f\x69\x14\x63\x24\xd0\x50\x50\x74\xef\xba\xff\x0c\x00\x00\xff\xff\x00\x24\x55\x1f\xc3\x21\x00\x00")
func call_tracerJsBytes() ([]byte, error) {
return bindataRead(
@@ -133,7 +133,7 @@ func call_tracerJs() (*asset, error) {
}
info := bindataFileInfo{name: "call_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
- a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0xb3, 0xb6, 0xe8, 0x19, 0xc3, 0xa, 0xce, 0xfd, 0x50, 0x84, 0xf7, 0x8a, 0xc5, 0x99, 0x10, 0x58, 0xc4, 0x69, 0xfb, 0x8, 0xad, 0x67, 0xea, 0x12, 0x38, 0xcb, 0xd, 0x2a, 0x94, 0xa1, 0x70}}
+ a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe9, 0xef, 0x68, 0xda, 0xd8, 0x9, 0xf5, 0xd5, 0x71, 0xa8, 0x8a, 0xfb, 0x30, 0xe8, 0xf0, 0x72, 0x14, 0x36, 0x6b, 0x62, 0x5a, 0x4e, 0xff, 0x16, 0xdc, 0xd3, 0x2c, 0x68, 0x7b, 0x79, 0x9f, 0xd3}}
return a, nil
}
@@ -197,7 +197,7 @@ func opcount_tracerJs() (*asset, error) {
return a, nil
}
-var _prestate_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\xdf\x6f\x1a\x49\x12\x7e\x9e\xf9\x2b\x4a\xfb\x02\x68\xc9\x90\xec\x49\x7b\x12\x3e\x9f\x34\x21\x24\x41\x62\x6d\x0b\xc8\xf9\x7c\xab\x7d\xe8\xe9\xae\x19\x7a\x69\xba\x47\xfd\x03\x8c\x22\xff\xef\xa7\xea\x99\x01\xc3\xda\x49\xee\xde\xcc\x74\xf5\x57\x55\x5f\x55\x7d\x5d\x1e\x8d\x60\x62\xea\x83\x95\xd5\xda\xc3\x2f\x6f\xdf\xfd\x1d\x56\x6b\x84\xca\xbc\x41\xbf\x46\x8b\x61\x0b\x79\xf0\x6b\x63\x5d\x3a\x1a\xc1\x6a\x2d\x1d\x94\x52\x21\x48\x07\x35\xb3\x1e\x4c\x09\xfe\xc2\x5e\xc9\xc2\x32\x7b\xc8\xd2\xd1\xa8\xb9\xf3\xe2\x31\x21\x94\x16\x11\x9c\x29\xfd\x9e\x59\x1c\xc3\xc1\x04\xe0\x4c\x83\x45\x21\x9d\xb7\xb2\x08\x1e\x41\x7a\x60\x5a\x8c\x8c\x85\xad\x11\xb2\x3c\x10\xa4\xf4\x10\xb4\x40\x1b\x5d\x7b\xb4\x5b\xd7\xc5\xf1\xe9\xe6\x0b\xcc\xd1\x39\xb4\xf0\x09\x35\x5a\xa6\xe0\x2e\x14\x4a\x72\x98\x4b\x8e\xda\x21\x30\x07\x35\x7d\x71\x6b\x14\x50\x44\x38\xba\xf8\x91\x42\x59\xb6\xa1\xc0\x47\x13\xb4\x60\x5e\x1a\x3d\x04\x94\x14\x39\xec\xd0\x3a\x69\x34\xfc\xad\x73\xd5\x02\x0e\xc1\x58\x02\xe9\x33\x4f\x09\x58\x30\x35\xdd\x1b\x00\xd3\x07\x50\xcc\x9f\xae\xfe\x00\x21\xa7\xbc\x05\x48\x1d\xdd\xac\x4d\x8d\xe0\xd7\xcc\x53\xd6\x7b\xa9\x14\x14\x08\xc1\x61\x19\xd4\x90\xd0\x8a\xe0\xe1\x7e\xb6\xfa\x7c\xfb\x65\x05\xf9\xcd\x03\xdc\xe7\x8b\x45\x7e\xb3\x7a\xb8\x82\xbd\xf4\x6b\x13\x3c\xe0\x0e\x1b\x28\xb9\xad\x95\x44\x01\x7b\x66\x2d\xd3\xfe\x00\xa6\x24\x84\xdf\xa6\x8b\xc9\xe7\xfc\x66\x95\xbf\x9f\xcd\x67\xab\x07\x30\x16\x3e\xce\x56\x37\xd3\xe5\x12\x3e\xde\x2e\x20\x87\xbb\x7c\xb1\x9a\x4d\xbe\xcc\xf3\x05\xdc\x7d\x59\xdc\xdd\x2e\xa7\x19\x2c\x91\xa2\x42\xba\xff\x7d\xce\xcb\x58\x3d\x8b\x20\xd0\x33\xa9\x5c\xc7\xc4\x83\x09\xe0\xd6\x26\x28\x01\x6b\xb6\x43\xb0\xc8\x51\xee\x50\x00\x03\x6e\xea\xc3\x0f\x17\x95\xb0\x98\x32\xba\x8a\x39\xbf\xda\x90\x30\x2b\x41\x1b\x3f\x04\x87\x08\xff\x58\x7b\x5f\x8f\x47\xa3\xfd\x7e\x9f\x55\x3a\x64\xc6\x56\x23\xd5\xc0\xb9\xd1\x3f\xb3\x94\x30\x6b\x8b\xce\x33\x8f\x2b\xcb\x38\x5a\x30\xc1\xd7\xc1\x3b\x70\xa1\x2c\x25\x97\xa8\x3d\x48\x
5d\x1a\xbb\x8d\x9d\x02\xde\x00\xb7\xc8\x3c\x02\x03\x65\x38\x53\x80\x8f\xc8\x43\x3c\x6b\x98\x8e\xed\x6a\x99\x76\x8c\xc7\xaf\xa5\x35\x5b\xca\x35\x38\x4f\x7f\x38\x87\xdb\x42\xa1\x80\x0a\x35\x3a\xe9\xa0\x50\x86\x6f\xb2\xf4\x6b\x9a\x3c\x0b\x86\xfa\x24\x66\xd8\x1a\xc5\xde\xd8\x63\xcf\x22\x14\x41\x2a\x21\x75\x95\xa5\x49\x67\x3d\x06\x1d\x94\x1a\xa6\x11\x42\x19\xb3\x09\x75\xce\xb9\x09\x31\xf6\x3f\x91\xfb\x06\xcc\xd5\xc8\x65\x49\xcd\xc1\x8e\xa7\xde\xc4\xa3\xa3\x5f\x53\x90\x7d\x96\x26\x67\x30\x63\x28\x83\x8e\xe9\xf4\x99\x10\x76\x08\xa2\x18\x7c\x4d\x93\x64\xc7\x2c\x61\xc1\x35\x78\xf3\x19\x1f\xe3\xe1\xe0\x2a\x4d\x12\x59\x42\xdf\xaf\xa5\xcb\x3a\xe0\xdf\x19\xe7\x7f\xc0\xf5\xf5\x75\x1c\xea\x52\x6a\x14\x03\x20\x88\xe4\x25\xb3\xe6\x24\x29\x98\x62\x9a\xe3\x18\x7a\x6f\x1f\x7b\xf0\x33\x88\x22\xab\xd0\xbf\x6f\xbe\x36\xce\x32\x6f\x96\xde\x4a\x5d\xf5\xdf\xfd\x3a\x18\xc6\x5b\xda\xc4\x3b\xd0\x9a\xdf\x98\xa3\x71\x73\xce\x8d\x88\xc7\x6d\xcc\x8d\xd5\xc4\x88\xd6\xa8\xb5\x72\xde\x58\x56\xe1\x18\xbe\x3e\xd1\xef\x27\xca\xea\x29\x4d\x9e\xce\x58\x5e\x36\x46\xaf\xb0\xdc\x42\x00\x6a\x6f\x8f\x7d\x5e\x49\x9a\xd4\xe7\x05\x88\x78\xdf\x2a\xc2\xb2\x0b\xe5\xa2\x08\x1b\x3c\x7c\xbf\x12\x74\x20\xc5\xe3\xf1\x60\x83\x87\xc1\x55\xfa\x6a\x89\xb2\x36\xe8\xdf\xa5\x78\xfc\xd1\x7a\x5d\xdc\x39\xe3\x75\x49\x56\xa7\x78\x07\x83\x0b\x1e\x2d\xba\xa0\x3c\xb5\xbb\xd4\x3b\xb3\x21\xe1\x5a\x13\x3f\x4a\x45\x4a\x4c\x4d\xd5\x72\x8d\x72\x14\x88\x1a\xa4\x47\xcb\x48\x3a\xcd\x0e\x2d\xbd\x1a\x60\xd1\x07\xab\xdd\x91\xc6\x52\x6a\xa6\x3a\xe0\x96\x75\x6f\x19\x6f\x66\xa6\xf9\xfe\x8c\x4b\xee\x1f\x23\x8b\x31\xbb\xd1\x08\x72\x0f\x94\x22\xd4\x46\x6a\x3f\x84\x3d\x82\x46\x14\x34\xf8\x02\x45\xe0\x3e\xe2\xf5\x76\x4c\x05\xec\x35\xc3\x4d\x12\x19\xaf\x9a\x40\x2f\xc1\xb3\xe1\x1f\xc6\x00\xb7\x66\x17\x9f\xb8\x82\xf1\x0d\xb4\x03\x67\xac\xac\xa4\x4e\x5b\x3a\xcf\x86\x8d\x22\xca\x08\x38\x86\x15\x6b\x45\x45\xa4\x2f\xef\x99\x82\x6b\x28\x64\x35\xd3\xfe\xa2\x78\x0d\xe9\xdd\xd5\xc1\x1f\x59\x3b\x3c\x99\x23\xc1\xeb\xff\x32\x18\xc2\xbb\x5f\x8f\x1d\xe1\x0d\x41\xc1\xf7\x
c1\xbc\x79\x1d\x2a\xbd\x6c\x86\x97\xaf\x45\x37\x34\xc1\x3f\x47\xaf\x99\x0b\x05\x95\xa3\xc9\x33\xf2\x78\x3e\xc5\x57\xdf\xc0\x3d\xcf\xad\xc3\x6d\xa9\xc9\x98\x10\xaf\x83\x36\x25\xfa\x80\xdc\xe2\x96\x54\x9d\xaa\xc0\x99\x52\x68\x7b\x0e\xa2\x66\x0c\xdb\x76\x8a\xf5\xc2\x6d\xed\x0f\x9d\xd6\x7b\x66\x2b\xf4\xee\xfb\x81\x45\x9c\x37\x6f\x3a\x09\x8c\x54\x1c\x6a\x84\xeb\x6b\xe8\x4d\x16\xd3\x7c\x35\xed\xb5\x63\x34\x1a\xc1\x3d\xc6\x4d\xa8\x50\xb2\x10\xea\x00\x02\x15\x7a\x6c\xe2\x32\x3a\x52\x74\x94\x84\x21\xad\x34\xb4\x6c\xe0\xa3\x74\x5e\xea\x0a\x1a\xa5\xd8\xd3\xbb\xda\xc2\xc5\x19\xe1\x2c\x38\xea\xd6\x8b\x47\xc8\x1b\xda\x28\x2c\x92\xae\x90\xfe\xc7\x71\x63\x4a\x1e\x37\x90\x52\x5a\xe7\xa1\x56\x8c\x63\x46\x78\xc7\x60\x5e\xaf\x6f\x3b\xc9\xe4\x7a\x11\x47\x30\x02\x9d\x1e\x38\xa6\xe8\x81\x24\xf7\x0e\xfa\x1d\xc6\x20\x4d\x12\xdb\x59\x3f\xc3\xbe\x3a\x49\x82\xf3\x58\x3f\x17\x04\x5a\x2c\x70\x87\x24\xa1\x51\x0d\x9a\xc7\x90\x7c\xfd\xeb\xb7\xf6\xf5\x45\x97\xa5\x09\xdd\x7b\x36\xd7\xca\x54\xe7\x73\x2d\x1a\x5a\x78\xb0\x96\xea\x7f\x94\xe0\x92\x66\xfc\xcf\xe0\x3c\x71\x6a\x89\x9e\x56\x2d\x5e\x12\xc9\x28\x89\xf4\xda\x0e\xfe\x2a\x86\xf4\x6e\xc5\x77\x82\xdc\xb5\xaf\x54\xb3\xcd\xd5\xc6\xa3\xf6\x92\x29\x75\xa0\x3a\xec\x2d\xad\x31\xb4\xb8\x0c\xc1\x49\xb2\x8a\x8a\x13\x4d\xa5\xe6\x2a\x88\xa6\x0d\x62\x1f\xb7\x78\x2e\xc6\x7c\xbe\xff\x6c\xd1\x39\x56\x61\x46\x9d\x54\xca\xc7\x76\x83\xd4\xd0\x6b\x44\xae\x3f\xe8\x65\xc7\x20\xcf\x25\x46\x99\x2a\xeb\x9a\x8c\x64\x3a\x17\xc2\xa2\x73\xfd\x41\xab\x39\xc7\xca\xde\xaf\x51\x13\xf9\xa0\x71\x0f\xc7\xd5\x84\x71\x4e\xab\x9a\x18\x02\x13\x82\xa4\xed\x62\x8d\x48\x93\xc4\xed\xa5\xe7\x6b\x88\x9e\x4c\x7d\x9a\xc5\x41\xdb\xff\x9c\x39\x84\x9f\xa6\xff\x5e\x4d\x6e\x3f\x4c\x27\xb7\x77\x0f\x3f\x8d\xe1\xec\xdb\x72\xf6\x9f\xe9\xf1\xdb\xfb\x7c\x9e\xdf\x4c\xa6\x3f\x8d\xe3\xdb\xfc\x42\x42\xde\x74\x29\x90\x43\xe7\x19\xdf\x64\x35\xe2\xa6\xff\xf6\x5c\x07\x4e\x09\x26\x49\x61\x91\x6d\xae\x4e\xc1\x34\x03\xda\xfa\xe8\x24\x17\xae\xe1\x55\xb2\xae\x5e\x8f\x66\xd2\xda\xf7\x3b\x21\x3f\xad\x22\x51\x2a\xbe\x19\x
47\x3e\x9f\x1f\x33\xa7\x1f\x44\xc7\xf1\xc3\x87\xe9\x7c\xfa\x29\x5f\x4d\xcf\xac\x96\xab\x7c\x35\x9b\x34\x9f\xfe\x67\x8a\xde\xfd\x30\x45\xbd\xe5\x72\x75\xbb\x98\xf6\xc6\xed\xaf\xf9\x6d\xfe\xa1\xf7\x17\x87\xed\xbe\xf2\xad\x26\xf3\xe6\xde\x58\xf1\xff\xd4\xea\xd9\xee\x50\xb2\x97\x56\x87\x28\x42\xdc\x87\x8b\xd5\x1c\x98\xee\xf4\xa3\x6c\xfe\x3d\x49\xe2\xfd\x17\x15\xe3\x29\x7d\x4a\xff\x1b\x00\x00\xff\xff\x7c\xdb\x3f\x79\x34\x0f\x00\x00")
+var _prestate_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\xdd\x6f\xdb\x38\x12\x7f\x96\xfe\x8a\x41\x5f\x6c\xa3\xae\xdc\x64\x81\x3d\xc0\xb9\x1c\xa0\xba\x6e\x1b\x20\x9b\x04\xb6\x7b\xb9\xdc\x62\x1f\x28\x72\x24\x73\x4d\x93\x02\x49\xd9\xf1\x15\xf9\xdf\x0f\x43\x7d\xf8\xa3\x49\xd3\xdd\x37\x9b\x1c\xfe\xe6\xfb\x37\xa3\xd1\x08\x26\xa6\xdc\x59\x59\x2c\x3d\x9c\xbf\x3f\xfb\x07\x2c\x96\x08\x85\x79\x87\x7e\x89\x16\xab\x35\xa4\x95\x5f\x1a\xeb\xe2\xd1\x08\x16\x4b\xe9\x20\x97\x0a\x41\x3a\x28\x99\xf5\x60\x72\xf0\x27\xf2\x4a\x66\x96\xd9\x5d\x12\x8f\x46\xf5\x9b\x67\xaf\x09\x21\xb7\x88\xe0\x4c\xee\xb7\xcc\xe2\x18\x76\xa6\x02\xce\x34\x58\x14\xd2\x79\x2b\xb3\xca\x23\x48\x0f\x4c\x8b\x91\xb1\xb0\x36\x42\xe6\x3b\x82\x94\x1e\x2a\x2d\xd0\x06\xd5\x1e\xed\xda\xb5\x76\x7c\xbe\xf9\x0a\xd7\xe8\x1c\x5a\xf8\x8c\x1a\x2d\x53\x70\x57\x65\x4a\x72\xb8\x96\x1c\xb5\x43\x60\x0e\x4a\x3a\x71\x4b\x14\x90\x05\x38\x7a\xf8\x89\x4c\x99\x37\xa6\xc0\x27\x53\x69\xc1\xbc\x34\x7a\x08\x28\xc9\x72\xd8\xa0\x75\xd2\x68\xf8\xa5\x55\xd5\x00\x0e\xc1\x58\x02\xe9\x33\x4f\x0e\x58\x30\x25\xbd\x1b\x00\xd3\x3b\x50\xcc\xef\x9f\xfe\x44\x40\xf6\x7e\x0b\x90\x3a\xa8\x59\x9a\x12\xc1\x2f\x99\x27\xaf\xb7\x52\x29\xc8\x10\x2a\x87\x79\xa5\x86\x84\x96\x55\x1e\xee\xaf\x16\x5f\x6e\xbf\x2e\x20\xbd\x79\x80\xfb\x74\x36\x4b\x6f\x16\x0f\x17\xb0\x95\x7e\x69\x2a\x0f\xb8\xc1\x1a\x4a\xae\x4b\x25\x51\xc0\x96\x59\xcb\xb4\xdf\x81\xc9\x09\xe1\xb7\xe9\x6c\xf2\x25\xbd\x59\xa4\x1f\xae\xae\xaf\x16\x0f\x60\x2c\x7c\xba\x5a\xdc\x4c\xe7\x73\xf8\x74\x3b\x83\x14\xee\xd2\xd9\xe2\x6a\xf2\xf5\x3a\x9d\xc1\xdd\xd7\xd9\xdd\xed\x7c\x9a\xc0\x1c\xc9\x2a\xa4\xf7\xaf\xc7\x3c\x0f\xd9\xb3\x08\x02\x3d\x93\xca\xb5\x91\x78\x30\x15\xb8\xa5\xa9\x94\x80\x25\xdb\x20\x58\xe4\x28\x37\x28\x80\x01\x37\xe5\xee\xa7\x93\x4a\x58\x4c\x19\x5d\x04\x9f\x5f\x2c\x48\xb8\xca\x41\x1b\x3f\x04\x87\x08\xff\x5c\x7a\x5f\x8e\x47\xa3\xed\x76\x9b\x14\xba\x4a\x8c\x2d\x46\xaa\x86\x73\xa3\x7f\x25\x31\x61\x96\x16\x9d\x67\x1e\x17\x96\x71\xb4\x60\x2a\x5f\x56\xde\x81\xab\xf2\x5c\x72\x89\xda\x83\x
d4\xb9\xb1\xeb\x50\x29\xe0\x0d\x70\x8b\xcc\x23\x30\x50\x86\x33\x05\xf8\x88\xbc\x0a\x77\x75\xa4\x43\xb9\x5a\xa6\x1d\xe3\xe1\x34\xb7\x66\x4d\xbe\x56\xce\xd3\x0f\xe7\x70\x9d\x29\x14\x50\xa0\x46\x27\x1d\x64\xca\xf0\x55\x12\x7f\x8b\xa3\x03\x63\xa8\x4e\x82\x87\x8d\x50\xa8\x8d\x2d\xf6\x2c\x42\x56\x49\x25\xa4\x2e\x92\x38\x6a\xa5\xc7\xa0\x2b\xa5\x86\x71\x80\x50\xc6\xac\xaa\x32\xe5\xdc\x54\xc1\xf6\x3f\x91\xfb\x1a\xcc\x95\xc8\x65\x4e\xc5\xc1\xba\x5b\x6f\xc2\x55\xa7\xd7\x64\x24\x9f\xc4\xd1\x11\xcc\x18\xf2\x4a\x07\x77\xfa\x4c\x08\x3b\x04\x91\x0d\xbe\xc5\x51\xb4\x61\x96\xb0\xe0\x12\xbc\xf9\x82\x8f\xe1\x72\x70\x11\x47\x91\xcc\xa1\xef\x97\xd2\x25\x2d\xf0\xef\x8c\xf3\x3f\xe0\xf2\xf2\x32\x34\x75\x2e\x35\x8a\x01\x10\x44\xf4\x9c\x58\x7d\x13\x65\x4c\x31\xcd\x71\x0c\xbd\xf7\x8f\x3d\x78\x0b\x22\x4b\x0a\xf4\x1f\xea\xd3\x5a\x59\xe2\xcd\xdc\x5b\xa9\x8b\xfe\xd9\xaf\x83\x61\x78\xa5\x4d\x78\x03\x8d\xf8\x8d\xe9\x84\xeb\x7b\x6e\x44\xb8\x6e\x6c\xae\xa5\x26\x46\x34\x42\x8d\x94\xf3\xc6\xb2\x02\xc7\xf0\xed\x89\xfe\x3f\x91\x57\x4f\x71\xf4\x74\x14\xe5\x79\x2d\xf4\x42\x94\x1b\x08\x40\xed\x6d\x57\xe7\x85\xa4\x4e\x3d\x4c\x40\xc0\xfb\x51\x12\xe6\xad\x29\x27\x49\x58\xe1\xee\xf5\x4c\xd0\x85\x14\x8f\xdd\xc5\x0a\x77\x83\x8b\xf8\xc5\x14\x25\x8d\xd1\xbf\x4b\xf1\xf8\xb3\xf9\x3a\x79\x73\x14\xd7\x39\x49\xed\xed\x1d\x0c\x4e\xe2\x68\xd1\x55\xca\x53\xb9\x4b\xbd\x31\x2b\x22\xae\x25\xc5\x47\xa9\x10\x12\x53\x52\xb6\x5c\xcd\x1c\x19\xa2\x06\xe9\xd1\x32\xa2\x4e\xb3\x41\x4b\x53\x03\x2c\xfa\xca\x6a\xd7\x85\x31\x97\x9a\xa9\x16\xb8\x89\xba\xb7\x8c\xd7\x3d\x53\x9f\x1f\xc4\x92\xfb\xc7\x10\xc5\xe0\xdd\x68\x04\xa9\x07\x72\x11\x4a\x23\xb5\x1f\xc2\x16\x41\x23\x0a\x6a\x7c\x81\xa2\xe2\x3e\xe0\xf5\x36\x4c\x55\xd8\xab\x9b\x9b\x28\x32\x3c\x35\x15\x4d\x82\x83\xe6\x1f\x06\x03\xd7\x66\x13\x46\x5c\xc6\xf8\x0a\x9a\x86\x33\x56\x16\x52\xc7\x4d\x38\x8f\x9a\x8d\x2c\x4a\x08\x38\x98\x15\x72\x45\x49\xa4\x93\x0f\x4c\xc1\x25\x64\xb2\xb8\xd2\xfe\x24\x79\x75\xd0\xdb\xa7\x83\x3f\x92\xa6\x79\x12\x47\x84\xd7\x3f\x1f\x0c\xe1\xec\xd7\xae\x22\xbc\x21\x28\x78\x
1d\xcc\x9b\x97\xa1\xe2\xd3\x62\x78\xfe\x59\x50\x43\x1d\xfc\x36\x68\x4d\x5c\x95\x51\x3a\x6a\x3f\x43\x1c\x8f\xbb\xf8\xe2\x07\xb8\xc7\xbe\xb5\xb8\x4d\x68\x12\x26\xc4\xcb\xa0\x75\x8a\x3e\x22\xb7\xb8\x26\x56\xa7\x2c\x70\xa6\x14\xda\x9e\x83\xc0\x19\xc3\xa6\x9c\x42\xbe\x70\x5d\xfa\x5d\xcb\xf5\x9e\xd9\x02\xbd\x7b\xdd\xb0\x80\xf3\xee\x5d\x4b\x81\x21\x14\xbb\x12\xe1\xf2\x12\x7a\x93\xd9\x34\x5d\x4c\x7b\x4d\x1b\x8d\x46\x70\x8f\x61\x13\xca\x94\xcc\x84\xda\x81\x40\x85\x1e\x6b\xbb\x8c\x0e\x21\xea\x28\x61\x48\x2b\x0d\x2d\x1b\xf8\x28\x9d\x97\xba\x80\x9a\x29\xb6\x34\x57\x1b\xb8\xd0\x23\x9c\x55\x8e\xaa\xf5\x64\x08\x79\x43\x1b\x85\x45\xe2\x15\xe2\xff\xd0\x6e\x4c\xc9\x6e\x03\xc9\xa5\x75\x1e\x4a\xc5\x38\x26\x84\xd7\x19\xf3\x72\x7e\x9b\x4e\x26\xd5\xb3\xd0\x82\x01\x68\x3f\xe0\x98\xa2\x01\x49\xea\x1d\xf4\x5b\x8c\x41\x1c\x45\xb6\x95\x3e\xc0\xbe\xd8\x53\x82\xf3\x58\x1e\x12\x02\x2d\x16\xb8\x41\xa2\xd0\xc0\x06\xf5\x30\x24\x5d\xff\xfe\xad\x99\xbe\xe8\x92\x38\xa2\x77\x07\x7d\xad\x4c\x71\xdc\xd7\xa2\x0e\x0b\xaf\xac\xa5\xfc\x77\x14\x9c\x53\x8f\xff\x59\x39\x4f\x31\xb5\x14\x9e\x86\x2d\x9e\x23\xc9\x40\x89\x34\x6d\x07\xdf\x93\x21\xcd\xad\x30\x27\x48\x5d\x33\xa5\xea\x6d\xae\x34\x1e\xb5\x97\x4c\xa9\x1d\xe5\x61\x6b\x69\x8d\xa1\xc5\x65\x08\x4e\x92\x54\x60\x9c\x20\x2a\x35\x57\x95\xa8\xcb\x20\xd4\x71\x83\xe7\x82\xcd\xc7\xfb\xcf\x1a\x9d\x63\x05\x26\x54\x49\xb9\x7c\x6c\x36\x48\x0d\xbd\x9a\xe4\xfa\x83\x5e\xd2\x19\x79\x4c\x31\xca\x14\x49\x5b\x64\x44\xd3\xa9\x10\x16\x9d\xeb\x0f\x1a\xce\xe9\x32\x7b\xbf\x44\x4d\xc1\x07\x8d\x5b\xe8\x56\x13\xc6\x39\xad\x6a\x62\x08\x4c\x08\xa2\xb6\x93\x35\x22\x8e\x22\xb7\x95\x9e\x2f\x21\x68\x32\xe5\xbe\x17\x07\x4d\xfd\x73\xe6\x10\xde\x4c\xff\xb3\x98\xdc\x7e\x9c\x4e\x6e\xef\x1e\xde\x8c\xe1\xe8\x6c\x7e\xf5\xdf\x69\x77\xf6\x21\xbd\x4e\x6f\x26\xd3\x37\xe3\x30\x9b\x9f\x71\xc8\x9b\xd6\x05\x52\xe8\x3c\xe3\xab\xa4\x44\x5c\xf5\xdf\x1f\xf3\xc0\xde\xc1\x28\xca\x2c\xb2\xd5\xc5\xde\x98\xba\x41\x1b\x1d\x2d\xe5\xc2\x25\xbc\x18\xac\x8b\x97\xad\x99\x34\xf2\xfd\x96\xc8\xf7\xab\x48\xa0\x8a\xd7\xed\x38\x
ff\xcb\x86\x84\xde\x61\x7c\x35\x06\xc7\x14\x6d\xc0\xf2\x7f\xf4\xe5\x92\xe7\x0e\xfd\x10\x50\x0b\xb3\x25\xe6\xeb\x50\xeb\x9b\x06\xf7\x20\x64\x67\x83\x9a\x41\x6f\xf3\xfe\xa0\x13\x26\xb0\xef\x45\xcf\x9f\x13\x45\x2d\xe0\xb2\x45\x7f\x1b\x5e\xbe\x1e\xa8\xf3\x26\x52\x27\x0a\x7e\x39\xd9\xf0\xc2\xfd\x1a\xd7\xc6\xee\x9a\x71\x74\xe0\xdf\x8f\xa3\x9a\x5e\x5f\x77\xf5\x44\x7f\xa8\xc8\xba\x83\x8f\xd3\xeb\xe9\xe7\x74\x31\x3d\x92\x9a\x2f\xd2\xc5\xd5\xa4\x3e\xfa\xcb\x85\x77\xf6\xd3\x85\xd7\x9b\xcf\x17\xb7\xb3\x69\x6f\xdc\xfc\xbb\xbe\x4d\x3f\xf6\xbe\x53\xd8\x6c\x81\x3f\x6a\x5d\x6f\xee\x8d\x15\x7f\xa7\x03\x0e\x36\xb2\x9c\x3d\xb7\x90\x05\x6a\xe7\xbe\x3a\xf9\xe0\x01\xa6\x5b\x56\xce\xeb\x8f\xbe\x28\xbc\x7f\x96\x87\x9f\xe2\xa7\xf8\xff\x01\x00\x00\xff\xff\xb1\x28\x85\x2a\x8a\x10\x00\x00")
func prestate_tracerJsBytes() ([]byte, error) {
return bindataRead(
@@ -213,7 +213,7 @@ func prestate_tracerJs() (*asset, error) {
}
info := bindataFileInfo{name: "prestate_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
- a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd, 0xb0, 0x72, 0x28, 0xc7, 0x27, 0x97, 0x4d, 0xe, 0xbf, 0x29, 0xe1, 0xa8, 0xd7, 0x52, 0x13, 0xa1, 0x19, 0xc3, 0xfb, 0x8d, 0x5b, 0xcb, 0xdd, 0xa5, 0xd7, 0x98, 0x34, 0x6a, 0xbf, 0x33, 0x6c}}
+ a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe9, 0x79, 0x70, 0x4f, 0xc5, 0x78, 0x57, 0x63, 0x6f, 0x5, 0x31, 0xce, 0x3e, 0x5d, 0xbd, 0x71, 0x4, 0x46, 0x78, 0xcd, 0x1d, 0xcd, 0xb9, 0xd8, 0x10, 0xff, 0xe6, 0xc5, 0x59, 0xb9, 0x25, 0x6e}}
return a, nil
}
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/call_tracer.js b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/call_tracer.js
index 83495b15..f8b383cd 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/call_tracer.js
+++ b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/call_tracer.js
@@ -38,7 +38,7 @@
var op = log.op.toString();
}
// If a new contract is being created, add to the call stack
- if (syscall && op == 'CREATE') {
+ if (syscall && (op == 'CREATE' || op == "CREATE2")) {
var inOff = log.stack.peek(1).valueOf();
var inEnd = inOff + log.stack.peek(2).valueOf();
@@ -116,7 +116,7 @@
// Pop off the last call and get the execution results
var call = this.callstack.pop();
- if (call.type == 'CREATE') {
+ if (call.type == 'CREATE' || call.type == "CREATE2") {
// If the call was a CREATE, retrieve the contract address and output code
call.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost - log.getGas()).toString(16);
delete call.gasIn; delete call.gasCost;
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/prestate_tracer.js b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/prestate_tracer.js
index 56aa2b21..e0a22bf1 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/prestate_tracer.js
+++ b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/prestate_tracer.js
@@ -86,6 +86,14 @@
var from = log.contract.getAddress();
this.lookupAccount(toContract(from, db.getNonce(from)), db);
break;
+ case "CREATE2":
+ var from = log.contract.getAddress();
+ // stack: salt, size, offset, endowment
+ var offset = log.stack.peek(1).valueOf()
+ var size = log.stack.peek(2).valueOf()
+ var end = offset + size
+ this.lookupAccount(toContract2(from, log.stack.peek(3).toString(16), log.memory.slice(offset, end)), db);
+ break;
case "CALL": case "CALLCODE": case "DELEGATECALL": case "STATICCALL":
this.lookupAccount(toAddress(log.stack.peek(1).toString(16)), db);
break;
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/tracers.go b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/tracers.go
index dcf0d49d..2e40975b 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/tracers.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/tracers.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-//go:generate go-bindata -nometadata -o assets.go -pkg tracers -ignore ((tracers)|(assets)).go ./...
+//go:generate go-bindata -nometadata -o assets.go -pkg tracers -ignore tracers.go -ignore assets.go ./...
//go:generate gofmt -s -w assets.go
// Package tracers contains the actual JavaScript tracer assets.
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracer.go b/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracer.go
index 3533a831..9d670186 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracer.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracer.go
@@ -367,6 +367,28 @@ func New(code string) (*Tracer, error) {
copy(makeSlice(ctx.PushFixedBuffer(20), 20), contract[:])
return 1
})
+ tracer.vm.PushGlobalGoFunction("toContract2", func(ctx *duktape.Context) int {
+ var from common.Address
+ if ptr, size := ctx.GetBuffer(-3); ptr != nil {
+ from = common.BytesToAddress(makeSlice(ptr, size))
+ } else {
+ from = common.HexToAddress(ctx.GetString(-3))
+ }
+ // Retrieve salt hex string from js stack
+ salt := common.HexToHash(ctx.GetString(-2))
+ // Retrieve code slice from js stack
+ var code []byte
+ if ptr, size := ctx.GetBuffer(-1); ptr != nil {
+ code = common.CopyBytes(makeSlice(ptr, size))
+ } else {
+ code = common.FromHex(ctx.GetString(-1))
+ }
+ codeHash := crypto.Keccak256(code)
+ ctx.Pop3()
+ contract := crypto.CreateAddress2(from, salt, codeHash)
+ copy(makeSlice(ctx.PushFixedBuffer(20), 20), contract[:])
+ return 1
+ })
tracer.vm.PushGlobalGoFunction("isPrecompiled", func(ctx *duktape.Context) int {
_, ok := vm.PrecompiledContractsByzantium[common.BytesToAddress(popSlice(ctx))]
ctx.PushBoolean(ok)
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracers_test.go b/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracers_test.go
index d25fc459..b435b169 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracers_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracers_test.go
@@ -17,6 +17,8 @@
package tracers
import (
+ "crypto/ecdsa"
+ "crypto/rand"
"encoding/json"
"io/ioutil"
"math/big"
@@ -31,7 +33,9 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/tests"
)
@@ -116,6 +120,83 @@ type callTracerTest struct {
Result *callTrace `json:"result"`
}
+func TestPrestateTracerCreate2(t *testing.T) {
+ unsigned_tx := types.NewTransaction(1, common.HexToAddress("0x00000000000000000000000000000000deadbeef"),
+ new(big.Int), 5000000, big.NewInt(1), []byte{})
+
+ privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader)
+ if err != nil {
+ t.Fatalf("err %v", err)
+ }
+ signer := types.NewEIP155Signer(big.NewInt(1))
+ tx, err := types.SignTx(unsigned_tx, signer, privateKeyECDSA)
+ if err != nil {
+ t.Fatalf("err %v", err)
+ }
+ /**
+ This comes from one of the test-vectors on the Skinny Create2 - EIP
+
+ address 0x00000000000000000000000000000000deadbeef
+ salt 0x00000000000000000000000000000000000000000000000000000000cafebabe
+ init_code 0xdeadbeef
+ gas (assuming no mem expansion): 32006
+ result: 0x60f3f640a8508fC6a86d45DF051962668E1e8AC7
+ */
+ origin, _ := signer.Sender(tx)
+ context := vm.Context{
+ CanTransfer: core.CanTransfer,
+ Transfer: core.Transfer,
+ Origin: origin,
+ Coinbase: common.Address{},
+ BlockNumber: new(big.Int).SetUint64(8000000),
+ Time: new(big.Int).SetUint64(5),
+ Difficulty: big.NewInt(0x30000),
+ GasLimit: uint64(6000000),
+ GasPrice: big.NewInt(1),
+ }
+ alloc := core.GenesisAlloc{}
+ // The code pushes 'deadbeef' into memory, then the other params, and calls CREATE2, then returns
+ // the address
+ alloc[common.HexToAddress("0x00000000000000000000000000000000deadbeef")] = core.GenesisAccount{
+ Nonce: 1,
+ Code: hexutil.MustDecode("0x63deadbeef60005263cafebabe6004601c6000F560005260206000F3"),
+ Balance: big.NewInt(1),
+ }
+ alloc[origin] = core.GenesisAccount{
+ Nonce: 1,
+ Code: []byte{},
+ Balance: big.NewInt(500000000000000),
+ }
+ statedb := tests.MakePreState(ethdb.NewMemDatabase(), alloc)
+ // Create the tracer, the EVM environment and run it
+ tracer, err := New("prestateTracer")
+ if err != nil {
+ t.Fatalf("failed to create call tracer: %v", err)
+ }
+ evm := vm.NewEVM(context, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer})
+
+ msg, err := tx.AsMessage(signer)
+ if err != nil {
+ t.Fatalf("failed to prepare transaction for tracing: %v", err)
+ }
+ st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
+ if _, _, _, err = st.TransitionDb(); err != nil {
+ t.Fatalf("failed to execute transaction: %v", err)
+ }
+ // Retrieve the trace result and compare against the etalon
+ res, err := tracer.GetResult()
+ if err != nil {
+ t.Fatalf("failed to retrieve trace result: %v", err)
+ }
+ ret := make(map[string]interface{})
+ if err := json.Unmarshal(res, &ret); err != nil {
+ t.Fatalf("failed to unmarshal trace result: %v", err)
+ }
+ if _, has := ret["0x60f3f640a8508fc6a86d45df051962668e1e8ac7"]; !has {
+ t.Fatalf("Expected 0x60f3f640a8508fc6a86d45df051962668e1e8ac7 in result")
+ }
+}
+
// Iterates over all the input-output datasets in the tracer test harness and
// runs the JavaScript tracers against them.
func TestCallTracer(t *testing.T) {
@@ -185,8 +266,9 @@ func TestCallTracer(t *testing.T) {
if err := json.Unmarshal(res, ret); err != nil {
t.Fatalf("failed to unmarshal trace result: %v", err)
}
+
if !reflect.DeepEqual(ret, test.Result) {
- t.Fatalf("trace mismatch: have %+v, want %+v", ret, test.Result)
+ t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result)
}
})
}
diff --git a/vendor/github.com/ethereum/go-ethereum/internal/ethapi/api.go b/vendor/github.com/ethereum/go-ethereum/internal/ethapi/api.go
index 656555b3..73b629bd 100644
--- a/vendor/github.com/ethereum/go-ethereum/internal/ethapi/api.go
+++ b/vendor/github.com/ethereum/go-ethereum/internal/ethapi/api.go
@@ -1074,6 +1074,15 @@ func (s *PublicTransactionPoolAPI) GetRawTransactionByBlockHashAndIndex(ctx cont
// GetTransactionCount returns the number of transactions the given address has sent for the given block number
func (s *PublicTransactionPoolAPI) GetTransactionCount(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (*hexutil.Uint64, error) {
+ // Ask transaction pool for the nonce which includes pending transactions
+ if blockNr == rpc.PendingBlockNumber {
+ nonce, err := s.b.GetPoolNonce(ctx, address)
+ if err != nil {
+ return nil, err
+ }
+ return (*hexutil.Uint64)(&nonce), nil
+ }
+ // Resolve block number and use its state to ask for the nonce
state, _, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
if state == nil || err != nil {
return nil, err
diff --git a/vendor/github.com/ethereum/go-ethereum/internal/web3ext/web3ext.go b/vendor/github.com/ethereum/go-ethereum/internal/web3ext/web3ext.go
index 06bfcef6..6b98c8b7 100644
--- a/vendor/github.com/ethereum/go-ethereum/internal/web3ext/web3ext.go
+++ b/vendor/github.com/ethereum/go-ethereum/internal/web3ext/web3ext.go
@@ -18,6 +18,7 @@
package web3ext
var Modules = map[string]string{
+ "accounting": Accounting_JS,
"admin": Admin_JS,
"chequebook": Chequebook_JS,
"clique": Clique_JS,
@@ -704,3 +705,47 @@ web3._extend({
]
});
`
+
+const Accounting_JS = `
+web3._extend({
+ property: 'accounting',
+ methods: [
+ new web3._extend.Property({
+ name: 'balance',
+ getter: 'account_balance'
+ }),
+ new web3._extend.Property({
+ name: 'balanceCredit',
+ getter: 'account_balanceCredit'
+ }),
+ new web3._extend.Property({
+ name: 'balanceDebit',
+ getter: 'account_balanceDebit'
+ }),
+ new web3._extend.Property({
+ name: 'bytesCredit',
+ getter: 'account_bytesCredit'
+ }),
+ new web3._extend.Property({
+ name: 'bytesDebit',
+ getter: 'account_bytesDebit'
+ }),
+ new web3._extend.Property({
+ name: 'msgCredit',
+ getter: 'account_msgCredit'
+ }),
+ new web3._extend.Property({
+ name: 'msgDebit',
+ getter: 'account_msgDebit'
+ }),
+ new web3._extend.Property({
+ name: 'peerDrops',
+ getter: 'account_peerDrops'
+ }),
+ new web3._extend.Property({
+ name: 'selfDrops',
+ getter: 'account_selfDrops'
+ }),
+ ]
+});
+`
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go b/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go
index cdeb28dd..de7d8de6 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go
@@ -27,10 +27,10 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
var (
@@ -800,7 +800,7 @@ func (n *nodeNetGuts) startNextQuery(net *Network) {
func (q *findnodeQuery) start(net *Network) bool {
// Satisfy queries against the local node directly.
if q.remote == net.tab.self {
- closest := net.tab.closest(crypto.Keccak256Hash(q.target[:]), bucketSize)
+ closest := net.tab.closest(q.target, bucketSize)
q.reply <- closest.entries
return true
}
@@ -1234,7 +1234,7 @@ func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) {
}
func rlpHash(x interface{}) (h common.Hash) {
- hw := sha3.NewKeccak256()
+ hw := sha3.NewLegacyKeccak256()
rlp.Encode(hw, x)
hw.Sum(h[:0])
return h
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/enode/idscheme.go b/vendor/github.com/ethereum/go-ethereum/p2p/enode/idscheme.go
index 9b495fd4..c1834f06 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/enode/idscheme.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/enode/idscheme.go
@@ -23,9 +23,9 @@ import (
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
// List of known secure identity schemes.
@@ -48,7 +48,7 @@ func SignV4(r *enr.Record, privkey *ecdsa.PrivateKey) error {
cpy.Set(enr.ID("v4"))
cpy.Set(Secp256k1(privkey.PublicKey))
- h := sha3.NewKeccak256()
+ h := sha3.NewLegacyKeccak256()
rlp.Encode(h, cpy.AppendElements(nil))
sig, err := crypto.Sign(h.Sum(nil), privkey)
if err != nil {
@@ -69,7 +69,7 @@ func (V4ID) Verify(r *enr.Record, sig []byte) error {
return fmt.Errorf("invalid public key")
}
- h := sha3.NewKeccak256()
+ h := sha3.NewLegacyKeccak256()
rlp.Encode(h, r.AppendElements(nil))
if !crypto.VerifySignature(entry, h.Sum(nil), sig) {
return enr.ErrInvalidSig
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting.go b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting.go
index 770406a2..bdc490e5 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting.go
@@ -22,31 +22,33 @@ import (
"github.com/ethereum/go-ethereum/metrics"
)
-//define some metrics
+// define some metrics
var (
- //All metrics are cumulative
+ // All metrics are cumulative
- //total amount of units credited
+ // total amount of units credited
mBalanceCredit metrics.Counter
- //total amount of units debited
+ // total amount of units debited
mBalanceDebit metrics.Counter
- //total amount of bytes credited
+ // total amount of bytes credited
mBytesCredit metrics.Counter
- //total amount of bytes debited
+ // total amount of bytes debited
mBytesDebit metrics.Counter
- //total amount of credited messages
+ // total amount of credited messages
mMsgCredit metrics.Counter
- //total amount of debited messages
+ // total amount of debited messages
mMsgDebit metrics.Counter
- //how many times local node had to drop remote peers
+ // how many times local node had to drop remote peers
mPeerDrops metrics.Counter
- //how many times local node overdrafted and dropped
+ // how many times local node overdrafted and dropped
mSelfDrops metrics.Counter
+
+ MetricsRegistry metrics.Registry
)
-//Prices defines how prices are being passed on to the accounting instance
+// Prices defines how prices are being passed on to the accounting instance
type Prices interface {
- //Return the Price for a message
+ // Return the Price for a message
Price(interface{}) *Price
}
@@ -57,20 +59,20 @@ const (
Receiver = Payer(false)
)
-//Price represents the costs of a message
+// Price represents the costs of a message
type Price struct {
- Value uint64 //
- PerByte bool //True if the price is per byte or for unit
+ Value uint64
+ PerByte bool // True if the price is per byte or for unit
Payer Payer
}
-//For gives back the price for a message
-//A protocol provides the message price in absolute value
-//This method then returns the correct signed amount,
-//depending on who pays, which is identified by the `payer` argument:
-//`Send` will pass a `Sender` payer, `Receive` will pass the `Receiver` argument.
-//Thus: If Sending and sender pays, amount positive, otherwise negative
-//If Receiving, and receiver pays, amount positive, otherwise negative
+// For gives back the price for a message
+// A protocol provides the message price in absolute value
+// This method then returns the correct signed amount,
+// depending on who pays, which is identified by the `payer` argument:
+// `Send` will pass a `Sender` payer, `Receive` will pass the `Receiver` argument.
+// Thus: If Sending and sender pays, amount positive, otherwise negative
+// If Receiving, and receiver pays, amount positive, otherwise negative
func (p *Price) For(payer Payer, size uint32) int64 {
price := p.Value
if p.PerByte {
@@ -82,22 +84,22 @@ func (p *Price) For(payer Payer, size uint32) int64 {
return int64(price)
}
-//Balance is the actual accounting instance
-//Balance defines the operations needed for accounting
-//Implementations internally maintain the balance for every peer
+// Balance is the actual accounting instance
+// Balance defines the operations needed for accounting
+// Implementations internally maintain the balance for every peer
type Balance interface {
- //Adds amount to the local balance with remote node `peer`;
- //positive amount = credit local node
- //negative amount = debit local node
+ // Adds amount to the local balance with remote node `peer`;
+ // positive amount = credit local node
+ // negative amount = debit local node
Add(amount int64, peer *Peer) error
}
-//Accounting implements the Hook interface
-//It interfaces to the balances through the Balance interface,
-//while interfacing with protocols and its prices through the Prices interface
+// Accounting implements the Hook interface
+// It interfaces to the balances through the Balance interface,
+// while interfacing with protocols and its prices through the Prices interface
type Accounting struct {
- Balance //interface to accounting logic
- Prices //interface to prices logic
+ Balance // interface to accounting logic
+ Prices // interface to prices logic
}
func NewAccounting(balance Balance, po Prices) *Accounting {
@@ -108,79 +110,77 @@ func NewAccounting(balance Balance, po Prices) *Accounting {
return ah
}
-//SetupAccountingMetrics creates a separate registry for p2p accounting metrics;
-//this registry should be independent of any other metrics as it persists at different endpoints.
-//It also instantiates the given metrics and starts the persisting go-routine which
-//at the passed interval writes the metrics to a LevelDB
+// SetupAccountingMetrics creates a separate registry for p2p accounting metrics;
+// this registry should be independent of any other metrics as it persists at different endpoints.
+// It also instantiates the given metrics and starts the persisting go-routine which
+// at the passed interval writes the metrics to a LevelDB
func SetupAccountingMetrics(reportInterval time.Duration, path string) *AccountingMetrics {
- //create an empty registry
- registry := metrics.NewRegistry()
- //instantiate the metrics
- mBalanceCredit = metrics.NewRegisteredCounterForced("account.balance.credit", registry)
- mBalanceDebit = metrics.NewRegisteredCounterForced("account.balance.debit", registry)
- mBytesCredit = metrics.NewRegisteredCounterForced("account.bytes.credit", registry)
- mBytesDebit = metrics.NewRegisteredCounterForced("account.bytes.debit", registry)
- mMsgCredit = metrics.NewRegisteredCounterForced("account.msg.credit", registry)
- mMsgDebit = metrics.NewRegisteredCounterForced("account.msg.debit", registry)
- mPeerDrops = metrics.NewRegisteredCounterForced("account.peerdrops", registry)
- mSelfDrops = metrics.NewRegisteredCounterForced("account.selfdrops", registry)
- //create the DB and start persisting
- return NewAccountingMetrics(registry, reportInterval, path)
+ // create an empty registry
+ MetricsRegistry = metrics.NewRegistry()
+ // instantiate the metrics
+ mBalanceCredit = metrics.NewRegisteredCounterForced("account.balance.credit", MetricsRegistry)
+ mBalanceDebit = metrics.NewRegisteredCounterForced("account.balance.debit", MetricsRegistry)
+ mBytesCredit = metrics.NewRegisteredCounterForced("account.bytes.credit", MetricsRegistry)
+ mBytesDebit = metrics.NewRegisteredCounterForced("account.bytes.debit", MetricsRegistry)
+ mMsgCredit = metrics.NewRegisteredCounterForced("account.msg.credit", MetricsRegistry)
+ mMsgDebit = metrics.NewRegisteredCounterForced("account.msg.debit", MetricsRegistry)
+ mPeerDrops = metrics.NewRegisteredCounterForced("account.peerdrops", MetricsRegistry)
+ mSelfDrops = metrics.NewRegisteredCounterForced("account.selfdrops", MetricsRegistry)
+ // create the DB and start persisting
+ return NewAccountingMetrics(MetricsRegistry, reportInterval, path)
}
-//Implement Hook.Send
// Send takes a peer, a size and a msg and
-// - calculates the cost for the local node sending a msg of size to peer using the Prices interface
-// - credits/debits local node using balance interface
+// - calculates the cost for the local node sending a msg of size to peer using the Prices interface
+// - credits/debits local node using balance interface
func (ah *Accounting) Send(peer *Peer, size uint32, msg interface{}) error {
- //get the price for a message (through the protocol spec)
+ // get the price for a message (through the protocol spec)
price := ah.Price(msg)
- //this message doesn't need accounting
+ // this message doesn't need accounting
if price == nil {
return nil
}
- //evaluate the price for sending messages
+ // evaluate the price for sending messages
costToLocalNode := price.For(Sender, size)
- //do the accounting
+ // do the accounting
err := ah.Add(costToLocalNode, peer)
- //record metrics: just increase counters for user-facing metrics
+ // record metrics: just increase counters for user-facing metrics
ah.doMetrics(costToLocalNode, size, err)
return err
}
-//Implement Hook.Receive
// Receive takes a peer, a size and a msg and
-// - calculates the cost for the local node receiving a msg of size from peer using the Prices interface
-// - credits/debits local node using balance interface
+// - calculates the cost for the local node receiving a msg of size from peer using the Prices interface
+// - credits/debits local node using balance interface
func (ah *Accounting) Receive(peer *Peer, size uint32, msg interface{}) error {
- //get the price for a message (through the protocol spec)
+ // get the price for a message (through the protocol spec)
price := ah.Price(msg)
- //this message doesn't need accounting
+ // this message doesn't need accounting
if price == nil {
return nil
}
- //evaluate the price for receiving messages
+ // evaluate the price for receiving messages
costToLocalNode := price.For(Receiver, size)
- //do the accounting
+ // do the accounting
err := ah.Add(costToLocalNode, peer)
- //record metrics: just increase counters for user-facing metrics
+ // record metrics: just increase counters for user-facing metrics
ah.doMetrics(costToLocalNode, size, err)
return err
}
-//record some metrics
-//this is not an error handling. `err` is returned by both `Send` and `Receive`
-//`err` will only be non-nil if a limit has been violated (overdraft), in which case the peer has been dropped.
-//if the limit has been violated and `err` is thus not nil:
-// * if the price is positive, local node has been credited; thus `err` implicitly signals the REMOTE has been dropped
-// * if the price is negative, local node has been debited, thus `err` implicitly signals LOCAL node "overdraft"
+// record some metrics
+// this is not an error handling. `err` is returned by both `Send` and `Receive`
+// `err` will only be non-nil if a limit has been violated (overdraft), in which case the peer has been dropped.
+// if the limit has been violated and `err` is thus not nil:
+// * if the price is positive, local node has been credited; thus `err` implicitly signals the REMOTE has been dropped
+// * if the price is negative, local node has been debited, thus `err` implicitly signals LOCAL node "overdraft"
func (ah *Accounting) doMetrics(price int64, size uint32, err error) {
if price > 0 {
mBalanceCredit.Inc(price)
mBytesCredit.Inc(int64(size))
mMsgCredit.Inc(1)
if err != nil {
- //increase the number of times a remote node has been dropped due to "overdraft"
+ // increase the number of times a remote node has been dropped due to "overdraft"
mPeerDrops.Inc(1)
}
} else {
@@ -188,7 +188,7 @@ func (ah *Accounting) doMetrics(price int64, size uint32, err error) {
mBytesDebit.Inc(int64(size))
mMsgDebit.Inc(1)
if err != nil {
- //increase the number of times the local node has done an "overdraft" in respect to other nodes
+ // increase the number of times the local node has done an "overdraft" in respect to other nodes
mSelfDrops.Inc(1)
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting_api.go b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting_api.go
new file mode 100644
index 00000000..48e2af9f
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting_api.go
@@ -0,0 +1,94 @@
+package protocols
+
+import (
+ "errors"
+)
+
+// Textual version number of accounting API
+const AccountingVersion = "1.0"
+
+var errNoAccountingMetrics = errors.New("accounting metrics not enabled")
+
+// AccountingApi provides an API to access account related information
+type AccountingApi struct {
+ metrics *AccountingMetrics
+}
+
+// NewAccountingApi creates a new AccountingApi
+// m will be used to check if accounting metrics are enabled
+func NewAccountingApi(m *AccountingMetrics) *AccountingApi {
+ return &AccountingApi{m}
+}
+
+// Balance returns local node balance (units credited - units debited)
+func (self *AccountingApi) Balance() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ balance := mBalanceCredit.Count() - mBalanceDebit.Count()
+ return balance, nil
+}
+
+// BalanceCredit returns total amount of units credited by local node
+func (self *AccountingApi) BalanceCredit() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mBalanceCredit.Count(), nil
+}
+
+// BalanceDebit returns total amount of units debited by local node
+func (self *AccountingApi) BalanceDebit() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mBalanceDebit.Count(), nil
+}
+
+// BytesCredit returns total amount of bytes credited by local node
+func (self *AccountingApi) BytesCredit() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mBytesCredit.Count(), nil
+}
+
+// BytesDebit returns total amount of bytes debited by local node
+func (self *AccountingApi) BytesDebit() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mBytesDebit.Count(), nil
+}
+
+// MsgCredit returns total amount of messages credited by local node
+func (self *AccountingApi) MsgCredit() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mMsgCredit.Count(), nil
+}
+
+// MsgDebit returns total amount of messages debited by local node
+func (self *AccountingApi) MsgDebit() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mMsgDebit.Count(), nil
+}
+
+// PeerDrops returns number of times when local node had to drop remote peers
+func (self *AccountingApi) PeerDrops() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mPeerDrops.Count(), nil
+}
+
+// SelfDrops returns number of times when local node was overdrafted and dropped
+func (self *AccountingApi) SelfDrops() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mSelfDrops.Count(), nil
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/rlpx.go b/vendor/github.com/ethereum/go-ethereum/p2p/rlpx.go
index 22a27dd9..67cc1d9b 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/rlpx.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/rlpx.go
@@ -39,9 +39,9 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
"github.com/golang/snappy"
+ "golang.org/x/crypto/sha3"
)
const (
@@ -253,10 +253,10 @@ func (h *encHandshake) secrets(auth, authResp []byte) (secrets, error) {
}
// setup sha3 instances for the MACs
- mac1 := sha3.NewKeccak256()
+ mac1 := sha3.NewLegacyKeccak256()
mac1.Write(xor(s.MAC, h.respNonce))
mac1.Write(auth)
- mac2 := sha3.NewKeccak256()
+ mac2 := sha3.NewLegacyKeccak256()
mac2.Write(xor(s.MAC, h.initNonce))
mac2.Write(authResp)
if h.initiator {
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/rlpx_test.go b/vendor/github.com/ethereum/go-ethereum/p2p/rlpx_test.go
index 64172217..5d898180 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/rlpx_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/rlpx_test.go
@@ -34,9 +34,9 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/p2p/simulations/pipes"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
func TestSharedSecret(t *testing.T) {
@@ -334,8 +334,8 @@ func TestRLPXFrameRW(t *testing.T) {
s1 := secrets{
AES: aesSecret,
MAC: macSecret,
- EgressMAC: sha3.NewKeccak256(),
- IngressMAC: sha3.NewKeccak256(),
+ EgressMAC: sha3.NewLegacyKeccak256(),
+ IngressMAC: sha3.NewLegacyKeccak256(),
}
s1.EgressMAC.Write(egressMACinit)
s1.IngressMAC.Write(ingressMACinit)
@@ -344,8 +344,8 @@ func TestRLPXFrameRW(t *testing.T) {
s2 := secrets{
AES: aesSecret,
MAC: macSecret,
- EgressMAC: sha3.NewKeccak256(),
- IngressMAC: sha3.NewKeccak256(),
+ EgressMAC: sha3.NewLegacyKeccak256(),
+ IngressMAC: sha3.NewLegacyKeccak256(),
}
s2.EgressMAC.Write(ingressMACinit)
s2.IngressMAC.Write(egressMACinit)
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/server_test.go b/vendor/github.com/ethereum/go-ethereum/p2p/server_test.go
index 7e11577d..f665c142 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/server_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/server_test.go
@@ -26,10 +26,10 @@ import (
"time"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
+ "golang.org/x/crypto/sha3"
)
// func init() {
@@ -48,8 +48,8 @@ func newTestTransport(rpub *ecdsa.PublicKey, fd net.Conn) transport {
wrapped.rw = newRLPXFrameRW(fd, secrets{
MAC: zero16,
AES: zero16,
- IngressMAC: sha3.NewKeccak256(),
- EgressMAC: sha3.NewKeccak256(),
+ IngressMAC: sha3.NewLegacyKeccak256(),
+ EgressMAC: sha3.NewLegacyKeccak256(),
})
return &testTransport{rpub: rpub, rlpx: wrapped}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/exec.go b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/exec.go
index abb19671..9b588db1 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/exec.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/exec.go
@@ -46,7 +46,7 @@ import (
func init() {
// Register a reexec function to start a simulation node when the current binary is
- // executed as "p2p-node" (rather than whataver the main() function would normally do).
+ // executed as "p2p-node" (rather than whatever the main() function would normally do).
reexec.Register("p2p-node", execP2PNode)
}
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/inproc.go b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/inproc.go
index 52a662be..eada9579 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/inproc.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/inproc.go
@@ -130,7 +130,7 @@ func (s *SimAdapter) Dial(dest *enode.Node) (conn net.Conn, err error) {
return nil, err
}
// this is simulated 'listening'
- // asynchronously call the dialed destintion node's p2p server
+ // asynchronously call the dialed destination node's p2p server
// to set up connection on the 'listening' side
go srv.SetupConn(pipe1, 0, nil)
return pipe2, nil
@@ -351,17 +351,3 @@ func (sn *SimNode) NodeInfo() *p2p.NodeInfo {
}
return server.NodeInfo()
}
-
-func setSocketBuffer(conn net.Conn, socketReadBuffer int, socketWriteBuffer int) error {
- if v, ok := conn.(*net.UnixConn); ok {
- err := v.SetReadBuffer(socketReadBuffer)
- if err != nil {
- return err
- }
- err = v.SetWriteBuffer(socketWriteBuffer)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/connect.go b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/connect.go
new file mode 100644
index 00000000..bb7e7999
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/connect.go
@@ -0,0 +1,132 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulations
+
+import (
+ "errors"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/p2p/enode"
+)
+
+var (
+ ErrNodeNotFound = errors.New("node not found")
+)
+
+// ConnectToLastNode connects the node with provided NodeID
+// to the last node that is up, and avoiding connection to self.
+// It is useful when constructing a chain network topology
+// when Network adds and removes nodes dynamically.
+func (net *Network) ConnectToLastNode(id enode.ID) (err error) {
+ ids := net.getUpNodeIDs()
+ l := len(ids)
+ if l < 2 {
+ return nil
+ }
+ last := ids[l-1]
+ if last == id {
+ last = ids[l-2]
+ }
+ return net.connect(last, id)
+}
+
+// ConnectToRandomNode connects the node with provided NodeID
+// to a random node that is up.
+func (net *Network) ConnectToRandomNode(id enode.ID) (err error) {
+ selected := net.GetRandomUpNode(id)
+ if selected == nil {
+ return ErrNodeNotFound
+ }
+ return net.connect(selected.ID(), id)
+}
+
+// ConnectNodesFull connects all nodes one to another.
+// It provides a complete connectivity in the network
+// which should be rarely needed.
+func (net *Network) ConnectNodesFull(ids []enode.ID) (err error) {
+ if ids == nil {
+ ids = net.getUpNodeIDs()
+ }
+ for i, lid := range ids {
+ for _, rid := range ids[i+1:] {
+ if err = net.connect(lid, rid); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// ConnectNodesChain connects all nodes in a chain topology.
+// If ids argument is nil, all nodes that are up will be connected.
+func (net *Network) ConnectNodesChain(ids []enode.ID) (err error) {
+ if ids == nil {
+ ids = net.getUpNodeIDs()
+ }
+ l := len(ids)
+ for i := 0; i < l-1; i++ {
+ if err := net.connect(ids[i], ids[i+1]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ConnectNodesRing connects all nodes in a ring topology.
+// If ids argument is nil, all nodes that are up will be connected.
+func (net *Network) ConnectNodesRing(ids []enode.ID) (err error) {
+ if ids == nil {
+ ids = net.getUpNodeIDs()
+ }
+ l := len(ids)
+ if l < 2 {
+ return nil
+ }
+ if err := net.ConnectNodesChain(ids); err != nil {
+ return err
+ }
+ return net.connect(ids[l-1], ids[0])
+}
+
+// ConnectNodesStar connects all nodes into a star topology
+// If ids argument is nil, all nodes that are up will be connected.
+func (net *Network) ConnectNodesStar(ids []enode.ID, center enode.ID) (err error) {
+ if ids == nil {
+ ids = net.getUpNodeIDs()
+ }
+ for _, id := range ids {
+ if center == id {
+ continue
+ }
+ if err := net.connect(center, id); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// connect connects two nodes but ignores already connected error.
+func (net *Network) connect(oneID, otherID enode.ID) error {
+ return ignoreAlreadyConnectedErr(net.Connect(oneID, otherID))
+}
+
+func ignoreAlreadyConnectedErr(err error) error {
+ if err == nil || strings.Contains(err.Error(), "already connected") {
+ return nil
+ }
+ return err
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/connect_test.go b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/connect_test.go
new file mode 100644
index 00000000..32d18347
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/connect_test.go
@@ -0,0 +1,172 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulations
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+)
+
+func newTestNetwork(t *testing.T, nodeCount int) (*Network, []enode.ID) {
+ t.Helper()
+ adapter := adapters.NewSimAdapter(adapters.Services{
+ "noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ return NewNoopService(nil), nil
+ },
+ })
+
+ // create network
+ network := NewNetwork(adapter, &NetworkConfig{
+ DefaultService: "noopwoop",
+ })
+
+ // create and start nodes
+ ids := make([]enode.ID, nodeCount)
+ for i := range ids {
+ conf := adapters.RandomNodeConfig()
+ node, err := network.NewNodeWithConfig(conf)
+ if err != nil {
+ t.Fatalf("error creating node: %s", err)
+ }
+ if err := network.Start(node.ID()); err != nil {
+ t.Fatalf("error starting node: %s", err)
+ }
+ ids[i] = node.ID()
+ }
+
+ if len(network.Conns) > 0 {
+ t.Fatal("no connections should exist after just adding nodes")
+ }
+
+ return network, ids
+}
+
+func TestConnectToLastNode(t *testing.T) {
+ net, ids := newTestNetwork(t, 10)
+ defer net.Shutdown()
+
+ first := ids[0]
+ if err := net.ConnectToLastNode(first); err != nil {
+ t.Fatal(err)
+ }
+
+ last := ids[len(ids)-1]
+ for i, id := range ids {
+ if id == first || id == last {
+ continue
+ }
+
+ if net.GetConn(first, id) != nil {
+ t.Errorf("connection must not exist with node(ind: %v, id: %v)", i, id)
+ }
+ }
+
+ if net.GetConn(first, last) == nil {
+ t.Error("first and last node must be connected")
+ }
+}
+
+func TestConnectToRandomNode(t *testing.T) {
+ net, ids := newTestNetwork(t, 10)
+ defer net.Shutdown()
+
+ err := net.ConnectToRandomNode(ids[0])
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var cc int
+ for i, a := range ids {
+ for _, b := range ids[i:] {
+ if net.GetConn(a, b) != nil {
+ cc++
+ }
+ }
+ }
+
+ if cc != 1 {
+ t.Errorf("expected one connection, got %v", cc)
+ }
+}
+
+func TestConnectNodesFull(t *testing.T) {
+ tests := []struct {
+ name string
+ nodeCount int
+ }{
+ {name: "no node", nodeCount: 0},
+ {name: "single node", nodeCount: 1},
+ {name: "2 nodes", nodeCount: 2},
+ {name: "3 nodes", nodeCount: 3},
+ {name: "even number of nodes", nodeCount: 12},
+ {name: "odd number of nodes", nodeCount: 13},
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ net, ids := newTestNetwork(t, test.nodeCount)
+ defer net.Shutdown()
+
+ err := net.ConnectNodesFull(ids)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ VerifyFull(t, net, ids)
+ })
+ }
+}
+
+func TestConnectNodesChain(t *testing.T) {
+ net, ids := newTestNetwork(t, 10)
+ defer net.Shutdown()
+
+ err := net.ConnectNodesChain(ids)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ VerifyChain(t, net, ids)
+}
+
+func TestConnectNodesRing(t *testing.T) {
+ net, ids := newTestNetwork(t, 10)
+ defer net.Shutdown()
+
+ err := net.ConnectNodesRing(ids)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ VerifyRing(t, net, ids)
+}
+
+func TestConnectNodesStar(t *testing.T) {
+ net, ids := newTestNetwork(t, 10)
+ defer net.Shutdown()
+
+ pivotIndex := 2
+
+ err := net.ConnectNodesStar(ids, ids[pivotIndex])
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ VerifyStar(t, net, ids, pivotIndex)
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/http_test.go b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/http_test.go
index d9513caa..c0a5acb3 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/http_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/http_test.go
@@ -35,7 +35,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/rpc"
- colorable "github.com/mattn/go-colorable"
+ "github.com/mattn/go-colorable"
)
var (
@@ -294,6 +294,7 @@ var testServices = adapters.Services{
}
func testHTTPServer(t *testing.T) (*Network, *httptest.Server) {
+ t.Helper()
adapter := adapters.NewSimAdapter(testServices)
network := NewNetwork(adapter, &NetworkConfig{
DefaultService: "test",
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/mocker_test.go b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/mocker_test.go
index 7c7016a5..192be173 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/mocker_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/mocker_test.go
@@ -15,7 +15,7 @@
// along with the go-ethereum library. If not, see .
// Package simulations simulates p2p networks.
-// A mokcer simulates starting and stopping real nodes in a network.
+// A mocker simulates starting and stopping real nodes in a network.
package simulations
import (
@@ -135,13 +135,13 @@ func TestMocker(t *testing.T) {
wg.Wait()
//check there are nodeCount number of nodes in the network
- nodes_info, err := client.GetNodes()
+ nodesInfo, err := client.GetNodes()
if err != nil {
t.Fatalf("Could not get nodes list: %s", err)
}
- if len(nodes_info) != nodeCount {
- t.Fatalf("Expected %d number of nodes, got: %d", nodeCount, len(nodes_info))
+ if len(nodesInfo) != nodeCount {
+ t.Fatalf("Expected %d number of nodes, got: %d", nodeCount, len(nodesInfo))
}
//stop the mocker
@@ -160,12 +160,12 @@ func TestMocker(t *testing.T) {
}
//now the number of nodes in the network should be zero
- nodes_info, err = client.GetNodes()
+ nodesInfo, err = client.GetNodes()
if err != nil {
t.Fatalf("Could not get nodes list: %s", err)
}
- if len(nodes_info) != 0 {
- t.Fatalf("Expected empty list of nodes, got: %d", len(nodes_info))
+ if len(nodesInfo) != 0 {
+ t.Fatalf("Expected empty list of nodes, got: %d", len(nodesInfo))
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network.go b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network.go
index ab9f582c..86f7dc9b 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network.go
@@ -22,6 +22,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "math/rand"
"sync"
"time"
@@ -370,23 +371,32 @@ func (net *Network) DidReceive(sender, receiver enode.ID, proto string, code uin
// GetNode gets the node with the given ID, returning nil if the node does not
// exist
func (net *Network) GetNode(id enode.ID) *Node {
- net.lock.Lock()
- defer net.lock.Unlock()
+ net.lock.RLock()
+ defer net.lock.RUnlock()
return net.getNode(id)
}
// GetNode gets the node with the given name, returning nil if the node does
// not exist
func (net *Network) GetNodeByName(name string) *Node {
- net.lock.Lock()
- defer net.lock.Unlock()
+ net.lock.RLock()
+ defer net.lock.RUnlock()
return net.getNodeByName(name)
}
+func (net *Network) getNodeByName(name string) *Node {
+ for _, node := range net.Nodes {
+ if node.Config.Name == name {
+ return node
+ }
+ }
+ return nil
+}
+
// GetNodes returns the existing nodes
func (net *Network) GetNodes() (nodes []*Node) {
- net.lock.Lock()
- defer net.lock.Unlock()
+ net.lock.RLock()
+ defer net.lock.RUnlock()
nodes = append(nodes, net.Nodes...)
return nodes
@@ -400,20 +410,67 @@ func (net *Network) getNode(id enode.ID) *Node {
return net.Nodes[i]
}
-func (net *Network) getNodeByName(name string) *Node {
+// GetRandomUpNode returns a random node on the network, which is running.
+func (net *Network) GetRandomUpNode(excludeIDs ...enode.ID) *Node {
+ net.lock.RLock()
+ defer net.lock.RUnlock()
+ return net.getRandomNode(net.getUpNodeIDs(), excludeIDs)
+}
+
+func (net *Network) getUpNodeIDs() (ids []enode.ID) {
for _, node := range net.Nodes {
- if node.Config.Name == name {
- return node
+ if node.Up {
+ ids = append(ids, node.ID())
}
}
- return nil
+ return ids
+}
+
+// GetRandomDownNode returns a random node on the network, which is stopped.
+func (net *Network) GetRandomDownNode(excludeIDs ...enode.ID) *Node {
+ net.lock.RLock()
+ defer net.lock.RUnlock()
+ return net.getRandomNode(net.getDownNodeIDs(), excludeIDs)
+}
+
+func (net *Network) getDownNodeIDs() (ids []enode.ID) {
+ for _, node := range net.GetNodes() {
+ if !node.Up {
+ ids = append(ids, node.ID())
+ }
+ }
+ return ids
+}
+
+func (net *Network) getRandomNode(ids []enode.ID, excludeIDs []enode.ID) *Node {
+ filtered := filterIDs(ids, excludeIDs)
+
+ l := len(filtered)
+ if l == 0 {
+ return nil
+ }
+ return net.GetNode(filtered[rand.Intn(l)])
+}
+
+func filterIDs(ids []enode.ID, excludeIDs []enode.ID) []enode.ID {
+ exclude := make(map[enode.ID]bool)
+ for _, id := range excludeIDs {
+ exclude[id] = true
+ }
+ var filtered []enode.ID
+ for _, id := range ids {
+ if _, found := exclude[id]; !found {
+ filtered = append(filtered, id)
+ }
+ }
+ return filtered
}
// GetConn returns the connection which exists between "one" and "other"
// regardless of which node initiated the connection
func (net *Network) GetConn(oneID, otherID enode.ID) *Conn {
- net.lock.Lock()
- defer net.lock.Unlock()
+ net.lock.RLock()
+ defer net.lock.RUnlock()
return net.getConn(oneID, otherID)
}
@@ -459,7 +516,7 @@ func (net *Network) getConn(oneID, otherID enode.ID) *Conn {
return net.Conns[i]
}
-// InitConn(one, other) retrieves the connectiton model for the connection between
+// InitConn(one, other) retrieves the connection model for the connection between
// peers one and other, or creates a new one if it does not exist
// the order of nodes does not matter, i.e., Conn(i,j) == Conn(j, i)
// it checks if the connection is already up, and if the nodes are running
@@ -505,8 +562,8 @@ func (net *Network) Shutdown() {
close(net.quitc)
}
-//Reset resets all network properties:
-//emtpies the nodes and the connection list
+// Reset resets all network properties:
+// empties the nodes and the connection list
func (net *Network) Reset() {
net.lock.Lock()
defer net.lock.Unlock()
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network_test.go b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network_test.go
index f3493526..b7852add 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network_test.go
@@ -18,14 +18,266 @@ package simulations
import (
"context"
+ "encoding/json"
"fmt"
+ "strconv"
+ "strings"
"testing"
"time"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)
+// Tests that a created snapshot with a minimal service only contains the expected connections
+// and that a network when loaded with this snapshot only contains those same connections
+func TestSnapshot(t *testing.T) {
+
+ // PART I
+ // create snapshot from ring network
+
+ // this is a minimal service, whose protocol will take exactly one message OR close of connection before quitting
+ adapter := adapters.NewSimAdapter(adapters.Services{
+ "noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ return NewNoopService(nil), nil
+ },
+ })
+
+ // create network
+ network := NewNetwork(adapter, &NetworkConfig{
+ DefaultService: "noopwoop",
+ })
+ // \todo consider making a member of network, set to true threadsafe when shutdown
+ runningOne := true
+ defer func() {
+ if runningOne {
+ network.Shutdown()
+ }
+ }()
+
+ // create and start nodes
+ nodeCount := 20
+ ids := make([]enode.ID, nodeCount)
+ for i := 0; i < nodeCount; i++ {
+ conf := adapters.RandomNodeConfig()
+ node, err := network.NewNodeWithConfig(conf)
+ if err != nil {
+ t.Fatalf("error creating node: %s", err)
+ }
+ if err := network.Start(node.ID()); err != nil {
+ t.Fatalf("error starting node: %s", err)
+ }
+ ids[i] = node.ID()
+ }
+
+ // subscribe to peer events
+ evC := make(chan *Event)
+ sub := network.Events().Subscribe(evC)
+ defer sub.Unsubscribe()
+
+ // connect nodes in a ring
+ // spawn separate thread to avoid deadlock in the event listeners
+ go func() {
+ for i, id := range ids {
+ peerID := ids[(i+1)%len(ids)]
+ if err := network.Connect(id, peerID); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }()
+
+ // collect connection events up to expected number
+ ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ checkIds := make(map[enode.ID][]enode.ID)
+ connEventCount := nodeCount
+OUTER:
+ for {
+ select {
+ case <-ctx.Done():
+ t.Fatal(ctx.Err())
+ case ev := <-evC:
+ if ev.Type == EventTypeConn && !ev.Control {
+
+ // fail on any disconnect
+ if !ev.Conn.Up {
+ t.Fatalf("unexpected disconnect: %v -> %v", ev.Conn.One, ev.Conn.Other)
+ }
+ checkIds[ev.Conn.One] = append(checkIds[ev.Conn.One], ev.Conn.Other)
+ checkIds[ev.Conn.Other] = append(checkIds[ev.Conn.Other], ev.Conn.One)
+ connEventCount--
+ log.Debug("ev", "count", connEventCount)
+ if connEventCount == 0 {
+ break OUTER
+ }
+ }
+ }
+ }
+
+ // create snapshot of current network
+ snap, err := network.Snapshot()
+ if err != nil {
+ t.Fatal(err)
+ }
+ j, err := json.Marshal(snap)
+ if err != nil {
+ t.Fatal(err)
+ }
+ log.Debug("snapshot taken", "nodes", len(snap.Nodes), "conns", len(snap.Conns), "json", string(j))
+
+ // verify that the snap element numbers check out
+ if len(checkIds) != len(snap.Conns) || len(checkIds) != len(snap.Nodes) {
+ t.Fatalf("snapshot wrong node,conn counts %d,%d != %d", len(snap.Nodes), len(snap.Conns), len(checkIds))
+ }
+
+ // shut down sim network
+ runningOne = false
+ sub.Unsubscribe()
+ network.Shutdown()
+
+ // check that we have all the expected connections in the snapshot
+ for nodid, nodConns := range checkIds {
+ for _, nodConn := range nodConns {
+ var match bool
+ for _, snapConn := range snap.Conns {
+ if snapConn.One == nodid && snapConn.Other == nodConn {
+ match = true
+ break
+ } else if snapConn.Other == nodid && snapConn.One == nodConn {
+ match = true
+ break
+ }
+ }
+ if !match {
+ t.Fatalf("snapshot missing conn %v -> %v", nodid, nodConn)
+ }
+ }
+ }
+ log.Info("snapshot checked")
+
+ // PART II
+ // load snapshot and verify that exactly same connections are formed
+
+ adapter = adapters.NewSimAdapter(adapters.Services{
+ "noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ return NewNoopService(nil), nil
+ },
+ })
+ network = NewNetwork(adapter, &NetworkConfig{
+ DefaultService: "noopwoop",
+ })
+ defer func() {
+ network.Shutdown()
+ }()
+
+ // subscribe to peer events
+ // every node up and conn up event will generate one additional control event
+ // therefore multiply the count by two
+ evC = make(chan *Event, (len(snap.Conns)*2)+(len(snap.Nodes)*2))
+ sub = network.Events().Subscribe(evC)
+ defer sub.Unsubscribe()
+
+ // load the snapshot
+ // spawn separate thread to avoid deadlock in the event listeners
+ err = network.Load(snap)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // collect connection events up to expected number
+ ctx, cancel = context.WithTimeout(context.TODO(), time.Second*3)
+ defer cancel()
+
+ connEventCount = nodeCount
+
+OUTER_TWO:
+ for {
+ select {
+ case <-ctx.Done():
+ t.Fatal(ctx.Err())
+ case ev := <-evC:
+ if ev.Type == EventTypeConn && !ev.Control {
+
+ // fail on any disconnect
+ if !ev.Conn.Up {
+ t.Fatalf("unexpected disconnect: %v -> %v", ev.Conn.One, ev.Conn.Other)
+ }
+ log.Debug("conn", "on", ev.Conn.One, "other", ev.Conn.Other)
+ checkIds[ev.Conn.One] = append(checkIds[ev.Conn.One], ev.Conn.Other)
+ checkIds[ev.Conn.Other] = append(checkIds[ev.Conn.Other], ev.Conn.One)
+ connEventCount--
+ log.Debug("ev", "count", connEventCount)
+ if connEventCount == 0 {
+ break OUTER_TWO
+ }
+ }
+ }
+ }
+
+ // check that we have all expected connections in the network
+ for _, snapConn := range snap.Conns {
+ var match bool
+ for nodid, nodConns := range checkIds {
+ for _, nodConn := range nodConns {
+ if snapConn.One == nodid && snapConn.Other == nodConn {
+ match = true
+ break
+ } else if snapConn.Other == nodid && snapConn.One == nodConn {
+ match = true
+ break
+ }
+ }
+ }
+ if !match {
+ t.Fatalf("network missing conn %v -> %v", snapConn.One, snapConn.Other)
+ }
+ }
+
+ // verify that network didn't generate any other additional connection events after the ones we have collected within a reasonable period of time
+ ctx, cancel = context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ select {
+ case <-ctx.Done():
+ case ev := <-evC:
+ if ev.Type == EventTypeConn {
+ t.Fatalf("Superfluous conn found %v -> %v", ev.Conn.One, ev.Conn.Other)
+ }
+ }
+
+ // This test validates if all connections from the snapshot
+ // are created in the network.
+ t.Run("conns after load", func(t *testing.T) {
+ // Create new network.
+ n := NewNetwork(
+ adapters.NewSimAdapter(adapters.Services{
+ "noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ return NewNoopService(nil), nil
+ },
+ }),
+ &NetworkConfig{
+ DefaultService: "noopwoop",
+ },
+ )
+ defer n.Shutdown()
+
+ // Load the same snapshot.
+ err := n.Load(snap)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Check every connection from the snapshot
+ // if it is in the network, too.
+ for _, c := range snap.Conns {
+ if n.GetConn(c.One, c.Other) == nil {
+ t.Errorf("missing connection: %s -> %s", c.One, c.Other)
+ }
+ }
+ })
+}
+
// TestNetworkSimulation creates a multi-node simulation network with each node
// connected in a ring topology, checks that all nodes successfully handshake
// with each other and that a snapshot fully represents the desired topology
@@ -158,3 +410,78 @@ func triggerChecks(ctx context.Context, ids []enode.ID, trigger chan enode.ID, i
}
}
}
+
+// \todo: refactor to implement snapshots
+// and connect configuration methods once these are moved from
+// swarm/network/simulations/connect.go
+func BenchmarkMinimalService(b *testing.B) {
+ b.Run("ring/32", benchmarkMinimalServiceTmp)
+}
+
+func benchmarkMinimalServiceTmp(b *testing.B) {
+
+ // stop timer to discard setup time pollution
+ args := strings.Split(b.Name(), "/")
+ nodeCount, err := strconv.ParseInt(args[2], 10, 16)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ for i := 0; i < b.N; i++ {
+ // this is a minimal service, whose protocol will close a channel upon run of protocol
+ // making it possible to bench the time it takes for the service to start and protocol actually to be run
+ protoCMap := make(map[enode.ID]map[enode.ID]chan struct{})
+ adapter := adapters.NewSimAdapter(adapters.Services{
+ "noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ protoCMap[ctx.Config.ID] = make(map[enode.ID]chan struct{})
+ svc := NewNoopService(protoCMap[ctx.Config.ID])
+ return svc, nil
+ },
+ })
+
+ // create network
+ network := NewNetwork(adapter, &NetworkConfig{
+ DefaultService: "noopwoop",
+ })
+ defer network.Shutdown()
+
+ // create and start nodes
+ ids := make([]enode.ID, nodeCount)
+ for i := 0; i < int(nodeCount); i++ {
+ conf := adapters.RandomNodeConfig()
+ node, err := network.NewNodeWithConfig(conf)
+ if err != nil {
+ b.Fatalf("error creating node: %s", err)
+ }
+ if err := network.Start(node.ID()); err != nil {
+ b.Fatalf("error starting node: %s", err)
+ }
+ ids[i] = node.ID()
+ }
+
+ // ready, set, go
+ b.ResetTimer()
+
+ // connect nodes in a ring
+ for i, id := range ids {
+ peerID := ids[(i+1)%len(ids)]
+ if err := network.Connect(id, peerID); err != nil {
+ b.Fatal(err)
+ }
+ }
+
+ // wait for all protocols to signal to close down
+ ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ for nodid, peers := range protoCMap {
+ for peerid, peerC := range peers {
+ log.Debug("getting ", "node", nodid, "peer", peerid)
+ select {
+ case <-ctx.Done():
+ b.Fatal(ctx.Err())
+ case <-peerC:
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/test.go b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/test.go
new file mode 100644
index 00000000..beeb414e
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/test.go
@@ -0,0 +1,134 @@
+package simulations
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/enr"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
+// NoopService is the service that does not do anything
+// but implements node.Service interface.
+type NoopService struct {
+ c map[enode.ID]chan struct{}
+}
+
+func NewNoopService(ackC map[enode.ID]chan struct{}) *NoopService {
+ return &NoopService{
+ c: ackC,
+ }
+}
+
+func (t *NoopService) Protocols() []p2p.Protocol {
+ return []p2p.Protocol{
+ {
+ Name: "noop",
+ Version: 666,
+ Length: 0,
+ Run: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
+ if t.c != nil {
+ t.c[peer.ID()] = make(chan struct{})
+ close(t.c[peer.ID()])
+ }
+ rw.ReadMsg()
+ return nil
+ },
+ NodeInfo: func() interface{} {
+ return struct{}{}
+ },
+ PeerInfo: func(id enode.ID) interface{} {
+ return struct{}{}
+ },
+ Attributes: []enr.Entry{},
+ },
+ }
+}
+
+func (t *NoopService) APIs() []rpc.API {
+ return []rpc.API{}
+}
+
+func (t *NoopService) Start(server *p2p.Server) error {
+ return nil
+}
+
+func (t *NoopService) Stop() error {
+ return nil
+}
+
+func VerifyRing(t *testing.T, net *Network, ids []enode.ID) {
+ t.Helper()
+ n := len(ids)
+ for i := 0; i < n; i++ {
+ for j := i + 1; j < n; j++ {
+ c := net.GetConn(ids[i], ids[j])
+ if i == j-1 || (i == 0 && j == n-1) {
+ if c == nil {
+ t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
+ }
+ } else {
+ if c != nil {
+ t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
+ }
+ }
+ }
+ }
+}
+
+func VerifyChain(t *testing.T, net *Network, ids []enode.ID) {
+ t.Helper()
+ n := len(ids)
+ for i := 0; i < n; i++ {
+ for j := i + 1; j < n; j++ {
+ c := net.GetConn(ids[i], ids[j])
+ if i == j-1 {
+ if c == nil {
+ t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
+ }
+ } else {
+ if c != nil {
+ t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
+ }
+ }
+ }
+ }
+}
+
+func VerifyFull(t *testing.T, net *Network, ids []enode.ID) {
+ t.Helper()
+ n := len(ids)
+ var connections int
+ for i, lid := range ids {
+ for _, rid := range ids[i+1:] {
+ if net.GetConn(lid, rid) != nil {
+ connections++
+ }
+ }
+ }
+
+ want := n * (n - 1) / 2
+ if connections != want {
+ t.Errorf("wrong number of connections, got: %v, want: %v", connections, want)
+ }
+}
+
+func VerifyStar(t *testing.T, net *Network, ids []enode.ID, centerIndex int) {
+ t.Helper()
+ n := len(ids)
+ for i := 0; i < n; i++ {
+ for j := i + 1; j < n; j++ {
+ c := net.GetConn(ids[i], ids[j])
+ if i == centerIndex || j == centerIndex {
+ if c == nil {
+ t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
+ }
+ } else {
+ if c != nil {
+ t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/params/config.go b/vendor/github.com/ethereum/go-ethereum/params/config.go
index 2935ef1f..fefc1610 100644
--- a/vendor/github.com/ethereum/go-ethereum/params/config.go
+++ b/vendor/github.com/ethereum/go-ethereum/params/config.go
@@ -42,7 +42,7 @@ var (
EIP155Block: big.NewInt(2675000),
EIP158Block: big.NewInt(2675000),
ByzantiumBlock: big.NewInt(4370000),
- ConstantinopleBlock: big.NewInt(7080000),
+ ConstantinopleBlock: nil,
Ethash: new(EthashConfig),
}
diff --git a/vendor/github.com/ethereum/go-ethereum/params/version.go b/vendor/github.com/ethereum/go-ethereum/params/version.go
index ba9ab202..f8848d47 100644
--- a/vendor/github.com/ethereum/go-ethereum/params/version.go
+++ b/vendor/github.com/ethereum/go-ethereum/params/version.go
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 8 // Minor version component of the current release
- VersionPatch = 20 // Patch version component of the current release
+ VersionPatch = 21 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
diff --git a/vendor/github.com/ethereum/go-ethereum/rpc/http.go b/vendor/github.com/ethereum/go-ethereum/rpc/http.go
index af79858e..674166fb 100644
--- a/vendor/github.com/ethereum/go-ethereum/rpc/http.go
+++ b/vendor/github.com/ethereum/go-ethereum/rpc/http.go
@@ -36,11 +36,15 @@ import (
)
const (
- contentType = "application/json"
maxRequestContentLength = 1024 * 512
)
-var nullAddr, _ = net.ResolveTCPAddr("tcp", "127.0.0.1:0")
+var (
+ // https://www.jsonrpc.org/historical/json-rpc-over-http.html#id13
+ acceptedContentTypes = []string{"application/json", "application/json-rpc", "application/jsonrequest"}
+ contentType = acceptedContentTypes[0]
+ nullAddr, _ = net.ResolveTCPAddr("tcp", "127.0.0.1:0")
+)
type httpConn struct {
client *http.Client
@@ -263,12 +267,21 @@ func validateRequest(r *http.Request) (int, error) {
err := fmt.Errorf("content length too large (%d>%d)", r.ContentLength, maxRequestContentLength)
return http.StatusRequestEntityTooLarge, err
}
- mt, _, err := mime.ParseMediaType(r.Header.Get("content-type"))
- if r.Method != http.MethodOptions && (err != nil || mt != contentType) {
- err := fmt.Errorf("invalid content type, only %s is supported", contentType)
- return http.StatusUnsupportedMediaType, err
+ // Allow OPTIONS (regardless of content-type)
+ if r.Method == http.MethodOptions {
+ return 0, nil
}
- return 0, nil
+ // Check content-type
+ if mt, _, err := mime.ParseMediaType(r.Header.Get("content-type")); err == nil {
+ for _, accepted := range acceptedContentTypes {
+ if accepted == mt {
+ return 0, nil
+ }
+ }
+ }
+ // Invalid content-type
+ err := fmt.Errorf("invalid content type, only %s is supported", contentType)
+ return http.StatusUnsupportedMediaType, err
}
func newCorsHandler(srv *Server, allowedOrigins []string) http.Handler {
diff --git a/vendor/github.com/ethereum/go-ethereum/rpc/ipc_unix.go b/vendor/github.com/ethereum/go-ethereum/rpc/ipc_unix.go
index 0851ea61..707b47fd 100644
--- a/vendor/github.com/ethereum/go-ethereum/rpc/ipc_unix.go
+++ b/vendor/github.com/ethereum/go-ethereum/rpc/ipc_unix.go
@@ -20,13 +20,31 @@ package rpc
import (
"context"
+ "fmt"
"net"
"os"
"path/filepath"
+
+ "github.com/ethereum/go-ethereum/log"
)
+/*
+#include <sys/un.h>
+
+int max_socket_path_size() {
+struct sockaddr_un s;
+return sizeof(s.sun_path);
+}
+*/
+import "C"
+
// ipcListen will create a Unix socket on the given endpoint.
func ipcListen(endpoint string) (net.Listener, error) {
+ if len(endpoint) > int(C.max_socket_path_size()) {
+ log.Warn(fmt.Sprintf("The ipc endpoint is longer than %d characters. ", C.max_socket_path_size()),
+ "endpoint", endpoint)
+ }
+
// Ensure the IPC path exists and remove any previous leftover
if err := os.MkdirAll(filepath.Dir(endpoint), 0751); err != nil {
return nil, err
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/act.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/act.go
index e54369f9..9566720b 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/api/act.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/act.go
@@ -15,11 +15,11 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/sctx"
"github.com/ethereum/go-ethereum/swarm/storage"
"golang.org/x/crypto/scrypt"
+ "golang.org/x/crypto/sha3"
cli "gopkg.in/urfave/cli.v1"
)
@@ -336,7 +336,7 @@ func (a *API) doDecrypt(ctx context.Context, credentials string, pk *ecdsa.Priva
}
func (a *API) getACTDecryptionKey(ctx context.Context, actManifestAddress storage.Address, sessionKey []byte) (found bool, ciphertext, decryptionKey []byte, err error) {
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
hasher.Write(append(sessionKey, 0))
lookupKey := hasher.Sum(nil)
hasher.Reset()
@@ -462,7 +462,7 @@ func DoACT(ctx *cli.Context, privateKey *ecdsa.PrivateKey, salt []byte, grantees
return nil, nil, nil, err
}
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
hasher.Write(append(sessionKey, 0))
lookupKey := hasher.Sum(nil)
@@ -484,7 +484,7 @@ func DoACT(ctx *cli.Context, privateKey *ecdsa.PrivateKey, salt []byte, grantees
if err != nil {
return nil, nil, nil, err
}
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
hasher.Write(append(sessionKey, 0))
lookupKey := hasher.Sum(nil)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go
index 33a8e353..c6ca1b57 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go
@@ -50,10 +50,6 @@ import (
opentracing "github.com/opentracing/opentracing-go"
)
-var (
- ErrNotFound = errors.New("not found")
-)
-
var (
apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil)
apiResolveFail = metrics.NewRegisteredCounter("api.resolve.fail", nil)
@@ -136,13 +132,6 @@ func MultiResolverOptionWithResolver(r ResolveValidator, tld string) MultiResolv
}
}
-// MultiResolverOptionWithNameHash is unused at the time of this writing
-func MultiResolverOptionWithNameHash(nameHash func(string) common.Hash) MultiResolverOption {
- return func(m *MultiResolver) {
- m.nameHash = nameHash
- }
-}
-
// NewMultiResolver creates a new instance of MultiResolver.
func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) {
m = &MultiResolver{
@@ -173,40 +162,6 @@ func (m *MultiResolver) Resolve(addr string) (h common.Hash, err error) {
return
}
-// ValidateOwner checks the ENS to validate that the owner of the given domain is the given eth address
-func (m *MultiResolver) ValidateOwner(name string, address common.Address) (bool, error) {
- rs, err := m.getResolveValidator(name)
- if err != nil {
- return false, err
- }
- var addr common.Address
- for _, r := range rs {
- addr, err = r.Owner(m.nameHash(name))
- // we hide the error if it is not for the last resolver we check
- if err == nil {
- return addr == address, nil
- }
- }
- return false, err
-}
-
-// HeaderByNumber uses the validator of the given domainname and retrieves the header for the given block number
-func (m *MultiResolver) HeaderByNumber(ctx context.Context, name string, blockNr *big.Int) (*types.Header, error) {
- rs, err := m.getResolveValidator(name)
- if err != nil {
- return nil, err
- }
- for _, r := range rs {
- var header *types.Header
- header, err = r.HeaderByNumber(ctx, blockNr)
- // we hide the error if it is not for the last resolver we check
- if err == nil {
- return header, nil
- }
- }
- return nil, err
-}
-
// getResolveValidator uses the hostname to retrieve the resolver associated with the top level domain
func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, error) {
rs := m.resolvers[""]
@@ -224,11 +179,6 @@ func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, er
return rs, nil
}
-// SetNameHash sets the hasher function that hashes the domain into a name hash that ENS uses
-func (m *MultiResolver) SetNameHash(nameHash func(string) common.Hash) {
- m.nameHash = nameHash
-}
-
/*
API implements webserver/file system related content storage and retrieval
on top of the FileStore
@@ -265,9 +215,6 @@ func (a *API) Store(ctx context.Context, data io.Reader, size int64, toEncrypt b
return a.fileStore.Store(ctx, data, size, toEncrypt)
}
-// ErrResolve is returned when an URI cannot be resolved from ENS.
-type ErrResolve error
-
// Resolve a name into a content-addressed hash
// where address could be an ENS name, or a content addressed hash
func (a *API) Resolve(ctx context.Context, address string) (storage.Address, error) {
@@ -980,11 +927,6 @@ func (a *API) FeedsUpdate(ctx context.Context, request *feed.Request) (storage.A
return a.feed.Update(ctx, request)
}
-// FeedsHashSize returned the size of the digest produced by Swarm feeds' hashing function
-func (a *API) FeedsHashSize() int {
- return a.feed.HashSize
-}
-
// ErrCannotLoadFeedManifest is returned when looking up a feeds manifest fails
var ErrCannotLoadFeedManifest = errors.New("Cannot load feed manifest")
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client.go
index f793ca8b..5e293cca 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client.go
@@ -45,11 +45,6 @@ import (
"github.com/pborman/uuid"
)
-var (
- DefaultGateway = "http://localhost:8500"
- DefaultClient = NewClient(DefaultGateway)
-)
-
var (
ErrUnauthorized = errors.New("unauthorized")
)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/encrypt.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/encrypt.go
index ffe6c16d..0d516b3d 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/api/encrypt.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/encrypt.go
@@ -20,8 +20,8 @@ import (
"encoding/binary"
"errors"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/swarm/storage/encryption"
+ "golang.org/x/crypto/sha3"
)
type RefEncryption struct {
@@ -39,12 +39,12 @@ func NewRefEncryption(refSize int) *RefEncryption {
}
func (re *RefEncryption) Encrypt(ref []byte, key []byte) ([]byte, error) {
- spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewKeccak256)
+ spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewLegacyKeccak256)
encryptedSpan, err := spanEncryption.Encrypt(re.span)
if err != nil {
return nil, err
}
- dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewKeccak256)
+ dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewLegacyKeccak256)
encryptedData, err := dataEncryption.Encrypt(ref)
if err != nil {
return nil, err
@@ -57,7 +57,7 @@ func (re *RefEncryption) Encrypt(ref []byte, key []byte) ([]byte, error) {
}
func (re *RefEncryption) Decrypt(ref []byte, key []byte) ([]byte, error) {
- spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewKeccak256)
+ spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewLegacyKeccak256)
decryptedSpan, err := spanEncryption.Decrypt(ref[:8])
if err != nil {
return nil, err
@@ -68,7 +68,7 @@ func (re *RefEncryption) Decrypt(ref []byte, key []byte) ([]byte, error) {
return nil, errors.New("invalid span in encrypted reference")
}
- dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewKeccak256)
+ dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewLegacyKeccak256)
decryptedRef, err := dataEncryption.Decrypt(ref[8:])
if err != nil {
return nil, err
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/middleware.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/middleware.go
index f7f819ea..320da304 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/middleware.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/middleware.go
@@ -80,7 +80,7 @@ func InitLoggingResponseWriter(h http.Handler) http.Handler {
h.ServeHTTP(writer, r)
ts := time.Since(tn)
- log.Info("request served", "ruid", GetRUID(r.Context()), "code", writer.statusCode, "time", ts*time.Millisecond)
+ log.Info("request served", "ruid", GetRUID(r.Context()), "code", writer.statusCode, "time", ts)
metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.time", r.Method), nil).Update(ts)
metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.%d.time", r.Method, writer.statusCode), nil).Update(ts)
})
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/storage.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/storage.go
index 8a48fe5b..254375b7 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/api/storage.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/storage.go
@@ -83,23 +83,3 @@ func (s *Storage) Get(ctx context.Context, bzzpath string) (*Response, error) {
}
return &Response{mimeType, status, expsize, string(body[:size])}, err
}
-
-// Modify(rootHash, basePath, contentHash, contentType) takes th e manifest trie rooted in rootHash,
-// and merge on to it. creating an entry w conentType (mime)
-//
-// DEPRECATED: Use the HTTP API instead
-func (s *Storage) Modify(ctx context.Context, rootHash, path, contentHash, contentType string) (newRootHash string, err error) {
- uri, err := Parse("bzz:/" + rootHash)
- if err != nil {
- return "", err
- }
- addr, err := s.api.Resolve(ctx, uri.Addr)
- if err != nil {
- return "", err
- }
- addr, err = s.api.Modify(ctx, addr, path, contentHash, contentType)
- if err != nil {
- return "", err
- }
- return addr.Hex(), nil
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/testapi.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/testapi.go
index 4c7d0982..6fec55f5 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/api/testapi.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/testapi.go
@@ -29,18 +29,6 @@ func NewControl(api *API, hive *network.Hive) *Control {
return &Control{api, hive}
}
-//func (self *Control) BlockNetworkRead(on bool) {
-// self.hive.BlockNetworkRead(on)
-//}
-//
-//func (self *Control) SyncEnabled(on bool) {
-// self.hive.SyncEnabled(on)
-//}
-//
-//func (self *Control) SwapEnabled(on bool) {
-// self.hive.SwapEnabled(on)
-//}
-//
func (c *Control) Hive() string {
return c.hive.String()
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/uri_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/uri_test.go
index ea649e27..a03874c4 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/api/uri_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/uri_test.go
@@ -26,17 +26,15 @@ import (
func TestParseURI(t *testing.T) {
type test struct {
- uri string
- expectURI *URI
- expectErr bool
- expectRaw bool
- expectImmutable bool
- expectList bool
- expectHash bool
- expectDeprecatedRaw bool
- expectDeprecatedImmutable bool
- expectValidKey bool
- expectAddr storage.Address
+ uri string
+ expectURI *URI
+ expectErr bool
+ expectRaw bool
+ expectImmutable bool
+ expectList bool
+ expectHash bool
+ expectValidKey bool
+ expectAddr storage.Address
}
tests := []test{
{
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/bmt/bmt.go b/vendor/github.com/ethereum/go-ethereum/swarm/bmt/bmt.go
index a85d4369..18eab5a2 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/bmt/bmt.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/bmt/bmt.go
@@ -61,7 +61,7 @@ const (
)
// BaseHasherFunc is a hash.Hash constructor function used for the base hash of the BMT.
-// implemented by Keccak256 SHA3 sha3.NewKeccak256
+// implemented by Keccak256 SHA3 sha3.NewLegacyKeccak256
type BaseHasherFunc func() hash.Hash
// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/bmt/bmt_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/bmt/bmt_test.go
index 683ba4f5..ab712d08 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/bmt/bmt_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/bmt/bmt_test.go
@@ -26,8 +26,8 @@ import (
"testing"
"time"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/swarm/testutil"
+ "golang.org/x/crypto/sha3"
)
// the actual data length generated (could be longer than max datalength of the BMT)
@@ -44,7 +44,7 @@ var counts = []int{1, 2, 3, 4, 5, 8, 9, 15, 16, 17, 32, 37, 42, 53, 63, 64, 65,
// calculates the Keccak256 SHA3 hash of the data
func sha3hash(data ...[]byte) []byte {
- h := sha3.NewKeccak256()
+ h := sha3.NewLegacyKeccak256()
return doSum(h, nil, data...)
}
@@ -121,7 +121,7 @@ func TestRefHasher(t *testing.T) {
t.Run(fmt.Sprintf("%d_segments_%d_bytes", segmentCount, length), func(t *testing.T) {
data := testutil.RandomBytes(i, length)
expected := x.expected(data)
- actual := NewRefHasher(sha3.NewKeccak256, segmentCount).Hash(data)
+ actual := NewRefHasher(sha3.NewLegacyKeccak256, segmentCount).Hash(data)
if !bytes.Equal(actual, expected) {
t.Fatalf("expected %x, got %x", expected, actual)
}
@@ -133,7 +133,7 @@ func TestRefHasher(t *testing.T) {
// tests if hasher responds with correct hash comparing the reference implementation return value
func TestHasherEmptyData(t *testing.T) {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
var data []byte
for _, count := range counts {
t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) {
@@ -153,7 +153,7 @@ func TestHasherEmptyData(t *testing.T) {
// tests sequential write with entire max size written in one go
func TestSyncHasherCorrectness(t *testing.T) {
data := testutil.RandomBytes(1, BufferSize)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
size := hasher().Size()
var err error
@@ -179,7 +179,7 @@ func TestSyncHasherCorrectness(t *testing.T) {
// tests order-neutral concurrent writes with entire max size written in one go
func TestAsyncCorrectness(t *testing.T) {
data := testutil.RandomBytes(1, BufferSize)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
size := hasher().Size()
whs := []whenHash{first, last, random}
@@ -226,7 +226,7 @@ func TestHasherReuse(t *testing.T) {
// tests if bmt reuse is not corrupting result
func testHasherReuse(poolsize int, t *testing.T) {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
pool := NewTreePool(hasher, segmentCount, poolsize)
defer pool.Drain(0)
bmt := New(pool)
@@ -243,7 +243,7 @@ func testHasherReuse(poolsize int, t *testing.T) {
// Tests if pool can be cleanly reused even in concurrent use by several hasher
func TestBMTConcurrentUse(t *testing.T) {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
pool := NewTreePool(hasher, segmentCount, PoolSize)
defer pool.Drain(0)
cycles := 100
@@ -277,7 +277,7 @@ LOOP:
// Tests BMT Hasher io.Writer interface is working correctly
// even multiple short random write buffers
func TestBMTWriterBuffers(t *testing.T) {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
for _, count := range counts {
t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) {
@@ -410,7 +410,7 @@ func BenchmarkPool(t *testing.B) {
// benchmarks simple sha3 hash on chunks
func benchmarkSHA3(t *testing.B, n int) {
data := testutil.RandomBytes(1, n)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
h := hasher()
t.ReportAllocs()
@@ -426,7 +426,7 @@ func benchmarkSHA3(t *testing.B, n int) {
// the premise is that this is the minimum computation needed for a BMT
// therefore this serves as a theoretical optimum for concurrent implementations
func benchmarkBMTBaseline(t *testing.B, n int) {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
hashSize := hasher().Size()
data := testutil.RandomBytes(1, hashSize)
@@ -453,7 +453,7 @@ func benchmarkBMTBaseline(t *testing.B, n int) {
// benchmarks BMT Hasher
func benchmarkBMT(t *testing.B, n int) {
data := testutil.RandomBytes(1, n)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
pool := NewTreePool(hasher, segmentCount, PoolSize)
bmt := New(pool)
@@ -467,7 +467,7 @@ func benchmarkBMT(t *testing.B, n int) {
// benchmarks BMT hasher with asynchronous concurrent segment/section writes
func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) {
data := testutil.RandomBytes(1, n)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
pool := NewTreePool(hasher, segmentCount, PoolSize)
bmt := New(pool).NewAsyncWriter(double)
idxs, segments := splitAndShuffle(bmt.SectionSize(), data)
@@ -485,7 +485,7 @@ func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) {
// benchmarks 100 concurrent bmt hashes with pool capacity
func benchmarkPool(t *testing.B, poolsize, n int) {
data := testutil.RandomBytes(1, n)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
pool := NewTreePool(hasher, segmentCount, poolsize)
cycles := 100
@@ -508,7 +508,7 @@ func benchmarkPool(t *testing.B, poolsize, n int) {
// benchmarks the reference hasher
func benchmarkRefHasher(t *testing.B, n int) {
data := testutil.RandomBytes(1, n)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
rbmt := NewRefHasher(hasher, 128)
t.ReportAllocs()
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/docker/Dockerfile b/vendor/github.com/ethereum/go-ethereum/swarm/docker/Dockerfile
new file mode 100644
index 00000000..1ee4e973
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/docker/Dockerfile
@@ -0,0 +1,23 @@
+FROM golang:1.11-alpine as builder
+
+ARG VERSION
+
+RUN apk add --update git gcc g++ linux-headers
+RUN mkdir -p $GOPATH/src/github.com/ethereum && \
+ cd $GOPATH/src/github.com/ethereum && \
+ git clone https://github.com/ethersphere/go-ethereum && \
+ cd $GOPATH/src/github.com/ethereum/go-ethereum && \
+ git checkout ${VERSION} && \
+ go install -ldflags "-X main.gitCommit=${VERSION}" ./cmd/swarm && \
+ go install -ldflags "-X main.gitCommit=${VERSION}" ./cmd/swarm/swarm-smoke && \
+ go install -ldflags "-X main.gitCommit=${VERSION}" ./cmd/geth && \
+ cp $GOPATH/bin/swarm /swarm && cp $GOPATH/bin/geth /geth && cp $GOPATH/bin/swarm-smoke /swarm-smoke
+
+
+# Release image with the required binaries and scripts
+FROM alpine:3.8
+WORKDIR /
+COPY --from=builder /swarm /geth /swarm-smoke /
+ADD run.sh /run.sh
+ADD run-smoke.sh /run-smoke.sh
+ENTRYPOINT ["/run.sh"]
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/docker/run-smoke.sh b/vendor/github.com/ethereum/go-ethereum/swarm/docker/run-smoke.sh
new file mode 100755
index 00000000..ba57a7ec
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/docker/run-smoke.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+set -o errexit
+set -o pipefail
+set -o nounset
+
+/swarm-smoke $@ 2>&1 || true
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/docker/run.sh b/vendor/github.com/ethereum/go-ethereum/swarm/docker/run.sh
new file mode 100755
index 00000000..3e613b56
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/docker/run.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+set -o errexit
+set -o pipefail
+set -o nounset
+
+PASSWORD=${PASSWORD:-}
+DATADIR=${DATADIR:-/root/.ethereum/}
+
+if [ "$PASSWORD" == "" ]; then echo "Password must be set, in order to use swarm non-interactively." && exit 1; fi
+
+echo $PASSWORD > /password
+
+KEYFILE=`find $DATADIR | grep UTC | head -n 1` || true
+if [ ! -f "$KEYFILE" ]; then echo "No keyfile found. Generating..." && /geth --datadir $DATADIR --password /password account new; fi
+KEYFILE=`find $DATADIR | grep UTC | head -n 1` || true
+if [ ! -f "$KEYFILE" ]; then echo "Could not find nor generate a BZZ keyfile." && exit 1; else echo "Found keyfile $KEYFILE"; fi
+
+VERSION=`/swarm version`
+echo "Running Swarm:"
+echo $VERSION
+
+export BZZACCOUNT="`echo -n $KEYFILE | tail -c 40`" || true
+if [ "$BZZACCOUNT" == "" ]; then echo "Could not parse BZZACCOUNT from keyfile." && exit 1; fi
+
+exec /swarm --bzzaccount=$BZZACCOUNT --password /password --datadir $DATADIR $@ 2>&1
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/bitvector/bitvector.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/bitvector/bitvector.go
index edc7c50c..95832850 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/bitvector/bitvector.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/bitvector/bitvector.go
@@ -60,7 +60,3 @@ func (bv *BitVector) Set(i int, v bool) {
func (bv *BitVector) Bytes() []byte {
return bv.b
}
-
-func (bv *BitVector) Length() int {
- return bv.len
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/discovery.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/discovery.go
index 21703e70..4c503047 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/discovery.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/discovery.go
@@ -65,7 +65,7 @@ func (d *Peer) HandleMsg(ctx context.Context, msg interface{}) error {
// NotifyDepth sends a message to all connections if depth of saturation is changed
func NotifyDepth(depth uint8, kad *Kademlia) {
- f := func(val *Peer, po int, _ bool) bool {
+ f := func(val *Peer, po int) bool {
val.NotifyDepth(depth)
return true
}
@@ -74,7 +74,7 @@ func NotifyDepth(depth uint8, kad *Kademlia) {
// NotifyPeer informs all peers about a newly added node
func NotifyPeer(p *BzzAddr, k *Kademlia) {
- f := func(val *Peer, po int, _ bool) bool {
+ f := func(val *Peer, po int) bool {
val.NotifyPeer(p, uint8(po))
return true
}
@@ -160,8 +160,8 @@ func (d *Peer) handleSubPeersMsg(msg *subPeersMsg) error {
if !d.sentPeers {
d.setDepth(msg.Depth)
var peers []*BzzAddr
- d.kad.EachConn(d.Over(), 255, func(p *Peer, po int, isproxbin bool) bool {
- if pob, _ := pof(d, d.kad.BaseAddr(), 0); pob > po {
+ d.kad.EachConn(d.Over(), 255, func(p *Peer, po int) bool {
+ if pob, _ := Pof(d, d.kad.BaseAddr(), 0); pob > po {
return false
}
if !d.seen(p.BzzAddr) {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go
index ebef5459..a0b6b988 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go
@@ -114,7 +114,7 @@ func (h *Hive) Stop() error {
}
}
log.Info(fmt.Sprintf("%08x hive stopped, dropping peers", h.BaseAddr()[:4]))
- h.EachConn(nil, 255, func(p *Peer, _ int, _ bool) bool {
+ h.EachConn(nil, 255, func(p *Peer, _ int) bool {
log.Info(fmt.Sprintf("%08x dropping peer %08x", h.BaseAddr()[:4], p.Address()[:4]))
p.Drop(nil)
return true
@@ -228,7 +228,7 @@ func (h *Hive) loadPeers() error {
// savePeers, savePeer implement persistence callback/
func (h *Hive) savePeers() error {
var peers []*BzzAddr
- h.Kademlia.EachAddr(nil, 256, func(pa *BzzAddr, i int, _ bool) bool {
+ h.Kademlia.EachAddr(nil, 256, func(pa *BzzAddr, i int) bool {
if pa == nil {
log.Warn(fmt.Sprintf("empty addr: %v", i))
return true
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/hive_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/hive_test.go
index 56adc5a8..a29e7308 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/hive_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/hive_test.go
@@ -103,7 +103,7 @@ func TestHiveStatePersistance(t *testing.T) {
pp.Start(s1.Server)
i := 0
- pp.Kademlia.EachAddr(nil, 256, func(addr *BzzAddr, po int, nn bool) bool {
+ pp.Kademlia.EachAddr(nil, 256, func(addr *BzzAddr, po int) bool {
delete(peers, addr.String())
i++
return true
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go
index a8ecaa4b..7d52f26f 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go
@@ -49,47 +49,46 @@ a guaranteed constant maximum limit on the number of hops needed to reach one
node from the other.
*/
-var pof = pot.DefaultPof(256)
+var Pof = pot.DefaultPof(256)
// KadParams holds the config params for Kademlia
type KadParams struct {
// adjustable parameters
- MaxProxDisplay int // number of rows the table shows
- MinProxBinSize int // nearest neighbour core minimum cardinality
- MinBinSize int // minimum number of peers in a row
- MaxBinSize int // maximum number of peers in a row before pruning
- RetryInterval int64 // initial interval before a peer is first redialed
- RetryExponent int // exponent to multiply retry intervals with
- MaxRetries int // maximum number of redial attempts
+ MaxProxDisplay int // number of rows the table shows
+ NeighbourhoodSize int // nearest neighbour core minimum cardinality
+ MinBinSize int // minimum number of peers in a row
+ MaxBinSize int // maximum number of peers in a row before pruning
+ RetryInterval int64 // initial interval before a peer is first redialed
+ RetryExponent int // exponent to multiply retry intervals with
+ MaxRetries int // maximum number of redial attempts
// function to sanction or prevent suggesting a peer
- Reachable func(*BzzAddr) bool
+ Reachable func(*BzzAddr) bool `json:"-"`
}
// NewKadParams returns a params struct with default values
func NewKadParams() *KadParams {
return &KadParams{
- MaxProxDisplay: 16,
- MinProxBinSize: 2,
- MinBinSize: 2,
- MaxBinSize: 4,
- RetryInterval: 4200000000, // 4.2 sec
- MaxRetries: 42,
- RetryExponent: 2,
+ MaxProxDisplay: 16,
+ NeighbourhoodSize: 2,
+ MinBinSize: 2,
+ MaxBinSize: 4,
+ RetryInterval: 4200000000, // 4.2 sec
+ MaxRetries: 42,
+ RetryExponent: 2,
}
}
// Kademlia is a table of live peers and a db of known peers (node records)
type Kademlia struct {
lock sync.RWMutex
- *KadParams // Kademlia configuration parameters
- base []byte // immutable baseaddress of the table
- addrs *pot.Pot // pots container for known peer addresses
- conns *pot.Pot // pots container for live peer connections
- depth uint8 // stores the last current depth of saturation
- nDepth int // stores the last neighbourhood depth
- nDepthC chan int // returned by DepthC function to signal neighbourhood depth change
- addrCountC chan int // returned by AddrCountC function to signal peer count change
- Pof func(pot.Val, pot.Val, int) (int, bool) // function for calculating kademlia routing distance between two addresses
+ *KadParams // Kademlia configuration parameters
+ base []byte // immutable baseaddress of the table
+ addrs *pot.Pot // pots container for known peer addresses
+ conns *pot.Pot // pots container for live peer connections
+ depth uint8 // stores the last current depth of saturation
+ nDepth int // stores the last neighbourhood depth
+ nDepthC chan int // returned by DepthC function to signal neighbourhood depth change
+ addrCountC chan int // returned by AddrCountC function to signal peer count change
}
// NewKademlia creates a Kademlia table for base address addr
@@ -104,7 +103,6 @@ func NewKademlia(addr []byte, params *KadParams) *Kademlia {
KadParams: params,
addrs: pot.NewPot(nil, 0),
conns: pot.NewPot(nil, 0),
- Pof: pof,
}
}
@@ -147,7 +145,7 @@ func (k *Kademlia) Register(peers ...*BzzAddr) error {
return fmt.Errorf("add peers: %x is self", k.base)
}
var found bool
- k.addrs, _, found, _ = pot.Swap(k.addrs, p, pof, func(v pot.Val) pot.Val {
+ k.addrs, _, found, _ = pot.Swap(k.addrs, p, Pof, func(v pot.Val) pot.Val {
// if not found
if v == nil {
// insert new offline peer into conns
@@ -177,11 +175,11 @@ func (k *Kademlia) SuggestPeer() (a *BzzAddr, o int, want bool) {
k.lock.Lock()
defer k.lock.Unlock()
minsize := k.MinBinSize
- depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
+ depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
// if there is a callable neighbour within the current proxBin, connect
// this makes sure nearest neighbour set is fully connected
var ppo int
- k.addrs.EachNeighbour(k.base, pof, func(val pot.Val, po int) bool {
+ k.addrs.EachNeighbour(k.base, Pof, func(val pot.Val, po int) bool {
if po < depth {
return false
}
@@ -200,7 +198,7 @@ func (k *Kademlia) SuggestPeer() (a *BzzAddr, o int, want bool) {
var bpo []int
prev := -1
- k.conns.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
+ k.conns.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {
prev++
for ; prev < po; prev++ {
bpo = append(bpo, prev)
@@ -221,12 +219,12 @@ func (k *Kademlia) SuggestPeer() (a *BzzAddr, o int, want bool) {
// try to select a candidate peer
// find the first callable peer
nxt := bpo[0]
- k.addrs.EachBin(k.base, pof, nxt, func(po, _ int, f func(func(pot.Val, int) bool) bool) bool {
+ k.addrs.EachBin(k.base, Pof, nxt, func(po, _ int, f func(func(pot.Val) bool) bool) bool {
// for each bin (up until depth) we find callable candidate peers
if po >= depth {
return false
}
- return f(func(val pot.Val, _ int) bool {
+ return f(func(val pot.Val) bool {
e := val.(*entry)
c := k.callable(e)
if c {
@@ -253,7 +251,7 @@ func (k *Kademlia) On(p *Peer) (uint8, bool) {
k.lock.Lock()
defer k.lock.Unlock()
var ins bool
- k.conns, _, _, _ = pot.Swap(k.conns, p, pof, func(v pot.Val) pot.Val {
+ k.conns, _, _, _ = pot.Swap(k.conns, p, Pof, func(v pot.Val) pot.Val {
// if not found live
if v == nil {
ins = true
@@ -267,7 +265,7 @@ func (k *Kademlia) On(p *Peer) (uint8, bool) {
a := newEntry(p.BzzAddr)
a.conn = p
// insert new online peer into addrs
- k.addrs, _, _, _ = pot.Swap(k.addrs, p, pof, func(v pot.Val) pot.Val {
+ k.addrs, _, _, _ = pot.Swap(k.addrs, p, Pof, func(v pot.Val) pot.Val {
return a
})
// send new address count value only if the peer is inserted
@@ -277,7 +275,7 @@ func (k *Kademlia) On(p *Peer) (uint8, bool) {
}
log.Trace(k.string())
// calculate if depth of saturation changed
- depth := uint8(k.saturation(k.MinBinSize))
+ depth := uint8(k.saturation())
var changed bool
if depth != k.depth {
changed = true
@@ -308,7 +306,7 @@ func (k *Kademlia) sendNeighbourhoodDepthChange() {
// It provides signaling of neighbourhood depth change.
// This part of the code is sending new neighbourhood depth to nDepthC if that condition is met.
if k.nDepthC != nil {
- nDepth := depthForPot(k.conns, k.MinProxBinSize, k.base)
+ nDepth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
if nDepth != k.nDepth {
k.nDepth = nDepth
k.nDepthC <- nDepth
@@ -333,7 +331,7 @@ func (k *Kademlia) Off(p *Peer) {
defer k.lock.Unlock()
var del bool
if !p.BzzPeer.LightNode {
- k.addrs, _, _, _ = pot.Swap(k.addrs, p, pof, func(v pot.Val) pot.Val {
+ k.addrs, _, _, _ = pot.Swap(k.addrs, p, Pof, func(v pot.Val) pot.Val {
// v cannot be nil, must check otherwise we overwrite entry
if v == nil {
panic(fmt.Sprintf("connected peer not found %v", p))
@@ -346,7 +344,7 @@ func (k *Kademlia) Off(p *Peer) {
}
if del {
- k.conns, _, _, _ = pot.Swap(k.conns, p, pof, func(_ pot.Val) pot.Val {
+ k.conns, _, _, _ = pot.Swap(k.conns, p, Pof, func(_ pot.Val) pot.Val {
// v cannot be nil, but no need to check
return nil
})
@@ -358,100 +356,70 @@ func (k *Kademlia) Off(p *Peer) {
}
}
-func (k *Kademlia) EachBin(base []byte, pof pot.Pof, o int, eachBinFunc func(conn *Peer, po int) bool) {
- k.lock.RLock()
- defer k.lock.RUnlock()
-
- var startPo int
- var endPo int
- kadDepth := depthForPot(k.conns, k.MinProxBinSize, k.base)
-
- k.conns.EachBin(base, pof, o, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
- if startPo > 0 && endPo != k.MaxProxDisplay {
- startPo = endPo + 1
- }
- if po < kadDepth {
- endPo = po
- } else {
- endPo = k.MaxProxDisplay
- }
-
- for bin := startPo; bin <= endPo; bin++ {
- f(func(val pot.Val, _ int) bool {
- return eachBinFunc(val.(*Peer), bin)
- })
- }
- return true
- })
-}
-
// EachConn is an iterator with args (base, po, f) applies f to each live peer
// that has proximity order po or less as measured from the base
// if base is nil, kademlia base address is used
-func (k *Kademlia) EachConn(base []byte, o int, f func(*Peer, int, bool) bool) {
+func (k *Kademlia) EachConn(base []byte, o int, f func(*Peer, int) bool) {
k.lock.RLock()
defer k.lock.RUnlock()
k.eachConn(base, o, f)
}
-func (k *Kademlia) eachConn(base []byte, o int, f func(*Peer, int, bool) bool) {
+func (k *Kademlia) eachConn(base []byte, o int, f func(*Peer, int) bool) {
if len(base) == 0 {
base = k.base
}
- depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
- k.conns.EachNeighbour(base, pof, func(val pot.Val, po int) bool {
+ k.conns.EachNeighbour(base, Pof, func(val pot.Val, po int) bool {
if po > o {
return true
}
- return f(val.(*Peer), po, po >= depth)
+ return f(val.(*Peer), po)
})
}
// EachAddr called with (base, po, f) is an iterator applying f to each known peer
-// that has proximity order po or less as measured from the base
+// that has proximity order o or less as measured from the base
// if base is nil, kademlia base address is used
-func (k *Kademlia) EachAddr(base []byte, o int, f func(*BzzAddr, int, bool) bool) {
+func (k *Kademlia) EachAddr(base []byte, o int, f func(*BzzAddr, int) bool) {
k.lock.RLock()
defer k.lock.RUnlock()
k.eachAddr(base, o, f)
}
-func (k *Kademlia) eachAddr(base []byte, o int, f func(*BzzAddr, int, bool) bool) {
+func (k *Kademlia) eachAddr(base []byte, o int, f func(*BzzAddr, int) bool) {
if len(base) == 0 {
base = k.base
}
- depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
- k.addrs.EachNeighbour(base, pof, func(val pot.Val, po int) bool {
+ k.addrs.EachNeighbour(base, Pof, func(val pot.Val, po int) bool {
if po > o {
return true
}
- return f(val.(*entry).BzzAddr, po, po >= depth)
+ return f(val.(*entry).BzzAddr, po)
})
}
func (k *Kademlia) NeighbourhoodDepth() (depth int) {
k.lock.RLock()
defer k.lock.RUnlock()
- return depthForPot(k.conns, k.MinProxBinSize, k.base)
+ return depthForPot(k.conns, k.NeighbourhoodSize, k.base)
}
// depthForPot returns the proximity order that defines the distance of
-// the nearest neighbour set with cardinality >= MinProxBinSize
-// if there is altogether less than MinProxBinSize peers it returns 0
+// the nearest neighbour set with cardinality >= NeighbourhoodSize
+// if there is altogether less than NeighbourhoodSize peers it returns 0
// caller must hold the lock
-func depthForPot(p *pot.Pot, minProxBinSize int, pivotAddr []byte) (depth int) {
- if p.Size() <= minProxBinSize {
+func depthForPot(p *pot.Pot, neighbourhoodSize int, pivotAddr []byte) (depth int) {
+ if p.Size() <= neighbourhoodSize {
return 0
}
// total number of peers in iteration
var size int
- // true if iteration has all prox peers
- var b bool
-
- // last po recorded in iteration
- var lastPo int
+ // determining the depth is a two-step process
+ // first we find the proximity bin of the shallowest of the NeighbourhoodSize peers
+ // the numeric value of depth cannot be higher than this
+ var maxDepth int
f := func(v pot.Val, i int) bool {
// po == 256 means that addr is the pivot address(self)
@@ -462,39 +430,29 @@ func depthForPot(p *pot.Pot, minProxBinSize int, pivotAddr []byte) (depth int) {
// this means we have all nn-peers.
// depth is by default set to the bin of the farthest nn-peer
- if size == minProxBinSize {
- b = true
- depth = i
- return true
- }
-
- // if there are empty bins between farthest nn and current node,
- // the depth should recalculated to be
- // the farthest of those empty bins
- //
- // 0 abac ccde
- // 1 2a2a
- // 2 589f <--- nearest non-nn
- // ============ DEPTH 3 ===========
- // 3 <--- don't count as empty bins
- // 4 <--- don't count as empty bins
- // 5 cbcb cdcd <---- furthest nn
- // 6 a1a2 b3c4
- if b && i < depth {
- depth = i + 1
- lastPo = i
+ if size == neighbourhoodSize {
+ maxDepth = i
return false
}
- lastPo = i
+
return true
}
- p.EachNeighbour(pivotAddr, pof, f)
+ p.EachNeighbour(pivotAddr, Pof, f)
+
+ // the second step is to test for empty bins in order from shallowest to deepest
+ // if an empty bin is found, this will be the actual depth
+ // we stop iterating if we hit the maxDepth determined in the first step
+ p.EachBin(pivotAddr, Pof, 0, func(po int, _ int, f func(func(pot.Val) bool) bool) bool {
+ if po == depth {
+ if maxDepth == depth {
+ return false
+ }
+ depth++
+ return true
+ }
+ return false
+ })
- // cover edge case where more than one farthest nn
- // AND we only have nn-peers
- if lastPo == depth {
- depth = 0
- }
return depth
}
@@ -549,21 +507,21 @@ func (k *Kademlia) string() string {
rows = append(rows, "=========================================================================")
rows = append(rows, fmt.Sprintf("%v KΛÐΞMLIΛ hive: queen's address: %x", time.Now().UTC().Format(time.UnixDate), k.BaseAddr()[:3]))
- rows = append(rows, fmt.Sprintf("population: %d (%d), MinProxBinSize: %d, MinBinSize: %d, MaxBinSize: %d", k.conns.Size(), k.addrs.Size(), k.MinProxBinSize, k.MinBinSize, k.MaxBinSize))
+ rows = append(rows, fmt.Sprintf("population: %d (%d), NeighbourhoodSize: %d, MinBinSize: %d, MaxBinSize: %d", k.conns.Size(), k.addrs.Size(), k.NeighbourhoodSize, k.MinBinSize, k.MaxBinSize))
liverows := make([]string, k.MaxProxDisplay)
peersrows := make([]string, k.MaxProxDisplay)
- depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
+ depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
rest := k.conns.Size()
- k.conns.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
+ k.conns.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {
var rowlen int
if po >= k.MaxProxDisplay {
po = k.MaxProxDisplay - 1
}
row := []string{fmt.Sprintf("%2d", size)}
rest -= size
- f(func(val pot.Val, vpo int) bool {
+ f(func(val pot.Val) bool {
e := val.(*Peer)
row = append(row, fmt.Sprintf("%x", e.Address()[:2]))
rowlen++
@@ -575,7 +533,7 @@ func (k *Kademlia) string() string {
return true
})
- k.addrs.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
+ k.addrs.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {
var rowlen int
if po >= k.MaxProxDisplay {
po = k.MaxProxDisplay - 1
@@ -585,7 +543,7 @@ func (k *Kademlia) string() string {
}
row := []string{fmt.Sprintf("%2d", size)}
// we are displaying live peers too
- f(func(val pot.Val, vpo int) bool {
+ f(func(val pot.Val) bool {
e := val.(*entry)
row = append(row, Label(e))
rowlen++
@@ -613,172 +571,148 @@ func (k *Kademlia) string() string {
return "\n" + strings.Join(rows, "\n")
}
-// PeerPot keeps info about expected nearest neighbours and empty bins
+// PeerPot keeps info about expected nearest neighbours
// used for testing only
+// TODO move to separate testing tools file
type PeerPot struct {
- NNSet [][]byte
- EmptyBins []int
+ NNSet [][]byte
}
// NewPeerPotMap creates a map of pot record of *BzzAddr with keys
// as hexadecimal representations of the address.
+// the NeighbourhoodSize of the passed kademlia is used
// used for testing only
-func NewPeerPotMap(kadMinProxSize int, addrs [][]byte) map[string]*PeerPot {
+// TODO move to separate testing tools file
+func NewPeerPotMap(neighbourhoodSize int, addrs [][]byte) map[string]*PeerPot {
// create a table of all nodes for health check
np := pot.NewPot(nil, 0)
for _, addr := range addrs {
- np, _, _ = pot.Add(np, addr, pof)
+ np, _, _ = pot.Add(np, addr, Pof)
}
ppmap := make(map[string]*PeerPot)
+ // generate an allknowing source of truth for connections
+ // for every kademlia passed
for i, a := range addrs {
// actual kademlia depth
- depth := depthForPot(np, kadMinProxSize, a)
-
- // upon entering a new iteration
- // this will hold the value the po should be
- // if it's one higher than the po in the last iteration
- prevPo := 256
-
- // all empty bins which are outside neighbourhood depth
- var emptyBins []int
+ depth := depthForPot(np, neighbourhoodSize, a)
// all nn-peers
var nns [][]byte
- np.EachNeighbour(a, pof, func(val pot.Val, po int) bool {
+ // iterate through the neighbours, going from the deepest to the shallowest
+ np.EachNeighbour(a, Pof, func(val pot.Val, po int) bool {
addr := val.([]byte)
// po == 256 means that addr is the pivot address(self)
+ // we do not include self in the map
if po == 256 {
return true
}
-
- // iterate through the neighbours, going from the closest to the farthest
- // we calculate the nearest neighbours that should be in the set
- // depth in this case equates to:
- // 1. Within all bins that are higher or equal than depth there are
- // at least minProxBinSize peers connected
- // 2. depth-1 bin is not empty
+ // append any neighbors found
+ // a neighbor is any peer in or deeper than the depth
if po >= depth {
nns = append(nns, addr)
- prevPo = depth - 1
return true
}
- for j := prevPo; j > po; j-- {
- emptyBins = append(emptyBins, j)
- }
- prevPo = po - 1
- return true
+ return false
})
- log.Trace(fmt.Sprintf("%x NNS: %s, emptyBins: %s", addrs[i][:4], LogAddrs(nns), logEmptyBins(emptyBins)))
- ppmap[common.Bytes2Hex(a)] = &PeerPot{nns, emptyBins}
+ log.Trace(fmt.Sprintf("%x PeerPotMap NNS: %s", addrs[i][:4], LogAddrs(nns)))
+ ppmap[common.Bytes2Hex(a)] = &PeerPot{
+ NNSet: nns,
+ }
}
return ppmap
}
-// saturation returns the lowest proximity order that the bin for that order
-// has less than n peers
-// It is used in Healthy function for testing only
-func (k *Kademlia) saturation(n int) int {
+// saturation iterates through all peers and
+// returns the smallest po value in which the node has less than n peers
+// if the iterator reaches depth, then value for depth is returned
+// TODO move to separate testing tools file
+// TODO this function will stop at the first bin with less than MinBinSize peers, even if there are empty bins between that bin and the depth. This may not be correct behavior
+func (k *Kademlia) saturation() int {
prev := -1
- k.addrs.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
+ k.addrs.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {
prev++
- return prev == po && size >= n
+ return prev == po && size >= k.MinBinSize
})
- depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
+ // TODO evaluate whether this check cannot just as well be done within the eachbin
+ depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
if depth < prev {
return depth
}
return prev
}
-// full returns true if all required bins have connected peers.
+// knowNeighbours tests if all neighbours in the peerpot
+// are found among the peers known to the kademlia
// It is used in Healthy function for testing only
-func (k *Kademlia) full(emptyBins []int) (full bool) {
- prev := 0
- e := len(emptyBins)
- ok := true
- depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
- k.conns.EachBin(k.base, pof, 0, func(po, _ int, _ func(func(val pot.Val, i int) bool) bool) bool {
- if po >= depth {
- return false
- }
- if prev == depth+1 {
- return true
- }
- for i := prev; i < po; i++ {
- e--
- if e < 0 {
- ok = false
- return false
- }
- if emptyBins[e] != i {
- log.Trace(fmt.Sprintf("%08x po: %d, i: %d, e: %d, emptybins: %v", k.BaseAddr()[:4], po, i, e, logEmptyBins(emptyBins)))
- if emptyBins[e] < i {
- panic("incorrect peerpot")
- }
- ok = false
- return false
- }
- }
- prev = po + 1
- return true
- })
- if !ok {
- return false
- }
- return e == 0
-}
-
-// knowNearestNeighbours tests if all known nearest neighbours given as arguments
-// are found in the addressbook
-// It is used in Healthy function for testing only
-func (k *Kademlia) knowNearestNeighbours(peers [][]byte) bool {
+// TODO move to separate testing tools file
+func (k *Kademlia) knowNeighbours(addrs [][]byte) (got bool, n int, missing [][]byte) {
pm := make(map[string]bool)
-
- k.eachAddr(nil, 255, func(p *BzzAddr, po int, nn bool) bool {
- if !nn {
+ depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
+ // create a map with all peers at depth and deeper known in the kademlia
+ k.eachAddr(nil, 255, func(p *BzzAddr, po int) bool {
+ // in order deepest to shallowest compared to the kademlia base address
+ // all bins (except self) are included (0 <= bin <= 255)
+ if po < depth {
return false
}
- pk := fmt.Sprintf("%x", p.Address())
+ pk := common.Bytes2Hex(p.Address())
pm[pk] = true
return true
})
- for _, p := range peers {
- pk := fmt.Sprintf("%x", p)
- if !pm[pk] {
- log.Trace(fmt.Sprintf("%08x: known nearest neighbour %s not found", k.BaseAddr()[:4], pk[:8]))
- return false
- }
- }
- return true
-}
-// gotNearestNeighbours tests if all known nearest neighbours given as arguments
-// are connected peers
-// It is used in Healthy function for testing only
-func (k *Kademlia) gotNearestNeighbours(peers [][]byte) (got bool, n int, missing [][]byte) {
- pm := make(map[string]bool)
-
- k.eachConn(nil, 255, func(p *Peer, po int, nn bool) bool {
- if !nn {
- return false
- }
- pk := fmt.Sprintf("%x", p.Address())
- pm[pk] = true
- return true
- })
+ // iterate through nearest neighbors in the peerpot map
+ // if we can't find the neighbor in the map we created above
+ // then we don't know all our neighbors
+ // (which sadly is all too common in modern society)
var gots int
var culprits [][]byte
- for _, p := range peers {
- pk := fmt.Sprintf("%x", p)
+ for _, p := range addrs {
+ pk := common.Bytes2Hex(p)
if pm[pk] {
gots++
} else {
- log.Trace(fmt.Sprintf("%08x: ExpNN: %s not found", k.BaseAddr()[:4], pk[:8]))
+ log.Trace(fmt.Sprintf("%08x: known nearest neighbour %s not found", k.base, pk))
+ culprits = append(culprits, p)
+ }
+ }
+ return gots == len(addrs), gots, culprits
+}
+
+// connectedNeighbours tests if all neighbours in the peerpot
+// are currently connected in the kademlia
+// It is used in Healthy function for testing only
+func (k *Kademlia) connectedNeighbours(peers [][]byte) (got bool, n int, missing [][]byte) {
+ pm := make(map[string]bool)
+
+ // create a map with all peers at depth and deeper that are connected in the kademlia
+ // in order deepest to shallowest compared to the kademlia base address
+ // all bins (except self) are included (0 <= bin <= 255)
+ depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
+ k.eachConn(nil, 255, func(p *Peer, po int) bool {
+ if po < depth {
+ return false
+ }
+ pk := common.Bytes2Hex(p.Address())
+ pm[pk] = true
+ return true
+ })
+
+ // iterate through nearest neighbors in the peerpot map
+ // if we can't find the neighbor in the map we created above
+ // then we don't know all our neighbors
+ var gots int
+ var culprits [][]byte
+ for _, p := range peers {
+ pk := common.Bytes2Hex(p)
+ if pm[pk] {
+ gots++
+ } else {
+ log.Trace(fmt.Sprintf("%08x: ExpNN: %s not found", k.base, pk))
culprits = append(culprits, p)
}
}
@@ -788,31 +722,40 @@ func (k *Kademlia) gotNearestNeighbours(peers [][]byte) (got bool, n int, missin
// Health state of the Kademlia
// used for testing only
type Health struct {
- KnowNN bool // whether node knows all its nearest neighbours
- GotNN bool // whether node is connected to all its nearest neighbours
- CountNN int // amount of nearest neighbors connected to
- CulpritsNN [][]byte // which known NNs are missing
- Full bool // whether node has a peer in each kademlia bin (where there is such a peer)
- Hive string
+ KnowNN bool // whether node knows all its neighbours
+ CountKnowNN int // amount of neighbors known
+ MissingKnowNN [][]byte // which neighbours we should have known but we don't
+ ConnectNN bool // whether node is connected to all its neighbours
+ CountConnectNN int // amount of neighbours connected to
+ MissingConnectNN [][]byte // which neighbours we should have been connected to but we're not
+ Saturated bool // whether we are connected to all the peers we would have liked to
+ Hive string
}
// Healthy reports the health state of the kademlia connectivity
-// returns a Health struct
+//
+// The PeerPot argument provides an all-knowing view of the network
+// The resulting Health object is a result of comparisons between
+// what is the actual composition of the kademlia in question (the receiver), and
+// what SHOULD it have been when we take all we know about the network into consideration.
+//
// used for testing only
func (k *Kademlia) Healthy(pp *PeerPot) *Health {
k.lock.RLock()
defer k.lock.RUnlock()
- gotnn, countnn, culpritsnn := k.gotNearestNeighbours(pp.NNSet)
- knownn := k.knowNearestNeighbours(pp.NNSet)
- full := k.full(pp.EmptyBins)
- log.Trace(fmt.Sprintf("%08x: healthy: knowNNs: %v, gotNNs: %v, full: %v\n", k.BaseAddr()[:4], knownn, gotnn, full))
- return &Health{knownn, gotnn, countnn, culpritsnn, full, k.string()}
-}
-
-func logEmptyBins(ebs []int) string {
- var ebss []string
- for _, eb := range ebs {
- ebss = append(ebss, fmt.Sprintf("%d", eb))
+ gotnn, countgotnn, culpritsgotnn := k.connectedNeighbours(pp.NNSet)
+ knownn, countknownn, culpritsknownn := k.knowNeighbours(pp.NNSet)
+ depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
+ saturated := k.saturation() < depth
+ log.Trace(fmt.Sprintf("%08x: healthy: knowNNs: %v, gotNNs: %v, saturated: %v\n", k.base, knownn, gotnn, saturated))
+ return &Health{
+ KnowNN: knownn,
+ CountKnowNN: countknownn,
+ MissingKnowNN: culpritsknownn,
+ ConnectNN: gotnn,
+ CountConnectNN: countgotnn,
+ MissingConnectNN: culpritsgotnn,
+ Saturated: saturated,
+ Hive: k.string(),
}
- return strings.Join(ebss, ", ")
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia_test.go
index 184a2d94..fcb277fd 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The go-ethereum Authors
+// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -41,12 +41,17 @@ func testKadPeerAddr(s string) *BzzAddr {
return &BzzAddr{OAddr: a, UAddr: a}
}
-func newTestKademlia(b string) *Kademlia {
+func newTestKademliaParams() *KadParams {
params := NewKadParams()
+ // TODO why is this 1?
params.MinBinSize = 1
- params.MinProxBinSize = 2
+ params.NeighbourhoodSize = 2
+ return params
+}
+
+func newTestKademlia(b string) *Kademlia {
base := pot.NewAddressFromString(b)
- return NewKademlia(base, params)
+ return NewKademlia(base, newTestKademliaParams())
}
func newTestKadPeer(k *Kademlia, s string, lightNode bool) *Peer {
@@ -82,72 +87,172 @@ func Register(k *Kademlia, regs ...string) {
// empty bins above the farthest "nearest neighbor-peer" then
// the depth should be set at the farthest of those empty bins
//
-// TODO: Make test adapt to change in MinProxBinSize
+// TODO: Make test adapt to change in NeighbourhoodSize
func TestNeighbourhoodDepth(t *testing.T) {
baseAddressBytes := RandomAddr().OAddr
kad := NewKademlia(baseAddressBytes, NewKadParams())
baseAddress := pot.NewAddressFromBytes(baseAddressBytes)
- closerAddress := pot.RandomAddressAt(baseAddress, 7)
- closerPeer := newTestDiscoveryPeer(closerAddress, kad)
- kad.On(closerPeer)
+ // generate the peers
+ var peers []*Peer
+ for i := 0; i < 7; i++ {
+ addr := pot.RandomAddressAt(baseAddress, i)
+ peers = append(peers, newTestDiscoveryPeer(addr, kad))
+ }
+ var sevenPeers []*Peer
+ for i := 0; i < 2; i++ {
+ addr := pot.RandomAddressAt(baseAddress, 7)
+ sevenPeers = append(sevenPeers, newTestDiscoveryPeer(addr, kad))
+ }
+
+ testNum := 0
+ // first try with empty kademlia
depth := kad.NeighbourhoodDepth()
if depth != 0 {
- t.Fatalf("expected depth 0, was %d", depth)
+ t.Fatalf("%d expected depth 0, was %d", testNum, depth)
}
+ testNum++
- sameAddress := pot.RandomAddressAt(baseAddress, 7)
- samePeer := newTestDiscoveryPeer(sameAddress, kad)
- kad.On(samePeer)
+ // add one peer on 7
+ kad.On(sevenPeers[0])
depth = kad.NeighbourhoodDepth()
if depth != 0 {
- t.Fatalf("expected depth 0, was %d", depth)
+ t.Fatalf("%d expected depth 0, was %d", testNum, depth)
}
+ testNum++
- midAddress := pot.RandomAddressAt(baseAddress, 4)
- midPeer := newTestDiscoveryPeer(midAddress, kad)
- kad.On(midPeer)
- depth = kad.NeighbourhoodDepth()
- if depth != 5 {
- t.Fatalf("expected depth 5, was %d", depth)
- }
-
- kad.Off(midPeer)
+ // add a second on 7
+ kad.On(sevenPeers[1])
depth = kad.NeighbourhoodDepth()
if depth != 0 {
- t.Fatalf("expected depth 0, was %d", depth)
+ t.Fatalf("%d expected depth 0, was %d", testNum, depth)
}
+ testNum++
- fartherAddress := pot.RandomAddressAt(baseAddress, 1)
- fartherPeer := newTestDiscoveryPeer(fartherAddress, kad)
- kad.On(fartherPeer)
- depth = kad.NeighbourhoodDepth()
- if depth != 2 {
- t.Fatalf("expected depth 2, was %d", depth)
+ // add from 0 to 6
+ for i, p := range peers {
+ kad.On(p)
+ depth = kad.NeighbourhoodDepth()
+ if depth != i+1 {
+ t.Fatalf("%d.%d expected depth %d, was %d", i+1, testNum, i, depth)
+ }
}
+ testNum++
- midSameAddress := pot.RandomAddressAt(baseAddress, 4)
- midSamePeer := newTestDiscoveryPeer(midSameAddress, kad)
- kad.Off(closerPeer)
- kad.On(midPeer)
- kad.On(midSamePeer)
+ kad.Off(sevenPeers[1])
depth = kad.NeighbourhoodDepth()
- if depth != 2 {
- t.Fatalf("expected depth 2, was %d", depth)
+ if depth != 6 {
+ t.Fatalf("%d expected depth 6, was %d", testNum, depth)
}
+ testNum++
- kad.Off(fartherPeer)
- log.Trace(kad.string())
- time.Sleep(time.Millisecond)
+ kad.Off(peers[4])
depth = kad.NeighbourhoodDepth()
- if depth != 0 {
- t.Fatalf("expected depth 0, was %d", depth)
+ if depth != 4 {
+ t.Fatalf("%d expected depth 4, was %d", testNum, depth)
+ }
+ testNum++
+
+ kad.Off(peers[3])
+ depth = kad.NeighbourhoodDepth()
+ if depth != 3 {
+ t.Fatalf("%d expected depth 3, was %d", testNum, depth)
+ }
+ testNum++
+}
+
+// TestHealthStrict tests the simplest definition of health
+// Which means whether we are connected to all neighbors we know of
+func TestHealthStrict(t *testing.T) {
+
+ // base address is all zeros
+ // no peers
+ // unhealthy (and lonely)
+ k := newTestKademlia("11111111")
+ assertHealth(t, k, false, false)
+
+ // know one peer but not connected
+ // unhealthy
+ Register(k, "11100000")
+ log.Trace(k.String())
+ assertHealth(t, k, false, false)
+
+ // know one peer and connected
+ // healthy
+ On(k, "11100000")
+ assertHealth(t, k, true, false)
+
+ // know two peers, only one connected
+ // unhealthy
+ Register(k, "11111100")
+ log.Trace(k.String())
+ assertHealth(t, k, false, false)
+
+ // know two peers and connected to both
+ // healthy
+ On(k, "11111100")
+ assertHealth(t, k, true, false)
+
+ // know three peers, connected to the two deepest
+ // healthy
+ Register(k, "00000000")
+ log.Trace(k.String())
+ assertHealth(t, k, true, false)
+
+ // know three peers, connected to all three
+ // healthy
+ On(k, "00000000")
+ assertHealth(t, k, true, false)
+
+ // add fourth peer deeper than current depth
+ // unhealthy
+ Register(k, "11110000")
+ log.Trace(k.String())
+ assertHealth(t, k, false, false)
+
+ // connected to three deepest peers
+ // healthy
+ On(k, "11110000")
+ assertHealth(t, k, true, false)
+
+ // add additional peer in same bin as deepest peer
+ // unhealthy
+ Register(k, "11111101")
+ log.Trace(k.String())
+ assertHealth(t, k, false, false)
+
+ // four deepest of five peers connected
+ // healthy
+ On(k, "11111101")
+ assertHealth(t, k, true, false)
+}
+
+func assertHealth(t *testing.T, k *Kademlia, expectHealthy bool, expectSaturation bool) {
+ t.Helper()
+ kid := common.Bytes2Hex(k.BaseAddr())
+ addrs := [][]byte{k.BaseAddr()}
+ k.EachAddr(nil, 255, func(addr *BzzAddr, po int) bool {
+ addrs = append(addrs, addr.Address())
+ return true
+ })
+
+ pp := NewPeerPotMap(k.NeighbourhoodSize, addrs)
+ healthParams := k.Healthy(pp[kid])
+
+ // definition of health, all conditions but be true:
+ // - we at least know one peer
+ // - we know all neighbors
+ // - we are connected to all known neighbors
+ health := healthParams.KnowNN && healthParams.ConnectNN && healthParams.CountKnowNN > 0
+ if expectHealthy != health {
+ t.Fatalf("expected kademlia health %v, is %v\n%v", expectHealthy, health, k.String())
}
}
func testSuggestPeer(k *Kademlia, expAddr string, expPo int, expWant bool) error {
addr, o, want := k.SuggestPeer()
+ log.Trace("suggestpeer return", "a", addr, "o", o, "want", want)
if binStr(addr) != expAddr {
return fmt.Errorf("incorrect peer address suggested. expected %v, got %v", expAddr, binStr(addr))
}
@@ -167,6 +272,7 @@ func binStr(a *BzzAddr) string {
return pot.ToBin(a.Address())[:8]
}
+// TODO explain why this bug occurred and how it should have been mitigated
func TestSuggestPeerBug(t *testing.T) {
// 2 row gap, unsaturated proxbin, no callables -> want PO 0
k := newTestKademlia("00000000")
@@ -186,72 +292,98 @@ func TestSuggestPeerBug(t *testing.T) {
}
func TestSuggestPeerFindPeers(t *testing.T) {
+ t.Skip("The SuggestPeers implementation seems to have weaknesses exposed by the change in the new depth calculation. The results are no longer predictable")
+
+ testnum := 0
+ // test 0
// 2 row gap, unsaturated proxbin, no callables -> want PO 0
k := newTestKademlia("00000000")
On(k, "00100000")
err := testSuggestPeer(k, "", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 1
// 2 row gap, saturated proxbin, no callables -> want PO 0
On(k, "00010000")
err = testSuggestPeer(k, "", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 2
// 1 row gap (1 less), saturated proxbin, no callables -> want PO 1
On(k, "10000000")
err = testSuggestPeer(k, "", 1, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 3
// no gap (1 less), saturated proxbin, no callables -> do not want more
On(k, "01000000", "00100001")
err = testSuggestPeer(k, "", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 4
// oversaturated proxbin, > do not want more
On(k, "00100001")
err = testSuggestPeer(k, "", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 5
// reintroduce gap, disconnected peer callable
Off(k, "01000000")
+ log.Trace(k.String())
err = testSuggestPeer(k, "01000000", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 6
// second time disconnected peer not callable
// with reasonably set Interval
- err = testSuggestPeer(k, "", 1, true)
+ log.Trace("foo")
+ log.Trace(k.String())
+ err = testSuggestPeer(k, "", 1, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 7
// on and off again, peer callable again
On(k, "01000000")
Off(k, "01000000")
+ log.Trace(k.String())
err = testSuggestPeer(k, "01000000", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
- On(k, "01000000")
+ // test 8
// new closer peer appears, it is immediately wanted
+ On(k, "01000000")
Register(k, "00010001")
err = testSuggestPeer(k, "00010001", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 9
// PO1 disconnects
On(k, "00010001")
log.Info(k.String())
@@ -260,70 +392,94 @@ func TestSuggestPeerFindPeers(t *testing.T) {
// second time, gap filling
err = testSuggestPeer(k, "01000000", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 10
On(k, "01000000")
+ log.Info(k.String())
err = testSuggestPeer(k, "", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 11
k.MinBinSize = 2
+ log.Info(k.String())
err = testSuggestPeer(k, "", 0, true)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 12
Register(k, "01000001")
+ log.Info(k.String())
err = testSuggestPeer(k, "01000001", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 13
On(k, "10000001")
log.Trace(fmt.Sprintf("Kad:\n%v", k.String()))
err = testSuggestPeer(k, "", 1, true)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 14
On(k, "01000001")
err = testSuggestPeer(k, "", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 15
k.MinBinSize = 3
Register(k, "10000010")
err = testSuggestPeer(k, "10000010", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 16
On(k, "10000010")
err = testSuggestPeer(k, "", 1, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 17
On(k, "01000010")
err = testSuggestPeer(k, "", 2, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 18
On(k, "00100010")
err = testSuggestPeer(k, "", 3, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 19
On(k, "00010010")
err = testSuggestPeer(k, "", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
}
@@ -449,7 +605,7 @@ func TestKademliaHiveString(t *testing.T) {
Register(k, "10000000", "10000001")
k.MaxProxDisplay = 8
h := k.String()
- expH := "\n=========================================================================\nMon Feb 27 12:10:28 UTC 2017 KΛÐΞMLIΛ hive: queen's address: 000000\npopulation: 2 (4), MinProxBinSize: 2, MinBinSize: 1, MaxBinSize: 4\n============ DEPTH: 0 ==========================================\n000 0 | 2 8100 (0) 8000 (0)\n001 1 4000 | 1 4000 (0)\n002 1 2000 | 1 2000 (0)\n003 0 | 0\n004 0 | 0\n005 0 | 0\n006 0 | 0\n007 0 | 0\n========================================================================="
+ expH := "\n=========================================================================\nMon Feb 27 12:10:28 UTC 2017 KΛÐΞMLIΛ hive: queen's address: 000000\npopulation: 2 (4), NeighbourhoodSize: 2, MinBinSize: 1, MaxBinSize: 4\n============ DEPTH: 0 ==========================================\n000 0 | 2 8100 (0) 8000 (0)\n001 1 4000 | 1 4000 (0)\n002 1 2000 | 1 2000 (0)\n003 0 | 0\n004 0 | 0\n005 0 | 0\n006 0 | 0\n007 0 | 0\n========================================================================="
if expH[104:] != h[104:] {
t.Fatalf("incorrect hive output. expected %v, got %v", expH, h)
}
@@ -459,27 +615,28 @@ func TestKademliaHiveString(t *testing.T) {
// the SuggestPeer and Healthy methods for provided hex-encoded addresses.
// Argument pivotAddr is the address of the kademlia.
func testKademliaCase(t *testing.T, pivotAddr string, addrs ...string) {
- addr := common.FromHex(pivotAddr)
- addrs = append(addrs, pivotAddr)
+
+ t.Skip("this test relies on SuggestPeer which is now not reliable. See description in TestSuggestPeerFindPeers")
+ addr := common.Hex2Bytes(pivotAddr)
+ var byteAddrs [][]byte
+ for _, ahex := range addrs {
+ byteAddrs = append(byteAddrs, common.Hex2Bytes(ahex))
+ }
k := NewKademlia(addr, NewKadParams())
- as := make([][]byte, len(addrs))
- for i, a := range addrs {
- as[i] = common.FromHex(a)
- }
-
- for _, a := range as {
+ // our pivot kademlia is the last one in the array
+ for _, a := range byteAddrs {
if bytes.Equal(a, addr) {
continue
}
p := &BzzAddr{OAddr: a, UAddr: a}
if err := k.Register(p); err != nil {
- t.Fatal(err)
+ t.Fatalf("a %x addr %x: %v", a, addr, err)
}
}
- ppmap := NewPeerPotMap(2, as)
+ ppmap := NewPeerPotMap(k.NeighbourhoodSize, byteAddrs)
pp := ppmap[pivotAddr]
@@ -492,7 +649,7 @@ func testKademliaCase(t *testing.T, pivotAddr string, addrs ...string) {
}
h := k.Healthy(pp)
- if !(h.GotNN && h.KnowNN && h.Full) {
+ if !(h.ConnectNN && h.KnowNN && h.CountKnowNN > 0) {
t.Fatalf("not healthy: %#v\n%v", h, k.String())
}
}
@@ -505,7 +662,7 @@ in higher level tests for streaming. They were generated randomly.
=========================================================================
Mon Apr 9 12:18:24 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 7efef1
-population: 9 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
+population: 9 (49), NeighbourhoodSize: 2, MinBinSize: 2, MaxBinSize: 4
000 2 d7e5 ec56 | 18 ec56 (0) d7e5 (0) d9e0 (0) c735 (0)
001 2 18f1 3176 | 14 18f1 (0) 10bb (0) 10d1 (0) 0421 (0)
002 2 52aa 47cd | 11 52aa (0) 51d9 (0) 5161 (0) 5130 (0)
@@ -588,7 +745,7 @@ in higher level tests for streaming. They were generated randomly.
=========================================================================
Mon Apr 9 18:43:48 UTC 2018 KΛÐΞMLIΛ hive: queen's address: bc7f3b
-population: 9 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
+population: 9 (49), NeighbourhoodSize: 2, MinBinSize: 2, MaxBinSize: 4
000 2 0f49 67ff | 28 0f49 (0) 0211 (0) 07b2 (0) 0703 (0)
001 2 e84b f3a4 | 13 f3a4 (0) e84b (0) e58b (0) e60b (0)
002 1 8dba | 1 8dba (0)
@@ -622,7 +779,7 @@ in higher level tests for streaming. They were generated randomly.
=========================================================================
Mon Apr 9 19:04:35 UTC 2018 KΛÐΞMLIΛ hive: queen's address: b4822e
-population: 8 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
+population: 8 (49), NeighbourhoodSize: 2, MinBinSize: 2, MaxBinSize: 4
000 2 786c 774b | 29 774b (0) 786c (0) 7a79 (0) 7d2f (0)
001 2 d9de cf19 | 10 cf19 (0) d9de (0) d2ff (0) d2a2 (0)
002 2 8ca1 8d74 | 5 8d74 (0) 8ca1 (0) 9793 (0) 9f51 (0)
@@ -656,7 +813,7 @@ in higher level tests for streaming. They were generated randomly.
=========================================================================
Mon Apr 9 19:16:25 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 9a90fe
-population: 8 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
+population: 8 (49), NeighbourhoodSize: 2, MinBinSize: 2, MaxBinSize: 4
000 2 72ef 4e6c | 24 0b1e (0) 0d66 (0) 17f5 (0) 17e8 (0)
001 2 fc2b fa47 | 13 fa47 (0) fc2b (0) fffd (0) ecef (0)
002 2 b847 afa8 | 6 afa8 (0) ad77 (0) bb7c (0) b847 (0)
@@ -691,7 +848,7 @@ in higher level tests for streaming. They were generated randomly.
=========================================================================
Mon Apr 9 19:25:18 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 5dd5c7
-population: 13 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
+population: 13 (49), NeighbourhoodSize: 2, MinBinSize: 2, MaxBinSize: 4
000 2 e528 fad0 | 22 fad0 (0) e528 (0) e3bb (0) ed13 (0)
001 3 3f30 18e0 1dd3 | 7 3f30 (0) 23db (0) 10b6 (0) 18e0 (0)
002 4 7c54 7804 61e4 60f9 | 10 61e4 (0) 60f9 (0) 636c (0) 7186 (0)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/networkid_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/networkid_test.go
index 191d67e5..99890118 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/networkid_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/networkid_test.go
@@ -92,7 +92,7 @@ func TestNetworkID(t *testing.T) {
if kademlias[node].addrs.Size() != len(netIDGroup)-1 {
t.Fatalf("Kademlia size has not expected peer size. Kademlia size: %d, expected size: %d", kademlias[node].addrs.Size(), len(netIDGroup)-1)
}
- kademlias[node].EachAddr(nil, 0, func(addr *BzzAddr, _ int, _ bool) bool {
+ kademlias[node].EachAddr(nil, 0, func(addr *BzzAddr, _ int) bool {
found := false
for _, nd := range netIDGroup {
if bytes.Equal(kademlias[nd].BaseAddr(), addr.Address()) {
@@ -188,7 +188,7 @@ func newServices() adapters.Services {
return k
}
params := NewKadParams()
- params.MinProxBinSize = 2
+ params.NeighbourhoodSize = 2
params.MaxBinSize = 3
params.MinBinSize = 1
params.MaxRetries = 1000
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go
index 4b9b28cd..a4b29239 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go
@@ -35,8 +35,6 @@ import (
const (
DefaultNetworkID = 3
- // ProtocolMaxMsgSize maximum allowed message size
- ProtocolMaxMsgSize = 10 * 1024 * 1024
// timeout for waiting
bzzHandshakeTimeout = 3000 * time.Millisecond
)
@@ -250,11 +248,6 @@ func NewBzzPeer(p *protocols.Peer) *BzzPeer {
return &BzzPeer{Peer: p, BzzAddr: NewAddr(p.Node())}
}
-// LastActive returns the time the peer was last active
-func (p *BzzPeer) LastActive() time.Time {
- return p.lastActive
-}
-
// ID returns the peer's underlay node identifier.
func (p *BzzPeer) ID() enode.ID {
// This is here to resolve a method tie: both protocols.Peer and BzzAddr are embedded
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol_test.go
index 53ceda74..58477a7b 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol_test.go
@@ -20,7 +20,6 @@ import (
"flag"
"fmt"
"os"
- "sync"
"testing"
"github.com/ethereum/go-ethereum/log"
@@ -44,31 +43,7 @@ func init() {
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
}
-type testStore struct {
- sync.Mutex
-
- values map[string][]byte
-}
-
-func (t *testStore) Load(key string) ([]byte, error) {
- t.Lock()
- defer t.Unlock()
- v, ok := t.values[key]
- if !ok {
- return nil, fmt.Errorf("key not found: %s", key)
- }
- return v, nil
-}
-
-func (t *testStore) Save(key string, v []byte) error {
- t.Lock()
- defer t.Unlock()
- t.values[key] = v
- return nil
-}
-
func HandshakeMsgExchange(lhs, rhs *HandshakeMsg, id enode.ID) []p2ptest.Exchange {
-
return []p2ptest.Exchange{
{
Expects: []p2ptest.Expect{
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/bucket.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/bucket.go
index bd15ea2a..49a1f430 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/bucket.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/bucket.go
@@ -21,7 +21,7 @@ import "github.com/ethereum/go-ethereum/p2p/enode"
// BucketKey is the type that should be used for keys in simulation buckets.
type BucketKey string
-// NodeItem returns an item set in ServiceFunc function for a particualar node.
+// NodeItem returns an item set in ServiceFunc function for a particular node.
func (s *Simulation) NodeItem(id enode.ID, key interface{}) (value interface{}, ok bool) {
s.mu.Lock()
defer s.mu.Unlock()
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/bucket_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/bucket_test.go
index 69df19bf..2273d35a 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/bucket_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/bucket_test.go
@@ -24,7 +24,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)
-// TestServiceBucket tests all bucket functionalities using subtests.
+// TestServiceBucket tests all bucket functionality using subtests.
// It constructs a simulation of two nodes by adding items to their buckets
// in ServiceFunc constructor, then by SetNodeItem. Testing UpNodesItems
// is done by stopping one node and validating availability of its items.
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/connect.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/connect.go
deleted file mode 100644
index 8b2aa1bf..00000000
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/connect.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package simulation
-
-import (
- "strings"
-
- "github.com/ethereum/go-ethereum/p2p/enode"
-)
-
-// ConnectToPivotNode connects the node with provided NodeID
-// to the pivot node, already set by Simulation.SetPivotNode method.
-// It is useful when constructing a star network topology
-// when simulation adds and removes nodes dynamically.
-func (s *Simulation) ConnectToPivotNode(id enode.ID) (err error) {
- pid := s.PivotNodeID()
- if pid == nil {
- return ErrNoPivotNode
- }
- return s.connect(*pid, id)
-}
-
-// ConnectToLastNode connects the node with provided NodeID
-// to the last node that is up, and avoiding connection to self.
-// It is useful when constructing a chain network topology
-// when simulation adds and removes nodes dynamically.
-func (s *Simulation) ConnectToLastNode(id enode.ID) (err error) {
- ids := s.UpNodeIDs()
- l := len(ids)
- if l < 2 {
- return nil
- }
- lid := ids[l-1]
- if lid == id {
- lid = ids[l-2]
- }
- return s.connect(lid, id)
-}
-
-// ConnectToRandomNode connects the node with provieded NodeID
-// to a random node that is up.
-func (s *Simulation) ConnectToRandomNode(id enode.ID) (err error) {
- n := s.RandomUpNode(id)
- if n == nil {
- return ErrNodeNotFound
- }
- return s.connect(n.ID, id)
-}
-
-// ConnectNodesFull connects all nodes one to another.
-// It provides a complete connectivity in the network
-// which should be rarely needed.
-func (s *Simulation) ConnectNodesFull(ids []enode.ID) (err error) {
- if ids == nil {
- ids = s.UpNodeIDs()
- }
- l := len(ids)
- for i := 0; i < l; i++ {
- for j := i + 1; j < l; j++ {
- err = s.connect(ids[i], ids[j])
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// ConnectNodesChain connects all nodes in a chain topology.
-// If ids argument is nil, all nodes that are up will be connected.
-func (s *Simulation) ConnectNodesChain(ids []enode.ID) (err error) {
- if ids == nil {
- ids = s.UpNodeIDs()
- }
- l := len(ids)
- for i := 0; i < l-1; i++ {
- err = s.connect(ids[i], ids[i+1])
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// ConnectNodesRing connects all nodes in a ring topology.
-// If ids argument is nil, all nodes that are up will be connected.
-func (s *Simulation) ConnectNodesRing(ids []enode.ID) (err error) {
- if ids == nil {
- ids = s.UpNodeIDs()
- }
- l := len(ids)
- if l < 2 {
- return nil
- }
- for i := 0; i < l-1; i++ {
- err = s.connect(ids[i], ids[i+1])
- if err != nil {
- return err
- }
- }
- return s.connect(ids[l-1], ids[0])
-}
-
-// ConnectNodesStar connects all nodes in a star topology
-// with the center at provided NodeID.
-// If ids argument is nil, all nodes that are up will be connected.
-func (s *Simulation) ConnectNodesStar(id enode.ID, ids []enode.ID) (err error) {
- if ids == nil {
- ids = s.UpNodeIDs()
- }
- l := len(ids)
- for i := 0; i < l; i++ {
- if id == ids[i] {
- continue
- }
- err = s.connect(id, ids[i])
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// ConnectNodesStarPivot connects all nodes in a star topology
-// with the center at already set pivot node.
-// If ids argument is nil, all nodes that are up will be connected.
-func (s *Simulation) ConnectNodesStarPivot(ids []enode.ID) (err error) {
- id := s.PivotNodeID()
- if id == nil {
- return ErrNoPivotNode
- }
- return s.ConnectNodesStar(*id, ids)
-}
-
-// connect connects two nodes but ignores already connected error.
-func (s *Simulation) connect(oneID, otherID enode.ID) error {
- return ignoreAlreadyConnectedErr(s.Net.Connect(oneID, otherID))
-}
-
-func ignoreAlreadyConnectedErr(err error) error {
- if err == nil || strings.Contains(err.Error(), "already connected") {
- return nil
- }
- return err
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/connect_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/connect_test.go
deleted file mode 100644
index 6c94b3a0..00000000
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/connect_test.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package simulation
-
-import (
- "testing"
-
- "github.com/ethereum/go-ethereum/p2p/enode"
-)
-
-func TestConnectToPivotNode(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- pid, err := sim.AddNode()
- if err != nil {
- t.Fatal(err)
- }
-
- sim.SetPivotNode(pid)
-
- id, err := sim.AddNode()
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- err = sim.ConnectToPivotNode(id)
- if err != nil {
- t.Fatal(err)
- }
-
- if sim.Net.GetConn(id, pid) == nil {
- t.Error("node did not connect to pivot node")
- }
-}
-
-func TestConnectToLastNode(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- n := 10
-
- ids, err := sim.AddNodes(n)
- if err != nil {
- t.Fatal(err)
- }
-
- id, err := sim.AddNode()
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- err = sim.ConnectToLastNode(id)
- if err != nil {
- t.Fatal(err)
- }
-
- for _, i := range ids[:n-2] {
- if sim.Net.GetConn(id, i) != nil {
- t.Error("node connected to the node that is not the last")
- }
- }
-
- if sim.Net.GetConn(id, ids[n-1]) == nil {
- t.Error("node did not connect to the last node")
- }
-}
-
-func TestConnectToRandomNode(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- n := 10
-
- ids, err := sim.AddNodes(n)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- err = sim.ConnectToRandomNode(ids[0])
- if err != nil {
- t.Fatal(err)
- }
-
- var cc int
- for i := 0; i < n; i++ {
- for j := i + 1; j < n; j++ {
- if sim.Net.GetConn(ids[i], ids[j]) != nil {
- cc++
- }
- }
- }
-
- if cc != 1 {
- t.Errorf("expected one connection, got %v", cc)
- }
-}
-
-func TestConnectNodesFull(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- ids, err := sim.AddNodes(12)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- err = sim.ConnectNodesFull(ids)
- if err != nil {
- t.Fatal(err)
- }
-
- testFull(t, sim, ids)
-}
-
-func testFull(t *testing.T, sim *Simulation, ids []enode.ID) {
- n := len(ids)
- var cc int
- for i := 0; i < n; i++ {
- for j := i + 1; j < n; j++ {
- if sim.Net.GetConn(ids[i], ids[j]) != nil {
- cc++
- }
- }
- }
-
- want := n * (n - 1) / 2
-
- if cc != want {
- t.Errorf("expected %v connection, got %v", want, cc)
- }
-}
-
-func TestConnectNodesChain(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- ids, err := sim.AddNodes(10)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- err = sim.ConnectNodesChain(ids)
- if err != nil {
- t.Fatal(err)
- }
-
- testChain(t, sim, ids)
-}
-
-func testChain(t *testing.T, sim *Simulation, ids []enode.ID) {
- n := len(ids)
- for i := 0; i < n; i++ {
- for j := i + 1; j < n; j++ {
- c := sim.Net.GetConn(ids[i], ids[j])
- if i == j-1 {
- if c == nil {
- t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
- }
- } else {
- if c != nil {
- t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
- }
- }
- }
- }
-}
-
-func TestConnectNodesRing(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- ids, err := sim.AddNodes(10)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- err = sim.ConnectNodesRing(ids)
- if err != nil {
- t.Fatal(err)
- }
-
- testRing(t, sim, ids)
-}
-
-func testRing(t *testing.T, sim *Simulation, ids []enode.ID) {
- n := len(ids)
- for i := 0; i < n; i++ {
- for j := i + 1; j < n; j++ {
- c := sim.Net.GetConn(ids[i], ids[j])
- if i == j-1 || (i == 0 && j == n-1) {
- if c == nil {
- t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
- }
- } else {
- if c != nil {
- t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
- }
- }
- }
- }
-}
-
-func TestConnectToNodesStar(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- ids, err := sim.AddNodes(10)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- centerIndex := 2
-
- err = sim.ConnectNodesStar(ids[centerIndex], ids)
- if err != nil {
- t.Fatal(err)
- }
-
- testStar(t, sim, ids, centerIndex)
-}
-
-func testStar(t *testing.T, sim *Simulation, ids []enode.ID, centerIndex int) {
- n := len(ids)
- for i := 0; i < n; i++ {
- for j := i + 1; j < n; j++ {
- c := sim.Net.GetConn(ids[i], ids[j])
- if i == centerIndex || j == centerIndex {
- if c == nil {
- t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
- }
- } else {
- if c != nil {
- t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
- }
- }
- }
- }
-}
-
-func TestConnectToNodesStarPivot(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- ids, err := sim.AddNodes(10)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- pivotIndex := 4
-
- sim.SetPivotNode(ids[pivotIndex])
-
- err = sim.ConnectNodesStarPivot(ids)
- if err != nil {
- t.Fatal(err)
- }
-
- testStar(t, sim, ids, pivotIndex)
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/events_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/events_test.go
index 0c185d97..52984481 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/events_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/events_test.go
@@ -59,7 +59,7 @@ func TestPeerEvents(t *testing.T) {
}
}()
- err = sim.ConnectNodesChain(sim.NodeIDs())
+ err = sim.Net.ConnectNodesChain(sim.NodeIDs())
if err != nil {
t.Fatal(err)
}
@@ -81,6 +81,7 @@ func TestPeerEventsTimeout(t *testing.T) {
events := sim.PeerEvents(ctx, sim.NodeIDs())
done := make(chan struct{})
+ errC := make(chan error)
go func() {
for e := range events {
if e.Error == context.Canceled {
@@ -90,14 +91,16 @@ func TestPeerEventsTimeout(t *testing.T) {
close(done)
return
} else {
- t.Fatal(e.Error)
+ errC <- e.Error
}
}
}()
select {
case <-time.After(time.Second):
- t.Error("no context deadline received")
+ t.Fatal("no context deadline received")
+ case err := <-errC:
+ t.Fatal(err)
case <-done:
// all good, context deadline detected
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/example_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/example_test.go
index 7b620461..9d149297 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/example_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/example_test.go
@@ -31,12 +31,9 @@ import (
// Every node can have a Kademlia associated using the node bucket under
// BucketKeyKademlia key. This allows to use WaitTillHealthy to block until
-// all nodes have the their Kadmlias healthy.
+// all nodes have their Kademlias healthy.
func ExampleSimulation_WaitTillHealthy() {
- log.Error("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
- return
-
sim := simulation.New(map[string]simulation.ServiceFunc{
"bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
addr := network.NewAddr(ctx.Config.Node())
@@ -64,7 +61,7 @@ func ExampleSimulation_WaitTillHealthy() {
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
- ill, err := sim.WaitTillHealthy(ctx, 2)
+ ill, err := sim.WaitTillHealthy(ctx)
if err != nil {
// inspect the latest detected not healthy kademlias
for id, kad := range ill {
@@ -75,6 +72,7 @@ func ExampleSimulation_WaitTillHealthy() {
}
// continue with the test
+
}
// Watch all peer events in the simulation network, buy receiving from a channel.
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/http_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/http_test.go
index 775cf921..dffd03a0 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/http_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/http_test.go
@@ -73,7 +73,8 @@ func TestSimulationWithHTTPServer(t *testing.T) {
//this time the timeout should be long enough so that it doesn't kick in too early
ctx, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel2()
- go sendRunSignal(t)
+ errC := make(chan error, 1)
+ go triggerSimulationRun(t, errC)
result = sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
log.Debug("This run waits for the run signal from `frontend`...")
//ensure with a Sleep that simulation doesn't terminate before the signal is received
@@ -83,10 +84,13 @@ func TestSimulationWithHTTPServer(t *testing.T) {
if result.Error != nil {
t.Fatal(result.Error)
}
+ if err := <-errC; err != nil {
+ t.Fatal(err)
+ }
log.Debug("Test terminated successfully")
}
-func sendRunSignal(t *testing.T) {
+func triggerSimulationRun(t *testing.T, errC chan error) {
//We need to first wait for the sim HTTP server to start running...
time.Sleep(2 * time.Second)
//then we can send the signal
@@ -94,16 +98,13 @@ func sendRunSignal(t *testing.T) {
log.Debug("Sending run signal to simulation: POST /runsim...")
resp, err := http.Post(fmt.Sprintf("http://localhost%s/runsim", DefaultHTTPSimAddr), "application/json", nil)
if err != nil {
- t.Fatalf("Request failed: %v", err)
+ errC <- fmt.Errorf("Request failed: %v", err)
+ return
}
- defer func() {
- err := resp.Body.Close()
- if err != nil {
- log.Error("Error closing response body", "err", err)
- }
- }()
log.Debug("Signal sent")
if resp.StatusCode != http.StatusOK {
- t.Fatalf("err %s", resp.Status)
+ errC <- fmt.Errorf("err %s", resp.Status)
+ return
}
+ errC <- resp.Body.Close()
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia.go
index 7982810c..6d8d0e0a 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia.go
@@ -28,21 +28,22 @@ import (
)
// BucketKeyKademlia is the key to be used for storing the kademlia
-// instance for particuar node, usually inside the ServiceFunc function.
+// instance for particular node, usually inside the ServiceFunc function.
var BucketKeyKademlia BucketKey = "kademlia"
// WaitTillHealthy is blocking until the health of all kademlias is true.
// If error is not nil, a map of kademlia that was found not healthy is returned.
// TODO: Check correctness since change in kademlia depth calculation logic
-func (s *Simulation) WaitTillHealthy(ctx context.Context, kadMinProxSize int) (ill map[enode.ID]*network.Kademlia, err error) {
+func (s *Simulation) WaitTillHealthy(ctx context.Context) (ill map[enode.ID]*network.Kademlia, err error) {
// Prepare PeerPot map for checking Kademlia health
var ppmap map[string]*network.PeerPot
kademlias := s.kademlias()
addrs := make([][]byte, 0, len(kademlias))
+ // TODO verify that all kademlias have the same params
for _, k := range kademlias {
addrs = append(addrs, k.BaseAddr())
}
- ppmap = network.NewPeerPotMap(kadMinProxSize, addrs)
+ ppmap = network.NewPeerPotMap(s.neighbourhoodSize, addrs)
// Wait for healthy Kademlia on every node before checking files
ticker := time.NewTicker(200 * time.Millisecond)
@@ -66,10 +67,10 @@ func (s *Simulation) WaitTillHealthy(ctx context.Context, kadMinProxSize int) (i
h := k.Healthy(pp)
//print info
log.Debug(k.String())
- log.Debug("kademlia", "empty bins", pp.EmptyBins, "gotNN", h.GotNN, "knowNN", h.KnowNN, "full", h.Full)
- log.Debug("kademlia", "health", h.GotNN && h.KnowNN && h.Full, "addr", hex.EncodeToString(k.BaseAddr()), "node", id)
- log.Debug("kademlia", "ill condition", !h.GotNN || !h.Full, "addr", hex.EncodeToString(k.BaseAddr()), "node", id)
- if !h.GotNN || !h.Full {
+ log.Debug("kademlia", "connectNN", h.ConnectNN, "knowNN", h.KnowNN)
+ log.Debug("kademlia", "health", h.ConnectNN && h.KnowNN, "addr", hex.EncodeToString(k.BaseAddr()), "node", id)
+ log.Debug("kademlia", "ill condition", !h.ConnectNN, "addr", hex.EncodeToString(k.BaseAddr()), "node", id)
+ if !h.ConnectNN {
ill[id] = k
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia_test.go
index f02b0e54..36b244d3 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia_test.go
@@ -28,6 +28,7 @@ import (
)
func TestWaitTillHealthy(t *testing.T) {
+ t.Skip("WaitTillHealthy depends on discovery, which relies on a reliable SuggestPeer, which is not reliable")
sim := New(map[string]ServiceFunc{
"bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
@@ -54,7 +55,7 @@ func TestWaitTillHealthy(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
- ill, err := sim.WaitTillHealthy(ctx, 2)
+ ill, err := sim.WaitTillHealthy(ctx)
if err != nil {
for id, kad := range ill {
t.Log("Node", id)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/node.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/node.go
index a916d3fc..08eb8352 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/node.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/node.go
@@ -127,7 +127,7 @@ func (s *Simulation) AddNodesAndConnectFull(count int, opts ...AddNodeOption) (i
if err != nil {
return nil, err
}
- err = s.ConnectNodesFull(ids)
+ err = s.Net.ConnectNodesFull(ids)
if err != nil {
return nil, err
}
@@ -145,7 +145,7 @@ func (s *Simulation) AddNodesAndConnectChain(count int, opts ...AddNodeOption) (
if err != nil {
return nil, err
}
- err = s.ConnectToLastNode(id)
+ err = s.Net.ConnectToLastNode(id)
if err != nil {
return nil, err
}
@@ -154,7 +154,7 @@ func (s *Simulation) AddNodesAndConnectChain(count int, opts ...AddNodeOption) (
return nil, err
}
ids = append([]enode.ID{id}, ids...)
- err = s.ConnectNodesChain(ids)
+ err = s.Net.ConnectNodesChain(ids)
if err != nil {
return nil, err
}
@@ -171,7 +171,7 @@ func (s *Simulation) AddNodesAndConnectRing(count int, opts ...AddNodeOption) (i
if err != nil {
return nil, err
}
- err = s.ConnectNodesRing(ids)
+ err = s.Net.ConnectNodesRing(ids)
if err != nil {
return nil, err
}
@@ -188,16 +188,16 @@ func (s *Simulation) AddNodesAndConnectStar(count int, opts ...AddNodeOption) (i
if err != nil {
return nil, err
}
- err = s.ConnectNodesStar(ids[0], ids[1:])
+ err = s.Net.ConnectNodesStar(ids[1:], ids[0])
if err != nil {
return nil, err
}
return ids, nil
}
-//UploadSnapshot uploads a snapshot to the simulation
-//This method tries to open the json file provided, applies the config to all nodes
-//and then loads the snapshot into the Simulation network
+// UploadSnapshot uploads a snapshot to the simulation
+// This method tries to open the json file provided, applies the config to all nodes
+// and then loads the snapshot into the Simulation network
func (s *Simulation) UploadSnapshot(snapshotFile string, opts ...AddNodeOption) error {
f, err := os.Open(snapshotFile)
if err != nil {
@@ -241,25 +241,6 @@ func (s *Simulation) UploadSnapshot(snapshotFile string, opts ...AddNodeOption)
return nil
}
-// SetPivotNode sets the NodeID of the network's pivot node.
-// Pivot node is just a specific node that should be treated
-// differently then other nodes in test. SetPivotNode and
-// PivotNodeID are just a convenient functions to set and
-// retrieve it.
-func (s *Simulation) SetPivotNode(id enode.ID) {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.pivotNodeID = &id
-}
-
-// PivotNodeID returns NodeID of the pivot node set by
-// Simulation.SetPivotNode method.
-func (s *Simulation) PivotNodeID() (id *enode.ID) {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.pivotNodeID
-}
-
// StartNode starts a node by NodeID.
func (s *Simulation) StartNode(id enode.ID) (err error) {
return s.Net.Start(id)
@@ -267,27 +248,26 @@ func (s *Simulation) StartNode(id enode.ID) (err error) {
// StartRandomNode starts a random node.
func (s *Simulation) StartRandomNode() (id enode.ID, err error) {
- n := s.randomDownNode()
+ n := s.Net.GetRandomDownNode()
if n == nil {
return id, ErrNodeNotFound
}
- return n.ID, s.Net.Start(n.ID)
+ return n.ID(), s.Net.Start(n.ID())
}
// StartRandomNodes starts random nodes.
func (s *Simulation) StartRandomNodes(count int) (ids []enode.ID, err error) {
ids = make([]enode.ID, 0, count)
- downIDs := s.DownNodeIDs()
for i := 0; i < count; i++ {
- n := s.randomNode(downIDs, ids...)
+ n := s.Net.GetRandomDownNode()
if n == nil {
return nil, ErrNodeNotFound
}
- err = s.Net.Start(n.ID)
+ err = s.Net.Start(n.ID())
if err != nil {
return nil, err
}
- ids = append(ids, n.ID)
+ ids = append(ids, n.ID())
}
return ids, nil
}
@@ -299,27 +279,26 @@ func (s *Simulation) StopNode(id enode.ID) (err error) {
// StopRandomNode stops a random node.
func (s *Simulation) StopRandomNode() (id enode.ID, err error) {
- n := s.RandomUpNode()
+ n := s.Net.GetRandomUpNode()
if n == nil {
return id, ErrNodeNotFound
}
- return n.ID, s.Net.Stop(n.ID)
+ return n.ID(), s.Net.Stop(n.ID())
}
// StopRandomNodes stops random nodes.
func (s *Simulation) StopRandomNodes(count int) (ids []enode.ID, err error) {
ids = make([]enode.ID, 0, count)
- upIDs := s.UpNodeIDs()
for i := 0; i < count; i++ {
- n := s.randomNode(upIDs, ids...)
+ n := s.Net.GetRandomUpNode()
if n == nil {
return nil, ErrNodeNotFound
}
- err = s.Net.Stop(n.ID)
+ err = s.Net.Stop(n.ID())
if err != nil {
return nil, err
}
- ids = append(ids, n.ID)
+ ids = append(ids, n.ID())
}
return ids, nil
}
@@ -328,35 +307,3 @@ func (s *Simulation) StopRandomNodes(count int) (ids []enode.ID, err error) {
func init() {
rand.Seed(time.Now().UnixNano())
}
-
-// RandomUpNode returns a random SimNode that is up.
-// Arguments are NodeIDs for nodes that should not be returned.
-func (s *Simulation) RandomUpNode(exclude ...enode.ID) *adapters.SimNode {
- return s.randomNode(s.UpNodeIDs(), exclude...)
-}
-
-// randomDownNode returns a random SimNode that is not up.
-func (s *Simulation) randomDownNode(exclude ...enode.ID) *adapters.SimNode {
- return s.randomNode(s.DownNodeIDs(), exclude...)
-}
-
-// randomNode returns a random SimNode from the slice of NodeIDs.
-func (s *Simulation) randomNode(ids []enode.ID, exclude ...enode.ID) *adapters.SimNode {
- for _, e := range exclude {
- var i int
- for _, id := range ids {
- if id == e {
- ids = append(ids[:i], ids[i+1:]...)
- } else {
- i++
- }
- }
- }
- l := len(ids)
- if l == 0 {
- return nil
- }
- n := s.Net.GetNode(ids[rand.Intn(l)])
- node, _ := n.Node.(*adapters.SimNode)
- return node
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/node_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/node_test.go
index 01346ef1..dc9189c9 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/node_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/node_test.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/swarm/network"
)
@@ -228,7 +229,7 @@ func TestAddNodesAndConnectFull(t *testing.T) {
t.Fatal(err)
}
- testFull(t, sim, ids)
+ simulations.VerifyFull(t, sim.Net, ids)
}
func TestAddNodesAndConnectChain(t *testing.T) {
@@ -247,7 +248,7 @@ func TestAddNodesAndConnectChain(t *testing.T) {
t.Fatal(err)
}
- testChain(t, sim, sim.UpNodeIDs())
+ simulations.VerifyChain(t, sim.Net, sim.UpNodeIDs())
}
func TestAddNodesAndConnectRing(t *testing.T) {
@@ -259,7 +260,7 @@ func TestAddNodesAndConnectRing(t *testing.T) {
t.Fatal(err)
}
- testRing(t, sim, ids)
+ simulations.VerifyRing(t, sim.Net, ids)
}
func TestAddNodesAndConnectStar(t *testing.T) {
@@ -271,7 +272,7 @@ func TestAddNodesAndConnectStar(t *testing.T) {
t.Fatal(err)
}
- testStar(t, sim, ids, 0)
+ simulations.VerifyStar(t, sim.Net, ids, 0)
}
//To test that uploading a snapshot works
@@ -313,45 +314,6 @@ func TestUploadSnapshot(t *testing.T) {
log.Debug("Done.")
}
-func TestPivotNode(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- id, err := sim.AddNode()
- if err != nil {
- t.Fatal(err)
- }
-
- id2, err := sim.AddNode()
- if err != nil {
- t.Fatal(err)
- }
-
- if sim.PivotNodeID() != nil {
- t.Error("expected no pivot node")
- }
-
- sim.SetPivotNode(id)
-
- pid := sim.PivotNodeID()
-
- if pid == nil {
- t.Error("pivot node not set")
- } else if *pid != id {
- t.Errorf("expected pivot node %s, got %s", id, *pid)
- }
-
- sim.SetPivotNode(id2)
-
- pid = sim.PivotNodeID()
-
- if pid == nil {
- t.Error("pivot node not set")
- } else if *pid != id2 {
- t.Errorf("expected pivot node %s, got %s", id2, *pid)
- }
-}
-
func TestStartStopNode(t *testing.T) {
sim := New(noopServiceFuncMap)
defer sim.Close()
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/service.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/service.go
index 819602e9..7dd4dc6d 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/service.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/service.go
@@ -39,7 +39,7 @@ func (s *Simulation) Service(name string, id enode.ID) node.Service {
// RandomService returns a single Service by name on a
// randomly chosen node that is up.
func (s *Simulation) RandomService(name string) node.Service {
- n := s.RandomUpNode()
+ n := s.Net.GetRandomUpNode().Node.(*adapters.SimNode)
if n == nil {
return nil
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go
index e5435b9f..e18d19a6 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go
@@ -28,12 +28,12 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+ "github.com/ethereum/go-ethereum/swarm/network"
)
// Common errors that are returned by functions in this package.
var (
ErrNodeNotFound = errors.New("node not found")
- ErrNoPivotNode = errors.New("no pivot node set")
)
// Simulation provides methods on network, nodes and services
@@ -43,13 +43,13 @@ type Simulation struct {
// of p2p/simulations.Network.
Net *simulations.Network
- serviceNames []string
- cleanupFuncs []func()
- buckets map[enode.ID]*sync.Map
- pivotNodeID *enode.ID
- shutdownWG sync.WaitGroup
- done chan struct{}
- mu sync.RWMutex
+ serviceNames []string
+ cleanupFuncs []func()
+ buckets map[enode.ID]*sync.Map
+ shutdownWG sync.WaitGroup
+ done chan struct{}
+ mu sync.RWMutex
+ neighbourhoodSize int
httpSrv *http.Server //attach a HTTP server via SimulationOptions
handler *simulations.Server //HTTP handler for the server
@@ -66,16 +66,16 @@ type Simulation struct {
// after network shutdown.
type ServiceFunc func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error)
-// New creates a new Simulation instance with new
-// simulations.Network initialized with provided services.
+// New creates a new simulation instance
// Services map must have unique keys as service names and
// every ServiceFunc must return a node.Service of the unique type.
// This restriction is required by node.Node.Start() function
// which is used to start node.Service returned by ServiceFunc.
func New(services map[string]ServiceFunc) (s *Simulation) {
s = &Simulation{
- buckets: make(map[enode.ID]*sync.Map),
- done: make(chan struct{}),
+ buckets: make(map[enode.ID]*sync.Map),
+ done: make(chan struct{}),
+ neighbourhoodSize: network.NewKadParams().NeighbourhoodSize,
}
adapterServices := make(map[string]adapters.ServiceFunc, len(services))
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation_test.go
index ca8599d7..f837f938 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation_test.go
@@ -26,10 +26,9 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
- "github.com/ethereum/go-ethereum/rpc"
- colorable "github.com/mattn/go-colorable"
+ "github.com/mattn/go-colorable"
)
var (
@@ -178,43 +177,27 @@ var noopServiceFuncMap = map[string]ServiceFunc{
}
// a helper function for most basic noop service
-func noopServiceFunc(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
+func noopServiceFunc(_ *adapters.ServiceContext, _ *sync.Map) (node.Service, func(), error) {
return newNoopService(), nil, nil
}
-// noopService is the service that does not do anything
-// but implements node.Service interface.
-type noopService struct{}
-
func newNoopService() node.Service {
return &noopService{}
}
-func (t *noopService) Protocols() []p2p.Protocol {
- return []p2p.Protocol{}
-}
-
-func (t *noopService) APIs() []rpc.API {
- return []rpc.API{}
-}
-
-func (t *noopService) Start(server *p2p.Server) error {
- return nil
-}
-
-func (t *noopService) Stop() error {
- return nil
-}
-
-// a helper function for most basic noop service
-// of a different type then noopService to test
+// a helper function for most basic Noop service
+// of a different type then NoopService to test
// multiple services on one node.
-func noopService2Func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
+func noopService2Func(_ *adapters.ServiceContext, _ *sync.Map) (node.Service, func(), error) {
return new(noopService2), nil, nil
}
-// noopService2 is the service that does not do anything
+// NoopService2 is the service that does not do anything
// but implements node.Service interface.
type noopService2 struct {
- noopService
+ simulations.NoopService
+}
+
+type noopService struct {
+ simulations.NoopService
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/discovery/discovery_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/discovery/discovery_test.go
index cd5456b7..e5121c47 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/discovery/discovery_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/discovery/discovery_test.go
@@ -31,6 +31,7 @@ import (
"testing"
"time"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
@@ -45,7 +46,7 @@ import (
// serviceName is used with the exec adapter so the exec'd binary knows which
// service to execute
const serviceName = "discovery"
-const testMinProxBinSize = 2
+const testNeighbourhoodSize = 2
const discoveryPersistenceDatadir = "discovery_persistence_test_store"
var discoveryPersistencePath = path.Join(os.TempDir(), discoveryPersistenceDatadir)
@@ -156,6 +157,7 @@ func testDiscoverySimulationSimAdapter(t *testing.T, nodes, conns int) {
}
func testDiscoverySimulation(t *testing.T, nodes, conns int, adapter adapters.NodeAdapter) {
+ t.Skip("discovery tests depend on suggestpeer, which is unreliable after kademlia depth change.")
startedAt := time.Now()
result, err := discoverySimulation(nodes, conns, adapter)
if err != nil {
@@ -183,6 +185,7 @@ func testDiscoverySimulation(t *testing.T, nodes, conns int, adapter adapters.No
}
func testDiscoveryPersistenceSimulation(t *testing.T, nodes, conns int, adapter adapters.NodeAdapter) map[int][]byte {
+ t.Skip("discovery tests depend on suggestpeer, which is unreliable after kademlia depth change.")
persistenceEnabled = true
discoveryEnabled = true
@@ -265,7 +268,7 @@ func discoverySimulation(nodes, conns int, adapter adapters.NodeAdapter) (*simul
wg.Wait()
log.Debug(fmt.Sprintf("nodes: %v", len(addrs)))
// construct the peer pot, so that kademlia health can be checked
- ppmap := network.NewPeerPotMap(testMinProxBinSize, addrs)
+ ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, addrs)
check := func(ctx context.Context, id enode.ID) (bool, error) {
select {
case <-ctx.Done():
@@ -281,12 +284,13 @@ func discoverySimulation(nodes, conns int, adapter adapters.NodeAdapter) (*simul
if err != nil {
return false, fmt.Errorf("error getting node client: %s", err)
}
+
healthy := &network.Health{}
- if err := client.Call(&healthy, "hive_healthy", ppmap[id.String()]); err != nil {
+ if err := client.Call(&healthy, "hive_healthy", ppmap); err != nil {
return false, fmt.Errorf("error getting node health: %s", err)
}
- log.Debug(fmt.Sprintf("node %4s healthy: got nearest neighbours: %v, know nearest neighbours: %v, saturated: %v\n%v", id, healthy.GotNN, healthy.KnowNN, healthy.Full, healthy.Hive))
- return healthy.KnowNN && healthy.GotNN && healthy.Full, nil
+ log.Info(fmt.Sprintf("node %4s healthy: connected nearest neighbours: %v, know nearest neighbours: %v,\n\n%v", id, healthy.ConnectNN, healthy.KnowNN, healthy.Hive))
+ return healthy.KnowNN && healthy.ConnectNN, nil
}
// 64 nodes ~ 1min
@@ -371,6 +375,7 @@ func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapt
if err := triggerChecks(trigger, net, node.ID()); err != nil {
return nil, fmt.Errorf("error triggering checks for node %s: %s", node.ID().TerminalString(), err)
}
+ // TODO we shouldn't be equating underaddr and overaddr like this, as they are not the same in production
ids[i] = node.ID()
a := ids[i].Bytes()
@@ -379,7 +384,6 @@ func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapt
// run a simulation which connects the 10 nodes in a ring and waits
// for full peer discovery
- ppmap := network.NewPeerPotMap(testMinProxBinSize, addrs)
var restartTime time.Time
@@ -400,12 +404,21 @@ func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapt
}
healthy := &network.Health{}
addr := id.String()
- if err := client.Call(&healthy, "hive_healthy", ppmap[addr]); err != nil {
+ ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, addrs)
+ if err := client.Call(&healthy, "hive_healthy", ppmap); err != nil {
return fmt.Errorf("error getting node health: %s", err)
}
- log.Info(fmt.Sprintf("NODE: %s, IS HEALTHY: %t", addr, healthy.GotNN && healthy.KnowNN && healthy.Full))
- if !healthy.GotNN || !healthy.Full {
+ log.Info(fmt.Sprintf("NODE: %s, IS HEALTHY: %t", addr, healthy.ConnectNN && healthy.KnowNN && healthy.CountKnowNN > 0))
+ var nodeStr string
+ if err := client.Call(&nodeStr, "hive_string"); err != nil {
+ return fmt.Errorf("error getting node string %s", err)
+ }
+ log.Info(nodeStr)
+ for _, a := range addrs {
+ log.Info(common.Bytes2Hex(a))
+ }
+ if !healthy.ConnectNN || healthy.CountKnowNN == 0 {
isHealthy = false
break
}
@@ -479,12 +492,14 @@ func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapt
return false, fmt.Errorf("error getting node client: %s", err)
}
healthy := &network.Health{}
- if err := client.Call(&healthy, "hive_healthy", ppmap[id.String()]); err != nil {
+ ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, addrs)
+
+ if err := client.Call(&healthy, "hive_healthy", ppmap); err != nil {
return false, fmt.Errorf("error getting node health: %s", err)
}
- log.Info(fmt.Sprintf("node %4s healthy: got nearest neighbours: %v, know nearest neighbours: %v, saturated: %v", id, healthy.GotNN, healthy.KnowNN, healthy.Full))
+ log.Info(fmt.Sprintf("node %4s healthy: got nearest neighbours: %v, know nearest neighbours: %v", id, healthy.ConnectNN, healthy.KnowNN))
- return healthy.KnowNN && healthy.GotNN && healthy.Full, nil
+ return healthy.KnowNN && healthy.ConnectNN, nil
}
// 64 nodes ~ 1min
@@ -551,7 +566,7 @@ func newService(ctx *adapters.ServiceContext) (node.Service, error) {
addr := network.NewAddr(ctx.Config.Node())
kp := network.NewKadParams()
- kp.MinProxBinSize = testMinProxBinSize
+ kp.NeighbourhoodSize = testNeighbourhoodSize
if ctx.Config.Reachable != nil {
kp.Reachable = func(o *network.BzzAddr) bool {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/overlay.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/overlay.go
index 284ae639..63938809 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/overlay.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/overlay.go
@@ -86,7 +86,7 @@ func (s *Simulation) NewService(ctx *adapters.ServiceContext) (node.Service, err
addr := network.NewAddr(node)
kp := network.NewKadParams()
- kp.MinProxBinSize = 2
+ kp.NeighbourhoodSize = 2
kp.MaxBinSize = 4
kp.MinBinSize = 1
kp.MaxRetries = 1000
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/common_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/common_test.go
index e0a7f7e1..29b917d3 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/common_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/common_test.go
@@ -35,7 +35,6 @@ import (
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/network/simulation"
- "github.com/ethereum/go-ethereum/swarm/pot"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/testutil"
@@ -57,7 +56,7 @@ var (
bucketKeyRegistry = simulation.BucketKey("registry")
chunkSize = 4096
- pof = pot.DefaultPof(256)
+ pof = network.Pof
)
func init() {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go
index c73298d9..e1a13fe8 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go
@@ -19,7 +19,6 @@ package stream
import (
"context"
"errors"
-
"fmt"
"github.com/ethereum/go-ethereum/metrics"
@@ -245,7 +244,7 @@ func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) (
return nil, nil, fmt.Errorf("source peer %v not found", spID.String())
}
} else {
- d.kad.EachConn(req.Addr[:], 255, func(p *network.Peer, po int, nn bool) bool {
+ d.kad.EachConn(req.Addr[:], 255, func(p *network.Peer, po int) bool {
id := p.ID()
if p.LightNode {
// skip light nodes
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery_test.go
index f69f8049..70d3829b 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery_test.go
@@ -19,9 +19,11 @@ package stream
import (
"bytes"
"context"
+ "errors"
"fmt"
"os"
"sync"
+ "sync/atomic"
"testing"
"time"
@@ -442,19 +444,17 @@ func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
}
func TestDeliveryFromNodes(t *testing.T) {
- testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
- testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
- testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
- testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
- testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
- testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
- testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
- testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
+ testDeliveryFromNodes(t, 2, dataChunkCount, true)
+ testDeliveryFromNodes(t, 2, dataChunkCount, false)
+ testDeliveryFromNodes(t, 4, dataChunkCount, true)
+ testDeliveryFromNodes(t, 4, dataChunkCount, false)
+ testDeliveryFromNodes(t, 8, dataChunkCount, true)
+ testDeliveryFromNodes(t, 8, dataChunkCount, false)
+ testDeliveryFromNodes(t, 16, dataChunkCount, true)
+ testDeliveryFromNodes(t, 16, dataChunkCount, false)
}
-func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
-
- t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
+func testDeliveryFromNodes(t *testing.T, nodes, chunkCount int, skipCheck bool) {
sim := simulation.New(map[string]simulation.ServiceFunc{
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
node := ctx.Config.Node()
@@ -502,10 +502,11 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
log.Info("Starting simulation")
ctx := context.Background()
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
nodeIDs := sim.UpNodeIDs()
//determine the pivot node to be the first node of the simulation
- sim.SetPivotNode(nodeIDs[0])
+ pivot := nodeIDs[0]
+
//distribute chunks of a random file into Stores of nodes 1 to nodes
//we will do this by creating a file store with an underlying round-robin store:
//the file store will create a hash for the uploaded file, but every chunk will be
@@ -519,7 +520,7 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
//...iterate the buckets...
for id, bucketVal := range lStores {
//...and remove the one which is the pivot node
- if id == *sim.PivotNodeID() {
+ if id == pivot {
continue
}
//the other ones are added to the array...
@@ -542,25 +543,25 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
}
log.Debug("Waiting for kademlia")
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ // TODO this does not seem to be correct usage of the function, as the simulation may have no kademlias
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
return err
}
//get the pivot node's filestore
- item, ok := sim.NodeItem(*sim.PivotNodeID(), bucketKeyFileStore)
+ item, ok := sim.NodeItem(pivot, bucketKeyFileStore)
if !ok {
return fmt.Errorf("No filestore")
}
pivotFileStore := item.(*storage.FileStore)
log.Debug("Starting retrieval routine")
+ retErrC := make(chan error)
go func() {
// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
// we must wait for the peer connections to have started before requesting
n, err := readAll(pivotFileStore, fileHash)
log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
- if err != nil {
- t.Fatalf("requesting chunks action error: %v", err)
- }
+ retErrC <- err
}()
log.Debug("Watching for disconnections")
@@ -570,11 +571,19 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
simulation.NewPeerEventsFilter().Drop(),
)
+ var disconnected atomic.Value
go func() {
for d := range disconnections {
if d.Error != nil {
log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
- t.Fatal(d.Error)
+ disconnected.Store(true)
+ }
+ }
+ }()
+ defer func() {
+ if err != nil {
+ if yes, ok := disconnected.Load().(bool); ok && yes {
+ err = errors.New("disconnect events received")
}
}
}()
@@ -595,6 +604,9 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
if !success {
return fmt.Errorf("Test failed, chunks not available on all nodes")
}
+ if err := <-retErrC; err != nil {
+ t.Fatalf("requesting chunks: %v", err)
+ }
log.Debug("Test terminated successfully")
return nil
})
@@ -609,7 +621,7 @@ func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
b.Run(
fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
func(b *testing.B) {
- benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
+ benchmarkDeliveryFromNodes(b, i, chunks, true)
},
)
}
@@ -622,14 +634,14 @@ func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
b.Run(
fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
func(b *testing.B) {
- benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
+ benchmarkDeliveryFromNodes(b, i, chunks, false)
},
)
}
}
}
-func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
+func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck bool) {
sim := simulation.New(map[string]simulation.ServiceFunc{
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
node := ctx.Config.Node()
@@ -675,7 +687,7 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skip
}
ctx := context.Background()
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
nodeIDs := sim.UpNodeIDs()
node := nodeIDs[len(nodeIDs)-1]
@@ -692,7 +704,7 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skip
}
netStore := item.(*storage.NetStore)
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
return err
}
@@ -702,11 +714,19 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skip
simulation.NewPeerEventsFilter().Drop(),
)
+ var disconnected atomic.Value
go func() {
for d := range disconnections {
if d.Error != nil {
log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
- b.Fatal(d.Error)
+ disconnected.Store(true)
+ }
+ }
+ }()
+ defer func() {
+ if err != nil {
+ if yes, ok := disconnected.Load().(bool); ok && yes {
+ err = errors.New("disconnect events received")
}
}
}()
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/intervals/store_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/intervals/store_test.go
index 0ab14c06..a36814b7 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/intervals/store_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/intervals/store_test.go
@@ -17,14 +17,11 @@
package intervals
import (
- "errors"
"testing"
"github.com/ethereum/go-ethereum/swarm/state"
)
-var ErrNotFound = errors.New("not found")
-
// TestInmemoryStore tests basic functionality of InmemoryStore.
func TestInmemoryStore(t *testing.T) {
testStore(t, state.NewInmemoryStore())
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/intervals_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/intervals_test.go
index 668cf586..8f2bed9d 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/intervals_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/intervals_test.go
@@ -19,9 +19,11 @@ package stream
import (
"context"
"encoding/binary"
+ "errors"
"fmt"
"os"
"sync"
+ "sync/atomic"
"testing"
"time"
@@ -53,7 +55,6 @@ func TestIntervalsLiveAndHistory(t *testing.T) {
func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
- t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
nodes := 2
chunkCount := dataChunkCount
externalStreamName := "externalStream"
@@ -114,11 +115,11 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
t.Fatal(err)
}
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
nodeIDs := sim.UpNodeIDs()
storer := nodeIDs[0]
checker := nodeIDs[1]
@@ -163,11 +164,19 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
return err
}
+ var disconnected atomic.Value
go func() {
for d := range disconnections {
if d.Error != nil {
log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
- t.Fatal(d.Error)
+ disconnected.Store(true)
+ }
+ }
+ }()
+ defer func() {
+ if err != nil {
+ if yes, ok := disconnected.Load().(bool); ok && yes {
+ err = errors.New("disconnect events received")
}
}
}()
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/messages.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/messages.go
index eb1b2983..b293724c 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/messages.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/messages.go
@@ -336,7 +336,7 @@ func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg)
// launch in go routine since GetBatch blocks until new hashes arrive
go func() {
if err := p.SendOfferedHashes(s, req.From, req.To); err != nil {
- log.Warn("SendOfferedHashes error", "err", err)
+ log.Warn("SendOfferedHashes error", "peer", p.ID().TerminalString(), "err", err)
}
}()
// go p.SendOfferedHashes(s, req.From, req.To)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_retrieval_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_retrieval_test.go
index 932e28b3..d345ac8d 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_retrieval_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_retrieval_test.go
@@ -197,7 +197,7 @@ func runFileRetrievalTest(nodeCount int) error {
if err != nil {
return err
}
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
return err
}
@@ -246,7 +246,6 @@ simulation's `action` function.
The snapshot should have 'streamer' in its service list.
*/
func runRetrievalTest(chunkCount int, nodeCount int) error {
-
sim := simulation.New(retrievalSimServiceMap)
defer sim.Close()
@@ -278,17 +277,17 @@ func runRetrievalTest(chunkCount int, nodeCount int) error {
}
//this is the node selected for upload
- node := sim.RandomUpNode()
- item, ok := sim.NodeItem(node.ID, bucketKeyStore)
+ node := sim.Net.GetRandomUpNode()
+ item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
if !ok {
return fmt.Errorf("No localstore")
}
lstore := item.(*storage.LocalStore)
- conf.hashes, err = uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
+ conf.hashes, err = uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
if err != nil {
return err
}
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
return err
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_sync_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_sync_test.go
index 4a632c8c..6af19c12 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_sync_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/snapshot_sync_test.go
@@ -21,6 +21,7 @@ import (
"os"
"runtime"
"sync"
+ "sync/atomic"
"testing"
"time"
@@ -105,43 +106,6 @@ func TestSyncingViaGlobalSync(t *testing.T) {
}
}
-func TestSyncingViaDirectSubscribe(t *testing.T) {
- if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
- t.Skip("Flaky on mac on travis")
- }
- //if nodes/chunks have been provided via commandline,
- //run the tests with these values
- if *nodes != 0 && *chunks != 0 {
- log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
- err := testSyncingViaDirectSubscribe(t, *chunks, *nodes)
- if err != nil {
- t.Fatal(err)
- }
- } else {
- var nodeCnt []int
- var chnkCnt []int
- //if the `longrunning` flag has been provided
- //run more test combinations
- if *longrunning {
- chnkCnt = []int{1, 8, 32, 256, 1024}
- nodeCnt = []int{32, 16}
- } else {
- //default test
- chnkCnt = []int{4, 32}
- nodeCnt = []int{32, 16}
- }
- for _, chnk := range chnkCnt {
- for _, n := range nodeCnt {
- log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chnk, n))
- err := testSyncingViaDirectSubscribe(t, chnk, n)
- if err != nil {
- t.Fatal(err)
- }
- }
- }
- }
-}
-
var simServiceMap = map[string]simulation.ServiceFunc{
"streamer": streamerFunc,
}
@@ -182,8 +146,6 @@ func streamerFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Servic
}
func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
-
- t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
sim := simulation.New(simServiceMap)
defer sim.Close()
@@ -205,7 +167,7 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancelSimRun()
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
t.Fatal(err)
}
@@ -215,11 +177,13 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
simulation.NewPeerEventsFilter().Drop(),
)
+ var disconnected atomic.Value
go func() {
for d := range disconnections {
- log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
- t.Fatal("unexpected disconnect")
- cancelSimRun()
+ if d.Error != nil {
+ log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
+ disconnected.Store(true)
+ }
}
}()
@@ -228,6 +192,9 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
if result.Error != nil {
t.Fatal(result.Error)
}
+ if yes, ok := disconnected.Load().(bool); ok && yes {
+ t.Fatal("disconnect events received")
+ }
log.Info("Simulation ended")
}
@@ -248,20 +215,20 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
//get the node at that index
//this is the node selected for upload
- node := sim.RandomUpNode()
- item, ok := sim.NodeItem(node.ID, bucketKeyStore)
+ node := sim.Net.GetRandomUpNode()
+ item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
if !ok {
return fmt.Errorf("No localstore")
}
lstore := item.(*storage.LocalStore)
- hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
+ hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
if err != nil {
return err
}
for _, h := range hashes {
evt := &simulations.Event{
Type: EventTypeChunkCreated,
- Node: sim.Net.GetNode(node.ID),
+ Node: sim.Net.GetNode(node.ID()),
Data: h.String(),
}
sim.Net.Events().Send(evt)
@@ -319,231 +286,6 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
})
}
-/*
-The test generates the given number of chunks
-
-For every chunk generated, the nearest node addresses
-are identified, we verify that the nodes closer to the
-chunk addresses actually do have the chunks in their local stores.
-
-The test loads a snapshot file to construct the swarm network,
-assuming that the snapshot file identifies a healthy
-kademlia network. The snapshot should have 'streamer' in its service list.
-*/
-func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int) error {
-
- t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
- sim := simulation.New(map[string]simulation.ServiceFunc{
- "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
- n := ctx.Config.Node()
- addr := network.NewAddr(n)
- store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
- if err != nil {
- return nil, nil, err
- }
- bucket.Store(bucketKeyStore, store)
- localStore := store.(*storage.LocalStore)
- netStore, err := storage.NewNetStore(localStore, nil)
- if err != nil {
- return nil, nil, err
- }
- kad := network.NewKademlia(addr.Over(), network.NewKadParams())
- delivery := NewDelivery(kad, netStore)
- netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New
-
- r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- Retrieval: RetrievalDisabled,
- Syncing: SyncingRegisterOnly,
- }, nil)
- bucket.Store(bucketKeyRegistry, r)
-
- fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
- bucket.Store(bucketKeyFileStore, fileStore)
-
- cleanup = func() {
- os.RemoveAll(datadir)
- netStore.Close()
- r.Close()
- }
-
- return r, cleanup, nil
-
- },
- })
- defer sim.Close()
-
- ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
- defer cancelSimRun()
-
- conf := &synctestConfig{}
- //map of discover ID to indexes of chunks expected at that ID
- conf.idToChunksMap = make(map[enode.ID][]int)
- //map of overlay address to discover ID
- conf.addrToIDMap = make(map[string]enode.ID)
- //array where the generated chunk hashes will be stored
- conf.hashes = make([]storage.Address, 0)
-
- err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
- if err != nil {
- return err
- }
-
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
- return err
- }
-
- disconnections := sim.PeerEvents(
- context.Background(),
- sim.NodeIDs(),
- simulation.NewPeerEventsFilter().Drop(),
- )
-
- go func() {
- for d := range disconnections {
- log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
- t.Fatal("unexpected disconnect")
- cancelSimRun()
- }
- }()
-
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
- nodeIDs := sim.UpNodeIDs()
- for _, n := range nodeIDs {
- //get the kademlia overlay address from this ID
- a := n.Bytes()
- //append it to the array of all overlay addresses
- conf.addrs = append(conf.addrs, a)
- //the proximity calculation is on overlay addr,
- //the p2p/simulations check func triggers on enode.ID,
- //so we need to know which overlay addr maps to which nodeID
- conf.addrToIDMap[string(a)] = n
- }
-
- var subscriptionCount int
-
- filter := simulation.NewPeerEventsFilter().ReceivedMessages().Protocol("stream").MsgCode(4)
- eventC := sim.PeerEvents(ctx, nodeIDs, filter)
-
- for j, node := range nodeIDs {
- log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j))
- //start syncing!
- item, ok := sim.NodeItem(node, bucketKeyRegistry)
- if !ok {
- return fmt.Errorf("No registry")
- }
- registry := item.(*Registry)
-
- var cnt int
- cnt, err = startSyncing(registry, conf)
- if err != nil {
- return err
- }
- //increment the number of subscriptions we need to wait for
- //by the count returned from startSyncing (SYNC subscriptions)
- subscriptionCount += cnt
- }
-
- for e := range eventC {
- if e.Error != nil {
- return e.Error
- }
- subscriptionCount--
- if subscriptionCount == 0 {
- break
- }
- }
- //select a random node for upload
- node := sim.RandomUpNode()
- item, ok := sim.NodeItem(node.ID, bucketKeyStore)
- if !ok {
- return fmt.Errorf("No localstore")
- }
- lstore := item.(*storage.LocalStore)
- hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
- if err != nil {
- return err
- }
- conf.hashes = append(conf.hashes, hashes...)
- mapKeysToNodes(conf)
-
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
- return err
- }
-
- var globalStore mock.GlobalStorer
- if *useMockStore {
- globalStore = mockmem.NewGlobalStore()
- }
- // File retrieval check is repeated until all uploaded files are retrieved from all nodes
- // or until the timeout is reached.
- REPEAT:
- for {
- for _, id := range nodeIDs {
- //for each expected chunk, check if it is in the local store
- localChunks := conf.idToChunksMap[id]
- for _, ch := range localChunks {
- //get the real chunk by the index in the index array
- chunk := conf.hashes[ch]
- log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
- //check if the expected chunk is indeed in the localstore
- var err error
- if *useMockStore {
- //use the globalStore if the mockStore should be used; in that case,
- //the complete localStore stack is bypassed for getting the chunk
- _, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
- } else {
- //use the actual localstore
- item, ok := sim.NodeItem(id, bucketKeyStore)
- if !ok {
- return fmt.Errorf("Error accessing localstore")
- }
- lstore := item.(*storage.LocalStore)
- _, err = lstore.Get(ctx, chunk)
- }
- if err != nil {
- log.Debug(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
- // Do not get crazy with logging the warn message
- time.Sleep(500 * time.Millisecond)
- continue REPEAT
- }
- log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
- }
- }
- return nil
- }
- })
-
- if result.Error != nil {
- return result.Error
- }
-
- log.Info("Simulation ended")
- return nil
-}
-
-//the server func to start syncing
-//issues `RequestSubscriptionMsg` to peers, based on po, by iterating over
-//the kademlia's `EachBin` function.
-//returns the number of subscriptions requested
-func startSyncing(r *Registry, conf *synctestConfig) (int, error) {
- var err error
- kad := r.delivery.kad
- subCnt := 0
- //iterate over each bin and solicit needed subscription to bins
- kad.EachBin(r.addr[:], pof, 0, func(conn *network.Peer, po int) bool {
- //identify begin and start index of the bin(s) we want to subscribe to
- subCnt++
- err = r.RequestSubscription(conf.addrToIDMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), true), NewRange(0, 0), High)
- if err != nil {
- log.Error(fmt.Sprintf("Error in RequestSubsciption! %v", err))
- return false
- }
- return true
-
- })
- return subCnt, nil
-}
-
//map chunk keys to addresses which are responsible
func mapKeysToNodes(conf *synctestConfig) {
nodemap := make(map[string][]int)
@@ -555,9 +297,7 @@ func mapKeysToNodes(conf *synctestConfig) {
np, _, _ = pot.Add(np, a, pof)
}
- var kadMinProxSize = 2
-
- ppmap := network.NewPeerPotMap(kadMinProxSize, conf.addrs)
+ ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, conf.addrs)
//for each address, run EachNeighbour on the chunk hashes pot to identify closest nodes
log.Trace(fmt.Sprintf("Generated hash chunk(s): %v", conf.hashes))
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go
index 32e10782..fb571c85 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go
@@ -33,7 +33,6 @@ import (
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
- "github.com/ethereum/go-ethereum/swarm/pot"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
)
@@ -48,31 +47,36 @@ const (
HashSize = 32
)
-//Enumerate options for syncing and retrieval
+// Enumerate options for syncing and retrieval
type SyncingOption int
type RetrievalOption int
-//Syncing options
+// Syncing options
const (
- //Syncing disabled
+ // Syncing disabled
SyncingDisabled SyncingOption = iota
- //Register the client and the server but not subscribe
+ // Register the client and the server but not subscribe
SyncingRegisterOnly
- //Both client and server funcs are registered, subscribe sent automatically
+ // Both client and server funcs are registered, subscribe sent automatically
SyncingAutoSubscribe
)
const (
- //Retrieval disabled. Used mostly for tests to isolate syncing features (i.e. syncing only)
+ // Retrieval disabled. Used mostly for tests to isolate syncing features (i.e. syncing only)
RetrievalDisabled RetrievalOption = iota
- //Only the client side of the retrieve request is registered.
- //(light nodes do not serve retrieve requests)
- //once the client is registered, subscription to retrieve request stream is always sent
+ // Only the client side of the retrieve request is registered.
+ // (light nodes do not serve retrieve requests)
+ // once the client is registered, subscription to retrieve request stream is always sent
RetrievalClientOnly
- //Both client and server funcs are registered, subscribe sent automatically
+ // Both client and server funcs are registered, subscribe sent automatically
RetrievalEnabled
)
+// subscriptionFunc is used to determine what to do in order to perform subscriptions
+// usually we would start to really subscribe to nodes, but for tests other functionality may be needed
+// (see TestRequestPeerSubscriptions in streamer_test.go)
+var subscriptionFunc func(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool = doRequestSubscription
+
// Registry registry for outgoing and incoming streamer constructors
type Registry struct {
addr enode.ID
@@ -86,7 +90,7 @@ type Registry struct {
peers map[enode.ID]*Peer
delivery *Delivery
intervalsStore state.Store
- autoRetrieval bool //automatically subscribe to retrieve request stream
+ autoRetrieval bool // automatically subscribe to retrieve request stream
maxPeerServers int
spec *protocols.Spec //this protocol's spec
balance protocols.Balance //implements protocols.Balance, for accounting
@@ -96,8 +100,8 @@ type Registry struct {
// RegistryOptions holds optional values for NewRegistry constructor.
type RegistryOptions struct {
SkipCheck bool
- Syncing SyncingOption //Defines syncing behavior
- Retrieval RetrievalOption //Defines retrieval behavior
+ Syncing SyncingOption // Defines syncing behavior
+ Retrieval RetrievalOption // Defines retrieval behavior
SyncUpdateDelay time.Duration
MaxPeerServers int // The limit of servers for each peer in registry
}
@@ -110,7 +114,7 @@ func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.Sy
if options.SyncUpdateDelay <= 0 {
options.SyncUpdateDelay = 15 * time.Second
}
- //check if retriaval has been disabled
+ // check if retrieval has been disabled
retrieval := options.Retrieval != RetrievalDisabled
streamer := &Registry{
@@ -125,12 +129,13 @@ func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.Sy
maxPeerServers: options.MaxPeerServers,
balance: balance,
}
+
streamer.setupSpec()
streamer.api = NewAPI(streamer)
delivery.getPeer = streamer.getPeer
- //if retrieval is enabled, register the server func, so that retrieve requests will be served (non-light nodes only)
+ // if retrieval is enabled, register the server func, so that retrieve requests will be served (non-light nodes only)
if options.Retrieval == RetrievalEnabled {
streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, live bool) (Server, error) {
if !live {
@@ -140,20 +145,20 @@ func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.Sy
})
}
- //if retrieval is not disabled, register the client func (both light nodes and normal nodes can issue retrieve requests)
+ // if retrieval is not disabled, register the client func (both light nodes and normal nodes can issue retrieve requests)
if options.Retrieval != RetrievalDisabled {
streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) {
return NewSwarmSyncerClient(p, syncChunkStore, NewStream(swarmChunkServerStreamName, t, live))
})
}
- //If syncing is not disabled, the syncing functions are registered (both client and server)
+ // If syncing is not disabled, the syncing functions are registered (both client and server)
if options.Syncing != SyncingDisabled {
RegisterSwarmSyncerServer(streamer, syncChunkStore)
RegisterSwarmSyncerClient(streamer, syncChunkStore)
}
- //if syncing is set to automatically subscribe to the syncing stream, start the subscription process
+ // if syncing is set to automatically subscribe to the syncing stream, start the subscription process
if options.Syncing == SyncingAutoSubscribe {
// latestIntC function ensures that
// - receiving from the in chan is not blocked by processing inside the for loop
@@ -235,13 +240,17 @@ func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.Sy
return streamer
}
-//we need to construct a spec instance per node instance
+// This is an accounted protocol, therefore we need to provide a pricing Hook to the spec
+// For simulations to be able to run multiple nodes and not override the hook's balance,
+// we need to construct a spec instance per node instance
func (r *Registry) setupSpec() {
- //first create the "bare" spec
+ // first create the "bare" spec
r.createSpec()
- //if balance is nil, this node has been started without swap support (swapEnabled flag is false)
+ // now create the pricing object
+ r.createPriceOracle()
+ // if balance is nil, this node has been started without swap support (swapEnabled flag is false)
if r.balance != nil && !reflect.ValueOf(r.balance).IsNil() {
- //swap is enabled, so setup the hook
+ // swap is enabled, so setup the hook
r.spec.Hook = protocols.NewAccounting(r.balance, r.prices)
}
}
@@ -388,14 +397,6 @@ func (r *Registry) Quit(peerId enode.ID, s Stream) error {
return peer.Send(context.TODO(), msg)
}
-func (r *Registry) NodeInfo() interface{} {
- return nil
-}
-
-func (r *Registry) PeerInfo(id enode.ID) interface{} {
- return nil
-}
-
func (r *Registry) Close() error {
return r.intervalsStore.Close()
}
@@ -471,24 +472,8 @@ func (r *Registry) updateSyncing() {
}
r.peersMu.RUnlock()
- // request subscriptions for all nodes and bins
- kad.EachBin(r.addr[:], pot.DefaultPof(256), 0, func(p *network.Peer, bin int) bool {
- log.Debug(fmt.Sprintf("Requesting subscription by: registry %s from peer %s for bin: %d", r.addr, p.ID(), bin))
-
- // bin is always less then 256 and it is safe to convert it to type uint8
- stream := NewStream("SYNC", FormatSyncBinKey(uint8(bin)), true)
- if streams, ok := subs[p.ID()]; ok {
- // delete live and history streams from the map, so that it won't be removed with a Quit request
- delete(streams, stream)
- delete(streams, getHistoryStream(stream))
- }
- err := r.RequestSubscription(p.ID(), stream, NewRange(0, 0), High)
- if err != nil {
- log.Debug("Request subscription", "err", err, "peer", p.ID(), "stream", stream)
- return false
- }
- return true
- })
+ // start requesting subscriptions from peers
+ r.requestPeerSubscriptions(kad, subs)
// remove SYNC servers that do not need to be subscribed
for id, streams := range subs {
@@ -509,6 +494,66 @@ func (r *Registry) updateSyncing() {
}
}
+// requestPeerSubscriptions calls on each live peer in the kademlia table
+// and sends a `RequestSubscription` to peers according to their bin
+// and their relationship with kademlia's depth.
+// Also check `TestRequestPeerSubscriptions` in order to understand the
+// expected behavior.
+// The function expects:
+// * the kademlia
+// * a map of subscriptions
+// * the actual function to subscribe
+// (in case of the test, it doesn't do real subscriptions)
+func (r *Registry) requestPeerSubscriptions(kad *network.Kademlia, subs map[enode.ID]map[Stream]struct{}) {
+
+ var startPo int
+ var endPo int
+ var ok bool
+
+ // kademlia's depth
+ kadDepth := kad.NeighbourhoodDepth()
+ // request subscriptions for all nodes and bins
+ // nil as base takes the node's base; we need to pass 255 as `EachConn` runs
+ // from deepest bins backwards
+ kad.EachConn(nil, 255, func(p *network.Peer, po int) bool {
+ //if the peer's bin is shallower than the kademlia depth,
+ //only the peer's bin should be subscribed
+ if po < kadDepth {
+ startPo = po
+ endPo = po
+ } else {
+ //if the peer's bin is equal or deeper than the kademlia depth,
+ //each bin from the depth up to k.MaxProxDisplay should be subscribed
+ startPo = kadDepth
+ endPo = kad.MaxProxDisplay
+ }
+
+ for bin := startPo; bin <= endPo; bin++ {
+ //do the actual subscription
+ ok = subscriptionFunc(r, p, uint8(bin), subs)
+ }
+ return ok
+ })
+}
+
+// doRequestSubscription sends the actual RequestSubscription to the peer
+func doRequestSubscription(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
+ log.Debug("Requesting subscription by registry:", "registry", r.addr, "peer", p.ID(), "bin", bin)
+ // bin is always less then 256 and it is safe to convert it to type uint8
+ stream := NewStream("SYNC", FormatSyncBinKey(bin), true)
+ if streams, ok := subs[p.ID()]; ok {
+ // delete live and history streams from the map, so that it won't be removed with a Quit request
+ delete(streams, stream)
+ delete(streams, getHistoryStream(stream))
+ }
+ err := r.RequestSubscription(p.ID(), stream, NewRange(0, 0), High)
+ if err != nil {
+ log.Debug("Request subscription", "err", err, "peer", p.ID(), "stream", stream)
+ return false
+ }
+ return true
+}
+
func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := protocols.NewPeer(p, rw, r.spec)
bp := network.NewBzzPeer(peer)
@@ -541,11 +586,11 @@ func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error {
return p.handleWantedHashesMsg(ctx, msg)
case *ChunkDeliveryMsgRetrieval:
- //handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
+ // handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))
case *ChunkDeliveryMsgSyncing:
- //handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
+ // handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))
case *RetrieveRequestMsg:
@@ -734,9 +779,9 @@ func (c *clientParams) clientCreated() {
close(c.clientCreatedC)
}
-//GetSpec returns the streamer spec to callers
-//This used to be a global variable but for simulations with
-//multiple nodes its fields (notably the Hook) would be overwritten
+// GetSpec returns the streamer spec to callers
+// This used to be a global variable but for simulations with
+// multiple nodes its fields (notably the Hook) would be overwritten
func (r *Registry) GetSpec() *protocols.Spec {
return r.spec
}
@@ -764,6 +809,52 @@ func (r *Registry) createSpec() {
r.spec = spec
}
+// An accountable message needs some meta information attached to it
+// in order to evaluate the correct price
+type StreamerPrices struct {
+ priceMatrix map[reflect.Type]*protocols.Price
+ registry *Registry
+}
+
+// Price implements the accounting interface and returns the price for a specific message
+func (sp *StreamerPrices) Price(msg interface{}) *protocols.Price {
+ t := reflect.TypeOf(msg).Elem()
+ return sp.priceMatrix[t]
+}
+
+// Instead of hardcoding the price, get it
+// through a function - it could be quite complex in the future
+func (sp *StreamerPrices) getRetrieveRequestMsgPrice() uint64 {
+ return uint64(1)
+}
+
+// Instead of hardcoding the price, get it
+// through a function - it could be quite complex in the future
+func (sp *StreamerPrices) getChunkDeliveryMsgRetrievalPrice() uint64 {
+ return uint64(1)
+}
+
+// createPriceOracle sets up a matrix which can be queried to get
+// the price for a message via the Price method
+func (r *Registry) createPriceOracle() {
+ sp := &StreamerPrices{
+ registry: r,
+ }
+ sp.priceMatrix = map[reflect.Type]*protocols.Price{
+ reflect.TypeOf(ChunkDeliveryMsgRetrieval{}): {
+ Value: sp.getChunkDeliveryMsgRetrievalPrice(), // arbitrary price for now
+ PerByte: true,
+ Payer: protocols.Receiver,
+ },
+ reflect.TypeOf(RetrieveRequestMsg{}): {
+ Value: sp.getRetrieveRequestMsgPrice(), // arbitrary price for now
+ PerByte: false,
+ Payer: protocols.Sender,
+ },
+ }
+ r.prices = sp
+}
+
func (r *Registry) Protocols() []p2p.Protocol {
return []p2p.Protocol{
{
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/streamer_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/streamer_test.go
index 16c74d3b..cdaeb92d 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/streamer_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/streamer_test.go
@@ -20,12 +20,17 @@ import (
"bytes"
"context"
"errors"
+ "fmt"
"strconv"
"testing"
"time"
- "github.com/ethereum/go-ethereum/crypto/sha3"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/p2p/enode"
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
+ "github.com/ethereum/go-ethereum/swarm/network"
+ "golang.org/x/crypto/sha3"
)
func TestStreamerSubscribe(t *testing.T) {
@@ -921,3 +926,191 @@ func TestMaxPeerServersWithoutUnsubscribe(t *testing.T) {
}
}
}
+
+//TestHasPriceImplementation is to check that the Registry has a
+//`Price` interface implementation
+func TestHasPriceImplementation(t *testing.T) {
+ _, r, _, teardown, err := newStreamerTester(t, &RegistryOptions{
+ Retrieval: RetrievalDisabled,
+ Syncing: SyncingDisabled,
+ })
+ defer teardown()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if r.prices == nil {
+ t.Fatal("No prices implementation available for the stream protocol")
+ }
+
+ pricesInstance, ok := r.prices.(*StreamerPrices)
+ if !ok {
+ t.Fatal("`Registry` does not have the expected Prices instance")
+ }
+ price := pricesInstance.Price(&ChunkDeliveryMsgRetrieval{})
+ if price == nil || price.Value == 0 || price.Value != pricesInstance.getChunkDeliveryMsgRetrievalPrice() {
+ t.Fatal("No prices set for chunk delivery msg")
+ }
+
+ price = pricesInstance.Price(&RetrieveRequestMsg{})
+ if price == nil || price.Value == 0 || price.Value != pricesInstance.getRetrieveRequestMsgPrice() {
+ t.Fatal("No prices set for chunk delivery msg")
+ }
+}
+
+/*
+TestRequestPeerSubscriptions is a unit test for stream's pull sync subscriptions.
+
+The test does:
+ * assign each connected peer to a bin map
+ * build up a known kademlia in advance
+ * run the EachConn function, which returns supposed subscription bins
+ * store all supposed bins per peer in a map
+ * check that all peers have the expected subscriptions
+
+This kad table and its peers are copied from network.TestKademliaCase1,
+it represents an edge case but for the purpose of testing the
+syncing subscriptions it is just fine.
+
+Addresses used in this test are discovered as part of the simulation network
+in higher level tests for streaming. They were generated randomly.
+
+The resulting kademlia looks like this:
+=========================================================================
+Fri Dec 21 20:02:39 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 7efef1
+population: 12 (12), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
+000 2 8196 835f | 2 8196 (0) 835f (0)
+001 2 2690 28f0 | 2 2690 (0) 28f0 (0)
+002 2 4d72 4a45 | 2 4d72 (0) 4a45 (0)
+003 1 646e | 1 646e (0)
+004 3 769c 76d1 7656 | 3 769c (0) 76d1 (0) 7656 (0)
+============ DEPTH: 5 ==========================================
+005 1 7a48 | 1 7a48 (0)
+006 1 7cbd | 1 7cbd (0)
+007 0 | 0
+008 0 | 0
+009 0 | 0
+010 0 | 0
+011 0 | 0
+012 0 | 0
+013 0 | 0
+014 0 | 0
+015 0 | 0
+=========================================================================
+*/
+func TestRequestPeerSubscriptions(t *testing.T) {
+ // the pivot address; this is the actual kademlia node
+ pivotAddr := "7efef1c41d77f843ad167be95f6660567eb8a4a59f39240000cce2e0d65baf8e"
+
+ // a map of bin number to addresses from the given kademlia
+ binMap := make(map[int][]string)
+ binMap[0] = []string{
+ "835fbbf1d16ba7347b6e2fc552d6e982148d29c624ea20383850df3c810fa8fc",
+ "81968a2d8fb39114342ee1da85254ec51e0608d7f0f6997c2a8354c260a71009",
+ }
+ binMap[1] = []string{
+ "28f0bc1b44658548d6e05dd16d4c2fe77f1da5d48b6774bc4263b045725d0c19",
+ "2690a910c33ee37b91eb6c4e0731d1d345e2dc3b46d308503a6e85bbc242c69e",
+ }
+ binMap[2] = []string{
+ "4a45f1fc63e1a9cb9dfa44c98da2f3d20c2923e5d75ff60b2db9d1bdb0c54d51",
+ "4d72a04ddeb851a68cd197ef9a92a3e2ff01fbbff638e64929dd1a9c2e150112",
+ }
+ binMap[3] = []string{
+ "646e9540c84f6a2f9cf6585d45a4c219573b4fd1b64a3c9a1386fc5cf98c0d4d",
+ }
+ binMap[4] = []string{
+ "7656caccdc79cd8d7ce66d415cc96a718e8271c62fb35746bfc2b49faf3eebf3",
+ "76d1e83c71ca246d042e37ff1db181f2776265fbcfdc890ce230bfa617c9c2f0",
+ "769ce86aa90b518b7ed382f9fdacfbed93574e18dc98fe6c342e4f9f409c2d5a",
+ }
+ binMap[5] = []string{
+ "7a48f75f8ca60487ae42d6f92b785581b40b91f2da551ae73d5eae46640e02e8",
+ }
+ binMap[6] = []string{
+ "7cbd42350bde8e18ae5b955b5450f8e2cef3419f92fbf5598160c60fd78619f0",
+ }
+
+ // create the pivot's kademlia
+ addr := common.FromHex(pivotAddr)
+ k := network.NewKademlia(addr, network.NewKadParams())
+
+ // construct the peers and the kademlia
+ for _, binaddrs := range binMap {
+ for _, a := range binaddrs {
+ addr := common.FromHex(a)
+ k.On(network.NewPeer(&network.BzzPeer{BzzAddr: &network.BzzAddr{OAddr: addr}}, k))
+ }
+ }
+
+ // TODO: check kad table is same
+ // currently k.String() prints date so it will never be the same :)
+ // --> implement JSON representation of kad table
+ log.Debug(k.String())
+
+ // simulate that we would do subscriptions: just store the bin numbers
+ fakeSubscriptions := make(map[string][]int)
+ //after the test, we need to reset the subscriptionFunc to the default
+ defer func() { subscriptionFunc = doRequestSubscription }()
+ // define the function which should run for each connection
+ // instead of doing real subscriptions, we just store the bin numbers
+ subscriptionFunc = func(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
+ // get the peer ID
+ peerstr := fmt.Sprintf("%x", p.Over())
+ // create the array of bins per peer
+ if _, ok := fakeSubscriptions[peerstr]; !ok {
+ fakeSubscriptions[peerstr] = make([]int, 0)
+ }
+ // store the (fake) bin subscription
+ log.Debug(fmt.Sprintf("Adding fake subscription for peer %s with bin %d", peerstr, bin))
+ fakeSubscriptions[peerstr] = append(fakeSubscriptions[peerstr], int(bin))
+ return true
+ }
+ // create just a simple Registry object in order to be able to call...
+ r := &Registry{}
+ r.requestPeerSubscriptions(k, nil)
+ // calculate the kademlia depth
+ kdepth := k.NeighbourhoodDepth()
+
+ // now, check that all peers have the expected (fake) subscriptions
+ // iterate the bin map
+ for bin, peers := range binMap {
+ // for every peer...
+ for _, peer := range peers {
+ // ...get its (fake) subscriptions
+ fakeSubsForPeer := fakeSubscriptions[peer]
+ // if the peer's bin is shallower than the kademlia depth...
+ if bin < kdepth {
+ // (iterate all (fake) subscriptions)
+ for _, subbin := range fakeSubsForPeer {
+ // ...only the peer's bin should be "subscribed"
+ // (and thus have only one subscription)
+ if subbin != bin || len(fakeSubsForPeer) != 1 {
+ t.Fatalf("Did not get expected subscription for bin < depth; bin of peer %s: %d, subscription: %d", peer, bin, subbin)
+ }
+ }
+ } else { //if the peer's bin is equal or higher than the kademlia depth...
+ // (iterate all (fake) subscriptions)
+ for i, subbin := range fakeSubsForPeer {
+ // ...each bin from the peer's bin number up to k.MaxProxDisplay should be "subscribed"
+ // as we start from depth we can use the iteration index to check
+ if subbin != i+kdepth {
+ t.Fatalf("Did not get expected subscription for bin > depth; bin of peer %s: %d, subscription: %d", peer, bin, subbin)
+ }
+ // the last "subscription" should be k.MaxProxDisplay
+ if i == len(fakeSubsForPeer)-1 && subbin != k.MaxProxDisplay {
+ t.Fatalf("Expected last subscription to be: %d, but is: %d", k.MaxProxDisplay, subbin)
+ }
+ }
+ }
+ }
+ }
+
+ // print some output
+ for p, subs := range fakeSubscriptions {
+ log.Debug(fmt.Sprintf("Peer %s has the following fake subscriptions: ", p))
+ for _, bin := range subs {
+ log.Debug(fmt.Sprintf("%d,", bin))
+ }
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer.go
index 4bfbac8b..4fb8b934 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer.go
@@ -127,19 +127,9 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6
// SwarmSyncerClient
type SwarmSyncerClient struct {
- sessionAt uint64
- nextC chan struct{}
- sessionRoot storage.Address
- sessionReader storage.LazySectionReader
- retrieveC chan *storage.Chunk
- storeC chan *storage.Chunk
- store storage.SyncChunkStore
- // chunker storage.Chunker
- currentRoot storage.Address
- requestFunc func(chunk *storage.Chunk)
- end, start uint64
- peer *Peer
- stream Stream
+ store storage.SyncChunkStore
+ peer *Peer
+ stream Stream
}
// NewSwarmSyncerClient is a contructor for provable data exchange syncer
@@ -209,46 +199,6 @@ func (s *SwarmSyncerClient) BatchDone(stream Stream, from uint64, hashes []byte,
return nil
}
-func (s *SwarmSyncerClient) TakeoverProof(stream Stream, from uint64, hashes []byte, root storage.Address) (*TakeoverProof, error) {
- // for provable syncer currentRoot is non-zero length
- // TODO: reenable this with putter/getter
- // if s.chunker != nil {
- // if from > s.sessionAt { // for live syncing currentRoot is always updated
- // //expRoot, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC, s.storeC)
- // expRoot, _, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC)
- // if err != nil {
- // return nil, err
- // }
- // if !bytes.Equal(root, expRoot) {
- // return nil, fmt.Errorf("HandoverProof mismatch")
- // }
- // s.currentRoot = root
- // } else {
- // expHashes := make([]byte, len(hashes))
- // _, err := s.sessionReader.ReadAt(expHashes, int64(s.end*HashSize))
- // if err != nil && err != io.EOF {
- // return nil, err
- // }
- // if !bytes.Equal(expHashes, hashes) {
- // return nil, errors.New("invalid proof")
- // }
- // }
- // return nil, nil
- // }
- s.end += uint64(len(hashes)) / HashSize
- takeover := &Takeover{
- Stream: stream,
- Start: s.start,
- End: s.end,
- Root: root,
- }
- // serialise and sign
- return &TakeoverProof{
- Takeover: takeover,
- Sig: nil,
- }, nil
-}
-
func (s *SwarmSyncerClient) Close() {}
// base for parsing and formating sync bin key
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer_test.go
index 3e3cee18..014ec9a9 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer_test.go
@@ -18,11 +18,13 @@ package stream
import (
"context"
+ "errors"
"fmt"
"io/ioutil"
"math"
"os"
"sync"
+ "sync/atomic"
"testing"
"time"
@@ -43,10 +45,10 @@ import (
const dataChunkCount = 200
func TestSyncerSimulation(t *testing.T) {
- testSyncBetweenNodes(t, 2, 1, dataChunkCount, true, 1)
- testSyncBetweenNodes(t, 4, 1, dataChunkCount, true, 1)
- testSyncBetweenNodes(t, 8, 1, dataChunkCount, true, 1)
- testSyncBetweenNodes(t, 16, 1, dataChunkCount, true, 1)
+ testSyncBetweenNodes(t, 2, dataChunkCount, true, 1)
+ testSyncBetweenNodes(t, 4, dataChunkCount, true, 1)
+ testSyncBetweenNodes(t, 8, dataChunkCount, true, 1)
+ testSyncBetweenNodes(t, 16, dataChunkCount, true, 1)
}
func createMockStore(globalStore mock.GlobalStorer, id enode.ID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
@@ -67,9 +69,8 @@ func createMockStore(globalStore mock.GlobalStorer, id enode.ID, addr *network.B
return lstore, datadir, nil
}
-func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool, po uint8) {
+func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, po uint8) {
- t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
sim := simulation.New(map[string]simulation.ServiceFunc{
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
var store storage.ChunkStore
@@ -130,7 +131,7 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
if err != nil {
t.Fatal(err)
}
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
nodeIDs := sim.UpNodeIDs()
nodeIndex := make(map[enode.ID]int)
@@ -144,11 +145,19 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
simulation.NewPeerEventsFilter().Drop(),
)
+ var disconnected atomic.Value
go func() {
for d := range disconnections {
if d.Error != nil {
log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
- t.Fatal(d.Error)
+ disconnected.Store(true)
+ }
+ }
+ }()
+ defer func() {
+ if err != nil {
+ if yes, ok := disconnected.Load().(bool); ok && yes {
+ err = errors.New("disconnect events received")
}
}
}()
@@ -180,7 +189,7 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
}
}
// here we distribute chunks of a random file into stores 1...nodes
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
return err
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/visualized_snapshot_sync_sim_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/visualized_snapshot_sync_sim_test.go
index f6d61802..18b4c8fb 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/visualized_snapshot_sync_sim_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/visualized_snapshot_sync_sim_test.go
@@ -19,16 +19,27 @@
package stream
import (
+ "bytes"
"context"
+ "errors"
"fmt"
+ "io"
+ "os"
+ "sync"
"testing"
"time"
+ "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
- "github.com/ethereum/go-ethereum/p2p/discover"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/protocols"
"github.com/ethereum/go-ethereum/p2p/simulations"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+ "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/swarm/log"
+ "github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/network/simulation"
+ "github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
)
@@ -61,19 +72,19 @@ func setupSim(serviceMap map[string]simulation.ServiceFunc) (int, int, *simulati
func watchSim(sim *simulation.Simulation) (context.Context, context.CancelFunc) {
ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
panic(err)
}
disconnections := sim.PeerEvents(
context.Background(),
sim.NodeIDs(),
- simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
+ simulation.NewPeerEventsFilter().Drop(),
)
go func() {
for d := range disconnections {
- log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
+ log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
panic("unexpected disconnect")
cancelSimRun()
}
@@ -85,7 +96,6 @@ func watchSim(sim *simulation.Simulation) (context.Context, context.CancelFunc)
//This test requests bogus hashes into the network
func TestNonExistingHashesWithServer(t *testing.T) {
- t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
nodeCount, _, sim := setupSim(retrievalSimServiceMap)
defer sim.Close()
@@ -103,7 +113,7 @@ func TestNonExistingHashesWithServer(t *testing.T) {
result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
//check on the node's FileStore (netstore)
- id := sim.RandomUpNode().ID
+ id := sim.Net.GetRandomUpNode().ID()
item, ok := sim.NodeItem(id, bucketKeyFileStore)
if !ok {
t.Fatalf("No filestore")
@@ -144,8 +154,62 @@ func sendSimTerminatedEvent(sim *simulation.Simulation) {
//It also sends some custom events so that the frontend
//can visualize messages like SendOfferedMsg, WantedHashesMsg, DeliveryMsg
func TestSnapshotSyncWithServer(t *testing.T) {
+ //t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
+
+ //define a wrapper object to be able to pass around data
+ wrapper := &netWrapper{}
+
+ nodeCount := *nodes
+ chunkCount := *chunks
+
+ if nodeCount == 0 || chunkCount == 0 {
+ nodeCount = 32
+ chunkCount = 1
+ }
+
+ log.Info(fmt.Sprintf("Running the simulation with %d nodes and %d chunks", nodeCount, chunkCount))
+
+ sim := simulation.New(map[string]simulation.ServiceFunc{
+ "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
+ n := ctx.Config.Node()
+ addr := network.NewAddr(n)
+ store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ bucket.Store(bucketKeyStore, store)
+ localStore := store.(*storage.LocalStore)
+ netStore, err := storage.NewNetStore(localStore, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ kad := network.NewKademlia(addr.Over(), network.NewKadParams())
+ delivery := NewDelivery(kad, netStore)
+ netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New
+
+ r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
+ Retrieval: RetrievalDisabled,
+ Syncing: SyncingAutoSubscribe,
+ SyncUpdateDelay: 3 * time.Second,
+ }, nil)
+
+ tr := &testRegistry{
+ Registry: r,
+ w: wrapper,
+ }
+
+ bucket.Store(bucketKeyRegistry, tr)
+
+ cleanup = func() {
+ netStore.Close()
+ tr.Close()
+ os.RemoveAll(datadir)
+ }
+
+ return tr, cleanup, nil
+ },
+ }).WithServer(":8888") //start with the HTTP server
- t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
nodeCount, chunkCount, sim := setupSim(simServiceMap)
defer sim.Close()
@@ -153,12 +217,13 @@ func TestSnapshotSyncWithServer(t *testing.T) {
conf := &synctestConfig{}
//map of discover ID to indexes of chunks expected at that ID
- conf.idToChunksMap = make(map[discover.NodeID][]int)
+ conf.idToChunksMap = make(map[enode.ID][]int)
//map of overlay address to discover ID
- conf.addrToIDMap = make(map[string]discover.NodeID)
+ conf.addrToIDMap = make(map[string]enode.ID)
//array where the generated chunk hashes will be stored
conf.hashes = make([]storage.Address, 0)
-
+ //pass the network to the wrapper object
+ wrapper.setNetwork(sim.Net)
err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
if err != nil {
panic(err)
@@ -167,49 +232,6 @@ func TestSnapshotSyncWithServer(t *testing.T) {
ctx, cancelSimRun := watchSim(sim)
defer cancelSimRun()
- //setup filters in the event feed
- offeredHashesFilter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(1)
- wantedFilter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(2)
- deliveryFilter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(6)
- eventC := sim.PeerEvents(ctx, sim.UpNodeIDs(), offeredHashesFilter, wantedFilter, deliveryFilter)
-
- quit := make(chan struct{})
-
- go func() {
- for e := range eventC {
- select {
- case <-quit:
- fmt.Println("quitting event loop")
- return
- default:
- }
- if e.Error != nil {
- t.Fatal(e.Error)
- }
- if *e.Event.MsgCode == uint64(1) {
- evt := &simulations.Event{
- Type: EventTypeChunkOffered,
- Node: sim.Net.GetNode(e.NodeID),
- Control: false,
- }
- sim.Net.Events().Send(evt)
- } else if *e.Event.MsgCode == uint64(2) {
- evt := &simulations.Event{
- Type: EventTypeChunkWanted,
- Node: sim.Net.GetNode(e.NodeID),
- Control: false,
- }
- sim.Net.Events().Send(evt)
- } else if *e.Event.MsgCode == uint64(6) {
- evt := &simulations.Event{
- Type: EventTypeChunkDelivered,
- Node: sim.Net.GetNode(e.NodeID),
- Control: false,
- }
- sim.Net.Events().Send(evt)
- }
- }
- }()
//run the sim
result := runSim(conf, ctx, sim, chunkCount)
@@ -218,11 +240,150 @@ func TestSnapshotSyncWithServer(t *testing.T) {
Type: EventTypeSimTerminated,
Control: false,
}
- sim.Net.Events().Send(evt)
+ go sim.Net.Events().Send(evt)
if result.Error != nil {
panic(result.Error)
}
- close(quit)
log.Info("Simulation ended")
}
+
+//testRegistry embeds registry
+//it allows to replace the protocol run function
+type testRegistry struct {
+ *Registry
+ w *netWrapper
+}
+
+//Protocols replaces the protocol's run function
+func (tr *testRegistry) Protocols() []p2p.Protocol {
+ regProto := tr.Registry.Protocols()
+ //set the `stream` protocol's run function with the testRegistry's one
+ regProto[0].Run = tr.runProto
+ return regProto
+}
+
+//runProto is the new overwritten protocol's run function for this test
+func (tr *testRegistry) runProto(p *p2p.Peer, rw p2p.MsgReadWriter) error {
+ //create a custom rw message ReadWriter
+ testRw := &testMsgReadWriter{
+ MsgReadWriter: rw,
+ Peer: p,
+ w: tr.w,
+ Registry: tr.Registry,
+ }
+ //now run the actual upper layer `Registry`'s protocol function
+ return tr.runProtocol(p, testRw)
+}
+
+//testMsgReadWriter is a custom rw
+//it will allow us to re-use the message twice
+type testMsgReadWriter struct {
+ *Registry
+ p2p.MsgReadWriter
+ *p2p.Peer
+ w *netWrapper
+}
+
+//netWrapper wrapper object so we can pass data around
+type netWrapper struct {
+ net *simulations.Network
+}
+
+//set the network to the wrapper for later use (used inside the custom rw)
+func (w *netWrapper) setNetwork(n *simulations.Network) {
+ w.net = n
+}
+
+//get the network from the wrapper (used inside the custom rw)
+func (w *netWrapper) getNetwork() *simulations.Network {
+ return w.net
+}
+
+// ReadMsg reads a message from the underlying MsgReadWriter and emits a
+// "message received" event
+//we do this because we are interested in the Payload of the message for custom use
+//in this test, but messages can only be consumed once (stream io.Reader)
+func (ev *testMsgReadWriter) ReadMsg() (p2p.Msg, error) {
+ //read the message from the underlying rw
+ msg, err := ev.MsgReadWriter.ReadMsg()
+ if err != nil {
+ return msg, err
+ }
+
+ //don't do anything with message codes we actually are not needing/reading
+ subCodes := []uint64{1, 2, 10}
+ found := false
+ for _, c := range subCodes {
+ if c == msg.Code {
+ found = true
+ }
+ }
+ //just return if not a msg code we are interested in
+ if !found {
+ return msg, nil
+ }
+
+ //we use a io.TeeReader so that we can read the message twice
+ //the Payload is a io.Reader, so if we read from it, the actual protocol handler
+ //cannot access it anymore.
+ //But we need that handler to be able to consume the message as normal,
+ //as if we would not do anything here with that message
+ var buf bytes.Buffer
+ tee := io.TeeReader(msg.Payload, &buf)
+
+ mcp := &p2p.Msg{
+ Code: msg.Code,
+ Size: msg.Size,
+ ReceivedAt: msg.ReceivedAt,
+ Payload: tee,
+ }
+ //assign the copy for later use
+ msg.Payload = &buf
+
+ //now let's look into the message
+ var wmsg protocols.WrappedMsg
+ err = mcp.Decode(&wmsg)
+ if err != nil {
+ log.Error(err.Error())
+ return msg, err
+ }
+ //create a new message from the code
+ val, ok := ev.Registry.GetSpec().NewMsg(mcp.Code)
+ if !ok {
+ return msg, errors.New(fmt.Sprintf("Invalid message code: %v", msg.Code))
+ }
+ //decode it
+ if err := rlp.DecodeBytes(wmsg.Payload, val); err != nil {
+ return msg, errors.New(fmt.Sprintf("Decoding error <= %v: %v", msg, err))
+ }
+ //now for every message type we are interested in, create a custom event and send it
+ var evt *simulations.Event
+ switch val := val.(type) {
+ case *OfferedHashesMsg:
+ evt = &simulations.Event{
+ Type: EventTypeChunkOffered,
+ Node: ev.w.getNetwork().GetNode(ev.ID()),
+ Control: false,
+ Data: val.Hashes,
+ }
+ case *WantedHashesMsg:
+ evt = &simulations.Event{
+ Type: EventTypeChunkWanted,
+ Node: ev.w.getNetwork().GetNode(ev.ID()),
+ Control: false,
+ }
+ case *ChunkDeliveryMsgSyncing:
+ evt = &simulations.Event{
+ Type: EventTypeChunkDelivered,
+ Node: ev.w.getNetwork().GetNode(ev.ID()),
+ Control: false,
+ Data: val.Addr.String(),
+ }
+ }
+ if evt != nil {
+ //send custom event to feed; frontend will listen to it and display
+ ev.w.getNetwork().Events().Send(evt)
+ }
+ return msg, nil
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/network_test.go
index 41993dfc..71d4b8f1 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/network_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/network_test.go
@@ -260,7 +260,6 @@ type testSwarmNetworkOptions struct {
// - Checking if a file is retrievable from all nodes.
func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwarmNetworkStep) {
- t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
if o == nil {
o = new(testSwarmNetworkOptions)
}
@@ -354,7 +353,7 @@ func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwa
}
if *waitKademlia {
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
return err
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pot/address.go b/vendor/github.com/ethereum/go-ethereum/swarm/pot/address.go
index 728dac14..91cada2e 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pot/address.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pot/address.go
@@ -41,10 +41,6 @@ func NewAddressFromBytes(b []byte) Address {
return Address(h)
}
-func (a Address) IsZero() bool {
- return a.Bin() == zerosBin
-}
-
func (a Address) String() string {
return fmt.Sprintf("%x", a[:])
}
@@ -166,7 +162,6 @@ func ToBytes(v Val) []byte {
}
// DefaultPof returns a proximity order comparison operator function
-// where all
func DefaultPof(max int) func(one, other Val, pos int) (int, bool) {
return func(one, other Val, pos int) (int, bool) {
po, eq := proximityOrder(ToBytes(one), ToBytes(other), pos)
@@ -178,6 +173,9 @@ func DefaultPof(max int) func(one, other Val, pos int) (int, bool) {
}
}
+// proximityOrder returns two parameters:
+// 1. relative proximity order of the arguments one & other;
+// 2. boolean indicating whether the full match occurred (one == other).
func proximityOrder(one, other []byte, pos int) (int, bool) {
for i := pos / 8; i < len(one); i++ {
if one[i] == other[i] {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pot/pot.go b/vendor/github.com/ethereum/go-ethereum/swarm/pot/pot.go
index dfda8480..7e3967f3 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pot/pot.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pot/pot.go
@@ -144,13 +144,10 @@ func add(t *Pot, val Val, pof Pof) (*Pot, int, bool) {
return r, po, found
}
-// Remove called on (v) deletes v from the Pot and returns
-// the proximity order of v and a boolean value indicating
-// if the value was found
-// Remove called on (t, v) returns a new Pot that contains all the elements of t
-// minus the value v, using the applicative remove
-// the second return value is the proximity order of the inserted element
-// the third is boolean indicating if the item was found
+// Remove deletes element v from the Pot t and returns three parameters:
+// 1. new Pot that contains all the elements of t minus the element v;
+// 2. proximity order of the removed element v;
+// 3. boolean indicating whether the item was found.
func Remove(t *Pot, v Val, pof Pof) (*Pot, int, bool) {
return remove(t, v, pof)
}
@@ -161,10 +158,7 @@ func remove(t *Pot, val Val, pof Pof) (r *Pot, po int, found bool) {
if found {
size--
if size == 0 {
- r = &Pot{
- po: t.po,
- }
- return r, po, true
+ return &Pot{}, po, true
}
i := len(t.bins) - 1
last := t.bins[i]
@@ -201,7 +195,7 @@ func remove(t *Pot, val Val, pof Pof) (r *Pot, po int, found bool) {
}
bins = append(bins, t.bins[j:]...)
r = &Pot{
- pin: val,
+ pin: t.pin,
size: size,
po: t.po,
bins: bins,
@@ -453,64 +447,50 @@ func union(t0, t1 *Pot, pof Pof) (*Pot, int) {
return n, common
}
-// Each called with (f) is a synchronous iterator over the bins of a node
-// respecting an ordering
-// proximity > pinnedness
-func (t *Pot) Each(f func(Val, int) bool) bool {
+// Each is a synchronous iterator over the elements of pot with function f.
+func (t *Pot) Each(f func(Val) bool) bool {
return t.each(f)
}
-func (t *Pot) each(f func(Val, int) bool) bool {
- var next bool
- for _, n := range t.bins {
- if n == nil {
- return true
- }
- next = n.each(f)
- if !next {
- return false
- }
- }
- if t.size == 0 {
+// each is a synchronous iterator over the elements of pot with function f.
+// the iteration ends if the function returns false or there are no more elements.
+func (t *Pot) each(f func(Val) bool) bool {
+ if t == nil || t.size == 0 {
return false
}
- return f(t.pin, t.po)
-}
-
-// EachFrom called with (f, start) is a synchronous iterator over the elements of a Pot
-// within the inclusive range starting from proximity order start
-// the function argument is passed the value and the proximity order wrt the root pin
-// it does NOT include the pinned item of the root
-// respecting an ordering
-// proximity > pinnedness
-// the iteration ends if the function return false or there are no more elements
-// end of a po range can be implemented since po is passed to the function
-func (t *Pot) EachFrom(f func(Val, int) bool, po int) bool {
- return t.eachFrom(f, po)
-}
-
-func (t *Pot) eachFrom(f func(Val, int) bool, po int) bool {
- var next bool
- _, lim := t.getPos(po)
- for i := lim; i < len(t.bins); i++ {
- n := t.bins[i]
- next = n.each(f)
- if !next {
+ for _, n := range t.bins {
+ if !n.each(f) {
return false
}
}
- return f(t.pin, t.po)
+ return f(t.pin)
+}
+
+// eachFrom is a synchronous iterator over the elements of pot with function f,
+// starting from certain proximity order po, which is passed as a second parameter.
+// the iteration ends if the function returns false or there are no more elements.
+func (t *Pot) eachFrom(f func(Val) bool, po int) bool {
+ if t == nil || t.size == 0 {
+ return false
+ }
+ _, beg := t.getPos(po)
+ for i := beg; i < len(t.bins); i++ {
+ if !t.bins[i].each(f) {
+ return false
+ }
+ }
+ return f(t.pin)
}
// EachBin iterates over bins of the pivot node and offers iterators to the caller on each
// subtree passing the proximity order and the size
// the iteration continues until the function's return value is false
// or there are no more subtries
-func (t *Pot) EachBin(val Val, pof Pof, po int, f func(int, int, func(func(val Val, i int) bool) bool) bool) {
+func (t *Pot) EachBin(val Val, pof Pof, po int, f func(int, int, func(func(val Val) bool) bool) bool) {
t.eachBin(val, pof, po, f)
}
-func (t *Pot) eachBin(val Val, pof Pof, po int, f func(int, int, func(func(val Val, i int) bool) bool) bool) {
+func (t *Pot) eachBin(val Val, pof Pof, po int, f func(int, int, func(func(val Val) bool) bool) bool) {
if t == nil || t.size == 0 {
return
}
@@ -530,8 +510,8 @@ func (t *Pot) eachBin(val Val, pof Pof, po int, f func(int, int, func(func(val V
}
if lim == len(t.bins) {
if spr >= po {
- f(spr, 1, func(g func(Val, int) bool) bool {
- return g(t.pin, spr)
+ f(spr, 1, func(g func(Val) bool) bool {
+ return g(t.pin)
})
}
return
@@ -545,9 +525,9 @@ func (t *Pot) eachBin(val Val, pof Pof, po int, f func(int, int, func(func(val V
size += n.size
}
if spr >= po {
- if !f(spr, t.size-size, func(g func(Val, int) bool) bool {
- return t.eachFrom(func(v Val, j int) bool {
- return g(v, spr)
+ if !f(spr, t.size-size, func(g func(Val) bool) bool {
+ return t.eachFrom(func(v Val) bool {
+ return g(v)
}, spo)
}) {
return
@@ -595,7 +575,7 @@ func (t *Pot) eachNeighbour(val Val, pof Pof, f func(Val, int) bool) bool {
}
for i := l - 1; i > ir; i-- {
- next = t.bins[i].each(func(v Val, _ int) bool {
+ next = t.bins[i].each(func(v Val) bool {
return f(v, po)
})
if !next {
@@ -605,7 +585,7 @@ func (t *Pot) eachNeighbour(val Val, pof Pof, f func(Val, int) bool) bool {
for i := il - 1; i >= 0; i-- {
n := t.bins[i]
- next = n.each(func(v Val, _ int) bool {
+ next = n.each(func(v Val) bool {
return f(v, n.po)
})
if !next {
@@ -719,7 +699,7 @@ func (t *Pot) eachNeighbourAsync(val Val, pof Pof, max int, maxPos int, f func(V
wg.Add(m)
}
go func(pn *Pot, pm int) {
- pn.each(func(v Val, _ int) bool {
+ pn.each(func(v Val) bool {
if wg != nil {
defer wg.Done()
}
@@ -746,7 +726,7 @@ func (t *Pot) eachNeighbourAsync(val Val, pof Pof, max int, maxPos int, f func(V
wg.Add(m)
}
go func(pn *Pot, pm int) {
- pn.each(func(v Val, _ int) bool {
+ pn.each(func(v Val) bool {
if wg != nil {
defer wg.Done()
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pot/pot_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/pot/pot_test.go
index aeb23dfc..83d60491 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pot/pot_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pot/pot_test.go
@@ -65,14 +65,13 @@ func randomtestAddr(n int, i int) *testAddr {
return newTestAddr(v, i)
}
-func indexes(t *Pot) (i []int, po []int) {
- t.Each(func(v Val, p int) bool {
+func indexes(t *Pot) (i []int) {
+ t.Each(func(v Val) bool {
a := v.(*testAddr)
i = append(i, a.i)
- po = append(po, p)
return true
})
- return i, po
+ return i
}
func testAdd(t *Pot, pof Pof, j int, values ...string) (_ *Pot, n int, f bool) {
@@ -82,6 +81,69 @@ func testAdd(t *Pot, pof Pof, j int, values ...string) (_ *Pot, n int, f bool) {
return t, n, f
}
+// removing non-existing element from pot
+func TestPotRemoveNonExisting(t *testing.T) {
+ pof := DefaultPof(8)
+ n := NewPot(newTestAddr("00111100", 0), 0)
+ n, _, _ = Remove(n, newTestAddr("00000101", 0), pof)
+ exp := "00111100"
+ got := Label(n.Pin())
+ if got[:8] != exp {
+ t.Fatalf("incorrect pinned value. Expected %v, got %v", exp, got[:8])
+ }
+}
+
+// this test creates hierarchical pot tree, and therefore any child node will have
+// child_po = parent_po + 1.
+// then removes a node from the middle of the tree.
+func TestPotRemoveSameBin(t *testing.T) {
+ pof := DefaultPof(8)
+ n := NewPot(newTestAddr("11111111", 0), 0)
+ n, _, _ = testAdd(n, pof, 1, "00000000", "01000000", "01100000", "01110000", "01111000")
+ n, _, _ = Remove(n, newTestAddr("01110000", 0), pof)
+ inds := indexes(n)
+ goti := n.Size()
+ expi := 5
+ if goti != expi {
+ t.Fatalf("incorrect number of elements in Pot. Expected %v, got %v", expi, goti)
+ }
+ inds = indexes(n)
+ got := fmt.Sprintf("%v", inds)
+ exp := "[5 3 2 1 0]"
+ if got != exp {
+ t.Fatalf("incorrect indexes in iteration over Pot. Expected %v, got %v", exp, got)
+ }
+}
+
+// this test creates a flat pot tree (all the elements are leafs of one root),
+// and therefore they all have the same po.
+// then removes an arbitrary element from the pot.
+func TestPotRemoveDifferentBins(t *testing.T) {
+ pof := DefaultPof(8)
+ n := NewPot(newTestAddr("11111111", 0), 0)
+ n, _, _ = testAdd(n, pof, 1, "00000000", "10000000", "11000000", "11100000", "11110000")
+ n, _, _ = Remove(n, newTestAddr("11100000", 0), pof)
+ inds := indexes(n)
+ goti := n.Size()
+ expi := 5
+ if goti != expi {
+ t.Fatalf("incorrect number of elements in Pot. Expected %v, got %v", expi, goti)
+ }
+ inds = indexes(n)
+ got := fmt.Sprintf("%v", inds)
+ exp := "[1 2 3 5 0]"
+ if got != exp {
+ t.Fatalf("incorrect indexes in iteration over Pot. Expected %v, got %v", exp, got)
+ }
+ n, _, _ = testAdd(n, pof, 4, "11100000")
+ inds = indexes(n)
+ got = fmt.Sprintf("%v", inds)
+ exp = "[1 2 3 4 5 0]"
+ if got != exp {
+ t.Fatalf("incorrect indexes in iteration over Pot. Expected %v, got %v", exp, got)
+ }
+}
+
func TestPotAdd(t *testing.T) {
pof := DefaultPof(8)
n := NewPot(newTestAddr("00111100", 0), 0)
@@ -105,17 +167,12 @@ func TestPotAdd(t *testing.T) {
if goti != expi {
t.Fatalf("incorrect number of elements in Pot. Expected %v, got %v", expi, goti)
}
- inds, po := indexes(n)
+ inds := indexes(n)
got = fmt.Sprintf("%v", inds)
exp = "[3 4 2]"
if got != exp {
t.Fatalf("incorrect indexes in iteration over Pot. Expected %v, got %v", exp, got)
}
- got = fmt.Sprintf("%v", po)
- exp = "[1 2 0]"
- if got != exp {
- t.Fatalf("incorrect po-s in iteration over Pot. Expected %v, got %v", exp, got)
- }
}
func TestPotRemove(t *testing.T) {
@@ -134,26 +191,25 @@ func TestPotRemove(t *testing.T) {
if goti != expi {
t.Fatalf("incorrect number of elements in Pot. Expected %v, got %v", expi, goti)
}
- inds, po := indexes(n)
+ inds := indexes(n)
got = fmt.Sprintf("%v", inds)
- exp = "[2 4 0]"
+ exp = "[2 4 1]"
if got != exp {
t.Fatalf("incorrect indexes in iteration over Pot. Expected %v, got %v", exp, got)
}
- got = fmt.Sprintf("%v", po)
- exp = "[1 3 0]"
+ n, _, _ = Remove(n, newTestAddr("00111100", 0), pof) // remove again same element
+ inds = indexes(n)
+ got = fmt.Sprintf("%v", inds)
if got != exp {
- t.Fatalf("incorrect po-s in iteration over Pot. Expected %v, got %v", exp, got)
+ t.Fatalf("incorrect indexes in iteration over Pot. Expected %v, got %v", exp, got)
}
- // remove again
- n, _, _ = Remove(n, newTestAddr("00111100", 0), pof)
- inds, _ = indexes(n)
+ n, _, _ = Remove(n, newTestAddr("00000000", 0), pof) // remove the first element
+ inds = indexes(n)
got = fmt.Sprintf("%v", inds)
exp = "[2 4]"
if got != exp {
t.Fatalf("incorrect indexes in iteration over Pot. Expected %v, got %v", exp, got)
}
-
}
func TestPotSwap(t *testing.T) {
@@ -202,7 +258,7 @@ func TestPotSwap(t *testing.T) {
})
}
sum := 0
- n.Each(func(v Val, i int) bool {
+ n.Each(func(v Val) bool {
if v == nil {
return true
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/api.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/api.go
index 587382d7..4556d7b7 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/api.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/api.go
@@ -92,7 +92,7 @@ func (pssapi *API) Receive(ctx context.Context, topic Topic, raw bool, prox bool
}
func (pssapi *API) GetAddress(topic Topic, asymmetric bool, key string) (PssAddress, error) {
- var addr *PssAddress
+ var addr PssAddress
if asymmetric {
peer, ok := pssapi.Pss.pubKeyPool[key][topic]
if !ok {
@@ -107,7 +107,7 @@ func (pssapi *API) GetAddress(topic Topic, asymmetric bool, key string) (PssAddr
addr = peer.address
}
- return *addr, nil
+ return addr, nil
}
// Retrieves the node's base address in hex form
@@ -128,7 +128,7 @@ func (pssapi *API) SetPeerPublicKey(pubkey hexutil.Bytes, topic Topic, addr PssA
if err != nil {
return fmt.Errorf("Cannot unmarshal pubkey: %x", pubkey)
}
- err = pssapi.Pss.SetPeerPublicKey(pk, topic, &addr)
+ err = pssapi.Pss.SetPeerPublicKey(pk, topic, addr)
if err != nil {
return fmt.Errorf("Invalid key: %x", pk)
}
@@ -141,11 +141,11 @@ func (pssapi *API) GetSymmetricKey(symkeyid string) (hexutil.Bytes, error) {
}
func (pssapi *API) GetSymmetricAddressHint(topic Topic, symkeyid string) (PssAddress, error) {
- return *pssapi.Pss.symKeyPool[symkeyid][topic].address, nil
+ return pssapi.Pss.symKeyPool[symkeyid][topic].address, nil
}
func (pssapi *API) GetAsymmetricAddressHint(topic Topic, pubkeyid string) (PssAddress, error) {
- return *pssapi.Pss.pubKeyPool[pubkeyid][topic].address, nil
+ return pssapi.Pss.pubKeyPool[pubkeyid][topic].address, nil
}
func (pssapi *API) StringToTopic(topicstring string) (Topic, error) {
@@ -157,14 +157,23 @@ func (pssapi *API) StringToTopic(topicstring string) (Topic, error) {
}
func (pssapi *API) SendAsym(pubkeyhex string, topic Topic, msg hexutil.Bytes) error {
+ if err := validateMsg(msg); err != nil {
+ return err
+ }
return pssapi.Pss.SendAsym(pubkeyhex, topic, msg[:])
}
func (pssapi *API) SendSym(symkeyhex string, topic Topic, msg hexutil.Bytes) error {
+ if err := validateMsg(msg); err != nil {
+ return err
+ }
return pssapi.Pss.SendSym(symkeyhex, topic, msg[:])
}
func (pssapi *API) SendRaw(addr hexutil.Bytes, topic Topic, msg hexutil.Bytes) error {
+ if err := validateMsg(msg); err != nil {
+ return err
+ }
return pssapi.Pss.SendRaw(PssAddress(addr), topic, msg[:])
}
@@ -177,3 +186,10 @@ func (pssapi *API) GetPeerTopics(pubkeyhex string) ([]Topic, error) {
func (pssapi *API) GetPeerAddress(pubkeyhex string, topic Topic) (PssAddress, error) {
return pssapi.Pss.getPeerAddress(pubkeyhex, topic)
}
+
+func validateMsg(msg []byte) error {
+ if len(msg) == 0 {
+ return errors.New("invalid message length")
+ }
+ return nil
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/client/client_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/client/client_test.go
index 8f2f0e80..0d6788d6 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/client/client_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/client/client_test.go
@@ -238,7 +238,7 @@ func newServices() adapters.Services {
return k
}
params := network.NewKadParams()
- params.MinProxBinSize = 2
+ params.NeighbourhoodSize = 2
params.MaxBinSize = 3
params.MinBinSize = 1
params.MaxRetries = 1000
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/forwarding_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/forwarding_test.go
new file mode 100644
index 00000000..08468843
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/forwarding_test.go
@@ -0,0 +1,356 @@
+package pss
+
+import (
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/protocols"
+ "github.com/ethereum/go-ethereum/swarm/network"
+ "github.com/ethereum/go-ethereum/swarm/pot"
+ whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
+)
+
+type testCase struct {
+ name string
+ recipient []byte
+ peers []pot.Address
+ expected []int
+ exclusive bool
+ nFails int
+ success bool
+ errors string
+}
+
+var testCases []testCase
+
+// the purpose of this test is to see that pss.forward() function correctly
+// selects the peers for message forwarding, depending on the message address
+// and kademlia constellation.
+func TestForwardBasic(t *testing.T) {
+ baseAddrBytes := make([]byte, 32)
+ for i := 0; i < len(baseAddrBytes); i++ {
+ baseAddrBytes[i] = 0xFF
+ }
+ var c testCase
+ base := pot.NewAddressFromBytes(baseAddrBytes)
+ var peerAddresses []pot.Address
+ const depth = 10
+ for i := 0; i <= depth; i++ {
+ // add two peers for each proximity order
+ a := pot.RandomAddressAt(base, i)
+ peerAddresses = append(peerAddresses, a)
+ a = pot.RandomAddressAt(base, i)
+ peerAddresses = append(peerAddresses, a)
+ }
+
+ // skip one level, add one peer at one level deeper.
+ // as a result, we will have an edge case of three peers in nearest neighbours' bin.
+ peerAddresses = append(peerAddresses, pot.RandomAddressAt(base, depth+2))
+
+ kad := network.NewKademlia(base[:], network.NewKadParams())
+ ps := createPss(t, kad)
+ addPeers(kad, peerAddresses)
+
+ const firstNearest = depth * 2 // shallowest peer in the nearest neighbours' bin
+ nearestNeighbours := []int{firstNearest, firstNearest + 1, firstNearest + 2}
+ var all []int // indices of all the peers
+ for i := 0; i < len(peerAddresses); i++ {
+ all = append(all, i)
+ }
+
+ for i := 0; i < len(peerAddresses); i++ {
+ // send msg directly to the known peers (recipient address == peer address)
+ c = testCase{
+ name: fmt.Sprintf("Send direct to known, id: [%d]", i),
+ recipient: peerAddresses[i][:],
+ peers: peerAddresses,
+ expected: []int{i},
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+ }
+
+ for i := 0; i < firstNearest; i++ {
+ // send random messages with proximity orders, corresponding to PO of each bin,
+ // with one peer being closer to the recipient address
+ a := pot.RandomAddressAt(peerAddresses[i], 64)
+ c = testCase{
+ name: fmt.Sprintf("Send random to each PO, id: [%d]", i),
+ recipient: a[:],
+ peers: peerAddresses,
+ expected: []int{i},
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+ }
+
+ for i := 0; i < firstNearest; i++ {
+ // send random messages with proximity orders, corresponding to PO of each bin,
+ // with random proximity relative to the recipient address
+ po := i / 2
+ a := pot.RandomAddressAt(base, po)
+ c = testCase{
+ name: fmt.Sprintf("Send direct to known, id: [%d]", i),
+ recipient: a[:],
+ peers: peerAddresses,
+ expected: []int{po * 2, po*2 + 1},
+ exclusive: true,
+ }
+ testCases = append(testCases, c)
+ }
+
+ for i := firstNearest; i < len(peerAddresses); i++ {
+ // recipient address falls into the nearest neighbours' bin
+ a := pot.RandomAddressAt(base, i)
+ c = testCase{
+ name: fmt.Sprintf("recipient address falls into the nearest neighbours' bin, id: [%d]", i),
+ recipient: a[:],
+ peers: peerAddresses,
+ expected: nearestNeighbours,
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+ }
+
+ // send msg with proximity order much deeper than the deepest nearest neighbour
+ a2 := pot.RandomAddressAt(base, 77)
+ c = testCase{
+ name: "proximity order much deeper than the deepest nearest neighbour",
+ recipient: a2[:],
+ peers: peerAddresses,
+ expected: nearestNeighbours,
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+
+ // test with partial addresses
+ const part = 12
+
+ for i := 0; i < firstNearest; i++ {
+ // send messages with partial address falling into different proximity orders
+ po := i / 2
+ if i%8 != 0 {
+ c = testCase{
+ name: fmt.Sprintf("partial address falling into different proximity orders, id: [%d]", i),
+ recipient: peerAddresses[i][:i],
+ peers: peerAddresses,
+ expected: []int{po * 2, po*2 + 1},
+ exclusive: true,
+ }
+ testCases = append(testCases, c)
+ }
+ c = testCase{
+ name: fmt.Sprintf("extended partial address falling into different proximity orders, id: [%d]", i),
+ recipient: peerAddresses[i][:part],
+ peers: peerAddresses,
+ expected: []int{po * 2, po*2 + 1},
+ exclusive: true,
+ }
+ testCases = append(testCases, c)
+ }
+
+ for i := firstNearest; i < len(peerAddresses); i++ {
+ // partial address falls into the nearest neighbours' bin
+ c = testCase{
+ name: fmt.Sprintf("partial address falls into the nearest neighbours' bin, id: [%d]", i),
+ recipient: peerAddresses[i][:part],
+ peers: peerAddresses,
+ expected: nearestNeighbours,
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+ }
+
+ // partial address with proximity order deeper than any of the nearest neighbour
+ a3 := pot.RandomAddressAt(base, part)
+ c = testCase{
+ name: "partial address with proximity order deeper than any of the nearest neighbour",
+ recipient: a3[:part],
+ peers: peerAddresses,
+ expected: nearestNeighbours,
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+
+ // special cases where partial address matches a large group of peers
+
+ // zero bytes of address is given, msg should be delivered to all the peers
+ c = testCase{
+ name: "zero bytes of address is given",
+ recipient: []byte{},
+ peers: peerAddresses,
+ expected: all,
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+
+ // luminous radius of 8 bits, proximity order 8
+ indexAtPo8 := 16
+ c = testCase{
+ name: "luminous radius of 8 bits",
+ recipient: []byte{0xFF},
+ peers: peerAddresses,
+ expected: all[indexAtPo8:],
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+
+ // luminous radius of 256 bits, proximity order 8
+ a4 := pot.Address{}
+ a4[0] = 0xFF
+ c = testCase{
+ name: "luminous radius of 256 bits",
+ recipient: a4[:],
+ peers: peerAddresses,
+ expected: []int{indexAtPo8, indexAtPo8 + 1},
+ exclusive: true,
+ }
+ testCases = append(testCases, c)
+
+ // check correct behaviour in case send fails
+ for i := 2; i < firstNearest-3; i += 2 {
+ po := i / 2
+ // send random messages with proximity orders, corresponding to PO of each bin,
+ // with different numbers of failed attempts.
+ // msg should be received by only one of the deeper peers.
+ a := pot.RandomAddressAt(base, po)
+ c = testCase{
+ name: fmt.Sprintf("Send direct to known, id: [%d]", i),
+ recipient: a[:],
+ peers: peerAddresses,
+ expected: all[i+1:],
+ exclusive: true,
+ nFails: rand.Int()%3 + 2,
+ }
+ testCases = append(testCases, c)
+ }
+
+ for _, c := range testCases {
+ testForwardMsg(t, ps, &c)
+ }
+}
+
+// this function tests the forwarding of a single message. the recipient address is passed as param,
+// along with addresses of all peers, and indices of those peers which are expected to receive the message.
+func testForwardMsg(t *testing.T, ps *Pss, c *testCase) {
+ recipientAddr := c.recipient
+ peers := c.peers
+ expected := c.expected
+ exclusive := c.exclusive
+ nFails := c.nFails
+ tries := 0 // number of previous failed tries
+
+ resultMap := make(map[pot.Address]int)
+
+ defer func() { sendFunc = sendMsg }()
+ sendFunc = func(_ *Pss, sp *network.Peer, _ *PssMsg) bool {
+ if tries < nFails {
+ tries++
+ return false
+ }
+ a := pot.NewAddressFromBytes(sp.Address())
+ resultMap[a]++
+ return true
+ }
+
+ msg := newTestMsg(recipientAddr)
+ ps.forward(msg)
+
+ // check test results
+ var fail bool
+ precision := len(recipientAddr)
+ if precision > 4 {
+ precision = 4
+ }
+ s := fmt.Sprintf("test [%s]\nmsg address: %x..., radius: %d", c.name, recipientAddr[:precision], 8*len(recipientAddr))
+
+ // false negatives (expected message didn't reach peer)
+ if exclusive {
+ var cnt int
+ for _, i := range expected {
+ a := peers[i]
+ cnt += resultMap[a]
+ resultMap[a] = 0
+ }
+ if cnt != 1 {
+ s += fmt.Sprintf("\n%d messages received by %d peers with indices: [%v]", cnt, len(expected), expected)
+ fail = true
+ }
+ } else {
+ for _, i := range expected {
+ a := peers[i]
+ received := resultMap[a]
+ if received != 1 {
+ s += fmt.Sprintf("\npeer number %d [%x...] received %d messages", i, a[:4], received)
+ fail = true
+ }
+ resultMap[a] = 0
+ }
+ }
+
+ // false positives (unexpected message reached peer)
+ for k, v := range resultMap {
+ if v != 0 {
+ // find the index of the false positive peer
+ var j int
+ for j = 0; j < len(peers); j++ {
+ if peers[j] == k {
+ break
+ }
+ }
+ s += fmt.Sprintf("\npeer number %d [%x...] received %d messages", j, k[:4], v)
+ fail = true
+ }
+ }
+
+ if fail {
+ t.Fatal(s)
+ }
+}
+
+func addPeers(kad *network.Kademlia, addresses []pot.Address) {
+ for _, a := range addresses {
+ p := newTestDiscoveryPeer(a, kad)
+ kad.On(p)
+ }
+}
+
+func createPss(t *testing.T, kad *network.Kademlia) *Pss {
+ privKey, err := crypto.GenerateKey()
+ pssp := NewPssParams().WithPrivateKey(privKey)
+ ps, err := NewPss(kad, pssp)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+ return ps
+}
+
+func newTestDiscoveryPeer(addr pot.Address, kad *network.Kademlia) *network.Peer {
+ rw := &p2p.MsgPipeRW{}
+ p := p2p.NewPeer(enode.ID{}, "test", []p2p.Cap{})
+ pp := protocols.NewPeer(p, rw, &protocols.Spec{})
+ bp := &network.BzzPeer{
+ Peer: pp,
+ BzzAddr: &network.BzzAddr{
+ OAddr: addr.Bytes(),
+ UAddr: []byte(fmt.Sprintf("%x", addr[:])),
+ },
+ }
+ return network.NewPeer(bp, kad)
+}
+
+func newTestMsg(addr []byte) *PssMsg {
+ msg := newPssMsg(&msgParams{})
+ msg.To = addr[:]
+ msg.Expire = uint32(time.Now().Add(time.Second * 60).Unix())
+ msg.Payload = &whisper.Envelope{
+ Topic: [4]byte{},
+ Data: []byte("i have nothing to hide"),
+ }
+ return msg
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake.go
index 5486abaf..bb67b515 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake.go
@@ -321,9 +321,7 @@ func (ctl *HandshakeController) handleKeys(pubkeyid string, keymsg *handshakeMsg
for _, key := range keymsg.Keys {
sendsymkey := make([]byte, len(key))
copy(sendsymkey, key)
- var address PssAddress
- copy(address[:], keymsg.From)
- sendsymkeyid, err := ctl.pss.setSymmetricKey(sendsymkey, keymsg.Topic, &address, false, false)
+ sendsymkeyid, err := ctl.pss.setSymmetricKey(sendsymkey, keymsg.Topic, PssAddress(keymsg.From), false, false)
if err != nil {
return err
}
@@ -356,7 +354,7 @@ func (ctl *HandshakeController) handleKeys(pubkeyid string, keymsg *handshakeMsg
func (ctl *HandshakeController) sendKey(pubkeyid string, topic *Topic, keycount uint8) ([]string, error) {
var requestcount uint8
- to := &PssAddress{}
+ to := PssAddress{}
if _, ok := ctl.pss.pubKeyPool[pubkeyid]; !ok {
return []string{}, errors.New("Invalid public key")
} else if psp, ok := ctl.pss.pubKeyPool[pubkeyid][*topic]; ok {
@@ -564,5 +562,5 @@ func (api *HandshakeAPI) SendSym(symkeyid string, topic Topic, msg hexutil.Bytes
api.ctrl.symKeyIndex[symkeyid].count++
log.Trace("increment symkey send use", "symkeyid", symkeyid, "count", api.ctrl.symKeyIndex[symkeyid].count, "limit", api.ctrl.symKeyIndex[symkeyid].limit, "receiver", common.ToHex(crypto.FromECDSAPub(api.ctrl.pss.PublicKey())))
}
- return
+ return err
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake_test.go
index 0fc7e798..895163f3 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake_test.go
@@ -30,6 +30,7 @@ import (
// asymmetrical key exchange between two directly connected peers
// full address, partial address (8 bytes) and empty address
func TestHandshake(t *testing.T) {
+ t.Skip("handshakes are not adapted to current pss core code")
t.Run("32", testHandshake)
t.Run("8", testHandshake)
t.Run("0", testHandshake)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify.go
index d3c89058..e9d40dc3 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify.go
@@ -138,7 +138,7 @@ func (c *Controller) Subscribe(name string, pubkey *ecdsa.PublicKey, address pss
c.mu.Lock()
defer c.mu.Unlock()
msg := NewMsg(MsgCodeStart, name, c.pss.BaseAddr())
- c.pss.SetPeerPublicKey(pubkey, controlTopic, &address)
+ c.pss.SetPeerPublicKey(pubkey, controlTopic, address)
pubkeyId := hexutil.Encode(crypto.FromECDSAPub(pubkey))
smsg, err := rlp.EncodeToBytes(msg)
if err != nil {
@@ -271,7 +271,7 @@ func (c *Controller) addToBin(ntfr *notifier, address []byte) (symKeyId string,
currentBin.count++
symKeyId = currentBin.symKeyId
} else {
- symKeyId, err = c.pss.GenerateSymmetricKey(ntfr.topic, &pssAddress, false)
+ symKeyId, err = c.pss.GenerateSymmetricKey(ntfr.topic, pssAddress, false)
if err != nil {
return "", nil, err
}
@@ -312,7 +312,7 @@ func (c *Controller) handleStartMsg(msg *Msg, keyid string) (err error) {
if err != nil {
return err
}
- err = c.pss.SetPeerPublicKey(pubkey, controlTopic, &pssAddress)
+ err = c.pss.SetPeerPublicKey(pubkey, controlTopic, pssAddress)
if err != nil {
return err
}
@@ -335,7 +335,7 @@ func (c *Controller) handleNotifyWithKeyMsg(msg *Msg) error {
// \TODO keep track of and add actual address
updaterAddr := pss.PssAddress([]byte{})
- c.pss.SetSymmetricKey(symkey, topic, &updaterAddr, true)
+ c.pss.SetSymmetricKey(symkey, topic, updaterAddr, true)
c.pss.Register(&topic, pss.NewHandler(c.Handler))
return c.subscriptions[msg.namestring].handler(msg.namestring, msg.Payload[:len(msg.Payload)-symKeyLength])
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify_test.go
index 6100195b..5c29f68e 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify_test.go
@@ -209,7 +209,7 @@ func newServices(allowRaw bool) adapters.Services {
return k
}
params := network.NewKadParams()
- params.MinProxBinSize = 2
+ params.NeighbourhoodSize = 2
params.MaxBinSize = 3
params.MinBinSize = 1
params.MaxRetries = 1000
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go
index d0986d28..bee64b0d 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go
@@ -29,7 +29,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -40,6 +39,7 @@ import (
"github.com/ethereum/go-ethereum/swarm/pot"
"github.com/ethereum/go-ethereum/swarm/storage"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
+ "golang.org/x/crypto/sha3"
)
const (
@@ -81,7 +81,7 @@ type senderPeer interface {
// member `protected` prevents garbage collection of the instance
type pssPeer struct {
lastSeen time.Time
- address *PssAddress
+ address PssAddress
protected bool
}
@@ -187,7 +187,7 @@ func NewPss(k *network.Kademlia, params *PssParams) (*Pss, error) {
hashPool: sync.Pool{
New: func() interface{} {
- return sha3.NewKeccak256()
+ return sha3.NewLegacyKeccak256()
},
},
}
@@ -396,9 +396,11 @@ func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error {
// raw is simplest handler contingency to check, so check that first
var isRaw bool
if pssmsg.isRaw() {
- if !p.topicHandlerCaps[psstopic].raw {
- log.Debug("No handler for raw message", "topic", psstopic)
- return nil
+ if _, ok := p.topicHandlerCaps[psstopic]; ok {
+ if !p.topicHandlerCaps[psstopic].raw {
+ log.Debug("No handler for raw message", "topic", psstopic)
+ return nil
+ }
}
isRaw = true
}
@@ -437,10 +439,10 @@ func (p *Pss) process(pssmsg *PssMsg, raw bool, prox bool) error {
var err error
var recvmsg *whisper.ReceivedMessage
var payload []byte
- var from *PssAddress
+ var from PssAddress
var asymmetric bool
var keyid string
- var keyFunc func(envelope *whisper.Envelope) (*whisper.ReceivedMessage, string, *PssAddress, error)
+ var keyFunc func(envelope *whisper.Envelope) (*whisper.ReceivedMessage, string, PssAddress, error)
envelope := pssmsg.Payload
psstopic := Topic(envelope.Topic)
@@ -473,7 +475,7 @@ func (p *Pss) process(pssmsg *PssMsg, raw bool, prox bool) error {
}
-func (p *Pss) executeHandlers(topic Topic, payload []byte, from *PssAddress, raw bool, prox bool, asymmetric bool, keyid string) {
+func (p *Pss) executeHandlers(topic Topic, payload []byte, from PssAddress, raw bool, prox bool, asymmetric bool, keyid string) {
handlers := p.getHandlers(topic)
peer := p2p.NewPeer(enode.ID{}, fmt.Sprintf("%x", from), []p2p.Cap{})
for h := range handlers {
@@ -511,7 +513,7 @@ func (p *Pss) isSelfPossibleRecipient(msg *PssMsg, prox bool) bool {
}
depth := p.Kademlia.NeighbourhoodDepth()
- po, _ := p.Kademlia.Pof(p.Kademlia.BaseAddr(), msg.To, 0)
+ po, _ := network.Pof(p.Kademlia.BaseAddr(), msg.To, 0)
log.Trace("selfpossible", "po", po, "depth", depth)
return depth <= po
@@ -528,7 +530,10 @@ func (p *Pss) isSelfPossibleRecipient(msg *PssMsg, prox bool) bool {
//
// The value in `address` will be used as a routing hint for the
// public key / topic association
-func (p *Pss) SetPeerPublicKey(pubkey *ecdsa.PublicKey, topic Topic, address *PssAddress) error {
+func (p *Pss) SetPeerPublicKey(pubkey *ecdsa.PublicKey, topic Topic, address PssAddress) error {
+ if err := validateAddress(address); err != nil {
+ return err
+ }
pubkeybytes := crypto.FromECDSAPub(pubkey)
if len(pubkeybytes) == 0 {
return fmt.Errorf("invalid public key: %v", pubkey)
@@ -543,12 +548,12 @@ func (p *Pss) SetPeerPublicKey(pubkey *ecdsa.PublicKey, topic Topic, address *Ps
}
p.pubKeyPool[pubkeyid][topic] = psp
p.pubKeyPoolMu.Unlock()
- log.Trace("added pubkey", "pubkeyid", pubkeyid, "topic", topic, "address", common.ToHex(*address))
+ log.Trace("added pubkey", "pubkeyid", pubkeyid, "topic", topic, "address", address)
return nil
}
// Automatically generate a new symkey for a topic and address hint
-func (p *Pss) GenerateSymmetricKey(topic Topic, address *PssAddress, addToCache bool) (string, error) {
+func (p *Pss) GenerateSymmetricKey(topic Topic, address PssAddress, addToCache bool) (string, error) {
keyid, err := p.w.GenerateSymKey()
if err != nil {
return "", err
@@ -569,11 +574,14 @@ func (p *Pss) GenerateSymmetricKey(topic Topic, address *PssAddress, addToCache
//
// Returns a string id that can be used to retrieve the key bytes
// from the whisper backend (see pss.GetSymmetricKey())
-func (p *Pss) SetSymmetricKey(key []byte, topic Topic, address *PssAddress, addtocache bool) (string, error) {
+func (p *Pss) SetSymmetricKey(key []byte, topic Topic, address PssAddress, addtocache bool) (string, error) {
+ if err := validateAddress(address); err != nil {
+ return "", err
+ }
return p.setSymmetricKey(key, topic, address, addtocache, true)
}
-func (p *Pss) setSymmetricKey(key []byte, topic Topic, address *PssAddress, addtocache bool, protected bool) (string, error) {
+func (p *Pss) setSymmetricKey(key []byte, topic Topic, address PssAddress, addtocache bool, protected bool) (string, error) {
keyid, err := p.w.AddSymKeyDirect(key)
if err != nil {
return "", err
@@ -585,7 +593,7 @@ func (p *Pss) setSymmetricKey(key []byte, topic Topic, address *PssAddress, addt
// adds a symmetric key to the pss key pool, and optionally adds the key
// to the collection of keys used to attempt symmetric decryption of
// incoming messages
-func (p *Pss) addSymmetricKeyToPool(keyid string, topic Topic, address *PssAddress, addtocache bool, protected bool) {
+func (p *Pss) addSymmetricKeyToPool(keyid string, topic Topic, address PssAddress, addtocache bool, protected bool) {
psp := &pssPeer{
address: address,
protected: protected,
@@ -601,7 +609,7 @@ func (p *Pss) addSymmetricKeyToPool(keyid string, topic Topic, address *PssAddre
p.symKeyDecryptCache[p.symKeyDecryptCacheCursor%cap(p.symKeyDecryptCache)] = &keyid
}
key, _ := p.GetSymmetricKey(keyid)
- log.Trace("added symkey", "symkeyid", keyid, "symkey", common.ToHex(key), "topic", topic, "address", fmt.Sprintf("%p", address), "cache", addtocache)
+ log.Trace("added symkey", "symkeyid", keyid, "symkey", common.ToHex(key), "topic", topic, "address", address, "cache", addtocache)
}
// Returns a symmetric key byte seqyence stored in the whisper backend
@@ -622,7 +630,7 @@ func (p *Pss) GetPublickeyPeers(keyid string) (topic []Topic, address []PssAddre
defer p.pubKeyPoolMu.RUnlock()
for t, peer := range p.pubKeyPool[keyid] {
topic = append(topic, t)
- address = append(address, *peer.address)
+ address = append(address, peer.address)
}
return topic, address, nil
@@ -633,7 +641,7 @@ func (p *Pss) getPeerAddress(keyid string, topic Topic) (PssAddress, error) {
defer p.pubKeyPoolMu.RUnlock()
if peers, ok := p.pubKeyPool[keyid]; ok {
if t, ok := peers[topic]; ok {
- return *t.address, nil
+ return t.address, nil
}
}
return nil, fmt.Errorf("peer with pubkey %s, topic %x not found", keyid, topic)
@@ -645,7 +653,7 @@ func (p *Pss) getPeerAddress(keyid string, topic Topic) (PssAddress, error) {
// encapsulating the decrypted message, and the whisper backend id
// of the symmetric key used to decrypt the message.
// It fails if decryption of the message fails or if the message is corrupted
-func (p *Pss) processSym(envelope *whisper.Envelope) (*whisper.ReceivedMessage, string, *PssAddress, error) {
+func (p *Pss) processSym(envelope *whisper.Envelope) (*whisper.ReceivedMessage, string, PssAddress, error) {
metrics.GetOrRegisterCounter("pss.process.sym", nil).Inc(1)
for i := p.symKeyDecryptCacheCursor; i > p.symKeyDecryptCacheCursor-cap(p.symKeyDecryptCache) && i > 0; i-- {
@@ -677,7 +685,7 @@ func (p *Pss) processSym(envelope *whisper.Envelope) (*whisper.ReceivedMessage,
// encapsulating the decrypted message, and the byte representation of
// the public key used to decrypt the message.
// It fails if decryption of message fails, or if the message is corrupted
-func (p *Pss) processAsym(envelope *whisper.Envelope) (*whisper.ReceivedMessage, string, *PssAddress, error) {
+func (p *Pss) processAsym(envelope *whisper.Envelope) (*whisper.ReceivedMessage, string, PssAddress, error) {
metrics.GetOrRegisterCounter("pss.process.asym", nil).Inc(1)
recvmsg, err := envelope.OpenAsymmetric(p.privateKey)
@@ -689,7 +697,7 @@ func (p *Pss) processAsym(envelope *whisper.Envelope) (*whisper.ReceivedMessage,
return nil, "", nil, fmt.Errorf("invalid message")
}
pubkeyid := common.ToHex(crypto.FromECDSAPub(recvmsg.Src))
- var from *PssAddress
+ var from PssAddress
p.pubKeyPoolMu.Lock()
if p.pubKeyPool[pubkeyid][Topic(envelope.Topic)] != nil {
from = p.pubKeyPool[pubkeyid][Topic(envelope.Topic)].address
@@ -751,6 +759,9 @@ func (p *Pss) enqueue(msg *PssMsg) error {
//
// Will fail if raw messages are disallowed
func (p *Pss) SendRaw(address PssAddress, topic Topic, msg []byte) error {
+ if err := validateAddress(address); err != nil {
+ return err
+ }
pssMsgParams := &msgParams{
raw: true,
}
@@ -770,8 +781,10 @@ func (p *Pss) SendRaw(address PssAddress, topic Topic, msg []byte) error {
// if we have a proxhandler on this topic
// also deliver message to ourselves
- if p.isSelfPossibleRecipient(pssMsg, true) && p.topicHandlerCaps[topic].prox {
- return p.process(pssMsg, true, true)
+ if _, ok := p.topicHandlerCaps[topic]; ok {
+ if p.isSelfPossibleRecipient(pssMsg, true) && p.topicHandlerCaps[topic].prox {
+ return p.process(pssMsg, true, true)
+ }
}
return nil
}
@@ -789,11 +802,8 @@ func (p *Pss) SendSym(symkeyid string, topic Topic, msg []byte) error {
p.symKeyPoolMu.Unlock()
if !ok {
return fmt.Errorf("invalid topic '%s' for symkey '%s'", topic.String(), symkeyid)
- } else if psp.address == nil {
- return fmt.Errorf("no address hint for topic '%s' symkey '%s'", topic.String(), symkeyid)
}
- err = p.send(*psp.address, topic, msg, false, symkey)
- return err
+ return p.send(psp.address, topic, msg, false, symkey)
}
// Send a message using asymmetric encryption
@@ -808,13 +818,8 @@ func (p *Pss) SendAsym(pubkeyid string, topic Topic, msg []byte) error {
p.pubKeyPoolMu.Unlock()
if !ok {
return fmt.Errorf("invalid topic '%s' for pubkey '%s'", topic.String(), pubkeyid)
- } else if psp.address == nil {
- return fmt.Errorf("no address hint for topic '%s' pubkey '%s'", topic.String(), pubkeyid)
}
- go func() {
- p.send(*psp.address, topic, msg, true, common.FromHex(pubkeyid))
- }()
- return nil
+ return p.send(psp.address, topic, msg, true, common.FromHex(pubkeyid))
}
// Send is payload agnostic, and will accept any byte slice as payload
@@ -886,68 +891,97 @@ func (p *Pss) send(to []byte, topic Topic, msg []byte, asymmetric bool, key []by
return nil
}
-// Forwards a pss message to the peer(s) closest to the to recipient address in the PssMsg struct
-// The recipient address can be of any length, and the byte slice will be matched to the MSB slice
-// of the peer address of the equivalent length.
+// sendFunc is a helper function that tries to send a message and returns true on success.
+// It is set here for usage in production, and optionally overridden in tests.
+var sendFunc func(p *Pss, sp *network.Peer, msg *PssMsg) bool = sendMsg
+
+// tries to send a message, returns true if successful
+func sendMsg(p *Pss, sp *network.Peer, msg *PssMsg) bool {
+ var isPssEnabled bool
+ info := sp.Info()
+ for _, capability := range info.Caps {
+ if capability == p.capstring {
+ isPssEnabled = true
+ break
+ }
+ }
+ if !isPssEnabled {
+ log.Error("peer doesn't have matching pss capabilities, skipping", "peer", info.Name, "caps", info.Caps)
+ return false
+ }
+
+ // get the protocol peer from the forwarding peer cache
+ p.fwdPoolMu.RLock()
+ pp := p.fwdPool[sp.Info().ID]
+ p.fwdPoolMu.RUnlock()
+
+ err := pp.Send(context.TODO(), msg)
+ if err != nil {
+ metrics.GetOrRegisterCounter("pss.pp.send.error", nil).Inc(1)
+ log.Error(err.Error())
+ }
+
+ return err == nil
+}
+
+// Forwards a pss message to the peer(s) based on recipient address according to the algorithm
+// described below. The recipient address can be of any length, and the byte slice will be matched
+// to the MSB slice of the peer address of the equivalent length.
+//
+// If the recipient address (or partial address) is within the neighbourhood depth of the forwarding
+// node, then it will be forwarded to all the nearest neighbours of the forwarding node. In case of
+// partial address, it should be forwarded to all the peers matching the partial address, if there
+// are any; otherwise only to one peer, closest to the recipient address. In any case, if the message
+// forwarding fails, the node should try to forward it to the next best peer, until the message is
+// successfully forwarded to at least one peer.
func (p *Pss) forward(msg *PssMsg) error {
metrics.GetOrRegisterCounter("pss.forward", nil).Inc(1)
-
+ sent := 0 // number of successful sends
to := make([]byte, addressLength)
copy(to[:len(msg.To)], msg.To)
+ neighbourhoodDepth := p.Kademlia.NeighbourhoodDepth()
- // send with kademlia
- // find the closest peer to the recipient and attempt to send
- sent := 0
- p.Kademlia.EachConn(to, 256, func(sp *network.Peer, po int, isproxbin bool) bool {
- info := sp.Info()
+ // luminosity is the opposite of darkness. the more bytes are removed from the address, the higher is darkness,
+ // but the luminosity is less. here luminosity equals the number of bits given in the destination address.
+ luminosityRadius := len(msg.To) * 8
- // check if the peer is running pss
- var ispss bool
- for _, cap := range info.Caps {
- if cap == p.capstring {
- ispss = true
- break
+ // proximity order function matching up to neighbourhoodDepth bits (po <= neighbourhoodDepth)
+ pof := pot.DefaultPof(neighbourhoodDepth)
+
+ // soft threshold for msg broadcast
+ broadcastThreshold, _ := pof(to, p.BaseAddr(), 0)
+ if broadcastThreshold > luminosityRadius {
+ broadcastThreshold = luminosityRadius
+ }
+
+ var onlySendOnce bool // indicates if the message should only be sent to one peer with closest address
+
+ // if measured from the recipient address as opposed to the base address (see Kademlia.EachConn
+ // call below), then peers that fall in the same proximity bin as recipient address will appear
+ // [at least] one bit closer, but only if these additional bits are given in the recipient address.
+ if broadcastThreshold < luminosityRadius && broadcastThreshold < neighbourhoodDepth {
+ broadcastThreshold++
+ onlySendOnce = true
+ }
+
+ p.Kademlia.EachConn(to, addressLength*8, func(sp *network.Peer, po int) bool {
+ if po < broadcastThreshold && sent > 0 {
+ return false // stop iterating
+ }
+ if sendFunc(p, sp, msg) {
+ sent++
+ if onlySendOnce {
+ return false
+ }
+ if po == addressLength*8 {
+ // stop iterating if successfully sent to the exact recipient (perfect match of full address)
+ return false
}
}
- if !ispss {
- log.Trace("peer doesn't have matching pss capabilities, skipping", "peer", info.Name, "caps", info.Caps)
- return true
- }
-
- // get the protocol peer from the forwarding peer cache
- sendMsg := fmt.Sprintf("MSG TO %x FROM %x VIA %x", to, p.BaseAddr(), sp.Address())
- p.fwdPoolMu.RLock()
- pp := p.fwdPool[sp.Info().ID]
- p.fwdPoolMu.RUnlock()
-
- // attempt to send the message
- err := pp.Send(context.TODO(), msg)
- if err != nil {
- metrics.GetOrRegisterCounter("pss.pp.send.error", nil).Inc(1)
- log.Error(err.Error())
- return true
- }
- sent++
- log.Trace(fmt.Sprintf("%v: successfully forwarded", sendMsg))
-
- // continue forwarding if:
- // - if the peer is end recipient but the full address has not been disclosed
- // - if the peer address matches the partial address fully
- // - if the peer is in proxbin
- if len(msg.To) < addressLength && bytes.Equal(msg.To, sp.Address()[:len(msg.To)]) {
- log.Trace(fmt.Sprintf("Pss keep forwarding: Partial address + full partial match"))
- return true
- } else if isproxbin {
- log.Trace(fmt.Sprintf("%x is in proxbin, keep forwarding", common.ToHex(sp.Address())))
- return true
- }
- // at this point we stop forwarding, and the state is as follows:
- // - the peer is end recipient and we have full address
- // - we are not in proxbin (directed routing)
- // - partial addresses don't fully match
- return false
+ return true
})
+ // if we failed to send to anyone, re-insert message in the send-queue
if sent == 0 {
log.Debug("unable to forward to any peers")
if err := p.enqueue(msg); err != nil {
@@ -1034,3 +1068,10 @@ func (p *Pss) digestBytes(msg []byte) pssDigest {
copy(digest[:], key[:digestLength])
return digest
}
+
+func validateAddress(addr PssAddress) error {
+ if len(addr) > addressLength {
+ return errors.New("address too long")
+ }
+ return nil
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss_test.go
index 72f62acd..46daa467 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss_test.go
@@ -407,7 +407,7 @@ func TestProxShortCircuit(t *testing.T) {
// try the same prox message with sym and asym send
proxAddrPss := PssAddress(proxMessageAddress)
- symKeyId, err := ps.GenerateSymmetricKey(topic, &proxAddrPss, true)
+ symKeyId, err := ps.GenerateSymmetricKey(topic, proxAddrPss, true)
go func() {
err := ps.SendSym(symKeyId, topic, []byte("baz"))
if err != nil {
@@ -424,7 +424,7 @@ func TestProxShortCircuit(t *testing.T) {
t.Fatal("sym timeout")
}
- err = ps.SetPeerPublicKey(&privKey.PublicKey, topic, &proxAddrPss)
+ err = ps.SetPeerPublicKey(&privKey.PublicKey, topic, proxAddrPss)
if err != nil {
t.Fatal(err)
}
@@ -491,12 +491,12 @@ func TestAddressMatchProx(t *testing.T) {
// meanwhile test regression for kademlia since we are compiling the test parameters from different packages
var proxes int
var conns int
- kad.EachConn(nil, peerCount, func(p *network.Peer, po int, prox bool) bool {
+ depth := kad.NeighbourhoodDepth()
+ kad.EachConn(nil, peerCount, func(p *network.Peer, po int) bool {
conns++
- if prox {
+ if po >= depth {
proxes++
}
- log.Trace("kadconn", "po", po, "peer", p, "prox", prox)
return true
})
if proxes != nnPeerCount {
@@ -786,14 +786,14 @@ func TestKeys(t *testing.T) {
copy(addr, network.RandomAddr().Over())
outkey := network.RandomAddr().Over()
topicobj := BytesToTopic([]byte("foo:42"))
- ps.SetPeerPublicKey(&theirprivkey.PublicKey, topicobj, &addr)
- outkeyid, err := ps.SetSymmetricKey(outkey, topicobj, &addr, false)
+ ps.SetPeerPublicKey(&theirprivkey.PublicKey, topicobj, addr)
+ outkeyid, err := ps.SetSymmetricKey(outkey, topicobj, addr, false)
if err != nil {
t.Fatalf("failed to set 'our' outgoing symmetric key")
}
// make a symmetric key that we will send to peer for encrypting messages to us
- inkeyid, err := ps.GenerateSymmetricKey(topicobj, &addr, true)
+ inkeyid, err := ps.GenerateSymmetricKey(topicobj, addr, true)
if err != nil {
t.Fatalf("failed to set 'our' incoming symmetric key")
}
@@ -816,8 +816,8 @@ func TestKeys(t *testing.T) {
// check that the key is stored in the peerpool
psp := ps.symKeyPool[inkeyid][topicobj]
- if psp.address != &addr {
- t.Fatalf("inkey address does not match; %p != %p", psp.address, &addr)
+ if !bytes.Equal(psp.address, addr) {
+ t.Fatalf("inkey address does not match; %p != %p", psp.address, addr)
}
}
@@ -1008,6 +1008,34 @@ func TestRawAllow(t *testing.T) {
}
}
+// BELOW HERE ARE TESTS USING THE SIMULATION FRAMEWORK
+
+// tests that the API layer can handle edge case values
+func TestApi(t *testing.T) {
+ clients, err := setupNetwork(2, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ topic := "0xdeadbeef"
+
+ err = clients[0].Call(nil, "pss_sendRaw", "0x", topic, "0x666f6f")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = clients[0].Call(nil, "pss_sendRaw", "0xabcdef", topic, "0x")
+ if err == nil {
+ t.Fatal("expected error on empty msg")
+ }
+
+ overflowAddr := [33]byte{}
+ err = clients[0].Call(nil, "pss_sendRaw", hexutil.Encode(overflowAddr[:]), topic, "0x666f6f")
+ if err == nil {
+ t.Fatal("expected error on send too big address")
+ }
+}
+
// verifies that nodes can send and receive raw (verbatim) messages
func TestSendRaw(t *testing.T) {
t.Run("32", testSendRaw)
@@ -1668,7 +1696,7 @@ func benchmarkSymKeySend(b *testing.B) {
topic := BytesToTopic([]byte("foo"))
to := make(PssAddress, 32)
copy(to[:], network.RandomAddr().Over())
- symkeyid, err := ps.GenerateSymmetricKey(topic, &to, true)
+ symkeyid, err := ps.GenerateSymmetricKey(topic, to, true)
if err != nil {
b.Fatalf("could not generate symkey: %v", err)
}
@@ -1676,7 +1704,7 @@ func benchmarkSymKeySend(b *testing.B) {
if err != nil {
b.Fatalf("could not retrieve symkey: %v", err)
}
- ps.SetSymmetricKey(symkey, topic, &to, false)
+ ps.SetSymmetricKey(symkey, topic, to, false)
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -1712,7 +1740,7 @@ func benchmarkAsymKeySend(b *testing.B) {
topic := BytesToTopic([]byte("foo"))
to := make(PssAddress, 32)
copy(to[:], network.RandomAddr().Over())
- ps.SetPeerPublicKey(&privkey.PublicKey, topic, &to)
+ ps.SetPeerPublicKey(&privkey.PublicKey, topic, to)
b.ResetTimer()
for i := 0; i < b.N; i++ {
ps.SendAsym(common.ToHex(crypto.FromECDSAPub(&privkey.PublicKey)), topic, msg)
@@ -1761,7 +1789,7 @@ func benchmarkSymkeyBruteforceChangeaddr(b *testing.B) {
for i := 0; i < int(keycount); i++ {
to := make(PssAddress, 32)
copy(to[:], network.RandomAddr().Over())
- keyid, err = ps.GenerateSymmetricKey(topic, &to, true)
+ keyid, err = ps.GenerateSymmetricKey(topic, to, true)
if err != nil {
b.Fatalf("cant generate symkey #%d: %v", i, err)
}
@@ -1843,7 +1871,7 @@ func benchmarkSymkeyBruteforceSameaddr(b *testing.B) {
topic := BytesToTopic([]byte("foo"))
for i := 0; i < int(keycount); i++ {
copy(addr[i], network.RandomAddr().Over())
- keyid, err = ps.GenerateSymmetricKey(topic, &addr[i], true)
+ keyid, err = ps.GenerateSymmetricKey(topic, addr[i], true)
if err != nil {
b.Fatalf("cant generate symkey #%d: %v", i, err)
}
@@ -1937,7 +1965,7 @@ func newServices(allowRaw bool) adapters.Services {
return k
}
params := network.NewKadParams()
- params.MinProxBinSize = 2
+ params.NeighbourhoodSize = 2
params.MaxBinSize = 3
params.MinBinSize = 1
params.MaxRetries = 1000
@@ -2017,7 +2045,7 @@ func newTestPss(privkey *ecdsa.PrivateKey, kad *network.Kademlia, ppextra *PssPa
// set up routing if kademlia is not passed to us
if kad == nil {
kp := network.NewKadParams()
- kp.MinProxBinSize = 3
+ kp.NeighbourhoodSize = 3
kad = network.NewKademlia(nid[:], kp)
}
@@ -2044,12 +2072,13 @@ func NewAPITest(ps *Pss) *APITest {
return &APITest{Pss: ps}
}
-func (apitest *APITest) SetSymKeys(pubkeyid string, recvsymkey []byte, sendsymkey []byte, limit uint16, topic Topic, to PssAddress) ([2]string, error) {
- recvsymkeyid, err := apitest.SetSymmetricKey(recvsymkey, topic, &to, true)
+func (apitest *APITest) SetSymKeys(pubkeyid string, recvsymkey []byte, sendsymkey []byte, limit uint16, topic Topic, to hexutil.Bytes) ([2]string, error) {
+
+ recvsymkeyid, err := apitest.SetSymmetricKey(recvsymkey, topic, PssAddress(to), true)
if err != nil {
return [2]string{}, err
}
- sendsymkeyid, err := apitest.SetSymmetricKey(sendsymkey, topic, &to, false)
+ sendsymkeyid, err := apitest.SetSymmetricKey(sendsymkey, topic, PssAddress(to), false)
if err != nil {
return [2]string{}, err
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/db.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/db.go
index e128b8cb..d4e5d1b2 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/shed/db.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/db.go
@@ -18,19 +18,28 @@
// more complex operations on storage data organized in fields and indexes.
//
// Only type which holds logical information about swarm storage chunks data
-// and metadata is IndexItem. This part is not generalized mostly for
+// and metadata is Item. This part is not generalized mostly for
// performance reasons.
package shed
import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
"github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/swarm/log"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
)
-// The limit for LevelDB OpenFilesCacheCapacity.
-const openFileLimit = 128
+const (
+ openFileLimit = 128 // The limit for LevelDB OpenFilesCacheCapacity.
+ writePauseWarningThrottler = 1 * time.Minute
+)
// DB provides abstractions over LevelDB in order to
// implement complex structures using fields and ordered indexes.
@@ -38,11 +47,22 @@ const openFileLimit = 128
// information about naming and types.
type DB struct {
ldb *leveldb.DB
+
+ compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction
+ compReadMeter metrics.Meter // Meter for measuring the data read during compaction
+ compWriteMeter metrics.Meter // Meter for measuring the data written during compaction
+ writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
+ writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction
+ diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read
+ diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written
+
+ quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
}
// NewDB constructs a new DB and validates the schema
// if it exists in database on the given path.
-func NewDB(path string) (db *DB, err error) {
+// metricsPrefix is used for metrics collection for the given DB.
+func NewDB(path string, metricsPrefix string) (db *DB, err error) {
ldb, err := leveldb.OpenFile(path, &opt.Options{
OpenFilesCacheCapacity: openFileLimit,
})
@@ -66,6 +86,15 @@ func NewDB(path string) (db *DB, err error) {
return nil, err
}
}
+
+ // Configure meters for DB
+ db.configure(metricsPrefix)
+
+ // Create a quit channel for the periodic metrics collector and run it
+ db.quitChan = make(chan chan error)
+
+ go db.meter(10 * time.Second)
+
return db, nil
}
@@ -126,5 +155,175 @@ func (db *DB) WriteBatch(batch *leveldb.Batch) (err error) {
// Close closes LevelDB database.
func (db *DB) Close() (err error) {
+ close(db.quitChan)
return db.ldb.Close()
}
+
+// Configure configures the database metrics collectors
+func (db *DB) configure(prefix string) {
+ // Initialize all the metrics collector at the requested prefix
+ db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
+ db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
+ db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
+ db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
+ db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)
+ db.writeDelayMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
+ db.writeDelayNMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)
+}
+
+func (db *DB) meter(refresh time.Duration) {
+ // Create the counters to store current and previous compaction values
+ compactions := make([][]float64, 2)
+ for i := 0; i < 2; i++ {
+ compactions[i] = make([]float64, 3)
+ }
+ // Create storage for iostats.
+ var iostats [2]float64
+
+ // Create storage and warning log tracer for write delay.
+ var (
+ delaystats [2]int64
+ lastWritePaused time.Time
+ )
+
+ var (
+ errc chan error
+ merr error
+ )
+
+ // Iterate ad infinitum and collect the stats
+ for i := 1; errc == nil && merr == nil; i++ {
+ // Retrieve the database stats
+ stats, err := db.ldb.GetProperty("leveldb.stats")
+ if err != nil {
+ log.Error("Failed to read database stats", "err", err)
+ merr = err
+ continue
+ }
+ // Find the compaction table, skip the header
+ lines := strings.Split(stats, "\n")
+ for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
+ lines = lines[1:]
+ }
+ if len(lines) <= 3 {
+ log.Error("Compaction table not found")
+ merr = errors.New("compaction table not found")
+ continue
+ }
+ lines = lines[3:]
+
+ // Iterate over all the table rows, and accumulate the entries
+ for j := 0; j < len(compactions[i%2]); j++ {
+ compactions[i%2][j] = 0
+ }
+ for _, line := range lines {
+ parts := strings.Split(line, "|")
+ if len(parts) != 6 {
+ break
+ }
+ for idx, counter := range parts[3:] {
+ value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
+ if err != nil {
+ log.Error("Compaction entry parsing failed", "err", err)
+ merr = err
+ continue
+ }
+ compactions[i%2][idx] += value
+ }
+ }
+ // Update all the requested meters
+ if db.compTimeMeter != nil {
+ db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
+ }
+ if db.compReadMeter != nil {
+ db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
+ }
+ if db.compWriteMeter != nil {
+ db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
+ }
+
+ // Retrieve the write delay statistic
+ writedelay, err := db.ldb.GetProperty("leveldb.writedelay")
+ if err != nil {
+ log.Error("Failed to read database write delay statistic", "err", err)
+ merr = err
+ continue
+ }
+ var (
+ delayN int64
+ delayDuration string
+ duration time.Duration
+ paused bool
+ )
+ if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
+ log.Error("Write delay statistic not found")
+ merr = err
+ continue
+ }
+ duration, err = time.ParseDuration(delayDuration)
+ if err != nil {
+ log.Error("Failed to parse delay duration", "err", err)
+ merr = err
+ continue
+ }
+ if db.writeDelayNMeter != nil {
+ db.writeDelayNMeter.Mark(delayN - delaystats[0])
+ }
+ if db.writeDelayMeter != nil {
+ db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
+ }
+ // If a warning that db is performing compaction has been displayed, any subsequent
+ // warnings will be withheld for one minute not to overwhelm the user.
+ if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
+ time.Now().After(lastWritePaused.Add(writePauseWarningThrottler)) {
+ log.Warn("Database compacting, degraded performance")
+ lastWritePaused = time.Now()
+ }
+ delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()
+
+ // Retrieve the database iostats.
+ ioStats, err := db.ldb.GetProperty("leveldb.iostats")
+ if err != nil {
+ log.Error("Failed to read database iostats", "err", err)
+ merr = err
+ continue
+ }
+ var nRead, nWrite float64
+ parts := strings.Split(ioStats, " ")
+ if len(parts) < 2 {
+ log.Error("Bad syntax of ioStats", "ioStats", ioStats)
+ merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
+ continue
+ }
+ if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
+ log.Error("Bad syntax of read entry", "entry", parts[0])
+ merr = err
+ continue
+ }
+ if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
+ log.Error("Bad syntax of write entry", "entry", parts[1])
+ merr = err
+ continue
+ }
+ if db.diskReadMeter != nil {
+ db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
+ }
+ if db.diskWriteMeter != nil {
+ db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
+ }
+ iostats[0], iostats[1] = nRead, nWrite
+
+ // Sleep a bit, then repeat the stats collection
+ select {
+ case errc = <-db.quitChan:
+ // Quit requesting, stop hammering the database
+ case <-time.After(refresh):
+ // Timeout, gather a new set of stats
+ }
+ }
+
+ if errc == nil {
+ errc = <-db.quitChan
+ }
+ errc <- merr
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/db_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/db_test.go
index 45325bee..65fdac4a 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/shed/db_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/db_test.go
@@ -55,7 +55,7 @@ func TestDB_persistence(t *testing.T) {
}
defer os.RemoveAll(dir)
- db, err := NewDB(dir)
+ db, err := NewDB(dir, "")
if err != nil {
t.Fatal(err)
}
@@ -73,7 +73,7 @@ func TestDB_persistence(t *testing.T) {
t.Fatal(err)
}
- db2, err := NewDB(dir)
+ db2, err := NewDB(dir, "")
if err != nil {
t.Fatal(err)
}
@@ -101,7 +101,7 @@ func newTestDB(t *testing.T) (db *DB, cleanupFunc func()) {
t.Fatal(err)
}
cleanupFunc = func() { os.RemoveAll(dir) }
- db, err = NewDB(dir)
+ db, err = NewDB(dir, "")
if err != nil {
cleanupFunc()
t.Fatal(err)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/example_store_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/example_store_test.go
index 2ed0be14..9a83855e 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/shed/example_store_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/example_store_test.go
@@ -52,7 +52,7 @@ type Store struct {
// and possible conflicts with schema from existing database is checked
// automatically.
func New(path string) (s *Store, err error) {
- db, err := shed.NewDB(path)
+ db, err := shed.NewDB(path, "")
if err != nil {
return nil, err
}
@@ -71,20 +71,20 @@ func New(path string) (s *Store, err error) {
}
// Index storing actual chunk address, data and store timestamp.
s.retrievalIndex, err = db.NewIndex("Address->StoreTimestamp|Data", shed.IndexFuncs{
- EncodeKey: func(fields shed.IndexItem) (key []byte, err error) {
+ EncodeKey: func(fields shed.Item) (key []byte, err error) {
return fields.Address, nil
},
- DecodeKey: func(key []byte) (e shed.IndexItem, err error) {
+ DecodeKey: func(key []byte) (e shed.Item, err error) {
e.Address = key
return e, nil
},
- EncodeValue: func(fields shed.IndexItem) (value []byte, err error) {
+ EncodeValue: func(fields shed.Item) (value []byte, err error) {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
value = append(b, fields.Data...)
return value, nil
},
- DecodeValue: func(value []byte) (e shed.IndexItem, err error) {
+ DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
e.Data = value[8:]
return e, nil
@@ -96,19 +96,19 @@ func New(path string) (s *Store, err error) {
// Index storing access timestamp for a particular address.
// It is needed in order to update gc index keys for iteration order.
s.accessIndex, err = db.NewIndex("Address->AccessTimestamp", shed.IndexFuncs{
- EncodeKey: func(fields shed.IndexItem) (key []byte, err error) {
+ EncodeKey: func(fields shed.Item) (key []byte, err error) {
return fields.Address, nil
},
- DecodeKey: func(key []byte) (e shed.IndexItem, err error) {
+ DecodeKey: func(key []byte) (e shed.Item, err error) {
e.Address = key
return e, nil
},
- EncodeValue: func(fields shed.IndexItem) (value []byte, err error) {
+ EncodeValue: func(fields shed.Item) (value []byte, err error) {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(fields.AccessTimestamp))
return b, nil
},
- DecodeValue: func(value []byte) (e shed.IndexItem, err error) {
+ DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
e.AccessTimestamp = int64(binary.BigEndian.Uint64(value))
return e, nil
},
@@ -118,23 +118,23 @@ func New(path string) (s *Store, err error) {
}
// Index with keys ordered by access timestamp for garbage collection prioritization.
s.gcIndex, err = db.NewIndex("AccessTimestamp|StoredTimestamp|Address->nil", shed.IndexFuncs{
- EncodeKey: func(fields shed.IndexItem) (key []byte, err error) {
+ EncodeKey: func(fields shed.Item) (key []byte, err error) {
b := make([]byte, 16, 16+len(fields.Address))
binary.BigEndian.PutUint64(b[:8], uint64(fields.AccessTimestamp))
binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
key = append(b, fields.Address...)
return key, nil
},
- DecodeKey: func(key []byte) (e shed.IndexItem, err error) {
+ DecodeKey: func(key []byte) (e shed.Item, err error) {
e.AccessTimestamp = int64(binary.BigEndian.Uint64(key[:8]))
e.StoreTimestamp = int64(binary.BigEndian.Uint64(key[8:16]))
e.Address = key[16:]
return e, nil
},
- EncodeValue: func(fields shed.IndexItem) (value []byte, err error) {
+ EncodeValue: func(fields shed.Item) (value []byte, err error) {
return nil, nil
},
- DecodeValue: func(value []byte) (e shed.IndexItem, err error) {
+ DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
return e, nil
},
})
@@ -146,7 +146,7 @@ func New(path string) (s *Store, err error) {
// Put stores the chunk and sets it store timestamp.
func (s *Store) Put(_ context.Context, ch storage.Chunk) (err error) {
- return s.retrievalIndex.Put(shed.IndexItem{
+ return s.retrievalIndex.Put(shed.Item{
Address: ch.Address(),
Data: ch.Data(),
StoreTimestamp: time.Now().UTC().UnixNano(),
@@ -161,7 +161,7 @@ func (s *Store) Get(_ context.Context, addr storage.Address) (c storage.Chunk, e
batch := new(leveldb.Batch)
// Get the chunk data and storage timestamp.
- item, err := s.retrievalIndex.Get(shed.IndexItem{
+ item, err := s.retrievalIndex.Get(shed.Item{
Address: addr,
})
if err != nil {
@@ -172,13 +172,13 @@ func (s *Store) Get(_ context.Context, addr storage.Address) (c storage.Chunk, e
}
// Get the chunk access timestamp.
- accessItem, err := s.accessIndex.Get(shed.IndexItem{
+ accessItem, err := s.accessIndex.Get(shed.Item{
Address: addr,
})
switch err {
case nil:
// Remove gc index entry if access timestamp is found.
- err = s.gcIndex.DeleteInBatch(batch, shed.IndexItem{
+ err = s.gcIndex.DeleteInBatch(batch, shed.Item{
Address: item.Address,
StoreTimestamp: accessItem.AccessTimestamp,
AccessTimestamp: item.StoreTimestamp,
@@ -197,7 +197,7 @@ func (s *Store) Get(_ context.Context, addr storage.Address) (c storage.Chunk, e
accessTimestamp := time.Now().UTC().UnixNano()
// Put new access timestamp in access index.
- err = s.accessIndex.PutInBatch(batch, shed.IndexItem{
+ err = s.accessIndex.PutInBatch(batch, shed.Item{
Address: addr,
AccessTimestamp: accessTimestamp,
})
@@ -206,7 +206,7 @@ func (s *Store) Get(_ context.Context, addr storage.Address) (c storage.Chunk, e
}
// Put new access timestamp in gc index.
- err = s.gcIndex.PutInBatch(batch, shed.IndexItem{
+ err = s.gcIndex.PutInBatch(batch, shed.Item{
Address: item.Address,
AccessTimestamp: accessTimestamp,
StoreTimestamp: item.StoreTimestamp,
@@ -244,7 +244,7 @@ func (s *Store) CollectGarbage() (err error) {
// New batch for a new cg round.
trash := new(leveldb.Batch)
// Iterate through all index items and break when needed.
- err = s.gcIndex.IterateAll(func(item shed.IndexItem) (stop bool, err error) {
+ err = s.gcIndex.Iterate(func(item shed.Item) (stop bool, err error) {
// Remove the chunk.
err = s.retrievalIndex.DeleteInBatch(trash, item)
if err != nil {
@@ -265,7 +265,7 @@ func (s *Store) CollectGarbage() (err error) {
return true, nil
}
return false, nil
- })
+ }, nil)
if err != nil {
return err
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64.go
index 80e0069a..0417583a 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64.go
@@ -99,6 +99,44 @@ func (f Uint64Field) IncInBatch(batch *leveldb.Batch) (val uint64, err error) {
return val, nil
}
+// Dec decrements a uint64 value in the database.
+// This operation is not goroutine safe.
+// The field is protected from overflow to a negative value.
+func (f Uint64Field) Dec() (val uint64, err error) {
+ val, err = f.Get()
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ val = 0
+ } else {
+ return 0, err
+ }
+ }
+ if val != 0 {
+ val--
+ }
+ return val, f.Put(val)
+}
+
+// DecInBatch decrements a uint64 value in the batch
+// by retrieving a value from the database, not the same batch.
+// This operation is not goroutine safe.
+// The field is protected from overflow to a negative value.
+func (f Uint64Field) DecInBatch(batch *leveldb.Batch) (val uint64, err error) {
+ val, err = f.Get()
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ val = 0
+ } else {
+ return 0, err
+ }
+ }
+ if val != 0 {
+ val--
+ }
+ f.PutInBatch(batch, val)
+ return val, nil
+}
+
// encode transforms uint64 to 8 byte long
// slice in big endian encoding.
func encodeUint64(val uint64) (b []byte) {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64_test.go
index 69ade71b..9462b56d 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64_test.go
@@ -192,3 +192,109 @@ func TestUint64Field_IncInBatch(t *testing.T) {
t.Errorf("got uint64 %v, want %v", got, want)
}
}
+
+// TestUint64Field_Dec validates Dec operation
+// of the Uint64Field.
+func TestUint64Field_Dec(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ counter, err := db.NewUint64Field("counter")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test overflow protection
+ var want uint64
+ got, err := counter.Dec()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ want = 32
+ err = counter.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want = 31
+ got, err = counter.Dec()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+}
+
+// TestUint64Field_DecInBatch validates DecInBatch operation
+// of the Uint64Field.
+func TestUint64Field_DecInBatch(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ counter, err := db.NewUint64Field("counter")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ batch := new(leveldb.Batch)
+ var want uint64
+ got, err := counter.DecInBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err = counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ batch2 := new(leveldb.Batch)
+ want = 42
+ counter.PutInBatch(batch2, want)
+ err = db.WriteBatch(batch2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err = counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ batch3 := new(leveldb.Batch)
+ want = 41
+ got, err = counter.DecInBatch(batch3)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ err = db.WriteBatch(batch3)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err = counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/index.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/index.go
index ba803e3c..df88b1b6 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/shed/index.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/index.go
@@ -17,22 +17,24 @@
package shed
import (
+ "bytes"
+
"github.com/syndtr/goleveldb/leveldb"
)
-// IndexItem holds fields relevant to Swarm Chunk data and metadata.
+// Item holds fields relevant to Swarm Chunk data and metadata.
// All information required for swarm storage and operations
// on that storage must be defined here.
// This structure is logically connected to swarm storage,
// the only part of this package that is not generalized,
// mostly for performance reasons.
//
-// IndexItem is a type that is used for retrieving, storing and encoding
+// Item is a type that is used for retrieving, storing and encoding
// chunk data and metadata. It is passed as an argument to Index encoding
// functions, get function and put function.
// But it is also returned with additional data from get function call
// and as the argument in iterator function definition.
-type IndexItem struct {
+type Item struct {
Address []byte
Data []byte
AccessTimestamp int64
@@ -43,9 +45,9 @@ type IndexItem struct {
}
// Merge is a helper method to construct a new
-// IndexItem by filling up fields with default values
-// of a particular IndexItem with values from another one.
-func (i IndexItem) Merge(i2 IndexItem) (new IndexItem) {
+// Item by filling up fields with default values
+// of a particular Item with values from another one.
+func (i Item) Merge(i2 Item) (new Item) {
if i.Address == nil {
i.Address = i2.Address
}
@@ -67,26 +69,26 @@ func (i IndexItem) Merge(i2 IndexItem) (new IndexItem) {
// Index represents a set of LevelDB key value pairs that have common
// prefix. It holds functions for encoding and decoding keys and values
// to provide transparent actions on saved data which inclide:
-// - getting a particular IndexItem
-// - saving a particular IndexItem
+// - getting a particular Item
+// - saving a particular Item
// - iterating over a sorted LevelDB keys
// It implements IndexIteratorInterface interface.
type Index struct {
db *DB
prefix []byte
- encodeKeyFunc func(fields IndexItem) (key []byte, err error)
- decodeKeyFunc func(key []byte) (e IndexItem, err error)
- encodeValueFunc func(fields IndexItem) (value []byte, err error)
- decodeValueFunc func(value []byte) (e IndexItem, err error)
+ encodeKeyFunc func(fields Item) (key []byte, err error)
+ decodeKeyFunc func(key []byte) (e Item, err error)
+ encodeValueFunc func(fields Item) (value []byte, err error)
+ decodeValueFunc func(keyFields Item, value []byte) (e Item, err error)
}
// IndexFuncs structure defines functions for encoding and decoding
// LevelDB keys and values for a specific index.
type IndexFuncs struct {
- EncodeKey func(fields IndexItem) (key []byte, err error)
- DecodeKey func(key []byte) (e IndexItem, err error)
- EncodeValue func(fields IndexItem) (value []byte, err error)
- DecodeValue func(value []byte) (e IndexItem, err error)
+ EncodeKey func(fields Item) (key []byte, err error)
+ DecodeKey func(key []byte) (e Item, err error)
+ EncodeValue func(fields Item) (value []byte, err error)
+ DecodeValue func(keyFields Item, value []byte) (e Item, err error)
}
// NewIndex returns a new Index instance with defined name and
@@ -105,7 +107,7 @@ func (db *DB) NewIndex(name string, funcs IndexFuncs) (f Index, err error) {
// by appending the provided index id byte.
// This is needed to avoid collisions between keys of different
// indexes as all index ids are unique.
- encodeKeyFunc: func(e IndexItem) (key []byte, err error) {
+ encodeKeyFunc: func(e Item) (key []byte, err error) {
key, err = funcs.EncodeKey(e)
if err != nil {
return nil, err
@@ -115,7 +117,7 @@ func (db *DB) NewIndex(name string, funcs IndexFuncs) (f Index, err error) {
// This function reverses the encodeKeyFunc constructed key
// to transparently work with index keys without their index ids.
// It assumes that index keys are prefixed with only one byte.
- decodeKeyFunc: func(key []byte) (e IndexItem, err error) {
+ decodeKeyFunc: func(key []byte) (e Item, err error) {
return funcs.DecodeKey(key[1:])
},
encodeValueFunc: funcs.EncodeValue,
@@ -123,10 +125,10 @@ func (db *DB) NewIndex(name string, funcs IndexFuncs) (f Index, err error) {
}, nil
}
-// Get accepts key fields represented as IndexItem to retrieve a
+// Get accepts key fields represented as Item to retrieve a
// value from the index and return maximum available information
-// from the index represented as another IndexItem.
-func (f Index) Get(keyFields IndexItem) (out IndexItem, err error) {
+// from the index represented as another Item.
+func (f Index) Get(keyFields Item) (out Item, err error) {
key, err := f.encodeKeyFunc(keyFields)
if err != nil {
return out, err
@@ -135,16 +137,16 @@ func (f Index) Get(keyFields IndexItem) (out IndexItem, err error) {
if err != nil {
return out, err
}
- out, err = f.decodeValueFunc(value)
+ out, err = f.decodeValueFunc(keyFields, value)
if err != nil {
return out, err
}
return out.Merge(keyFields), nil
}
-// Put accepts IndexItem to encode information from it
+// Put accepts Item to encode information from it
// and save it to the database.
-func (f Index) Put(i IndexItem) (err error) {
+func (f Index) Put(i Item) (err error) {
key, err := f.encodeKeyFunc(i)
if err != nil {
return err
@@ -159,7 +161,7 @@ func (f Index) Put(i IndexItem) (err error) {
// PutInBatch is the same as Put method, but it just
// saves the key/value pair to the batch instead
// directly to the database.
-func (f Index) PutInBatch(batch *leveldb.Batch, i IndexItem) (err error) {
+func (f Index) PutInBatch(batch *leveldb.Batch, i Item) (err error) {
key, err := f.encodeKeyFunc(i)
if err != nil {
return err
@@ -172,9 +174,9 @@ func (f Index) PutInBatch(batch *leveldb.Batch, i IndexItem) (err error) {
return nil
}
-// Delete accepts IndexItem to remove a key/value pair
+// Delete accepts Item to remove a key/value pair
// from the database based on its fields.
-func (f Index) Delete(keyFields IndexItem) (err error) {
+func (f Index) Delete(keyFields Item) (err error) {
key, err := f.encodeKeyFunc(keyFields)
if err != nil {
return err
@@ -184,7 +186,7 @@ func (f Index) Delete(keyFields IndexItem) (err error) {
// DeleteInBatch is the same as Delete just the operation
// is performed on the batch instead on the database.
-func (f Index) DeleteInBatch(batch *leveldb.Batch, keyFields IndexItem) (err error) {
+func (f Index) DeleteInBatch(batch *leveldb.Batch, keyFields Item) (err error) {
key, err := f.encodeKeyFunc(keyFields)
if err != nil {
return err
@@ -193,32 +195,71 @@ func (f Index) DeleteInBatch(batch *leveldb.Batch, keyFields IndexItem) (err err
return nil
}
-// IndexIterFunc is a callback on every IndexItem that is decoded
+// IndexIterFunc is a callback on every Item that is decoded
// by iterating on an Index keys.
// By returning a true for stop variable, iteration will
// stop, and by returning the error, that error will be
// propagated to the called iterator method on Index.
-type IndexIterFunc func(item IndexItem) (stop bool, err error)
+type IndexIterFunc func(item Item) (stop bool, err error)
-// IterateAll iterates over all keys of the Index.
-func (f Index) IterateAll(fn IndexIterFunc) (err error) {
+// IterateOptions defines optional parameters for Iterate function.
+type IterateOptions struct {
+ // StartFrom is the Item to start the iteration from.
+ StartFrom *Item
+ // If SkipStartFromItem is true, StartFrom item will not
+ // be iterated on.
+ SkipStartFromItem bool
+ // Iterate over items which keys have a common prefix.
+ Prefix []byte
+}
+
+// Iterate function iterates over keys of the Index.
+// If IterateOptions is nil, the iterations is over all keys.
+func (f Index) Iterate(fn IndexIterFunc, options *IterateOptions) (err error) {
+ if options == nil {
+ options = new(IterateOptions)
+ }
+ // construct a prefix with Index prefix and optional common key prefix
+ prefix := append(f.prefix, options.Prefix...)
+ // start from the prefix
+ startKey := prefix
+ if options.StartFrom != nil {
+ // start from the provided StartFrom Item key value
+ startKey, err = f.encodeKeyFunc(*options.StartFrom)
+ if err != nil {
+ return err
+ }
+ }
it := f.db.NewIterator()
defer it.Release()
- for ok := it.Seek(f.prefix); ok; ok = it.Next() {
+ // move the cursor to the start key
+ ok := it.Seek(startKey)
+ if !ok {
+ // stop iterator if seek has failed
+ return it.Error()
+ }
+ if options.SkipStartFromItem && bytes.Equal(startKey, it.Key()) {
+ // skip the start from Item if it is the first key
+ // and it is explicitly configured to skip it
+ ok = it.Next()
+ }
+ for ; ok; ok = it.Next() {
key := it.Key()
- if key[0] != f.prefix[0] {
+ if !bytes.HasPrefix(key, prefix) {
break
}
- keyIndexItem, err := f.decodeKeyFunc(key)
+ // create a copy of key byte slice not to share leveldb underlaying slice array
+ keyItem, err := f.decodeKeyFunc(append([]byte(nil), key...))
if err != nil {
return err
}
- valueIndexItem, err := f.decodeValueFunc(it.Value())
+ // create a copy of value byte slice not to share leveldb underlaying slice array
+ valueItem, err := f.decodeValueFunc(keyItem, append([]byte(nil), it.Value()...))
if err != nil {
return err
}
- stop, err := fn(keyIndexItem.Merge(valueIndexItem))
+ stop, err := fn(keyItem.Merge(valueItem))
if err != nil {
return err
}
@@ -229,12 +270,27 @@ func (f Index) IterateAll(fn IndexIterFunc) (err error) {
return it.Error()
}
-// IterateFrom iterates over Index keys starting from the key
-// encoded from the provided IndexItem.
-func (f Index) IterateFrom(start IndexItem, fn IndexIterFunc) (err error) {
+// Count returns the number of items in index.
+func (f Index) Count() (count int, err error) {
+ it := f.db.NewIterator()
+ defer it.Release()
+
+ for ok := it.Seek(f.prefix); ok; ok = it.Next() {
+ key := it.Key()
+ if key[0] != f.prefix[0] {
+ break
+ }
+ count++
+ }
+ return count, it.Error()
+}
+
+// CountFrom returns the number of items in index keys
+// starting from the key encoded from the provided Item.
+func (f Index) CountFrom(start Item) (count int, err error) {
startKey, err := f.encodeKeyFunc(start)
if err != nil {
- return err
+ return 0, err
}
it := f.db.NewIterator()
defer it.Release()
@@ -244,21 +300,7 @@ func (f Index) IterateFrom(start IndexItem, fn IndexIterFunc) (err error) {
if key[0] != f.prefix[0] {
break
}
- keyIndexItem, err := f.decodeKeyFunc(key)
- if err != nil {
- return err
- }
- valueIndexItem, err := f.decodeValueFunc(it.Value())
- if err != nil {
- return err
- }
- stop, err := fn(keyIndexItem.Merge(valueIndexItem))
- if err != nil {
- return err
- }
- if stop {
- break
- }
+ count++
}
- return it.Error()
+ return count, it.Error()
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/index_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/index_test.go
index ba82216d..97d7c91f 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/shed/index_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/index_test.go
@@ -29,20 +29,20 @@ import (
// Index functions for the index that is used in tests in this file.
var retrievalIndexFuncs = IndexFuncs{
- EncodeKey: func(fields IndexItem) (key []byte, err error) {
+ EncodeKey: func(fields Item) (key []byte, err error) {
return fields.Address, nil
},
- DecodeKey: func(key []byte) (e IndexItem, err error) {
+ DecodeKey: func(key []byte) (e Item, err error) {
e.Address = key
return e, nil
},
- EncodeValue: func(fields IndexItem) (value []byte, err error) {
+ EncodeValue: func(fields Item) (value []byte, err error) {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
value = append(b, fields.Data...)
return value, nil
},
- DecodeValue: func(value []byte) (e IndexItem, err error) {
+ DecodeValue: func(keyItem Item, value []byte) (e Item, err error) {
e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
e.Data = value[8:]
return e, nil
@@ -60,7 +60,7 @@ func TestIndex(t *testing.T) {
}
t.Run("put", func(t *testing.T) {
- want := IndexItem{
+ want := Item{
Address: []byte("put-hash"),
Data: []byte("DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
@@ -70,16 +70,16 @@ func TestIndex(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- got, err := index.Get(IndexItem{
+ got, err := index.Get(Item{
Address: want.Address,
})
if err != nil {
t.Fatal(err)
}
- checkIndexItem(t, got, want)
+ checkItem(t, got, want)
t.Run("overwrite", func(t *testing.T) {
- want := IndexItem{
+ want := Item{
Address: []byte("put-hash"),
Data: []byte("New DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
@@ -89,18 +89,18 @@ func TestIndex(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- got, err := index.Get(IndexItem{
+ got, err := index.Get(Item{
Address: want.Address,
})
if err != nil {
t.Fatal(err)
}
- checkIndexItem(t, got, want)
+ checkItem(t, got, want)
})
})
t.Run("put in batch", func(t *testing.T) {
- want := IndexItem{
+ want := Item{
Address: []byte("put-in-batch-hash"),
Data: []byte("DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
@@ -112,16 +112,16 @@ func TestIndex(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- got, err := index.Get(IndexItem{
+ got, err := index.Get(Item{
Address: want.Address,
})
if err != nil {
t.Fatal(err)
}
- checkIndexItem(t, got, want)
+ checkItem(t, got, want)
t.Run("overwrite", func(t *testing.T) {
- want := IndexItem{
+ want := Item{
Address: []byte("put-in-batch-hash"),
Data: []byte("New DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
@@ -133,13 +133,13 @@ func TestIndex(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- got, err := index.Get(IndexItem{
+ got, err := index.Get(Item{
Address: want.Address,
})
if err != nil {
t.Fatal(err)
}
- checkIndexItem(t, got, want)
+ checkItem(t, got, want)
})
})
@@ -150,13 +150,13 @@ func TestIndex(t *testing.T) {
address := []byte("put-in-batch-twice-hash")
// put the first item
- index.PutInBatch(batch, IndexItem{
+ index.PutInBatch(batch, Item{
Address: address,
Data: []byte("DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
})
- want := IndexItem{
+ want := Item{
Address: address,
Data: []byte("New DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
@@ -168,17 +168,17 @@ func TestIndex(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- got, err := index.Get(IndexItem{
+ got, err := index.Get(Item{
Address: address,
})
if err != nil {
t.Fatal(err)
}
- checkIndexItem(t, got, want)
+ checkItem(t, got, want)
})
t.Run("delete", func(t *testing.T) {
- want := IndexItem{
+ want := Item{
Address: []byte("delete-hash"),
Data: []byte("DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
@@ -188,15 +188,15 @@ func TestIndex(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- got, err := index.Get(IndexItem{
+ got, err := index.Get(Item{
Address: want.Address,
})
if err != nil {
t.Fatal(err)
}
- checkIndexItem(t, got, want)
+ checkItem(t, got, want)
- err = index.Delete(IndexItem{
+ err = index.Delete(Item{
Address: want.Address,
})
if err != nil {
@@ -204,7 +204,7 @@ func TestIndex(t *testing.T) {
}
wantErr := leveldb.ErrNotFound
- got, err = index.Get(IndexItem{
+ got, err = index.Get(Item{
Address: want.Address,
})
if err != wantErr {
@@ -213,7 +213,7 @@ func TestIndex(t *testing.T) {
})
t.Run("delete in batch", func(t *testing.T) {
- want := IndexItem{
+ want := Item{
Address: []byte("delete-in-batch-hash"),
Data: []byte("DATA"),
StoreTimestamp: time.Now().UTC().UnixNano(),
@@ -223,16 +223,16 @@ func TestIndex(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- got, err := index.Get(IndexItem{
+ got, err := index.Get(Item{
Address: want.Address,
})
if err != nil {
t.Fatal(err)
}
- checkIndexItem(t, got, want)
+ checkItem(t, got, want)
batch := new(leveldb.Batch)
- index.DeleteInBatch(batch, IndexItem{
+ index.DeleteInBatch(batch, Item{
Address: want.Address,
})
err = db.WriteBatch(batch)
@@ -241,7 +241,7 @@ func TestIndex(t *testing.T) {
}
wantErr := leveldb.ErrNotFound
- got, err = index.Get(IndexItem{
+ got, err = index.Get(Item{
Address: want.Address,
})
if err != wantErr {
@@ -250,8 +250,9 @@ func TestIndex(t *testing.T) {
})
}
-// TestIndex_iterate validates index iterator functions for correctness.
-func TestIndex_iterate(t *testing.T) {
+// TestIndex_Iterate validates index Iterate
+// functions for correctness.
+func TestIndex_Iterate(t *testing.T) {
db, cleanupFunc := newTestDB(t)
defer cleanupFunc()
@@ -260,7 +261,7 @@ func TestIndex_iterate(t *testing.T) {
t.Fatal(err)
}
- items := []IndexItem{
+ items := []Item{
{
Address: []byte("iterate-hash-01"),
Data: []byte("data80"),
@@ -290,7 +291,7 @@ func TestIndex_iterate(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- item04 := IndexItem{
+ item04 := Item{
Address: []byte("iterate-hash-04"),
Data: []byte("data0"),
}
@@ -306,31 +307,53 @@ func TestIndex_iterate(t *testing.T) {
t.Run("all", func(t *testing.T) {
var i int
- err := index.IterateAll(func(item IndexItem) (stop bool, err error) {
+ err := index.Iterate(func(item Item) (stop bool, err error) {
if i > len(items)-1 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
- checkIndexItem(t, item, want)
+ checkItem(t, item, want)
i++
return false, nil
+ }, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+
+ t.Run("start from", func(t *testing.T) {
+ startIndex := 2
+ i := startIndex
+ err := index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ i++
+ return false, nil
+ }, &IterateOptions{
+ StartFrom: &items[startIndex],
})
if err != nil {
t.Fatal(err)
}
})
- t.Run("from", func(t *testing.T) {
+ t.Run("skip start from", func(t *testing.T) {
startIndex := 2
- i := startIndex
- err := index.IterateFrom(items[startIndex], func(item IndexItem) (stop bool, err error) {
+ i := startIndex + 1
+ err := index.Iterate(func(item Item) (stop bool, err error) {
if i > len(items)-1 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
- checkIndexItem(t, item, want)
+ checkItem(t, item, want)
i++
return false, nil
+ }, &IterateOptions{
+ StartFrom: &items[startIndex],
+ SkipStartFromItem: true,
})
if err != nil {
t.Fatal(err)
@@ -341,18 +364,209 @@ func TestIndex_iterate(t *testing.T) {
var i int
stopIndex := 3
var count int
- err := index.IterateAll(func(item IndexItem) (stop bool, err error) {
+ err := index.Iterate(func(item Item) (stop bool, err error) {
if i > len(items)-1 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
- checkIndexItem(t, item, want)
+ checkItem(t, item, want)
count++
if i == stopIndex {
return true, nil
}
i++
return false, nil
+ }, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ wantItemsCount := stopIndex + 1
+ if count != wantItemsCount {
+ t.Errorf("got %v items, expected %v", count, wantItemsCount)
+ }
+ })
+
+ t.Run("no overflow", func(t *testing.T) {
+ secondIndex, err := db.NewIndex("second-index", retrievalIndexFuncs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ secondItem := Item{
+ Address: []byte("iterate-hash-10"),
+ Data: []byte("data-second"),
+ }
+ err = secondIndex.Put(secondItem)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var i int
+ err = index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ i++
+ return false, nil
+ }, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ i = 0
+ err = secondIndex.Iterate(func(item Item) (stop bool, err error) {
+ if i > 1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ checkItem(t, item, secondItem)
+ i++
+ return false, nil
+ }, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+}
+
+// TestIndex_Iterate_withPrefix validates index Iterate
+// function for correctness.
+func TestIndex_Iterate_withPrefix(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ allItems := []Item{
+ {Address: []byte("want-hash-00"), Data: []byte("data80")},
+ {Address: []byte("skip-hash-01"), Data: []byte("data81")},
+ {Address: []byte("skip-hash-02"), Data: []byte("data82")},
+ {Address: []byte("skip-hash-03"), Data: []byte("data83")},
+ {Address: []byte("want-hash-04"), Data: []byte("data84")},
+ {Address: []byte("want-hash-05"), Data: []byte("data85")},
+ {Address: []byte("want-hash-06"), Data: []byte("data86")},
+ {Address: []byte("want-hash-07"), Data: []byte("data87")},
+ {Address: []byte("want-hash-08"), Data: []byte("data88")},
+ {Address: []byte("want-hash-09"), Data: []byte("data89")},
+ {Address: []byte("skip-hash-10"), Data: []byte("data90")},
+ }
+ batch := new(leveldb.Batch)
+ for _, i := range allItems {
+ index.PutInBatch(batch, i)
+ }
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ prefix := []byte("want")
+
+ items := make([]Item, 0)
+ for _, item := range allItems {
+ if bytes.HasPrefix(item.Address, prefix) {
+ items = append(items, item)
+ }
+ }
+ sort.SliceStable(items, func(i, j int) bool {
+ return bytes.Compare(items[i].Address, items[j].Address) < 0
+ })
+
+ t.Run("with prefix", func(t *testing.T) {
+ var i int
+ err := index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ i++
+ return false, nil
+ }, &IterateOptions{
+ Prefix: prefix,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if i != len(items) {
+ t.Errorf("got %v items, want %v", i, len(items))
+ }
+ })
+
+ t.Run("with prefix and start from", func(t *testing.T) {
+ startIndex := 2
+ var count int
+ i := startIndex
+ err := index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ i++
+ count++
+ return false, nil
+ }, &IterateOptions{
+ StartFrom: &items[startIndex],
+ Prefix: prefix,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ wantCount := len(items) - startIndex
+ if count != wantCount {
+ t.Errorf("got %v items, want %v", count, wantCount)
+ }
+ })
+
+ t.Run("with prefix and skip start from", func(t *testing.T) {
+ startIndex := 2
+ var count int
+ i := startIndex + 1
+ err := index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ i++
+ count++
+ return false, nil
+ }, &IterateOptions{
+ StartFrom: &items[startIndex],
+ SkipStartFromItem: true,
+ Prefix: prefix,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ wantCount := len(items) - startIndex - 1
+ if count != wantCount {
+ t.Errorf("got %v items, want %v", count, wantCount)
+ }
+ })
+
+ t.Run("stop", func(t *testing.T) {
+ var i int
+ stopIndex := 3
+ var count int
+ err := index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ count++
+ if i == stopIndex {
+ return true, nil
+ }
+ i++
+ return false, nil
+ }, &IterateOptions{
+ Prefix: prefix,
})
if err != nil {
t.Fatal(err)
@@ -369,46 +583,187 @@ func TestIndex_iterate(t *testing.T) {
t.Fatal(err)
}
- secondIndexItem := IndexItem{
+ secondItem := Item{
Address: []byte("iterate-hash-10"),
Data: []byte("data-second"),
}
- err = secondIndex.Put(secondIndexItem)
+ err = secondIndex.Put(secondItem)
if err != nil {
t.Fatal(err)
}
var i int
- err = index.IterateAll(func(item IndexItem) (stop bool, err error) {
+ err = index.Iterate(func(item Item) (stop bool, err error) {
if i > len(items)-1 {
return true, fmt.Errorf("got unexpected index item: %#v", item)
}
want := items[i]
- checkIndexItem(t, item, want)
+ checkItem(t, item, want)
i++
return false, nil
+ }, &IterateOptions{
+ Prefix: prefix,
})
if err != nil {
t.Fatal(err)
}
-
- i = 0
- err = secondIndex.IterateAll(func(item IndexItem) (stop bool, err error) {
- if i > 1 {
- return true, fmt.Errorf("got unexpected index item: %#v", item)
- }
- checkIndexItem(t, item, secondIndexItem)
- i++
- return false, nil
- })
- if err != nil {
- t.Fatal(err)
+ if i != len(items) {
+ t.Errorf("got %v items, want %v", i, len(items))
}
})
}
-// checkIndexItem is a test helper function that compares if two Index items are the same.
-func checkIndexItem(t *testing.T, got, want IndexItem) {
+// TestIndex_count tests if Index.Count and Index.CountFrom
+// returns the correct number of items.
+func TestIndex_count(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ items := []Item{
+ {
+ Address: []byte("iterate-hash-01"),
+ Data: []byte("data80"),
+ },
+ {
+ Address: []byte("iterate-hash-02"),
+ Data: []byte("data84"),
+ },
+ {
+ Address: []byte("iterate-hash-03"),
+ Data: []byte("data22"),
+ },
+ {
+ Address: []byte("iterate-hash-04"),
+ Data: []byte("data41"),
+ },
+ {
+ Address: []byte("iterate-hash-05"),
+ Data: []byte("data1"),
+ },
+ }
+ batch := new(leveldb.Batch)
+ for _, i := range items {
+ index.PutInBatch(batch, i)
+ }
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run("Count", func(t *testing.T) {
+ got, err := index.Count()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := len(items)
+ if got != want {
+ t.Errorf("got %v items count, want %v", got, want)
+ }
+ })
+
+ t.Run("CountFrom", func(t *testing.T) {
+ got, err := index.CountFrom(Item{
+ Address: items[1].Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := len(items) - 1
+ if got != want {
+ t.Errorf("got %v items count, want %v", got, want)
+ }
+ })
+
+ // update the index with another item
+ t.Run("add item", func(t *testing.T) {
+ item04 := Item{
+ Address: []byte("iterate-hash-06"),
+ Data: []byte("data0"),
+ }
+ err = index.Put(item04)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ count := len(items) + 1
+
+ t.Run("Count", func(t *testing.T) {
+ got, err := index.Count()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := count
+ if got != want {
+ t.Errorf("got %v items count, want %v", got, want)
+ }
+ })
+
+ t.Run("CountFrom", func(t *testing.T) {
+ got, err := index.CountFrom(Item{
+ Address: items[1].Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := count - 1
+ if got != want {
+ t.Errorf("got %v items count, want %v", got, want)
+ }
+ })
+ })
+
+ // delete some items
+ t.Run("delete items", func(t *testing.T) {
+ deleteCount := 3
+
+ for _, item := range items[:deleteCount] {
+ err := index.Delete(item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ count := len(items) + 1 - deleteCount
+
+ t.Run("Count", func(t *testing.T) {
+ got, err := index.Count()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := count
+ if got != want {
+ t.Errorf("got %v items count, want %v", got, want)
+ }
+ })
+
+ t.Run("CountFrom", func(t *testing.T) {
+ got, err := index.CountFrom(Item{
+ Address: items[deleteCount+1].Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := count - 1
+ if got != want {
+ t.Errorf("got %v items count, want %v", got, want)
+ }
+ })
+ })
+}
+
+// checkItem is a test helper function that compares if two Index items are the same.
+func checkItem(t *testing.T, got, want Item) {
t.Helper()
if !bytes.Equal(got.Address, want.Address) {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/state.go b/vendor/github.com/ethereum/go-ethereum/swarm/state.go
deleted file mode 100644
index 1984ab03..00000000
--- a/vendor/github.com/ethereum/go-ethereum/swarm/state.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package swarm
-
-type Voidstore struct {
-}
-
-func (self Voidstore) Load(string) ([]byte, error) {
- return nil, nil
-}
-
-func (self Voidstore) Save(string, []byte) error {
- return nil
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/state/dbstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/state/dbstore.go
index fc5dd8f7..147e34b2 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/state/dbstore.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/state/dbstore.go
@@ -28,9 +28,6 @@ import (
// ErrNotFound is returned when no results are returned from the database
var ErrNotFound = errors.New("ErrorNotFound")
-// ErrInvalidArgument is returned when the argument type does not match the expected type
-var ErrInvalidArgument = errors.New("ErrorInvalidArgument")
-
// Store defines methods required to get, set, delete values for different keys
// and close the underlying resources.
type Store interface {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go
index cbe65372..a8bfe2d1 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go
@@ -65,10 +65,6 @@ If all is well it is possible to implement this by simply composing readers so t
The hashing itself does use extra copies and allocation though, since it does need it.
*/
-var (
- errAppendOppNotSuported = errors.New("Append operation not supported")
-)
-
type ChunkerParams struct {
chunkSize int64
hashSize int64
@@ -99,7 +95,6 @@ type TreeChunker struct {
ctx context.Context
branches int64
- hashFunc SwarmHasher
dataSize int64
data io.Reader
// calculated
@@ -365,10 +360,6 @@ func (tc *TreeChunker) runWorker(ctx context.Context) {
}()
}
-func (tc *TreeChunker) Append() (Address, func(), error) {
- return nil, nil, errAppendOppNotSuported
-}
-
// LazyChunkReader implements LazySectionReader
type LazyChunkReader struct {
ctx context.Context
@@ -411,7 +402,6 @@ func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, e
log.Debug("lazychunkreader.size", "addr", r.addr)
if r.chunkData == nil {
-
startTime := time.Now()
chunkData, err := r.getter.Get(cctx, Reference(r.addr))
if err != nil {
@@ -420,13 +410,8 @@ func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, e
}
metrics.GetOrRegisterResettingTimer("lcr.getter.get", nil).UpdateSince(startTime)
r.chunkData = chunkData
- s := r.chunkData.Size()
- log.Debug("lazychunkreader.size", "key", r.addr, "size", s)
- if s < 0 {
- return 0, errors.New("corrupt size")
- }
- return int64(s), nil
}
+
s := r.chunkData.Size()
log.Debug("lazychunkreader.size", "key", r.addr, "size", s)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker_test.go
index 1f847edc..9a125944 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker_test.go
@@ -24,8 +24,8 @@ import (
"io"
"testing"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/swarm/testutil"
+ "golang.org/x/crypto/sha3"
)
/*
@@ -142,7 +142,7 @@ func TestSha3ForCorrectness(t *testing.T) {
io.LimitReader(bytes.NewReader(input[8:]), int64(size))
- rawSha3 := sha3.NewKeccak256()
+ rawSha3 := sha3.NewLegacyKeccak256()
rawSha3.Reset()
rawSha3.Write(input)
rawSha3Output := rawSha3.Sum(nil)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/common_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/common_test.go
index af104a5a..bcc29d8c 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/common_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/common_test.go
@@ -179,8 +179,9 @@ func testStoreCorrect(m ChunkStore, n int, chunksize int64, t *testing.T) {
return fmt.Errorf("key does not match retrieved chunk Address")
}
hasher := MakeHashFunc(DefaultHash)()
- hasher.ResetWithLength(chunk.SpanBytes())
- hasher.Write(chunk.Payload())
+ data := chunk.Data()
+ hasher.ResetWithLength(data[:8])
+ hasher.Write(data[8:])
exp := hasher.Sum(nil)
if !bytes.Equal(h, exp) {
return fmt.Errorf("key is not hash of chunk data")
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/database.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/database.go
index e25fce31..12367b90 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/database.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/database.go
@@ -64,16 +64,6 @@ func (db *LDBDatabase) Delete(key []byte) error {
return db.db.Delete(key, nil)
}
-func (db *LDBDatabase) LastKnownTD() []byte {
- data, _ := db.Get([]byte("LTD"))
-
- if len(data) == 0 {
- data = []byte{0x0}
- }
-
- return data
-}
-
func (db *LDBDatabase) NewIterator() iterator.Iterator {
metrics.GetOrRegisterCounter("ldbdatabase.newiterator", nil).Inc(1)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/encryption/encryption_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/encryption/encryption_test.go
index 0c0d0508..3b4f8a4e 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/encryption/encryption_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/encryption/encryption_test.go
@@ -22,13 +22,13 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/swarm/testutil"
+ "golang.org/x/crypto/sha3"
)
var expectedTransformedHex = "352187af3a843decc63ceca6cb01ea39dbcf77caf0a8f705f5c30d557044ceec9392b94a79376f1e5c10cd0c0f2a98e5353bf22b3ea4fdac6677ee553dec192e3db64e179d0474e96088fb4abd2babd67de123fb398bdf84d818f7bda2c1ab60b3ea0e0569ae54aa969658eb4844e6960d2ff44d7c087ee3aaffa1c0ee5df7e50b615f7ad90190f022934ad5300c7d1809bfe71a11cc04cece5274eb97a5f20350630522c1dbb7cebaf4f97f84e03f5cfd88f2b48880b25d12f4d5e75c150f704ef6b46c72e07db2b705ac3644569dccd22fd8f964f6ef787fda63c46759af334e6f665f70eac775a7017acea49f3c7696151cb1b9434fa4ac27fb803921ffb5ec58dafa168098d7d5b97e384be3384cf5bc235c3d887fef89fe76c0065f9b8d6ad837b442340d9e797b46ef5709ea3358bc415df11e4830de986ef0f1c418ffdcc80e9a3cda9bea0ab5676c0d4240465c43ba527e3b4ea50b4f6255b510e5d25774a75449b0bd71e56c537ade4fcf0f4d63c99ae1dbb5a844971e2c19941b8facfcfc8ee3056e7cb3c7114c5357e845b52f7103cb6e00d2308c37b12baa5b769e1cc7b00fc06f2d16e70cc27a82cb9c1a4e40cb0d43907f73df2c9db44f1b51a6b0bc6d09f77ac3be14041fae3f9df2da42df43ae110904f9ecee278030185254d7c6e918a5512024d047f77a992088cb3190a6587aa54d0c7231c1cd2e455e0d4c07f74bece68e29cd8ba0190c0bcfb26d24634af5d91a81ef5d4dd3d614836ce942ddbf7bb1399317f4c03faa675f325f18324bf9433844bfe5c4cc04130c8d5c329562b7cd66e72f7355de8f5375a72202971613c32bd7f3fcdcd51080758cd1d0a46dbe8f0374381dbc359f5864250c63dde8131cbd7c98ae2b0147d6ea4bf65d1443d511b18e6d608bbb46ac036353b4c51df306a10a6f6939c38629a5c18aaf89cac04bd3ad5156e6b92011c88341cb08551bab0a89e6a46538f5af33b86121dba17e3a434c273f385cd2e8cb90bdd32747d8425d929ccbd9b0815c73325988855549a8489dfd047daf777aaa3099e54cf997175a5d9e1edfe363e3b68c70e02f6bf4fcde6a0f3f7d0e7e98bde1a72ae8b6cd27b32990680cc4a04fc467f41c5adcaddabfc71928a3f6872c360c1d765260690dd28b269864c8e380d9c92ef6b89b0094c8f9bb22608b4156381b19b920e9583c9616ce5693b4d2a6c689f02e6a91584a8e501e107403d2689dd0045269dd9946c0e969fb656a3b39d84a798831f5f9290f163eb2f97d3ae25071324e95e2256d9c1e56eb83c26397855323edc202d56ad05894333b7f0ed3c1e4734782eb8bd5477242fd80d7a89b12866f85cfae476322f032465d6b1253993033fccd4723530630
ab97a1566460af9c90c9da843c229406e65f3fa578bd6bf04dee9b6153807ddadb8ceefc5c601a8ab26023c67b1ab1e8e0f29ce94c78c308005a781853e7a2e0e51738939a657c987b5e611f32f47b5ff461c52e63e0ea390515a8e1f5393dae54ea526934b5f310b76e3fa050e40718cb4c8a20e58946d6ee1879f08c52764422fe542b3240e75eccb7aa75b1f8a651e37a3bc56b0932cdae0e985948468db1f98eb4b77b82081ea25d8a762db00f7898864984bd80e2f3f35f236bf57291dec28f550769943bcfb6f884b7687589b673642ef7fe5d7d5a87d3eca5017f83ccb9a3310520474479464cb3f433440e7e2f1e28c0aef700a45848573409e7ab66e0cfd4fe5d2147ace81bc65fd8891f6245cd69246bbf5c27830e5ab882dd1d02aba34ff6ca9af88df00fd602892f02fedbdc65dedec203faf3f8ff4a97314e0ddb58b9ab756a61a562597f4088b445fcc3b28a708ca7b1485dcd791b779fbf2b3ef1ec5c6205f595fbe45a02105034147e5a146089c200a49dae33ae051a08ea5f974a21540aaeffa7f9d9e3d35478016fb27b871036eb27217a5b834b461f535752fb5f1c8dded3ae14ce3a2ef6639e2fe41939e3509e46e347a95d50b2080f1ba42c804b290ddc912c952d1cec3f2661369f738feacc0dbf1ea27429c644e45f9e26f30c341acd34c7519b2a1663e334621691e810767e9918c2c547b2e23cce915f97d26aac8d0d2fcd3edb7986ad4e2b8a852edebad534cb6c0e9f0797d3563e5409d7e068e48356c67ce519246cd9c560e881453df97cbba562018811e6cf8c327f399d1d1253ab47a19f4a0ccc7c6d86a9603e0551da310ea595d71305c4aad96819120a92cdbaf1f77ec8df9cc7c838c0d4de1e8692dd81da38268d1d71324bcffdafbe5122e4b81828e021e936d83ae8021eac592aa52cd296b5ce392c7173d622f8e07d18f59bb1b08ba15211af6703463b09b593af3c37735296816d9f2e7a369354a5374ea3955e14ca8ac56d5bfe4aef7a21bd825d6ae85530bee5d2aaaa4914981b3dfdb2e92ec2a27c83d74b59e84ff5c056f7d8945745f2efc3dcf28f288c6cd8383700fb2312f7001f24dd40015e436ae23e052fe9070ea9535b9c989898a9bda3d5382cf10e432fae6ccf0c825b3e6436edd3a9f8846e5606f8563931b5f29ba407c5236e5730225dda211a8504ec1817bc935e1fd9a532b648c502df302ed2063aed008fd5676131ac9e95998e9447b02bd29d77e38fcfd2959f2de929b31970335eb2a74348cc6918bc35b9bf749eab0fe304c946cd9e1ca284e6853c42646e60b6b39e0d3fb3c260abfc5c1b4ca3c3770f344118ca7c7f5c1ad1f123f8f369cd60afc3cdb3e9e81968c5c9fa7c8b014ffe0508dd4f0a2a976d5d1ca8fc9a
d7a237d92cfe7b41413d934d6e142824b252699397e48e4bac4e91ebc10602720684bd0863773c548f9a2f9724245e47b129ecf65afd7252aac48c8a8d6fd3d888af592a01fb02dc71ed7538a700d3d16243e4621e0fcf9f8ed2b4e11c9fa9a95338bb1dac74a7d9bc4eb8cbf900b634a2a56469c00f5994e4f0934bdb947640e6d67e47d0b621aacd632bfd3c800bd7d93bd329f494a90e06ed51535831bd6e07ac1b4b11434ef3918fa9511813a002913f33f836454798b8d1787fea9a4c4743ba091ed192ed92f4d33e43a226bf9503e1a83a16dd340b3cbbf38af6db0d99201da8de529b4225f3d2fa2aad6621afc6c79ef3537720591edfc681ae6d00ede53ed724fc71b23b90d2e9b7158aaee98d626a4fe029107df2cb5f90147e07ebe423b1519d848af18af365c71bfd0665db46be493bbe99b79a188de0cf3594aef2299f0324075bdce9eb0b87bc29d62401ba4fd6ae48b1ba33261b5b845279becf38ee03e3dc5c45303321c5fac96fd02a3ad8c9e3b02127b320501333c9e6360440d1ad5e64a6239501502dde1a49c9abe33b66098458eee3d611bb06ffcd234a1b9aef4af5021cd61f0de6789f822ee116b5078aae8c129e8391d8987500d322b58edd1595dc570b57341f2df221b94a96ab7fbcf32a8ca9684196455694024623d7ed49f7d66e8dd453c0bae50e0d8b34377b22d0ece059e2c385dfc70b9089fcd27577c51f4d870b5738ee2b68c361a67809c105c7848b68860a829f29930857a9f9d40b14fd2384ac43bafdf43c0661103794c4bd07d1cfdd4681b6aeaefad53d4c1473359bcc5a83b09189352e5bb9a7498dd0effb89c35aad26954551f8b0621374b449bf515630bd3974dca982279733470fdd059aa9c3df403d8f22b38c4709c82d8f12b888e22990350490e16179caf406293cc9e65f116bafcbe96af132f679877061107a2f690a82a8cb46eea57a90abd23798c5937c6fe6b17be3f9bfa01ce117d2c268181b9095bf49f395fea07ca03838de0588c5e2db633e836d64488c1421e653ea52d810d096048c092d0da6e02fa6613890219f51a76148c8588c2487b171a28f17b7a299204874af0131725d793481333be5f08e86ca837a226850b0c1060891603bfecf9e55cddd22c0dbb28d495342d9cc3de8409f72e52a0115141cffe755c74f061c1a770428ccb0ae59536ee6fc074fbfc6cacb51a549d327527e20f8407477e60355863f1153f9ce95641198663c968874e7fdb29407bd771d94fdda8180cbb0358f5874738db705924b8cbe0cd5e1484aeb64542fe8f38667b7c34baf818c63b1e18440e9fba575254d063fd49f24ef26432f4eb323f3836972dca87473e3e9bb26dc3be236c3aae6bc8a6da567442309da0e8450e242fc9db836
e2964f2c76a3b80a2c677979882dda7d7ebf62c93664018bcf4ec431fe6b403d49b3b36618b9c07c2d0d4569cb8d52223903debc72ec113955b206c34f1ae5300990ccfc0180f47d91afdb542b6312d12aeff7e19c645dc0b9fe6e3288e9539f6d5870f99882df187bfa6d24d179dfd1dac22212c8b5339f7171a3efc15b760fed8f68538bc5cbd845c2d1ab41f3a6c692820653eaef7930c02fbe6061d93805d73decdbb945572a7c44ed0241982a6e4d2d730898f82b3d9877cb7bca41cc6dcee67aa0c3d6db76f0b0a708ace0031113e48429de5d886c10e9200f68f32263a2fbf44a5992c2459fda7b8796ba796e3a0804fc25992ed2c9a5fe0580a6b809200ecde6caa0364b58be11564dcb9a616766dd7906db5636ee708b0204f38d309466d8d4a162965dd727e29f5a6c133e9b4ed5bafe803e479f9b2a7640c942c4a40b14ac7dc9828546052761a070f6404008f1ec3605836339c3da95a00b4fd81b2cabf88b51d2087d5b83e8c5b69bf96d8c72cbd278dad3bbb42b404b436f84ad688a22948adf60a81090f1e904291503c16e9f54b05fc76c881a5f95f0e732949e95d3f1bae2d3652a14fe0dda2d68879604657171856ef72637def2a96ac47d7b3fe86eb3198f5e0e626f06be86232305f2ae79ffcd2725e48208f9d8d63523f81915acc957563ab627cd6bc68c2a37d59fb0ed77a90aa9d085d6914a8ebada22a2c2d471b5163aeddd799d90fbb10ed6851ace2c4af504b7d572686700a59d6db46d5e42bb83f8e0c0ffe1dfa6582cc0b34c921ff6e85e83188d24906d5c08bb90069639e713051b3102b53e6f703e8210017878add5df68e6f2b108de279c5490e9eef5590185c4a1c744d4e00d244e1245a8805bd30407b1bc488db44870ccfd75a8af104df78efa2fb7ba31f048a263efdb3b63271fff4922bece9a71187108f65744a24f4947dc556b7440cb4fa45d296bb7f724588d1f245125b21ea063500029bd49650237f53899daf1312809552c81c5827341263cc807a29fe84746170cdfa1ff3838399a5645319bcaff674bb70efccdd88b3d3bb2f2d98111413585dc5d5bd5168f43b3f55e58972a5b2b9b3733febf02f931bd436648cb617c3794841aab961fe41277ab07812e1d3bc4ff6f4350a3e615bfba08c3b9480ef57904d3a16f7e916345202e3f93d11f7a7305170cb8c4eb9ac88ace8bbd1f377bdd5855d3162d6723d4435e84ce529b8f276a8927915ac759a0d04e5ca4a9d3da6291f0333b475df527e99fe38f7a4082662e8125936640c26dd1d17cf284ce6e2b17777a05aa0574f7793a6a062cc6f7263f7ab126b4528a17becfdec49ac0f7d8705aa1704af97fb861faa8a466161b2b5c08a5bacc79fe8500b913d65c8d3c52d1fd52d2ab2c
9f52196e712455619c1cd3e0f391b274487944240e2ed8858dd0823c801094310024ae3fe4dd1cf5a2b6487b42cc5937bbafb193ee331d87e378258963d49b9da90899bbb4b88e79f78e866b0213f4719f67da7bcc2fce073c01e87c62ea3cdbcd589cfc41281f2f4c757c742d6d1e"
-var hashFunc = sha3.NewKeccak256
+var hashFunc = sha3.NewLegacyKeccak256
var testKey Key
func init() {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/error.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/error.go
index 44261c08..a9d0616f 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/error.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/error.go
@@ -23,23 +23,15 @@ import (
const (
ErrInit = iota
ErrNotFound
- ErrIO
ErrUnauthorized
ErrInvalidValue
ErrDataOverflow
ErrNothingToReturn
- ErrCorruptData
ErrInvalidSignature
ErrNotSynced
- ErrPeriodDepth
- ErrCnt
)
var (
- ErrChunkNotFound = errors.New("chunk not found")
- ErrFetching = errors.New("chunk still fetching")
- ErrChunkInvalid = errors.New("invalid chunk")
- ErrChunkForward = errors.New("cannot forward")
- ErrChunkUnavailable = errors.New("chunk unavailable")
- ErrChunkTimeout = errors.New("timeout")
+ ErrChunkNotFound = errors.New("chunk not found")
+ ErrChunkInvalid = errors.New("invalid chunk")
)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/handler.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/handler.go
index 9e264028..063d3e92 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/handler.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/handler.go
@@ -23,7 +23,6 @@ import (
"context"
"fmt"
"sync"
- "time"
"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
@@ -32,12 +31,10 @@ import (
)
type Handler struct {
- chunkStore *storage.NetStore
- HashSize int
- cache map[uint64]*cacheEntry
- cacheLock sync.RWMutex
- storeTimeout time.Duration
- queryMaxPeriods uint32
+ chunkStore *storage.NetStore
+ HashSize int
+ cache map[uint64]*cacheEntry
+ cacheLock sync.RWMutex
}
// HandlerParams pass parameters to the Handler constructor NewHandler
@@ -82,9 +79,8 @@ func (h *Handler) SetStore(store *storage.NetStore) {
// Validate is a chunk validation method
// If it looks like a feed update, the chunk address is checked against the userAddr of the update's signature
// It implements the storage.ChunkValidator interface
-func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
- dataLength := len(data)
- if dataLength < minimumSignedUpdateLength {
+func (h *Handler) Validate(chunk storage.Chunk) bool {
+ if len(chunk.Data()) < minimumSignedUpdateLength {
return false
}
@@ -94,8 +90,8 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
// First, deserialize the chunk
var r Request
- if err := r.fromChunk(chunkAddr, data); err != nil {
- log.Debug("Invalid feed update chunk", "addr", chunkAddr.Hex(), "err", err.Error())
+ if err := r.fromChunk(chunk); err != nil {
+ log.Debug("Invalid feed update chunk", "addr", chunk.Address(), "err", err)
return false
}
@@ -198,7 +194,7 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error)
}
var request Request
- if err := request.fromChunk(chunk.Address(), chunk.Data()); err != nil {
+ if err := request.fromChunk(chunk); err != nil {
return nil, nil
}
if request.Time <= timeLimit {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/handler_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/handler_test.go
index fb2ef3a6..2f8a5245 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/handler_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/handler_test.go
@@ -40,7 +40,6 @@ var (
}
cleanF func()
subtopicName = "føø.bar"
- hashfunc = storage.MakeHashFunc(storage.DefaultHash)
)
func init() {
@@ -366,7 +365,7 @@ func TestValidator(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if !rh.Validate(chunk.Address(), chunk.Data()) {
+ if !rh.Validate(chunk) {
t.Fatal("Chunk validator fail on update chunk")
}
@@ -375,7 +374,7 @@ func TestValidator(t *testing.T) {
address[0] = 11
address[15] = 99
- if rh.Validate(address, chunk.Data()) {
+ if rh.Validate(storage.NewChunk(address, chunk.Data())) {
t.Fatal("Expected Validate to fail with false chunk address")
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/request.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/request.go
index 6968d8b9..dd91a7cf 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/request.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/request.go
@@ -171,9 +171,11 @@ func (r *Request) toChunk() (storage.Chunk, error) {
}
// fromChunk populates this structure from chunk data. It does not verify the signature is valid.
-func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error {
+func (r *Request) fromChunk(chunk storage.Chunk) error {
// for update chunk layout see Request definition
+ chunkdata := chunk.Data()
+
//deserialize the feed update portion
if err := r.Update.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil {
return err
@@ -189,7 +191,7 @@ func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error
}
r.Signature = signature
- r.idAddr = updateAddr
+ r.idAddr = chunk.Address()
r.binaryData = chunkdata
return nil
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/request_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/request_test.go
index f5de32b7..c30158fd 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/request_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/request_test.go
@@ -197,7 +197,7 @@ func TestUpdateChunkSerializationErrorChecking(t *testing.T) {
// Test that parseUpdate fails if the chunk is too small
var r Request
- if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1+signatureLength)); err == nil {
+ if err := r.fromChunk(storage.NewChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1+signatureLength))); err == nil {
t.Fatalf("Expected request.fromChunk to fail when chunkData contains less than %d bytes", minimumUpdateDataLength)
}
@@ -226,7 +226,7 @@ func TestUpdateChunkSerializationErrorChecking(t *testing.T) {
compareByteSliceToExpectedHex(t, "chunk", chunk.Data(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019416c206269656e206861636572206a616dc3a173206c652066616c7461207072656d696f5a0ffe0bc27f207cd5b00944c8b9cee93e08b89b5ada777f123ac535189333f174a6a4ca2f43a92c4a477a49d774813c36ce8288552c58e6205b0ac35d0507eb00")
var recovered Request
- recovered.fromChunk(chunk.Address(), chunk.Data())
+ recovered.fromChunk(chunk)
if !reflect.DeepEqual(recovered, r) {
t.Fatal("Expected recovered feed update request to equal the original one")
}
@@ -282,7 +282,7 @@ func TestReverse(t *testing.T) {
// check that we can recover the owner account from the update chunk's signature
var checkUpdate Request
- if err := checkUpdate.fromChunk(chunk.Address(), chunk.Data()); err != nil {
+ if err := checkUpdate.fromChunk(chunk); err != nil {
t.Fatal(err)
}
checkdigest, err := checkUpdate.GetDigest()
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/timestampprovider.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/timestampprovider.go
index 072dc3a4..fb60cea9 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/timestampprovider.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/feed/timestampprovider.go
@@ -17,7 +17,6 @@
package feed
import (
- "encoding/binary"
"encoding/json"
"time"
)
@@ -30,32 +29,11 @@ type Timestamp struct {
Time uint64 `json:"time"` // Unix epoch timestamp, in seconds
}
-// 8 bytes uint64 Time
-const timestampLength = 8
-
// timestampProvider interface describes a source of timestamp information
type timestampProvider interface {
Now() Timestamp // returns the current timestamp information
}
-// binaryGet populates the timestamp structure from the given byte slice
-func (t *Timestamp) binaryGet(data []byte) error {
- if len(data) != timestampLength {
- return NewError(ErrCorruptData, "timestamp data has the wrong size")
- }
- t.Time = binary.LittleEndian.Uint64(data[:8])
- return nil
-}
-
-// binaryPut Serializes a Timestamp to a byte slice
-func (t *Timestamp) binaryPut(data []byte) error {
- if len(data) != timestampLength {
- return NewError(ErrCorruptData, "timestamp data has the wrong size")
- }
- binary.LittleEndian.PutUint64(data, t.Time)
- return nil
-}
-
// UnmarshalJSON implements the json.Unmarshaller interface
func (t *Timestamp) UnmarshalJSON(data []byte) error {
return json.Unmarshal(data, &t.Time)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/hasherstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/hasherstore.go
index ff18e64c..23b52ee0 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/hasherstore.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/hasherstore.go
@@ -21,9 +21,9 @@ import (
"fmt"
"sync/atomic"
- "github.com/ethereum/go-ethereum/crypto/sha3"
ch "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/storage/encryption"
+ "golang.org/x/crypto/sha3"
)
type hasherStore struct {
@@ -232,11 +232,11 @@ func (h *hasherStore) decrypt(chunkData ChunkData, key encryption.Key) ([]byte,
}
func (h *hasherStore) newSpanEncryption(key encryption.Key) encryption.Encryption {
- return encryption.New(key, 0, uint32(ch.DefaultSize/h.refSize), sha3.NewKeccak256)
+ return encryption.New(key, 0, uint32(ch.DefaultSize/h.refSize), sha3.NewLegacyKeccak256)
}
func (h *hasherStore) newDataEncryption(key encryption.Key) encryption.Encryption {
- return encryption.New(key, int(ch.DefaultSize), 0, sha3.NewKeccak256)
+ return encryption.New(key, int(ch.DefaultSize), 0, sha3.NewLegacyKeccak256)
}
func (h *hasherStore) storeChunk(ctx context.Context, chunk *chunk) {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go
index bd4f6b91..635d3342 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go
@@ -248,10 +248,6 @@ func U64ToBytes(val uint64) []byte {
return data
}
-func (s *LDBStore) updateIndexAccess(index *dpaDBIndex) {
- index.Access = s.accessCnt
-}
-
func getIndexKey(hash Address) []byte {
hashSize := len(hash)
key := make([]byte, hashSize+1)
@@ -777,18 +773,6 @@ func (s *LDBStore) BinIndex(po uint8) uint64 {
return s.bucketCnt[po]
}
-func (s *LDBStore) Size() uint64 {
- s.lock.RLock()
- defer s.lock.RUnlock()
- return s.entryCnt
-}
-
-func (s *LDBStore) CurrentStorageIndex() uint64 {
- s.lock.RLock()
- defer s.lock.RUnlock()
- return s.dataIdx
-}
-
// Put adds a chunk to the database, adding indices and incrementing global counters.
// If it already exists, it merely increments the access count of the existing entry.
// Is thread safe
@@ -810,11 +794,11 @@ func (s *LDBStore) Put(ctx context.Context, chunk Chunk) error {
batch := s.batch
log.Trace("ldbstore.put: s.db.Get", "key", chunk.Address(), "ikey", fmt.Sprintf("%x", ikey))
- idata, err := s.db.Get(ikey)
+ _, err := s.db.Get(ikey)
if err != nil {
s.doPut(chunk, &index, po)
}
- idata = encodeIndex(&index)
+ idata := encodeIndex(&index)
s.batch.Put(ikey, idata)
// add the access-chunkindex index for garbage collection
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore_test.go
index e8b9ae39..1fe466f9 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore_test.go
@@ -79,14 +79,6 @@ func testPoFunc(k Address) (ret uint8) {
return uint8(Proximity(basekey, k[:]))
}
-func (db *testDbStore) close() {
- db.Close()
- err := os.RemoveAll(db.dir)
- if err != nil {
- panic(err)
- }
-}
-
func testDbStoreRandom(n int, chunksize int64, mock bool, t *testing.T) {
db, cleanup, err := newTestDbStore(mock, true)
defer cleanup()
@@ -453,7 +445,7 @@ func TestLDBStoreAddRemove(t *testing.T) {
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
for i := 0; i < n; i++ {
- ret, err := ldb.Get(nil, chunks[i].Address())
+ ret, err := ldb.Get(context.TODO(), chunks[i].Address())
if i%2 == 0 {
// expect even chunks to be missing
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go
index 111821ff..95656090 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go
@@ -92,7 +92,7 @@ func (ls *LocalStore) isValid(chunk Chunk) bool {
// ls.Validators contains a list of one validator per chunk type.
// if one validator succeeds, then the chunk is valid
for _, v := range ls.Validators {
- if valid = v.Validate(chunk.Address(), chunk.Data()); valid {
+ if valid = v.Validate(chunk); valid {
break
}
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore_test.go
index 7a07726d..7a4162a4 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore_test.go
@@ -118,7 +118,7 @@ func TestValidator(t *testing.T) {
type boolTestValidator bool
-func (self boolTestValidator) Validate(addr Address, data []byte) bool {
+func (self boolTestValidator) Validate(chunk Chunk) bool {
return bool(self)
}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/memstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/memstore.go
index 36b1e00d..86e5813d 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/memstore.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/memstore.go
@@ -57,7 +57,7 @@ func (m *MemStore) Get(_ context.Context, addr Address) (Chunk, error) {
if !ok {
return nil, ErrChunkNotFound
}
- return c.(*chunk), nil
+ return c.(Chunk), nil
}
func (m *MemStore) Put(_ context.Context, c Chunk) error {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mock.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mock.go
index 1fb71b70..626ba3fe 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mock.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mock.go
@@ -103,13 +103,6 @@ type Exporter interface {
Export(w io.Writer) (n int, err error)
}
-// ImportExporter is an interface for importing and exporting
-// mock store data to and from a tar archive.
-type ImportExporter interface {
- Importer
- Exporter
-}
-
// ExportedChunk is the structure that is saved in tar archive for
// each chunk as JSON-encoded bytes.
type ExportedChunk struct {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/test/test.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/test/test.go
index 10180985..69828b14 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/test/test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/test/test.go
@@ -196,17 +196,22 @@ func ImportExport(t *testing.T, outStore, inStore mock.GlobalStorer, n int) {
r, w := io.Pipe()
defer r.Close()
+ exportErrChan := make(chan error)
go func() {
defer w.Close()
- if _, err := exporter.Export(w); err != nil {
- t.Fatalf("export: %v", err)
- }
+
+ _, err := exporter.Export(w)
+ exportErrChan <- err
}()
if _, err := importer.Import(r); err != nil {
t.Fatalf("import: %v", err)
}
+ if err := <-exportErrChan; err != nil {
+ t.Fatalf("export: %v", err)
+ }
+
for i, addr := range addrs {
chunkAddr := storage.Address(append(addr[:], []byte(strconv.FormatInt(int64(i)+1, 16))...))
data := []byte(strconv.FormatInt(int64(i)+1, 16))
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/netstore_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/netstore_test.go
index 8a09fa5a..2ed3e075 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/netstore_test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/netstore_test.go
@@ -20,6 +20,8 @@ import (
"bytes"
"context"
"crypto/rand"
+ "errors"
+ "fmt"
"io/ioutil"
"sync"
"testing"
@@ -114,19 +116,24 @@ func TestNetStoreGetAndPut(t *testing.T) {
defer cancel()
c := make(chan struct{}) // this channel ensures that the gouroutine with the Put does not run earlier than the Get
+ putErrC := make(chan error)
go func() {
<-c // wait for the Get to be called
time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
// check if netStore created a fetcher in the Get call for the unavailable chunk
if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
- t.Fatal("Expected netStore to use a fetcher for the Get call")
+ putErrC <- errors.New("Expected netStore to use a fetcher for the Get call")
+ return
}
err := netStore.Put(ctx, chunk)
if err != nil {
- t.Fatalf("Expected no err got %v", err)
+ putErrC <- fmt.Errorf("Expected no err got %v", err)
+ return
}
+
+ putErrC <- nil
}()
close(c)
@@ -134,6 +141,10 @@ func TestNetStoreGetAndPut(t *testing.T) {
if err != nil {
t.Fatalf("Expected no err got %v", err)
}
+
+ if err := <-putErrC; err != nil {
+ t.Fatal(err)
+ }
// the retrieved chunk should be the same as what we Put
if !bytes.Equal(recChunk.Address(), chunk.Address()) || !bytes.Equal(recChunk.Data(), chunk.Data()) {
t.Fatalf("Different chunk received than what was put")
@@ -200,14 +211,18 @@ func TestNetStoreGetTimeout(t *testing.T) {
defer cancel()
c := make(chan struct{}) // this channel ensures that the gouroutine does not run earlier than the Get
+ fetcherErrC := make(chan error)
go func() {
<-c // wait for the Get to be called
time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
// check if netStore created a fetcher in the Get call for the unavailable chunk
if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
- t.Fatal("Expected netStore to use a fetcher for the Get call")
+ fetcherErrC <- errors.New("Expected netStore to use a fetcher for the Get call")
+ return
}
+
+ fetcherErrC <- nil
}()
close(c)
@@ -220,6 +235,10 @@ func TestNetStoreGetTimeout(t *testing.T) {
t.Fatalf("Expected context.DeadLineExceeded err got %v", err)
}
+ if err := <-fetcherErrC; err != nil {
+ t.Fatal(err)
+ }
+
// A fetcher was created, check if it has been removed after timeout
if netStore.fetchers.Len() != 0 {
t.Fatal("Expected netStore to remove the fetcher after timeout")
@@ -243,20 +262,29 @@ func TestNetStoreGetCancel(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
c := make(chan struct{}) // this channel ensures that the gouroutine with the cancel does not run earlier than the Get
+ fetcherErrC := make(chan error, 1)
go func() {
<-c // wait for the Get to be called
time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
// check if netStore created a fetcher in the Get call for the unavailable chunk
if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
- t.Fatal("Expected netStore to use a fetcher for the Get call")
+ fetcherErrC <- errors.New("Expected netStore to use a fetcher for the Get call")
+ return
}
+
+ fetcherErrC <- nil
cancel()
}()
close(c)
+
// We call Get with an unavailable chunk, so it will create a fetcher and wait for delivery
_, err := netStore.Get(ctx, chunk.Address())
+ if err := <-fetcherErrC; err != nil {
+ t.Fatal(err)
+ }
+
// After the context is cancelled above Get should return with an error
if err != context.Canceled {
t.Fatalf("Expected context.Canceled err got %v", err)
@@ -286,46 +314,55 @@ func TestNetStoreMultipleGetAndPut(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
+ putErrC := make(chan error)
go func() {
// sleep to make sure Put is called after all the Get
time.Sleep(500 * time.Millisecond)
// check if netStore created exactly one fetcher for all Get calls
if netStore.fetchers.Len() != 1 {
- t.Fatal("Expected netStore to use one fetcher for all Get calls")
+ putErrC <- errors.New("Expected netStore to use one fetcher for all Get calls")
+ return
}
err := netStore.Put(ctx, chunk)
if err != nil {
- t.Fatalf("Expected no err got %v", err)
+ putErrC <- fmt.Errorf("Expected no err got %v", err)
+ return
}
+ putErrC <- nil
}()
+ count := 4
// call Get 4 times for the same unavailable chunk. The calls will be blocked until the Put above.
- getWG := sync.WaitGroup{}
- for i := 0; i < 4; i++ {
- getWG.Add(1)
+ errC := make(chan error)
+ for i := 0; i < count; i++ {
go func() {
- defer getWG.Done()
recChunk, err := netStore.Get(ctx, chunk.Address())
if err != nil {
- t.Fatalf("Expected no err got %v", err)
+ errC <- fmt.Errorf("Expected no err got %v", err)
}
if !bytes.Equal(recChunk.Address(), chunk.Address()) || !bytes.Equal(recChunk.Data(), chunk.Data()) {
- t.Fatalf("Different chunk received than what was put")
+ errC <- errors.New("Different chunk received than what was put")
}
+ errC <- nil
}()
}
- finishedC := make(chan struct{})
- go func() {
- getWG.Wait()
- close(finishedC)
- }()
+ if err := <-putErrC; err != nil {
+ t.Fatal(err)
+ }
+
+ timeout := time.After(1 * time.Second)
// The Get calls should return after Put, so no timeout expected
- select {
- case <-finishedC:
- case <-time.After(1 * time.Second):
- t.Fatalf("Timeout waiting for Get calls to return")
+ for i := 0; i < count; i++ {
+ select {
+ case err := <-errC:
+ if err != nil {
+ t.Fatal(err)
+ }
+ case <-timeout:
+ t.Fatalf("Timeout waiting for Get calls to return")
+ }
}
// A fetcher was created, check if it has been removed after cancel
@@ -448,7 +485,7 @@ func TestNetStoreGetCallsOffer(t *testing.T) {
defer cancel()
// We call get for a not available chunk, it will timeout because the chunk is not delivered
- chunk, err := netStore.Get(ctx, chunk.Address())
+ _, err := netStore.Get(ctx, chunk.Address())
if err != context.DeadlineExceeded {
t.Fatalf("Expect error %v got %v", context.DeadlineExceeded, err)
@@ -542,16 +579,12 @@ func TestNetStoreFetchFuncCalledMultipleTimes(t *testing.T) {
t.Fatalf("Expected netStore to have one fetcher for the requested chunk")
}
- // Call wait three times parallelly
- wg := sync.WaitGroup{}
- for i := 0; i < 3; i++ {
- wg.Add(1)
+ // Call wait three times in parallel
+ count := 3
+ errC := make(chan error)
+ for i := 0; i < count; i++ {
go func() {
- err := wait(ctx)
- if err != nil {
- t.Fatalf("Expected no err got %v", err)
- }
- wg.Done()
+ errC <- wait(ctx)
}()
}
@@ -570,7 +603,12 @@ func TestNetStoreFetchFuncCalledMultipleTimes(t *testing.T) {
}
// wait until all wait calls return (because the chunk is delivered)
- wg.Wait()
+ for i := 0; i < count; i++ {
+ err := <-errC
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
// There should be no more fetchers for the delivered chunk
if netStore.fetchers.Len() != 0 {
@@ -606,23 +644,29 @@ func TestNetStoreFetcherLifeCycleWithTimeout(t *testing.T) {
t.Fatalf("Expected netStore to have one fetcher for the requested chunk")
}
- // Call wait three times parallelly
- wg := sync.WaitGroup{}
- for i := 0; i < 3; i++ {
- wg.Add(1)
+ // Call wait three times in parallel
+ count := 3
+ errC := make(chan error)
+ for i := 0; i < count; i++ {
go func() {
- defer wg.Done()
rctx, rcancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer rcancel()
err := wait(rctx)
if err != context.DeadlineExceeded {
- t.Fatalf("Expected err %v got %v", context.DeadlineExceeded, err)
+ errC <- fmt.Errorf("Expected err %v got %v", context.DeadlineExceeded, err)
+ return
}
+ errC <- nil
}()
}
// wait until all wait calls timeout
- wg.Wait()
+ for i := 0; i < count; i++ {
+ err := <-errC
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
// There should be no more fetchers after timeout
if netStore.fetchers.Len() != 0 {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/pyramid.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/pyramid.go
index f74eef06..e5bd7a76 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/pyramid.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/pyramid.go
@@ -71,11 +71,6 @@ const (
splitTimeout = time.Minute * 5
)
-const (
- DataChunk = 0
- TreeChunk = 1
-)
-
type PyramidSplitterParams struct {
SplitterParams
getter Getter
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/types.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/types.go
index 42557766..d7923522 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/types.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/types.go
@@ -23,62 +23,21 @@ import (
"crypto/rand"
"encoding/binary"
"fmt"
- "hash"
"io"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/swarm/bmt"
ch "github.com/ethereum/go-ethereum/swarm/chunk"
+ "golang.org/x/crypto/sha3"
)
const MaxPO = 16
const AddressLength = 32
-type Hasher func() hash.Hash
type SwarmHasher func() SwarmHash
-// Peer is the recorded as Source on the chunk
-// should probably not be here? but network should wrap chunk object
-type Peer interface{}
-
type Address []byte
-func (a Address) Size() uint {
- return uint(len(a))
-}
-
-func (a Address) isEqual(y Address) bool {
- return bytes.Equal(a, y)
-}
-
-func (a Address) bits(i, j uint) uint {
- ii := i >> 3
- jj := i & 7
- if ii >= a.Size() {
- return 0
- }
-
- if jj+j <= 8 {
- return uint((a[ii] >> jj) & ((1 << j) - 1))
- }
-
- res := uint(a[ii] >> jj)
- jj = 8 - jj
- j -= jj
- for j != 0 {
- ii++
- if j < 8 {
- res += uint(a[ii]&((1<>uint8(7-j))&0x01 != 0 {
return i*8 + j
@@ -112,10 +68,6 @@ func Proximity(one, other []byte) (ret int) {
return MaxPO
}
-func IsZeroAddr(addr Address) bool {
- return len(addr) == 0 || bytes.Equal(addr, ZeroAddr)
-}
-
var ZeroAddr = Address(common.Hash{}.Bytes())
func MakeHashFunc(hash string) SwarmHasher {
@@ -123,10 +75,10 @@ func MakeHashFunc(hash string) SwarmHasher {
case "SHA256":
return func() SwarmHash { return &HashWithLength{crypto.SHA256.New()} }
case "SHA3":
- return func() SwarmHash { return &HashWithLength{sha3.NewKeccak256()} }
+ return func() SwarmHash { return &HashWithLength{sha3.NewLegacyKeccak256()} }
case "BMT":
return func() SwarmHash {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
hasherSize := hasher().Size()
segmentCount := ch.DefaultSize / hasherSize
pool := bmt.NewTreePool(hasher, segmentCount, bmt.PoolSize)
@@ -184,9 +136,6 @@ func (c AddressCollection) Swap(i, j int) {
// Chunk interface implemented by context.Contexts and data chunks
type Chunk interface {
Address() Address
- Payload() []byte
- SpanBytes() []byte
- Span() int64
Data() []byte
}
@@ -208,25 +157,10 @@ func (c *chunk) Address() Address {
return c.addr
}
-func (c *chunk) SpanBytes() []byte {
- return c.sdata[:8]
-}
-
-func (c *chunk) Span() int64 {
- if c.span == -1 {
- c.span = int64(binary.LittleEndian.Uint64(c.sdata[:8]))
- }
- return c.span
-}
-
func (c *chunk) Data() []byte {
return c.sdata
}
-func (c *chunk) Payload() []byte {
- return c.sdata[8:]
-}
-
// String() for pretty printing
func (self *chunk) String() string {
return fmt.Sprintf("Address: %v TreeSize: %v Chunksize: %v", self.addr.Log(), self.span, len(self.sdata))
@@ -322,12 +256,8 @@ func (c ChunkData) Size() uint64 {
return binary.LittleEndian.Uint64(c[:8])
}
-func (c ChunkData) Data() []byte {
- return c[8:]
-}
-
type ChunkValidator interface {
- Validate(addr Address, data []byte) bool
+ Validate(chunk Chunk) bool
}
// Provides method for validation of content address in chunks
@@ -344,7 +274,8 @@ func NewContentAddressValidator(hasher SwarmHasher) *ContentAddressValidator {
}
// Validate that the given key is a valid content address for the given data
-func (v *ContentAddressValidator) Validate(addr Address, data []byte) bool {
+func (v *ContentAddressValidator) Validate(chunk Chunk) bool {
+ data := chunk.Data()
if l := len(data); l < 9 || l > ch.DefaultSize+8 {
// log.Error("invalid chunk size", "chunk", addr.Hex(), "size", l)
return false
@@ -355,7 +286,7 @@ func (v *ContentAddressValidator) Validate(addr Address, data []byte) bool {
hasher.Write(data[8:])
hash := hasher.Sum(nil)
- return bytes.Equal(hash, addr[:])
+ return bytes.Equal(hash, chunk.Address())
}
type ChunkStore interface {
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/types_test.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/types_test.go
new file mode 100644
index 00000000..32907bbf
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/types_test.go
@@ -0,0 +1,186 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package storage
+
+import (
+ "strconv"
+ "testing"
+)
+
+// TestProximity validates Proximity function with explicit
+// values in a table-driven test. It is highly dependant on
+// MaxPO constant and it validates cases up to MaxPO=32.
+func TestProximity(t *testing.T) {
+ // integer from base2 encoded string
+ bx := func(s string) uint8 {
+ i, err := strconv.ParseUint(s, 2, 8)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return uint8(i)
+ }
+ // adjust expected bins in respect to MaxPO
+ limitPO := func(po uint8) uint8 {
+ if po > MaxPO {
+ return MaxPO
+ }
+ return po
+ }
+ base := []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00000000")}
+ for _, tc := range []struct {
+ addr []byte
+ po uint8
+ }{
+ {
+ addr: base,
+ po: MaxPO,
+ },
+ {
+ addr: []byte{bx("10000000"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(0),
+ },
+ {
+ addr: []byte{bx("01000000"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(1),
+ },
+ {
+ addr: []byte{bx("00100000"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(2),
+ },
+ {
+ addr: []byte{bx("00010000"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(3),
+ },
+ {
+ addr: []byte{bx("00001000"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(4),
+ },
+ {
+ addr: []byte{bx("00000100"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(5),
+ },
+ {
+ addr: []byte{bx("00000010"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(6),
+ },
+ {
+ addr: []byte{bx("00000001"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(7),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("10000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(8),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("01000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(9),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00100000"), bx("00000000"), bx("00000000")},
+ po: limitPO(10),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00010000"), bx("00000000"), bx("00000000")},
+ po: limitPO(11),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00001000"), bx("00000000"), bx("00000000")},
+ po: limitPO(12),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000100"), bx("00000000"), bx("00000000")},
+ po: limitPO(13),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000010"), bx("00000000"), bx("00000000")},
+ po: limitPO(14),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000001"), bx("00000000"), bx("00000000")},
+ po: limitPO(15),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("10000000"), bx("00000000")},
+ po: limitPO(16),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("01000000"), bx("00000000")},
+ po: limitPO(17),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00100000"), bx("00000000")},
+ po: limitPO(18),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00010000"), bx("00000000")},
+ po: limitPO(19),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00001000"), bx("00000000")},
+ po: limitPO(20),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000100"), bx("00000000")},
+ po: limitPO(21),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000010"), bx("00000000")},
+ po: limitPO(22),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000001"), bx("00000000")},
+ po: limitPO(23),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("10000000")},
+ po: limitPO(24),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("01000000")},
+ po: limitPO(25),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00100000")},
+ po: limitPO(26),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00010000")},
+ po: limitPO(27),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00001000")},
+ po: limitPO(28),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00000100")},
+ po: limitPO(29),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00000010")},
+ po: limitPO(30),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00000001")},
+ po: limitPO(31),
+ },
+ } {
+ got := uint8(Proximity(base, tc.addr))
+ if got != tc.po {
+ t.Errorf("got %v bin, want %v", got, tc.po)
+ }
+ }
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go b/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go
index a4ff9405..db52675f 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go
@@ -74,8 +74,6 @@ type Swarm struct {
bzz *network.Bzz // the logistic manager
backend chequebook.Backend // simple blockchain Backend
privateKey *ecdsa.PrivateKey
- corsString string
- swapEnabled bool
netStore *storage.NetStore
sfs *fuse.SwarmFS // need this to cleanup all the active mounts on node exit
ps *pss.Pss
@@ -86,18 +84,6 @@ type Swarm struct {
tracerClose io.Closer
}
-type SwarmAPI struct {
- Api *api.API
- Backend chequebook.Backend
-}
-
-func (self *Swarm) API() *SwarmAPI {
- return &SwarmAPI{
- Api: self.api,
- Backend: self.backend,
- }
-}
-
// creates a new swarm service instance
// implements node.Service
// If mockStore is not nil, it will be used as the storage for chunk data.
@@ -479,14 +465,6 @@ func (self *Swarm) Protocols() (protos []p2p.Protocol) {
return
}
-func (self *Swarm) RegisterPssProtocol(spec *protocols.Spec, targetprotocol *p2p.Protocol, options *pss.ProtocolParams) (*pss.Protocol, error) {
- if !pss.IsActiveProtocol {
- return nil, fmt.Errorf("Pss protocols not available (built with !nopssprotocol tag)")
- }
- topic := pss.ProtocolTopic(spec)
- return pss.RegisterProtocol(self.ps, &topic, spec, targetprotocol, options)
-}
-
// implements node.Service
// APIs returns the RPC API descriptors the Swarm implementation offers
func (self *Swarm) APIs() []rpc.API {
@@ -518,6 +496,12 @@ func (self *Swarm) APIs() []rpc.API {
Service: self.sfs,
Public: false,
},
+ {
+ Namespace: "accounting",
+ Version: protocols.AccountingVersion,
+ Service: protocols.NewAccountingApi(self.accountingMetrics),
+ Public: false,
+ },
}
apis = append(apis, self.bzz.APIs()...)
@@ -529,10 +513,6 @@ func (self *Swarm) APIs() []rpc.API {
return apis
}
-func (self *Swarm) Api() *api.API {
- return self.api
-}
-
// SetChequebook ensures that the local checquebook is set up on chain.
func (self *Swarm) SetChequebook(ctx context.Context) error {
err := self.config.Swap.SetChequebook(ctx, self.backend, self.config.Path)
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go b/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go
index c3481090..831080eb 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 0 // Major version component of the current release
VersionMinor = 3 // Minor version component of the current release
- VersionPatch = 8 // Patch version component of the current release
+ VersionPatch = 9 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
diff --git a/vendor/github.com/ethereum/go-ethereum/tests/state_test_util.go b/vendor/github.com/ethereum/go-ethereum/tests/state_test_util.go
index 3683aae3..43628419 100644
--- a/vendor/github.com/ethereum/go-ethereum/tests/state_test_util.go
+++ b/vendor/github.com/ethereum/go-ethereum/tests/state_test_util.go
@@ -31,10 +31,10 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
// StateTest checks transaction processing without block context.
@@ -248,7 +248,7 @@ func (tx *stTransaction) toMessage(ps stPostState) (core.Message, error) {
}
func rlpHash(x interface{}) (h common.Hash) {
- hw := sha3.NewKeccak256()
+ hw := sha3.NewLegacyKeccak256()
rlp.Encode(hw, x)
hw.Sum(h[:0])
return h
diff --git a/vendor/github.com/ethereum/go-ethereum/trie/hasher.go b/vendor/github.com/ethereum/go-ethereum/trie/hasher.go
index 7b1d7793..9d6756b6 100644
--- a/vendor/github.com/ethereum/go-ethereum/trie/hasher.go
+++ b/vendor/github.com/ethereum/go-ethereum/trie/hasher.go
@@ -21,8 +21,8 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
type hasher struct {
@@ -57,7 +57,7 @@ var hasherPool = sync.Pool{
New: func() interface{} {
return &hasher{
tmp: make(sliceBuffer, 0, 550), // cap is as large as a full fullNode.
- sha: sha3.NewKeccak256().(keccakState),
+ sha: sha3.NewLegacyKeccak256().(keccakState),
}
},
}
diff --git a/vendor/golang.org/x/crypto/acme/acme.go b/vendor/golang.org/x/crypto/acme/acme.go
index 7df64764..c6fd9989 100644
--- a/vendor/golang.org/x/crypto/acme/acme.go
+++ b/vendor/golang.org/x/crypto/acme/acme.go
@@ -77,6 +77,10 @@ const (
type Client struct {
// Key is the account key used to register with a CA and sign requests.
// Key.Public() must return a *rsa.PublicKey or *ecdsa.PublicKey.
+ //
+ // The following algorithms are supported:
+ // RS256, ES256, ES384 and ES512.
+ // See RFC7518 for more details about the algorithms.
Key crypto.Signer
// HTTPClient optionally specifies an HTTP client to use
diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert.go b/vendor/golang.org/x/crypto/acme/autocert/autocert.go
index 4c2fc072..a50d9bfc 100644
--- a/vendor/golang.org/x/crypto/acme/autocert/autocert.go
+++ b/vendor/golang.org/x/crypto/acme/autocert/autocert.go
@@ -69,7 +69,7 @@ func HostWhitelist(hosts ...string) HostPolicy {
}
return func(_ context.Context, host string) error {
if !whitelist[host] {
- return errors.New("acme/autocert: host not configured")
+ return fmt.Errorf("acme/autocert: host %q not configured in HostWhitelist", host)
}
return nil
}
diff --git a/vendor/golang.org/x/crypto/acme/jws.go b/vendor/golang.org/x/crypto/acme/jws.go
index 6cbca25d..1093b503 100644
--- a/vendor/golang.org/x/crypto/acme/jws.go
+++ b/vendor/golang.org/x/crypto/acme/jws.go
@@ -25,7 +25,7 @@ func jwsEncodeJSON(claimset interface{}, key crypto.Signer, nonce string) ([]byt
if err != nil {
return nil, err
}
- alg, sha := jwsHasher(key)
+ alg, sha := jwsHasher(key.Public())
if alg == "" || !sha.Available() {
return nil, ErrUnsupportedKey
}
@@ -97,13 +97,16 @@ func jwkEncode(pub crypto.PublicKey) (string, error) {
}
// jwsSign signs the digest using the given key.
-// It returns ErrUnsupportedKey if the key type is unknown.
-// The hash is used only for RSA keys.
+// The hash is unused for ECDSA keys.
+//
+// Note: non-stdlib crypto.Signer implementations are expected to return
+// the signature in the format as specified in RFC7518.
+// See https://tools.ietf.org/html/rfc7518 for more details.
func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) {
- switch key := key.(type) {
- case *rsa.PrivateKey:
- return key.Sign(rand.Reader, digest, hash)
- case *ecdsa.PrivateKey:
+ if key, ok := key.(*ecdsa.PrivateKey); ok {
+ // The key.Sign method of ecdsa returns ASN1-encoded signature.
+ // So, we use the package Sign function instead
+ // to get R and S values directly and format the result accordingly.
r, s, err := ecdsa.Sign(rand.Reader, key, digest)
if err != nil {
return nil, err
@@ -118,18 +121,18 @@ func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error)
copy(sig[size*2-len(sb):], sb)
return sig, nil
}
- return nil, ErrUnsupportedKey
+ return key.Sign(rand.Reader, digest, hash)
}
// jwsHasher indicates suitable JWS algorithm name and a hash function
// to use for signing a digest with the provided key.
// It returns ("", 0) if the key is not supported.
-func jwsHasher(key crypto.Signer) (string, crypto.Hash) {
- switch key := key.(type) {
- case *rsa.PrivateKey:
+func jwsHasher(pub crypto.PublicKey) (string, crypto.Hash) {
+ switch pub := pub.(type) {
+ case *rsa.PublicKey:
return "RS256", crypto.SHA256
- case *ecdsa.PrivateKey:
- switch key.Params().Name {
+ case *ecdsa.PublicKey:
+ switch pub.Params().Name {
case "P-256":
return "ES256", crypto.SHA256
case "P-384":
diff --git a/vendor/golang.org/x/crypto/acme/jws_test.go b/vendor/golang.org/x/crypto/acme/jws_test.go
index 0ff0fb5a..ee30b1e1 100644
--- a/vendor/golang.org/x/crypto/acme/jws_test.go
+++ b/vendor/golang.org/x/crypto/acme/jws_test.go
@@ -5,6 +5,7 @@
package acme
import (
+ "crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
@@ -13,6 +14,7 @@ import (
"encoding/json"
"encoding/pem"
"fmt"
+ "io"
"math/big"
"testing"
)
@@ -241,6 +243,79 @@ func TestJWSEncodeJSONEC(t *testing.T) {
}
}
+type customTestSigner struct {
+ sig []byte
+ pub crypto.PublicKey
+}
+
+func (s *customTestSigner) Public() crypto.PublicKey { return s.pub }
+func (s *customTestSigner) Sign(io.Reader, []byte, crypto.SignerOpts) ([]byte, error) {
+ return s.sig, nil
+}
+
+func TestJWSEncodeJSONCustom(t *testing.T) {
+ claims := struct{ Msg string }{"hello"}
+ const (
+ // printf '{"Msg":"hello"}' | base64 | tr -d '=' | tr '/+' '_-'
+ payload = "eyJNc2ciOiJoZWxsbyJ9"
+ // printf 'testsig' | base64 | tr -d '='
+ testsig = "dGVzdHNpZw"
+
+ // printf '{"alg":"ES256","jwk":{"crv":"P-256","kty":"EC","x":,"y":,"nonce":"nonce"}' | \
+ // base64 | tr -d '=' | tr '/+' '_-'
+ es256phead = "eyJhbGciOiJFUzI1NiIsImp3ayI6eyJjcnYiOiJQLTI1NiIsImt0eSI6IkVDIiwieCI6IjVsaEV1" +
+ "ZzV4SzR4QkRaMm5BYmF4THRhTGl2ODVieEo3ZVBkMWRrTzIzSFEiLCJ5IjoiNGFpSzcyc0JlVUFH" +
+ "a3YwVGFMc213b2tZVVl5TnhHc1M1RU1JS3dzTklLayJ9LCJub25jZSI6Im5vbmNlIn0"
+
+ // {"alg":"RS256","jwk":{"e":"AQAB","kty":"RSA","n":"..."},"nonce":"nonce"}
+ rs256phead = "eyJhbGciOiJSUzI1NiIsImp3ayI6eyJlIjoiQVFBQiIsImt0eSI6" +
+ "IlJTQSIsIm4iOiI0eGdaM2VSUGt3b1J2eTdxZVJVYm1NRGUwVi14" +
+ "SDllV0xkdTBpaGVlTGxybUQybXFXWGZQOUllU0tBcGJuMzRnOFR1" +
+ "QVM5ZzV6aHE4RUxRM2ttanItS1Y4NkdBTWdJNlZBY0dscTNRcnpw" +
+ "VENmXzMwQWI3LXphd3JmUmFGT05hMUh3RXpQWTFLSG5HVmt4SmM4" +
+ "NWdOa3dZSTlTWTJSSFh0dmxuM3pzNXdJVE5yZG9zcUVYZWFJa1ZZ" +
+ "QkVoYmhOdTU0cHAza3hvNlR1V0xpOWU2cFhlV2V0RXdtbEJ3dFda" +
+ "bFBvaWIyajNUeExCa3NLWmZveUZ5ZWszODBtSGdKQXVtUV9JMmZq" +
+ "ajk4Xzk3bWszaWhPWTRBZ1ZkQ0RqMXpfR0NvWmtHNVJxN25iQ0d5" +
+ "b3N5S1d5RFgwMFpzLW5OcVZob0xlSXZYQzRubldkSk1aNnJvZ3h5" +
+ "UVEifSwibm9uY2UiOiJub25jZSJ9"
+ )
+
+ tt := []struct {
+ alg, phead string
+ pub crypto.PublicKey
+ }{
+ {"RS256", rs256phead, testKey.Public()},
+ {"ES256", es256phead, testKeyEC.Public()},
+ }
+ for _, tc := range tt {
+ tc := tc
+ t.Run(tc.alg, func(t *testing.T) {
+ signer := &customTestSigner{
+ sig: []byte("testsig"),
+ pub: tc.pub,
+ }
+ b, err := jwsEncodeJSON(claims, signer, "nonce")
+ if err != nil {
+ t.Fatal(err)
+ }
+ var j struct{ Protected, Payload, Signature string }
+ if err := json.Unmarshal(b, &j); err != nil {
+ t.Fatal(err)
+ }
+ if j.Protected != tc.phead {
+ t.Errorf("j.Protected = %q\nwant %q", j.Protected, tc.phead)
+ }
+ if j.Payload != payload {
+ t.Errorf("j.Payload = %q\nwant %q", j.Payload, payload)
+ }
+ if j.Signature != testsig {
+ t.Errorf("j.Signature = %q\nwant %q", j.Signature, testsig)
+ }
+ })
+ }
+}
+
func TestJWKThumbprintRSA(t *testing.T) {
// Key example from RFC 7638
const base64N = "0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAt" +
diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt_test.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt_test.go
index aecf759e..b7162d82 100644
--- a/vendor/golang.org/x/crypto/bcrypt/bcrypt_test.go
+++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt_test.go
@@ -209,19 +209,19 @@ func TestMinorNotRequired(t *testing.T) {
func BenchmarkEqual(b *testing.B) {
b.StopTimer()
passwd := []byte("somepasswordyoulike")
- hash, _ := GenerateFromPassword(passwd, 10)
+ hash, _ := GenerateFromPassword(passwd, DefaultCost)
b.StartTimer()
for i := 0; i < b.N; i++ {
CompareHashAndPassword(hash, passwd)
}
}
-func BenchmarkGeneration(b *testing.B) {
+func BenchmarkDefaultCost(b *testing.B) {
b.StopTimer()
passwd := []byte("mylongpassword1234")
b.StartTimer()
for i := 0; i < b.N; i++ {
- GenerateFromPassword(passwd, 10)
+ GenerateFromPassword(passwd, DefaultCost)
}
}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go
index c814496a..52c414db 100644
--- a/vendor/golang.org/x/crypto/blake2b/blake2x.go
+++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go
@@ -29,7 +29,7 @@ type XOF interface {
}
// OutputLengthUnknown can be used as the size argument to NewXOF to indicate
-// the the length of the output is not known in advance.
+// the length of the output is not known in advance.
const OutputLengthUnknown = 0
// magicUnknownOutputLength is a magic value for the output size that indicates
diff --git a/vendor/golang.org/x/crypto/blake2s/blake2x.go b/vendor/golang.org/x/crypto/blake2s/blake2x.go
index eaff2a7f..828749ff 100644
--- a/vendor/golang.org/x/crypto/blake2s/blake2x.go
+++ b/vendor/golang.org/x/crypto/blake2s/blake2x.go
@@ -29,7 +29,7 @@ type XOF interface {
}
// OutputLengthUnknown can be used as the size argument to NewXOF to indicate
-// the the length of the output is not known in advance.
+// the length of the output is not known in advance.
const OutputLengthUnknown = 0
// magicUnknownOutputLength is a magic value for the output size that indicates
diff --git a/vendor/golang.org/x/crypto/bn256/gfp12.go b/vendor/golang.org/x/crypto/bn256/gfp12.go
index f084eddf..2b0151eb 100644
--- a/vendor/golang.org/x/crypto/bn256/gfp12.go
+++ b/vendor/golang.org/x/crypto/bn256/gfp12.go
@@ -125,8 +125,8 @@ func (e *gfP12) Mul(a, b *gfP12, pool *bnPool) *gfP12 {
}
func (e *gfP12) MulScalar(a *gfP12, b *gfP6, pool *bnPool) *gfP12 {
- e.x.Mul(e.x, b, pool)
- e.y.Mul(e.y, b, pool)
+ e.x.Mul(a.x, b, pool)
+ e.y.Mul(a.y, b, pool)
return e
}
diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go
index 29b4c764..ca7b1db5 100644
--- a/vendor/golang.org/x/crypto/cryptobyte/builder.go
+++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go
@@ -50,8 +50,14 @@ func NewFixedBuilder(buffer []byte) *Builder {
}
}
+// SetError sets the value to be returned as the error from Bytes. Writes
+// performed after calling SetError are ignored.
+func (b *Builder) SetError(err error) {
+ b.err = err
+}
+
// Bytes returns the bytes written by the builder or an error if one has
-// occurred during during building.
+// occurred during building.
func (b *Builder) Bytes() ([]byte, error) {
if b.err != nil {
return nil, b.err
@@ -94,7 +100,7 @@ func (b *Builder) AddBytes(v []byte) {
b.add(v...)
}
-// BuilderContinuation is continuation-passing interface for building
+// BuilderContinuation is a continuation-passing interface for building
// length-prefixed byte sequences. Builder methods for length-prefixed
// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation
// supplied to them. The child builder passed to the continuation can be used
@@ -268,9 +274,11 @@ func (b *Builder) flushChild() {
return
}
- if !b.fixedSize {
- b.result = child.result // In case child reallocated result.
+ if b.fixedSize && &b.result[0] != &child.result[0] {
+ panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer")
}
+
+ b.result = child.result
}
func (b *Builder) add(bytes ...byte) {
@@ -278,7 +286,7 @@ func (b *Builder) add(bytes ...byte) {
return
}
if b.child != nil {
- panic("attempted write while child is pending")
+ panic("cryptobyte: attempted write while child is pending")
}
if len(b.result)+len(bytes) < len(bytes) {
b.err = errors.New("cryptobyte: length overflow")
@@ -290,6 +298,26 @@ func (b *Builder) add(bytes ...byte) {
b.result = append(b.result, bytes...)
}
+// Unwrite rolls back n bytes written directly to the Builder. An attempt by a
+// child builder passed to a continuation to unwrite bytes from its parent will
+// panic.
+func (b *Builder) Unwrite(n int) {
+ if b.err != nil {
+ return
+ }
+ if b.child != nil {
+ panic("cryptobyte: attempted unwrite while child is pending")
+ }
+ length := len(b.result) - b.pendingLenLen - b.offset
+ if length < 0 {
+ panic("cryptobyte: internal error")
+ }
+ if n > length {
+ panic("cryptobyte: attempted to unwrite more than was written")
+ }
+ b.result = b.result[:len(b.result)-n]
+}
+
// A MarshalingValue marshals itself into a Builder.
type MarshalingValue interface {
// Marshal is called by Builder.AddValue. It receives a pointer to a builder
diff --git a/vendor/golang.org/x/crypto/cryptobyte/cryptobyte_test.go b/vendor/golang.org/x/crypto/cryptobyte/cryptobyte_test.go
index f294dd55..fb637091 100644
--- a/vendor/golang.org/x/crypto/cryptobyte/cryptobyte_test.go
+++ b/vendor/golang.org/x/crypto/cryptobyte/cryptobyte_test.go
@@ -327,12 +327,14 @@ func TestWriteWithPendingChild(t *testing.T) {
var b Builder
b.AddUint8LengthPrefixed(func(c *Builder) {
c.AddUint8LengthPrefixed(func(d *Builder) {
- defer func() {
- if recover() == nil {
- t.Errorf("recover() = nil, want error; c.AddUint8() did not panic")
- }
+ func() {
+ defer func() {
+ if recover() == nil {
+ t.Errorf("recover() = nil, want error; c.AddUint8() did not panic")
+ }
+ }()
+ c.AddUint8(2) // panics
}()
- c.AddUint8(2) // panics
defer func() {
if recover() == nil {
@@ -351,6 +353,92 @@ func TestWriteWithPendingChild(t *testing.T) {
})
}
+func TestSetError(t *testing.T) {
+ const errorStr = "TestSetError"
+ var b Builder
+ b.SetError(errors.New(errorStr))
+
+ ret, err := b.Bytes()
+ if ret != nil {
+ t.Error("expected nil result")
+ }
+ if err == nil {
+ t.Fatal("unexpected nil error")
+ }
+ if s := err.Error(); s != errorStr {
+ t.Errorf("expected error %q, got %v", errorStr, s)
+ }
+}
+
+func TestUnwrite(t *testing.T) {
+ var b Builder
+ b.AddBytes([]byte{1, 2, 3, 4, 5})
+ b.Unwrite(2)
+ if err := builderBytesEq(&b, 1, 2, 3); err != nil {
+ t.Error(err)
+ }
+
+ func() {
+ defer func() {
+ if recover() == nil {
+ t.Errorf("recover() = nil, want error; b.Unwrite() did not panic")
+ }
+ }()
+ b.Unwrite(4) // panics
+ }()
+
+ b = Builder{}
+ b.AddBytes([]byte{1, 2, 3, 4, 5})
+ b.AddUint8LengthPrefixed(func(b *Builder) {
+ b.AddBytes([]byte{1, 2, 3, 4, 5})
+
+ defer func() {
+ if recover() == nil {
+ t.Errorf("recover() = nil, want error; b.Unwrite() did not panic")
+ }
+ }()
+ b.Unwrite(6) // panics
+ })
+
+ b = Builder{}
+ b.AddBytes([]byte{1, 2, 3, 4, 5})
+ b.AddUint8LengthPrefixed(func(c *Builder) {
+ defer func() {
+ if recover() == nil {
+ t.Errorf("recover() = nil, want error; b.Unwrite() did not panic")
+ }
+ }()
+ b.Unwrite(2) // panics (attempted unwrite while child is pending)
+ })
+}
+
+func TestFixedBuilderLengthPrefixed(t *testing.T) {
+ bufCap := 10
+ inner := bytes.Repeat([]byte{0xff}, bufCap-2)
+ buf := make([]byte, 0, bufCap)
+ b := NewFixedBuilder(buf)
+ b.AddUint16LengthPrefixed(func(b *Builder) {
+ b.AddBytes(inner)
+ })
+ if got := b.BytesOrPanic(); len(got) != bufCap {
+ t.Errorf("Expected output length to be %d, got %d", bufCap, len(got))
+ }
+}
+
+func TestFixedBuilderPanicReallocate(t *testing.T) {
+ defer func() {
+ recover()
+ }()
+
+ b := NewFixedBuilder(make([]byte, 0, 10))
+ b1 := NewFixedBuilder(make([]byte, 0, 10))
+ b.AddUint16LengthPrefixed(func(b *Builder) {
+ *b = *b1
+ })
+
+ t.Error("Builder did not panic")
+}
+
// ASN.1
func TestASN1Int64(t *testing.T) {
diff --git a/vendor/golang.org/x/crypto/hkdf/example_test.go b/vendor/golang.org/x/crypto/hkdf/example_test.go
index df843951..e89c260e 100644
--- a/vendor/golang.org/x/crypto/hkdf/example_test.go
+++ b/vendor/golang.org/x/crypto/hkdf/example_test.go
@@ -9,49 +9,44 @@ import (
"crypto/rand"
"crypto/sha256"
"fmt"
- "golang.org/x/crypto/hkdf"
"io"
+
+ "golang.org/x/crypto/hkdf"
)
-// Usage example that expands one master key into three other cryptographically
-// secure keys.
+// Usage example that expands one master secret into three other
+// cryptographically secure keys.
func Example_usage() {
- // Underlying hash function to use
+ // Underlying hash function for HMAC.
hash := sha256.New
- // Cryptographically secure master key.
- master := []byte{0x00, 0x01, 0x02, 0x03} // i.e. NOT this.
+ // Cryptographically secure master secret.
+ secret := []byte{0x00, 0x01, 0x02, 0x03} // i.e. NOT this.
- // Non secret salt, optional (can be nil)
- // Recommended: hash-length sized random
+ // Non-secret salt, optional (can be nil).
+ // Recommended: hash-length random value.
salt := make([]byte, hash().Size())
- n, err := io.ReadFull(rand.Reader, salt)
- if n != len(salt) || err != nil {
- fmt.Println("error:", err)
- return
+ if _, err := rand.Read(salt); err != nil {
+ panic(err)
}
- // Non secret context specific info, optional (can be nil).
- // Note, independent from the master key.
- info := []byte{0x03, 0x14, 0x15, 0x92, 0x65}
+ // Non-secret context info, optional (can be nil).
+ info := []byte("hkdf example")
- // Create the key derivation function
- hkdf := hkdf.New(hash, master, salt, info)
+ // Generate three 128-bit derived keys.
+ hkdf := hkdf.New(hash, secret, salt, info)
- // Generate the required keys
- keys := make([][]byte, 3)
- for i := 0; i < len(keys); i++ {
- keys[i] = make([]byte, 24)
- n, err := io.ReadFull(hkdf, keys[i])
- if n != len(keys[i]) || err != nil {
- fmt.Println("error:", err)
- return
+ var keys [][]byte
+ for i := 0; i < 3; i++ {
+ key := make([]byte, 16)
+ if _, err := io.ReadFull(hkdf, key); err != nil {
+ panic(err)
}
+ keys = append(keys, key)
}
- // Keys should contain 192 bit random keys
- for i := 1; i <= len(keys); i++ {
- fmt.Printf("Key #%d: %v\n", i, !bytes.Equal(keys[i-1], make([]byte, 24)))
+ for i := range keys {
+ fmt.Printf("Key #%d: %v\n", i+1, !bytes.Equal(keys[i], make([]byte, 16)))
}
// Output:
diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go
index 5bc24635..dda3f143 100644
--- a/vendor/golang.org/x/crypto/hkdf/hkdf.go
+++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go
@@ -8,8 +8,6 @@
// HKDF is a cryptographic key derivation function (KDF) with the goal of
// expanding limited input keying material into one or more cryptographically
// strong secret keys.
-//
-// RFC 5869: https://tools.ietf.org/html/rfc5869
package hkdf // import "golang.org/x/crypto/hkdf"
import (
@@ -19,6 +17,21 @@ import (
"io"
)
+// Extract generates a pseudorandom key for use with Expand from an input secret
+// and an optional independent salt.
+//
+// Only use this function if you need to reuse the extracted key with multiple
+// Expand invocations and different context values. Most common scenarios,
+// including the generation of multiple keys, should use New instead.
+func Extract(hash func() hash.Hash, secret, salt []byte) []byte {
+ if salt == nil {
+ salt = make([]byte, hash().Size())
+ }
+ extractor := hmac.New(hash, salt)
+ extractor.Write(secret)
+ return extractor.Sum(nil)
+}
+
type hkdf struct {
expander hash.Hash
size int
@@ -26,22 +39,22 @@ type hkdf struct {
info []byte
counter byte
- prev []byte
- cache []byte
+ prev []byte
+ buf []byte
}
func (f *hkdf) Read(p []byte) (int, error) {
// Check whether enough data can be generated
need := len(p)
- remains := len(f.cache) + int(255-f.counter+1)*f.size
+ remains := len(f.buf) + int(255-f.counter+1)*f.size
if remains < need {
return 0, errors.New("hkdf: entropy limit reached")
}
- // Read from the cache, if enough data is present
- n := copy(p, f.cache)
+ // Read any leftover from the buffer
+ n := copy(p, f.buf)
p = p[n:]
- // Fill the buffer
+ // Fill the rest of the buffer
for len(p) > 0 {
f.expander.Reset()
f.expander.Write(f.prev)
@@ -51,25 +64,30 @@ func (f *hkdf) Read(p []byte) (int, error) {
f.counter++
// Copy the new batch into p
- f.cache = f.prev
- n = copy(p, f.cache)
+ f.buf = f.prev
+ n = copy(p, f.buf)
p = p[n:]
}
// Save leftovers for next run
- f.cache = f.cache[n:]
+ f.buf = f.buf[n:]
return need, nil
}
-// New returns a new HKDF using the given hash, the secret keying material to expand
-// and optional salt and info fields.
-func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader {
- if salt == nil {
- salt = make([]byte, hash().Size())
- }
- extractor := hmac.New(hash, salt)
- extractor.Write(secret)
- prk := extractor.Sum(nil)
-
- return &hkdf{hmac.New(hash, prk), extractor.Size(), info, 1, nil, nil}
+// Expand returns a Reader, from which keys can be read, using the given
+// pseudorandom key and optional context info, skipping the extraction step.
+//
+// The pseudorandomKey should have been generated by Extract, or be a uniformly
+// random or pseudorandom cryptographically strong key. See RFC 5869, Section
+// 3.3. Most common scenarios will want to use New instead.
+func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader {
+ expander := hmac.New(hash, pseudorandomKey)
+ return &hkdf{expander, expander.Size(), info, 1, nil, nil}
+}
+
+// New returns a Reader, from which keys can be read, using the given hash,
+// secret, salt and context info. Salt and info can be nil.
+func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader {
+ prk := Extract(hash, secret, salt)
+ return Expand(hash, prk, info)
}
diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf_test.go b/vendor/golang.org/x/crypto/hkdf/hkdf_test.go
index cee659bc..ea575772 100644
--- a/vendor/golang.org/x/crypto/hkdf/hkdf_test.go
+++ b/vendor/golang.org/x/crypto/hkdf/hkdf_test.go
@@ -18,6 +18,7 @@ type hkdfTest struct {
hash func() hash.Hash
master []byte
salt []byte
+ prk []byte
info []byte
out []byte
}
@@ -35,6 +36,12 @@ var hkdfTests = []hkdfTest{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c,
},
+ []byte{
+ 0x07, 0x77, 0x09, 0x36, 0x2c, 0x2e, 0x32, 0xdf,
+ 0x0d, 0xdc, 0x3f, 0x0d, 0xc4, 0x7b, 0xba, 0x63,
+ 0x90, 0xb6, 0xc7, 0x3b, 0xb5, 0x0f, 0x9c, 0x31,
+ 0x22, 0xec, 0x84, 0x4a, 0xd7, 0xc2, 0xb3, 0xe5,
+ },
[]byte{
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
0xf8, 0xf9,
@@ -74,6 +81,12 @@ var hkdfTests = []hkdfTest{
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
},
+ []byte{
+ 0x06, 0xa6, 0xb8, 0x8c, 0x58, 0x53, 0x36, 0x1a,
+ 0x06, 0x10, 0x4c, 0x9c, 0xeb, 0x35, 0xb4, 0x5c,
+ 0xef, 0x76, 0x00, 0x14, 0x90, 0x46, 0x71, 0x01,
+ 0x4a, 0x19, 0x3f, 0x40, 0xc1, 0x5f, 0xc2, 0x44,
+ },
[]byte{
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
@@ -108,6 +121,12 @@ var hkdfTests = []hkdfTest{
0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
},
[]byte{},
+ []byte{
+ 0x19, 0xef, 0x24, 0xa3, 0x2c, 0x71, 0x7b, 0x16,
+ 0x7f, 0x33, 0xa9, 0x1d, 0x6f, 0x64, 0x8b, 0xdf,
+ 0x96, 0x59, 0x67, 0x76, 0xaf, 0xdb, 0x63, 0x77,
+ 0xac, 0x43, 0x4c, 0x1c, 0x29, 0x3c, 0xcb, 0x04,
+ },
[]byte{},
[]byte{
0x8d, 0xa4, 0xe7, 0x75, 0xa5, 0x63, 0xc1, 0x8f,
@@ -118,6 +137,30 @@ var hkdfTests = []hkdfTest{
0x96, 0xc8,
},
},
+ {
+ sha256.New,
+ []byte{
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ },
+ nil,
+ []byte{
+ 0x19, 0xef, 0x24, 0xa3, 0x2c, 0x71, 0x7b, 0x16,
+ 0x7f, 0x33, 0xa9, 0x1d, 0x6f, 0x64, 0x8b, 0xdf,
+ 0x96, 0x59, 0x67, 0x76, 0xaf, 0xdb, 0x63, 0x77,
+ 0xac, 0x43, 0x4c, 0x1c, 0x29, 0x3c, 0xcb, 0x04,
+ },
+ nil,
+ []byte{
+ 0x8d, 0xa4, 0xe7, 0x75, 0xa5, 0x63, 0xc1, 0x8f,
+ 0x71, 0x5f, 0x80, 0x2a, 0x06, 0x3c, 0x5a, 0x31,
+ 0xb8, 0xa1, 0x1f, 0x5c, 0x5e, 0xe1, 0x87, 0x9e,
+ 0xc3, 0x45, 0x4e, 0x5f, 0x3c, 0x73, 0x8d, 0x2d,
+ 0x9d, 0x20, 0x13, 0x95, 0xfa, 0xa4, 0xb6, 0x1a,
+ 0x96, 0xc8,
+ },
+ },
{
sha1.New,
[]byte{
@@ -128,6 +171,11 @@ var hkdfTests = []hkdfTest{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c,
},
+ []byte{
+ 0x9b, 0x6c, 0x18, 0xc4, 0x32, 0xa7, 0xbf, 0x8f,
+ 0x0e, 0x71, 0xc8, 0xeb, 0x88, 0xf4, 0xb3, 0x0b,
+ 0xaa, 0x2b, 0xa2, 0x43,
+ },
[]byte{
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
0xf8, 0xf9,
@@ -167,6 +215,11 @@ var hkdfTests = []hkdfTest{
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
},
+ []byte{
+ 0x8a, 0xda, 0xe0, 0x9a, 0x2a, 0x30, 0x70, 0x59,
+ 0x47, 0x8d, 0x30, 0x9b, 0x26, 0xc4, 0x11, 0x5a,
+ 0x22, 0x4c, 0xfa, 0xf6,
+ },
[]byte{
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
@@ -201,6 +254,11 @@ var hkdfTests = []hkdfTest{
0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
},
[]byte{},
+ []byte{
+ 0xda, 0x8c, 0x8a, 0x73, 0xc7, 0xfa, 0x77, 0x28,
+ 0x8e, 0xc6, 0xf5, 0xe7, 0xc2, 0x97, 0x78, 0x6a,
+ 0xa0, 0xd3, 0x2d, 0x01,
+ },
[]byte{},
[]byte{
0x0a, 0xc1, 0xaf, 0x70, 0x02, 0xb3, 0xd7, 0x61,
@@ -219,7 +277,12 @@ var hkdfTests = []hkdfTest{
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
},
nil,
- []byte{},
+ []byte{
+ 0x2a, 0xdc, 0xca, 0xda, 0x18, 0x77, 0x9e, 0x7c,
+ 0x20, 0x77, 0xad, 0x2e, 0xb1, 0x9d, 0x3f, 0x3e,
+ 0x73, 0x13, 0x85, 0xdd,
+ },
+ nil,
[]byte{
0x2c, 0x91, 0x11, 0x72, 0x04, 0xd7, 0x45, 0xf3,
0x50, 0x0d, 0x63, 0x6a, 0x62, 0xf6, 0x4f, 0x0a,
@@ -233,6 +296,11 @@ var hkdfTests = []hkdfTest{
func TestHKDF(t *testing.T) {
for i, tt := range hkdfTests {
+ prk := Extract(tt.hash, tt.master, tt.salt)
+ if !bytes.Equal(prk, tt.prk) {
+ t.Errorf("test %d: incorrect PRK: have %v, need %v.", i, prk, tt.prk)
+ }
+
hkdf := New(tt.hash, tt.master, tt.salt, tt.info)
out := make([]byte, len(tt.out))
@@ -244,6 +312,17 @@ func TestHKDF(t *testing.T) {
if !bytes.Equal(out, tt.out) {
t.Errorf("test %d: incorrect output: have %v, need %v.", i, out, tt.out)
}
+
+ hkdf = Expand(tt.hash, prk, tt.info)
+
+ n, err = io.ReadFull(hkdf, out)
+ if n != len(tt.out) || err != nil {
+ t.Errorf("test %d: not enough output bytes from Expand: %d.", i, n)
+ }
+
+ if !bytes.Equal(out, tt.out) {
+ t.Errorf("test %d: incorrect output from Expand: have %v, need %v.", i, out, tt.out)
+ }
}
}
diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go
index 5edc9c97..f079d9ea 100644
--- a/vendor/golang.org/x/crypto/ocsp/ocsp.go
+++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go
@@ -63,7 +63,7 @@ func (r ResponseStatus) String() string {
}
// ResponseError is an error that may be returned by ParseResponse to indicate
-// that the response itself is an error, not just that its indicating that a
+// that the response itself is an error, not just that it's indicating that a
// certificate is revoked, unknown, etc.
type ResponseError struct {
Status ResponseStatus
diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go
index efe6e730..3e251860 100644
--- a/vendor/golang.org/x/crypto/openpgp/keys.go
+++ b/vendor/golang.org/x/crypto/openpgp/keys.go
@@ -345,36 +345,8 @@ EachPacket:
switch pkt := p.(type) {
case *packet.UserId:
- // Make a new Identity object, that we might wind up throwing away.
- // We'll only add it if we get a valid self-signature over this
- // userID.
- current := new(Identity)
- current.Name = pkt.Id
- current.UserId = pkt
-
- for {
- p, err = packets.Next()
- if err == io.EOF {
- break EachPacket
- } else if err != nil {
- return nil, err
- }
-
- sig, ok := p.(*packet.Signature)
- if !ok {
- packets.Unread(p)
- continue EachPacket
- }
-
- if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
- if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
- return nil, errors.StructuralError("user ID self-signature invalid: " + err.Error())
- }
- current.SelfSignature = sig
- e.Identities[pkt.Id] = current
- } else {
- current.Signatures = append(current.Signatures, sig)
- }
+ if err := addUserID(e, packets, pkt); err != nil {
+ return nil, err
}
case *packet.Signature:
if pkt.SigType == packet.SigTypeKeyRevocation {
@@ -426,6 +398,42 @@ EachPacket:
return e, nil
}
+func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error {
+ // Make a new Identity object, that we might wind up throwing away.
+ // We'll only add it if we get a valid self-signature over this
+ // userID.
+ identity := new(Identity)
+ identity.Name = pkt.Id
+ identity.UserId = pkt
+
+ for {
+ p, err := packets.Next()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+
+ sig, ok := p.(*packet.Signature)
+ if !ok {
+ packets.Unread(p)
+ break
+ }
+
+ if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
+ if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
+ return errors.StructuralError("user ID self-signature invalid: " + err.Error())
+ }
+ identity.SelfSignature = sig
+ e.Identities[pkt.Id] = identity
+ } else {
+ identity.Signatures = append(identity.Signatures, sig)
+ }
+ }
+
+ return nil
+}
+
func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
var subKey Subkey
subKey.PublicKey = pub
@@ -457,7 +465,8 @@ func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *p
case packet.SigTypeSubkeyRevocation:
subKey.Sig = sig
case packet.SigTypeSubkeyBinding:
- if subKey.Sig == nil {
+
+ if shouldReplaceSubkeySig(subKey.Sig, sig) {
subKey.Sig = sig
}
}
@@ -472,6 +481,22 @@ func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *p
return nil
}
+func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool {
+ if potentialNewSig == nil {
+ return false
+ }
+
+ if existingSig == nil {
+ return true
+ }
+
+ if existingSig.SigType == packet.SigTypeSubkeyRevocation {
+ return false // never override a revocation signature
+ }
+
+ return potentialNewSig.CreationTime.After(existingSig.CreationTime)
+}
+
const defaultRSAKeyBits = 2048
// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
diff --git a/vendor/golang.org/x/crypto/openpgp/keys_data_test.go b/vendor/golang.org/x/crypto/openpgp/keys_data_test.go
new file mode 100644
index 00000000..7779bd97
--- /dev/null
+++ b/vendor/golang.org/x/crypto/openpgp/keys_data_test.go
@@ -0,0 +1,200 @@
+package openpgp
+
+const expiringKeyHex = "988d0451d1ec5d010400ba3385721f2dc3f4ab096b2ee867ab77213f0a27a8538441c35d2fa225b08798a1439a66a5150e6bdc3f40f5d28d588c712394c632b6299f77db8c0d48d37903fb72ebd794d61be6aa774688839e5fdecfe06b2684cc115d240c98c66cb1ef22ae84e3aa0c2b0c28665c1e7d4d044e7f270706193f5223c8d44e0d70b7b8da830011010001b40f4578706972792074657374206b657988be041301020028050251d1ec5d021b03050900278d00060b090807030206150802090a0b0416020301021e01021780000a091072589ad75e237d8c033503fd10506d72837834eb7f994117740723adc39227104b0d326a1161871c0b415d25b4aedef946ca77ea4c05af9c22b32cf98be86ab890111fced1ee3f75e87b7cc3c00dc63bbc85dfab91c0dc2ad9de2c4d13a34659333a85c6acc1a669c5e1d6cecb0cf1e56c10e72d855ae177ddc9e766f9b2dda57ccbb75f57156438bbdb4e42b88d0451d1ec5d0104009c64906559866c5cb61578f5846a94fcee142a489c9b41e67b12bb54cfe86eb9bc8566460f9a720cb00d6526fbccfd4f552071a8e3f7744b1882d01036d811ee5a3fb91a1c568055758f43ba5d2c6a9676b012f3a1a89e47bbf624f1ad571b208f3cc6224eb378f1645dd3d47584463f9eadeacfd1ce6f813064fbfdcc4b5a53001101000188a504180102000f021b0c050251d1f06b050900093e89000a091072589ad75e237d8c20e00400ab8310a41461425b37889c4da28129b5fae6084fafbc0a47dd1adc74a264c6e9c9cc125f40462ee1433072a58384daef88c961c390ed06426a81b464a53194c4e291ddd7e2e2ba3efced01537d713bd111f48437bde2363446200995e8e0d4e528dda377fd1e8f8ede9c8e2198b393bd86852ce7457a7e3daf74d510461a5b77b88d0451d1ece8010400b3a519f83ab0010307e83bca895170acce8964a044190a2b368892f7a244758d9fc193482648acb1fb9780d28cc22d171931f38bb40279389fc9bf2110876d4f3db4fcfb13f22f7083877fe56592b3b65251312c36f83ffcb6d313c6a17f197dd471f0712aad15a8537b435a92471ba2e5b0c72a6c72536c3b567c558d7b6051001101000188a504180102000f021b0c050251d1f07b050900279091000a091072589ad75e237d8ce69e03fe286026afacf7c97ee20673864d4459a2240b5655219950643c7dba0ac384b1d4359c67805b21d98211f7b09c2a0ccf6410c8c04d4ff4a51293725d8d6570d9d8bb0e10c07d22357caeb49626df99c180be02d77d1fe8ed25e7a54481237646083a9f89a11566cd20b9e995b1487c5f9e02aeb434f3a1897cd416dd0a87861838da3e9e"
+const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98"
+const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f"
+const revokedSubkeyHex = "988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a0021b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011"
+
+const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+Charset: UTF-8
+
+mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY
+ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG
+zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54
+QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ
+QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo
+9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu
+Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/
+dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R
+JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL
+ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew
+RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW
+/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu
+yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv
+2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR
+bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL
+C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP
+WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y
+MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA
+EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ
+MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N
+1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm
++ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N
+lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW
+CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF
+4artDmrG
+=7FfJ
+-----END PGP PUBLIC KEY BLOCK-----`
+
+const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY
+ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG
+zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54
+QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ
+QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo
+9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu
+Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/
+dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R
+JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL
+ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew
+RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW
+/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu
+yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ
+UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe
+iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK
+FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8
+R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh
++SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA
+EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO
+52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb
+u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl
+w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep
+54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+
+YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL
+bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E
+i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB
+DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1
+8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY
+s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745
+U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL
+6LCg2mg=
+=Dhm4
+-----END PGP PUBLIC KEY BLOCK-----`
+
+const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo
+7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom
+lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0
+E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC
+CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw
+6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH
+7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv
+X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7
+GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl
+y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw
+R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW
+CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+
+LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO
+aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx
+yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl
+BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr
+Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK
+CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp
+C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ
+SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/
+MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70=
+=vtbN
+-----END PGP PUBLIC KEY BLOCK-----`
+
+const revokedUserIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQENBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0qlX2e
+DZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN91KtLsz/
+uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xOXO3YtLdmJMBW
+ClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBbnaIYO6fXVXELUjkx
+nmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX8vY7vwC34pm22fAUVLCJ
+x1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEBAAG0I0dvbGFuZyBHb3BoZXIg
+PG5vLXJlcGx5QGdvbGFuZy5jb20+iQFUBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy
+9I6cUoMFAlsgO5ECGwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ
+1oFy9I6cUoMIkwf8DNPeD23i4jRwd/pylbvxwZintZl1fSwTJW1xcOa1emXaEtX2
+depuqhP04fjlRQGfsYAQh7X9jOJxAHjTmhqFBi5sD7QvKU00cPFYbJ/JTx0B41bl
+aXnSbGhRPh63QtEZL7ACAs+shwvvojJqysx7kyVRu0EW2wqjXdHwR/SJO6nhNBa2
+DXzSiOU/SUA42mmG+5kjF8Aabq9wPwT9wjraHShEweNerNMmOqJExBOy3yFeyDpa
+XwEZFzBfOKoxFNkIaVf5GSdIUGhFECkGvBMB935khftmgR8APxdU4BE7XrXexFJU
+8RCuPXonm4WQOwTWR0vQg64pb2WKAzZ8HhwTGbQiR29sYW5nIEdvcGhlciA8cmV2
+b2tlZEBnb2xhbmcuY29tPokBNgQwAQoAIBYhBOSJOSS3Dcepeq2X8NaBcvSOnFKD
+BQJbIDv3Ah0AAAoJENaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT6bC1JttG
+0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZq8KxHn/KvN6N
+s85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy+I0sGyI/Inro0Pzb
+tvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarYbYB2idtGRci4b9tObOK0
+BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8jSwEr2O2sUR0yjbgUAXbTxDVE
+/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3FazkkSYQD6b97+dkWwb1iWG5AQ0EWyA7
+kQEIALkg04REDZo1JgdYV4x8HJKFS4xAYWbIva1ZPqvDNmZRUbQZR2+gpJGEwn7z
+VofGvnOYiGW56AS5j31SFf5kro1+1bZQ5iOONBng08OOo58/l1hRseIIVGB5TGSa
+PCdChKKHreJI6hS3mShxH6hdfFtiZuB45rwoaArMMsYcjaezLwKeLc396cpUwwcZ
+snLUNd1Xu5EWEF2OdFkZ2a1qYdxBvAYdQf4+1Nr+NRIx1u1NS9c8jp3PuMOkrQEi
+bNtc1v6v0Jy52mKLG4y7mC/erIkvkQBYJdxPaP7LZVaPYc3/xskcyijrJ/5ufoD8
+K71/ShtsZUXSQn9jlRaYR0EbojMAEQEAAYkBPAQYAQoAJhYhBOSJOSS3Dcepeq2X
+8NaBcvSOnFKDBQJbIDuRAhsMBQkDwmcAAAoJENaBcvSOnFKDkFMIAIt64bVZ8x7+
+TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2NnDyf1cLOSimSTILpwLIuv9Uft5Pb
+OraQbYt3xi9yrqdKqGLv80bxqK0NuryNkvh9yyx5WoG1iKqMj9/FjGghuPrRaT4l
+QinNAghGVkEy1+aXGFrG2DsOC1FFI51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2V
+yJl9bD5R4SUNy8oQmhOxi+gbhD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+U
+heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB
+7qTZOahrETw=
+=IKnw
+-----END PGP PUBLIC KEY BLOCK-----`
+
+const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR
+s3ZoSXY2z7Dkv+NyHYMt8p+X8q5fR7JvUjK2XbPyKoiJVnHINll83yl67DaWfKNL
+EjNoO0kIfbXfCkZ7EG6DL+iKtuxniGTcnGT47e+HJSqb/STpLMnWwXjBABEBAAG0
+I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQQ/
+lRafP/p9PytHbwxMvYJsOQdOOAUCWyKwKQIbAwULCQgHAwUVCgkICwUWAgMBAAIe
+AQIXgAAKCRBMvYJsOQdOOOsFBAC62mXww8XuqvYLcVOvHkWLT6mhxrQOJXnlfpn7
+2uBV9CMhoG/Ycd43NONsJrB95Apr9TDIqWnVszNbqPCuBhZQSGLdbiDKjxnCWBk0
+69qv4RNtkpOhYB7jK4s8F5oQZqId6JasT/PmJTH92mhBYhhTQr0GYFuPX2UJdkw9
+Sn9C67iNBFsisDUBBAC3A+Yo9lgCnxi/pfskyLrweYif6kIXWLAtLTsM6g/6jt7b
+wTrknuCPyTv0QKGXsAEe/cK/Xq3HvX9WfXPGIHc/X56ZIsHQ+RLowbZV/Lhok1IW
+FAuQm8axr/by80cRwFnzhfPc/ukkAq2Qyj4hLsGblu6mxeAhzcp8aqmWOO2H9QAR
+AQABiLYEKAEKACAWIQQ/lRafP/p9PytHbwxMvYJsOQdOOAUCWyK16gIdAAAKCRBM
+vYJsOQdOOB1vA/4u4uLONsE+2GVOyBsHyy7uTdkuxaR9b54A/cz6jT/tzUbeIzgx
+22neWhgvIEghnUZd0vEyK9k1wy5vbDlEo6nKzHso32N1QExGr5upRERAxweDxGOj
+7luDwNypI7QcifE64lS/JmlnunwRCdRWMKc0Fp+7jtRc5mpwyHN/Suf5RokBagQY
+AQoAIBYhBD+VFp8/+n0/K0dvDEy9gmw5B044BQJbIrA1AhsCAL8JEEy9gmw5B044
+tCAEGQEKAB0WIQSNdnkaWY6t62iX336UXbGvYdhXJwUCWyKwNQAKCRCUXbGvYdhX
+JxJSA/9fCPHP6sUtGF1o3G1a3yvOUDGr1JWcct9U+QpbCt1mZoNopCNDDQAJvDWl
+mvDgHfuogmgNJRjOMznvahbF+wpTXmB7LS0SK412gJzl1fFIpK4bgnhu0TwxNsO1
+8UkCZWqxRMgcNUn9z6XWONK8dgt5JNvHSHrwF4CxxwjL23AAtK+FA/UUoi3U4kbC
+0XnSr1Sl+mrzQi1+H7xyMe7zjqe+gGANtskqexHzwWPUJCPZ5qpIa2l8ghiUim6b
+4ymJ+N8/T8Yva1FaPEqfMzzqJr8McYFm0URioXJPvOAlRxdHPteZ0qUopt/Jawxl
+Xt6B9h1YpeLoJwjwsvbi98UTRs0jXwoY
+=3fWu
+-----END PGP PUBLIC KEY BLOCK-----`
+
+const keyWithSubKeyAndBadSelfSigOrder = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mI0EWyLLDQEEAOqIOpJ/ha1OYAGduu9tS3rBz5vyjbNgJO4sFveEM0mgsHQ0X9/L
+plonW+d0gRoO1dhJ8QICjDAc6+cna1DE3tEb5m6JtQ30teLZuqrR398Cf6w7NNVz
+r3lrlmnH9JaKRuXl7tZciwyovneBfZVCdtsRZjaLI1uMQCz/BToiYe3DABEBAAG0
+I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQRZ
+sixZOfQcZdW0wUqmgmdsv1O9xgUCWyLLDQIbAwULCQgHAwUVCgkICwUWAgMBAAIe
+AQIXgAAKCRCmgmdsv1O9xql2A/4pix98NxjhdsXtazA9agpAKeADf9tG4Za27Gj+
+3DCww/E4iP2X35jZimSm/30QRB6j08uGCqd9vXkkJxtOt63y/IpVOtWX6vMWSTUm
+k8xKkaYMP0/IzKNJ1qC/qYEUYpwERBKg9Z+k99E2Ql4kRHdxXUHq6OzY79H18Y+s
+GdeM/riNBFsiyxsBBAC54Pxg/8ZWaZX1phGdwfe5mek27SOYpC0AxIDCSOdMeQ6G
+HPk38pywl1d+S+KmF/F4Tdi+kWro62O4eG2uc/T8JQuRDUhSjX0Qa51gPzJrUOVT
+CFyUkiZ/3ZDhtXkgfuso8ua2ChBgR9Ngr4v43tSqa9y6AK7v0qjxD1x+xMrjXQAR
+AQABiQFxBBgBCgAmAhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsizTIFCQAN
+MRcAv7QgBBkBCgAdFiEEJcoVUVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62j
+UpRPICQq5gQApoWIigZxXFoM0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBS
+YnjyA4+n1D+zB2VqliD2QrsX12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZs
+nRJmXV+bsvD4sidLZLjdwOVa3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/
+U73GGi0D/i20VW8AWYAPACm2zMlzExKTOAV01YTQH/3vW0WLrOse53WcIVZga6es
+HuO4So0SOEAvxKMe5HpRIu2dJxTvd99Bo9xk9xJU0AoFrO0vNCRnL+5y68xMlODK
+lEw5/kl0jeaTBp6xX0HDQOEVOpPGUwWV4Ij2EnvfNDXaE1vK1kffiQFrBBgBCgAg
+AhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsi0AYAv7QgBBkBCgAdFiEEJcoV
+UVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62jUpRPICQq5gQApoWIigZxXFoM
+0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBSYnjyA4+n1D+zB2VqliD2QrsX
+12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZsnRJmXV+bsvD4sidLZLjdwOVa
+3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/U73GRl0EAJokkXmy4zKDHWWi
+wvK9gi2gQgRkVnu2AiONxJb5vjeLhM/07BRmH6K1o+w3fOeEQp4FjXj1eQ5fPSM6
+Hhwx2CTl9SDnPSBMiKXsEFRkmwQ2AAsQZLmQZvKBkLZYeBiwf+IY621eYDhZfo+G
+1dh1WoUCyREZsJQg2YoIpWIcvw+a
+=bNRo
+-----END PGP PUBLIC KEY BLOCK-----
+`
diff --git a/vendor/golang.org/x/crypto/openpgp/keys_test.go b/vendor/golang.org/x/crypto/openpgp/keys_test.go
index 46225d4d..0eb1a9ef 100644
--- a/vendor/golang.org/x/crypto/openpgp/keys_test.go
+++ b/vendor/golang.org/x/crypto/openpgp/keys_test.go
@@ -254,6 +254,51 @@ func TestSubkeyRevocation(t *testing.T) {
}
}
+func TestKeyWithSubKeyAndBadSelfSigOrder(t *testing.T) {
+ // This key was altered so that the self signatures following the
+ // subkey are in a sub-optimal order.
+ //
+ // Note: Should someone have to create a similar key again, look into
+ // gpgsplit, gpg --dearmor, and gpg --enarmor.
+ //
+ // The packet ordering is the following:
+ // PUBKEY UID UIDSELFSIG SUBKEY SELFSIG1 SELFSIG2
+ //
+ // Where:
+ // SELFSIG1 expires on 2018-06-14 and was created first
+ // SELFSIG2 does not expire and was created after SELFSIG1
+ //
+ // Test for RFC 4880 5.2.3.3:
+ // > An implementation that encounters multiple self-signatures on the
+ // > same object may resolve the ambiguity in any way it sees fit, but it
+ // > is RECOMMENDED that priority be given to the most recent self-
+ // > signature.
+ //
+ // This means that we should keep SELFSIG2.
+
+ keys, err := ReadArmoredKeyRing(bytes.NewBufferString(keyWithSubKeyAndBadSelfSigOrder))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(keys) != 1 {
+ t.Fatal("Failed to read key with a sub key and a bad selfsig packet order")
+ }
+
+ key := keys[0]
+
+ if numKeys, expected := len(key.Subkeys), 1; numKeys != expected {
+ t.Fatalf("Read %d subkeys, expected %d", numKeys, expected)
+ }
+
+ subKey := key.Subkeys[0]
+
+ if lifetime := subKey.Sig.KeyLifetimeSecs; lifetime != nil {
+ t.Errorf("The signature has a key lifetime (%d), but it should be nil", *lifetime)
+ }
+
+}
+
func TestKeyUsage(t *testing.T) {
kring, err := ReadKeyRing(readerFromHex(subkeyUsageHex))
if err != nil {
@@ -448,168 +493,3 @@ func TestNewEntityPublicSerialization(t *testing.T) {
t.Fatal(err)
}
}
-
-const expiringKeyHex = "988d0451d1ec5d010400ba3385721f2dc3f4ab096b2ee867ab77213f0a27a8538441c35d2fa225b08798a1439a66a5150e6bdc3f40f5d28d588c712394c632b6299f77db8c0d48d37903fb72ebd794d61be6aa774688839e5fdecfe06b2684cc115d240c98c66cb1ef22ae84e3aa0c2b0c28665c1e7d4d044e7f270706193f5223c8d44e0d70b7b8da830011010001b40f4578706972792074657374206b657988be041301020028050251d1ec5d021b03050900278d00060b090807030206150802090a0b0416020301021e01021780000a091072589ad75e237d8c033503fd10506d72837834eb7f994117740723adc39227104b0d326a1161871c0b415d25b4aedef946ca77ea4c05af9c22b32cf98be86ab890111fced1ee3f75e87b7cc3c00dc63bbc85dfab91c0dc2ad9de2c4d13a34659333a85c6acc1a669c5e1d6cecb0cf1e56c10e72d855ae177ddc9e766f9b2dda57ccbb75f57156438bbdb4e42b88d0451d1ec5d0104009c64906559866c5cb61578f5846a94fcee142a489c9b41e67b12bb54cfe86eb9bc8566460f9a720cb00d6526fbccfd4f552071a8e3f7744b1882d01036d811ee5a3fb91a1c568055758f43ba5d2c6a9676b012f3a1a89e47bbf624f1ad571b208f3cc6224eb378f1645dd3d47584463f9eadeacfd1ce6f813064fbfdcc4b5a53001101000188a504180102000f021b0c050251d1f06b050900093e89000a091072589ad75e237d8c20e00400ab8310a41461425b37889c4da28129b5fae6084fafbc0a47dd1adc74a264c6e9c9cc125f40462ee1433072a58384daef88c961c390ed06426a81b464a53194c4e291ddd7e2e2ba3efced01537d713bd111f48437bde2363446200995e8e0d4e528dda377fd1e8f8ede9c8e2198b393bd86852ce7457a7e3daf74d510461a5b77b88d0451d1ece8010400b3a519f83ab0010307e83bca895170acce8964a044190a2b368892f7a244758d9fc193482648acb1fb9780d28cc22d171931f38bb40279389fc9bf2110876d4f3db4fcfb13f22f7083877fe56592b3b65251312c36f83ffcb6d313c6a17f197dd471f0712aad15a8537b435a92471ba2e5b0c72a6c72536c3b567c558d7b6051001101000188a504180102000f021b0c050251d1f07b050900279091000a091072589ad75e237d8ce69e03fe286026afacf7c97ee20673864d4459a2240b5655219950643c7dba0ac384b1d4359c67805b21d98211f7b09c2a0ccf6410c8c04d4ff4a51293725d8d6570d9d8bb0e10c07d22357caeb49626df99c180be02d77d1fe8ed25e7a54481237646083a9f89a11566cd20b9e995b1487c5f9e02aeb434f3a1897cd416dd0a87861838da3e9e"
-const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889
f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98"
-const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f
70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f"
-const revokedSubkeyHex = "988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507f
ac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a0021b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458
c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011"
-const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Charset: UTF-8
-
-mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY
-ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG
-zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54
-QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ
-QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo
-9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu
-Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/
-dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R
-JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL
-ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew
-RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW
-/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu
-yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv
-2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR
-bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL
-C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP
-WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y
-MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA
-EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ
-MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N
-1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm
-+ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N
-lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW
-CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF
-4artDmrG
-=7FfJ
------END PGP PUBLIC KEY BLOCK-----`
-
-const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY
-ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG
-zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54
-QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ
-QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo
-9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu
-Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/
-dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R
-JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL
-ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew
-RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW
-/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu
-yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ
-UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe
-iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK
-FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8
-R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh
-+SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA
-EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO
-52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb
-u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl
-w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep
-54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+
-YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL
-bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E
-i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB
-DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1
-8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY
-s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745
-U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL
-6LCg2mg=
-=Dhm4
------END PGP PUBLIC KEY BLOCK-----`
-
-const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo
-7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom
-lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0
-E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC
-CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw
-6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH
-7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv
-X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7
-GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl
-y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw
-R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW
-CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+
-LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO
-aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx
-yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl
-BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr
-Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK
-CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp
-C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ
-SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/
-MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70=
-=vtbN
------END PGP PUBLIC KEY BLOCK-----`
-
-const revokedUserIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQENBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0qlX2e
-DZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN91KtLsz/
-uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xOXO3YtLdmJMBW
-ClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBbnaIYO6fXVXELUjkx
-nmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX8vY7vwC34pm22fAUVLCJ
-x1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEBAAG0I0dvbGFuZyBHb3BoZXIg
-PG5vLXJlcGx5QGdvbGFuZy5jb20+iQFUBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy
-9I6cUoMFAlsgO5ECGwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ
-1oFy9I6cUoMIkwf8DNPeD23i4jRwd/pylbvxwZintZl1fSwTJW1xcOa1emXaEtX2
-depuqhP04fjlRQGfsYAQh7X9jOJxAHjTmhqFBi5sD7QvKU00cPFYbJ/JTx0B41bl
-aXnSbGhRPh63QtEZL7ACAs+shwvvojJqysx7kyVRu0EW2wqjXdHwR/SJO6nhNBa2
-DXzSiOU/SUA42mmG+5kjF8Aabq9wPwT9wjraHShEweNerNMmOqJExBOy3yFeyDpa
-XwEZFzBfOKoxFNkIaVf5GSdIUGhFECkGvBMB935khftmgR8APxdU4BE7XrXexFJU
-8RCuPXonm4WQOwTWR0vQg64pb2WKAzZ8HhwTGbQiR29sYW5nIEdvcGhlciA8cmV2
-b2tlZEBnb2xhbmcuY29tPokBNgQwAQoAIBYhBOSJOSS3Dcepeq2X8NaBcvSOnFKD
-BQJbIDv3Ah0AAAoJENaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT6bC1JttG
-0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZq8KxHn/KvN6N
-s85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy+I0sGyI/Inro0Pzb
-tvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarYbYB2idtGRci4b9tObOK0
-BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8jSwEr2O2sUR0yjbgUAXbTxDVE
-/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3FazkkSYQD6b97+dkWwb1iWG5AQ0EWyA7
-kQEIALkg04REDZo1JgdYV4x8HJKFS4xAYWbIva1ZPqvDNmZRUbQZR2+gpJGEwn7z
-VofGvnOYiGW56AS5j31SFf5kro1+1bZQ5iOONBng08OOo58/l1hRseIIVGB5TGSa
-PCdChKKHreJI6hS3mShxH6hdfFtiZuB45rwoaArMMsYcjaezLwKeLc396cpUwwcZ
-snLUNd1Xu5EWEF2OdFkZ2a1qYdxBvAYdQf4+1Nr+NRIx1u1NS9c8jp3PuMOkrQEi
-bNtc1v6v0Jy52mKLG4y7mC/erIkvkQBYJdxPaP7LZVaPYc3/xskcyijrJ/5ufoD8
-K71/ShtsZUXSQn9jlRaYR0EbojMAEQEAAYkBPAQYAQoAJhYhBOSJOSS3Dcepeq2X
-8NaBcvSOnFKDBQJbIDuRAhsMBQkDwmcAAAoJENaBcvSOnFKDkFMIAIt64bVZ8x7+
-TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2NnDyf1cLOSimSTILpwLIuv9Uft5Pb
-OraQbYt3xi9yrqdKqGLv80bxqK0NuryNkvh9yyx5WoG1iKqMj9/FjGghuPrRaT4l
-QinNAghGVkEy1+aXGFrG2DsOC1FFI51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2V
-yJl9bD5R4SUNy8oQmhOxi+gbhD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+U
-heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB
-7qTZOahrETw=
-=IKnw
------END PGP PUBLIC KEY BLOCK-----`
-
-const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR
-s3ZoSXY2z7Dkv+NyHYMt8p+X8q5fR7JvUjK2XbPyKoiJVnHINll83yl67DaWfKNL
-EjNoO0kIfbXfCkZ7EG6DL+iKtuxniGTcnGT47e+HJSqb/STpLMnWwXjBABEBAAG0
-I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQQ/
-lRafP/p9PytHbwxMvYJsOQdOOAUCWyKwKQIbAwULCQgHAwUVCgkICwUWAgMBAAIe
-AQIXgAAKCRBMvYJsOQdOOOsFBAC62mXww8XuqvYLcVOvHkWLT6mhxrQOJXnlfpn7
-2uBV9CMhoG/Ycd43NONsJrB95Apr9TDIqWnVszNbqPCuBhZQSGLdbiDKjxnCWBk0
-69qv4RNtkpOhYB7jK4s8F5oQZqId6JasT/PmJTH92mhBYhhTQr0GYFuPX2UJdkw9
-Sn9C67iNBFsisDUBBAC3A+Yo9lgCnxi/pfskyLrweYif6kIXWLAtLTsM6g/6jt7b
-wTrknuCPyTv0QKGXsAEe/cK/Xq3HvX9WfXPGIHc/X56ZIsHQ+RLowbZV/Lhok1IW
-FAuQm8axr/by80cRwFnzhfPc/ukkAq2Qyj4hLsGblu6mxeAhzcp8aqmWOO2H9QAR
-AQABiLYEKAEKACAWIQQ/lRafP/p9PytHbwxMvYJsOQdOOAUCWyK16gIdAAAKCRBM
-vYJsOQdOOB1vA/4u4uLONsE+2GVOyBsHyy7uTdkuxaR9b54A/cz6jT/tzUbeIzgx
-22neWhgvIEghnUZd0vEyK9k1wy5vbDlEo6nKzHso32N1QExGr5upRERAxweDxGOj
-7luDwNypI7QcifE64lS/JmlnunwRCdRWMKc0Fp+7jtRc5mpwyHN/Suf5RokBagQY
-AQoAIBYhBD+VFp8/+n0/K0dvDEy9gmw5B044BQJbIrA1AhsCAL8JEEy9gmw5B044
-tCAEGQEKAB0WIQSNdnkaWY6t62iX336UXbGvYdhXJwUCWyKwNQAKCRCUXbGvYdhX
-JxJSA/9fCPHP6sUtGF1o3G1a3yvOUDGr1JWcct9U+QpbCt1mZoNopCNDDQAJvDWl
-mvDgHfuogmgNJRjOMznvahbF+wpTXmB7LS0SK412gJzl1fFIpK4bgnhu0TwxNsO1
-8UkCZWqxRMgcNUn9z6XWONK8dgt5JNvHSHrwF4CxxwjL23AAtK+FA/UUoi3U4kbC
-0XnSr1Sl+mrzQi1+H7xyMe7zjqe+gGANtskqexHzwWPUJCPZ5qpIa2l8ghiUim6b
-4ymJ+N8/T8Yva1FaPEqfMzzqJr8McYFm0URioXJPvOAlRxdHPteZ0qUopt/Jawxl
-Xt6B9h1YpeLoJwjwsvbi98UTRs0jXwoY
-=3fWu
------END PGP PUBLIC KEY BLOCK-----`
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go
index 625bb5ac..5af64c54 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go
@@ -404,14 +404,16 @@ const (
type PublicKeyAlgorithm uint8
const (
- PubKeyAlgoRSA PublicKeyAlgorithm = 1
- PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
- PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
- PubKeyAlgoElGamal PublicKeyAlgorithm = 16
- PubKeyAlgoDSA PublicKeyAlgorithm = 17
+ PubKeyAlgoRSA PublicKeyAlgorithm = 1
+ PubKeyAlgoElGamal PublicKeyAlgorithm = 16
+ PubKeyAlgoDSA PublicKeyAlgorithm = 17
// RFC 6637, Section 5.
PubKeyAlgoECDH PublicKeyAlgorithm = 18
PubKeyAlgoECDSA PublicKeyAlgorithm = 19
+
+ // Deprecated in RFC 4880, Section 13.5. Use key flags instead.
+ PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
+ PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
)
// CanEncrypt returns true if it's possible to encrypt a message to a public
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
index 34734cc6..bd31ccea 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
@@ -64,14 +64,19 @@ func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateK
return pk
}
-// NewSignerPrivateKey creates a sign-only PrivateKey from a crypto.Signer that
+// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
// implements RSA or ECDSA.
func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey {
pk := new(PrivateKey)
+ // In general, the public Keys should be used as pointers. We still
+ // type-switch on the values, for backwards-compatibility.
switch pubkey := signer.Public().(type) {
+ case *rsa.PublicKey:
+ pk.PublicKey = *NewRSAPublicKey(currentTime, pubkey)
case rsa.PublicKey:
pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey)
- pk.PubKeyAlgo = PubKeyAlgoRSASignOnly
+ case *ecdsa.PublicKey:
+ pk.PublicKey = *NewECDSAPublicKey(currentTime, pubkey)
case ecdsa.PublicKey:
pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey)
default:
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key_test.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key_test.go
index ac651d91..cc08b483 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/private_key_test.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/private_key_test.go
@@ -14,7 +14,6 @@ import (
"crypto/x509"
"encoding/hex"
"hash"
- "io"
"testing"
"time"
)
@@ -162,15 +161,7 @@ func TestECDSAPrivateKey(t *testing.T) {
}
type rsaSigner struct {
- priv *rsa.PrivateKey
-}
-
-func (s *rsaSigner) Public() crypto.PublicKey {
- return s.priv.PublicKey
-}
-
-func (s *rsaSigner) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) {
- return s.priv.Sign(rand, msg, opts)
+ *rsa.PrivateKey
}
func TestRSASignerPrivateKey(t *testing.T) {
@@ -181,12 +172,8 @@ func TestRSASignerPrivateKey(t *testing.T) {
priv := NewSignerPrivateKey(time.Now(), &rsaSigner{rsaPriv})
- if priv.PubKeyAlgo != PubKeyAlgoRSASignOnly {
- t.Fatal("NewSignerPrivateKey should have made a sign-only RSA private key")
- }
-
sig := &Signature{
- PubKeyAlgo: PubKeyAlgoRSASignOnly,
+ PubKeyAlgo: PubKeyAlgoRSA,
Hash: crypto.SHA256,
}
msg := []byte("Hello World!")
@@ -208,15 +195,7 @@ func TestRSASignerPrivateKey(t *testing.T) {
}
type ecdsaSigner struct {
- priv *ecdsa.PrivateKey
-}
-
-func (s *ecdsaSigner) Public() crypto.PublicKey {
- return s.priv.PublicKey
-}
-
-func (s *ecdsaSigner) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) {
- return s.priv.Sign(rand, msg, opts)
+ *ecdsa.PrivateKey
}
func TestECDSASignerPrivateKey(t *testing.T) {
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/vendor/golang.org/x/crypto/openpgp/packet/signature.go
index 6ce0cbed..b2a24a53 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/signature.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/signature.go
@@ -542,7 +542,7 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
r, s, err = ecdsa.Sign(config.Random(), pk, digest)
} else {
var b []byte
- b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, nil)
+ b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
if err == nil {
r, s, err = unwrapECDSASig(b)
}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
index 96a2b382..d19ffbc7 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
@@ -80,7 +80,7 @@ func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
// ImageData returns zero or more byte slices, each containing
// JPEG File Interchange Format (JFIF), for each photo in the
-// the user attribute packet.
+// user attribute packet.
func (uat *UserAttribute) ImageData() (imageData [][]byte) {
for _, sp := range uat.Contents {
if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
diff --git a/vendor/golang.org/x/crypto/openpgp/write.go b/vendor/golang.org/x/crypto/openpgp/write.go
index d6dede74..4ee71784 100644
--- a/vendor/golang.org/x/crypto/openpgp/write.go
+++ b/vendor/golang.org/x/crypto/openpgp/write.go
@@ -271,6 +271,7 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
// These are the possible hash functions that we'll use for the signature.
candidateHashes := []uint8{
hashToHashId(crypto.SHA256),
+ hashToHashId(crypto.SHA384),
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA1),
hashToHashId(crypto.RIPEMD160),
@@ -349,6 +350,7 @@ func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Con
// These are the possible hash functions that we'll use for the signature.
candidateHashes := []uint8{
hashToHashId(crypto.SHA256),
+ hashToHashId(crypto.SHA384),
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA1),
hashToHashId(crypto.RIPEMD160),
diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go
index 6c6e8423..fd97ba1b 100644
--- a/vendor/golang.org/x/crypto/ripemd160/ripemd160.go
+++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go
@@ -5,7 +5,7 @@
// Package ripemd160 implements the RIPEMD-160 hash algorithm.
package ripemd160 // import "golang.org/x/crypto/ripemd160"
-// RIPEMD-160 is designed by by Hans Dobbertin, Antoon Bosselaers, and Bart
+// RIPEMD-160 is designed by Hans Dobbertin, Antoon Bosselaers, and Bart
// Preneel with specifications available at:
// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf.
diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go
index 9b25b5ac..3362afd1 100644
--- a/vendor/golang.org/x/crypto/scrypt/scrypt.go
+++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go
@@ -29,7 +29,7 @@ func blockXOR(dst, src []uint32, n int) {
}
// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in,
-// and puts the result into both both tmp and out.
+// and puts the result into both tmp and out.
func salsaXOR(tmp *[16]uint32, in, out []uint32) {
w0 := tmp[0] ^ in[0]
w1 := tmp[1] ^ in[1]
diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go
index a0ee3ae7..c2fef30a 100644
--- a/vendor/golang.org/x/crypto/sha3/doc.go
+++ b/vendor/golang.org/x/crypto/sha3/doc.go
@@ -43,7 +43,7 @@
// is then "full" and the permutation is applied to "empty" it. This process is
// repeated until all the input has been "absorbed". The input is then padded.
// The digest is "squeezed" from the sponge in the same way, except that output
-// output is copied out instead of input being XORed in.
+// is copied out instead of input being XORed in.
//
// A sponge is parameterized by its generic security strength, which is equal
// to half its capacity; capacity + rate is equal to the permutation's width.
diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go
index 4fb38c0a..0d8043fd 100644
--- a/vendor/golang.org/x/crypto/sha3/hashes.go
+++ b/vendor/golang.org/x/crypto/sha3/hashes.go
@@ -58,6 +58,12 @@ func New512() hash.Hash {
// that uses non-standard padding. All other users should use New256 instead.
func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
+// NewLegacyKeccak512 creates a new Keccak-512 hash.
+//
+// Only use this function if you require compatibility with an existing cryptosystem
+// that uses non-standard padding. All other users should use New512 instead.
+func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }
+
// Sum224 returns the SHA3-224 digest of the data.
func Sum224(data []byte) (digest [28]byte) {
h := New224()
diff --git a/vendor/golang.org/x/crypto/sha3/sha3_test.go b/vendor/golang.org/x/crypto/sha3/sha3_test.go
index c1f6ca39..26d1549b 100644
--- a/vendor/golang.org/x/crypto/sha3/sha3_test.go
+++ b/vendor/golang.org/x/crypto/sha3/sha3_test.go
@@ -44,6 +44,7 @@ var testDigests = map[string]func() hash.Hash{
"SHA3-384": New384,
"SHA3-512": New512,
"Keccak-256": NewLegacyKeccak256,
+ "Keccak-512": NewLegacyKeccak512,
"SHAKE128": newHashShake128,
"SHAKE256": newHashShake256,
}
@@ -137,6 +138,11 @@ func TestKeccak(t *testing.T) {
[]byte("abc"),
"4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45",
},
+ {
+ NewLegacyKeccak512,
+ []byte("abc"),
+ "18587dc2ea106b9a1563e32b3312421ca164c7f1f07bc922a9c83d77cea3a1e5d0c69910739025372dc14ac9642629379540c17e2a65b19d77aa511a9d00bb96",
+ },
}
for _, u := range tests {
diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go
index b1808dd2..51f74050 100644
--- a/vendor/golang.org/x/crypto/ssh/agent/client.go
+++ b/vendor/golang.org/x/crypto/ssh/agent/client.go
@@ -25,10 +25,22 @@ import (
"math/big"
"sync"
+ "crypto"
"golang.org/x/crypto/ed25519"
"golang.org/x/crypto/ssh"
)
+// SignatureFlags represent additional flags that can be passed to the signature
+// requests an defined in [PROTOCOL.agent] section 4.5.1.
+type SignatureFlags uint32
+
+// SignatureFlag values as defined in [PROTOCOL.agent] section 5.3.
+const (
+ SignatureFlagReserved SignatureFlags = 1 << iota
+ SignatureFlagRsaSha256
+ SignatureFlagRsaSha512
+)
+
// Agent represents the capabilities of an ssh-agent.
type Agent interface {
// List returns the identities known to the agent.
@@ -57,6 +69,26 @@ type Agent interface {
Signers() ([]ssh.Signer, error)
}
+type ExtendedAgent interface {
+ Agent
+
+ // SignWithFlags signs like Sign, but allows for additional flags to be sent/received
+ SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error)
+
+ // Extension processes a custom extension request. Standard-compliant agents are not
+ // required to support any extensions, but this method allows agents to implement
+ // vendor-specific methods or add experimental features. See [PROTOCOL.agent] section 4.7.
+ // If agent extensions are unsupported entirely this method MUST return an
+ // ErrExtensionUnsupported error. Similarly, if just the specific extensionType in
+ // the request is unsupported by the agent then ErrExtensionUnsupported MUST be
+ // returned.
+ //
+ // In the case of success, since [PROTOCOL.agent] section 4.7 specifies that the contents
+ // of the response are unspecified (including the type of the message), the complete
+ // response will be returned as a []byte slice, including the "type" byte of the message.
+ Extension(extensionType string, contents []byte) ([]byte, error)
+}
+
// ConstraintExtension describes an optional constraint defined by users.
type ConstraintExtension struct {
// ExtensionName consist of a UTF-8 string suffixed by the
@@ -179,6 +211,23 @@ type constrainExtensionAgentMsg struct {
Rest []byte `ssh:"rest"`
}
+// See [PROTOCOL.agent], section 4.7
+const agentExtension = 27
+const agentExtensionFailure = 28
+
+// ErrExtensionUnsupported indicates that an extension defined in
+// [PROTOCOL.agent] section 4.7 is unsupported by the agent. Specifically this
+// error indicates that the agent returned a standard SSH_AGENT_FAILURE message
+// as the result of a SSH_AGENTC_EXTENSION request. Note that the protocol
+// specification (and therefore this error) does not distinguish between a
+// specific extension being unsupported and extensions being unsupported entirely.
+var ErrExtensionUnsupported = errors.New("agent: extension unsupported")
+
+type extensionAgentMsg struct {
+ ExtensionType string `sshtype:"27"`
+ Contents []byte
+}
+
// Key represents a protocol 2 public key as defined in
// [PROTOCOL.agent], section 2.5.2.
type Key struct {
@@ -260,7 +309,7 @@ type client struct {
// NewClient returns an Agent that talks to an ssh-agent process over
// the given connection.
-func NewClient(rw io.ReadWriter) Agent {
+func NewClient(rw io.ReadWriter) ExtendedAgent {
return &client{conn: rw}
}
@@ -268,6 +317,21 @@ func NewClient(rw io.ReadWriter) Agent {
// unmarshaled into reply and replyType is set to the first byte of
// the reply, which contains the type of the message.
func (c *client) call(req []byte) (reply interface{}, err error) {
+ buf, err := c.callRaw(req)
+ if err != nil {
+ return nil, err
+ }
+ reply, err = unmarshal(buf)
+ if err != nil {
+ return nil, clientErr(err)
+ }
+ return reply, nil
+}
+
+// callRaw sends an RPC to the agent. On success, the raw
+// bytes of the response are returned; no unmarshalling is
+// performed on the response.
+func (c *client) callRaw(req []byte) (reply []byte, err error) {
c.mu.Lock()
defer c.mu.Unlock()
@@ -284,18 +348,14 @@ func (c *client) call(req []byte) (reply interface{}, err error) {
}
respSize := binary.BigEndian.Uint32(respSizeBuf[:])
if respSize > maxAgentResponseBytes {
- return nil, clientErr(err)
+ return nil, clientErr(errors.New("response too large"))
}
buf := make([]byte, respSize)
if _, err = io.ReadFull(c.conn, buf); err != nil {
return nil, clientErr(err)
}
- reply, err = unmarshal(buf)
- if err != nil {
- return nil, clientErr(err)
- }
- return reply, err
+ return buf, nil
}
func (c *client) simpleCall(req []byte) error {
@@ -369,9 +429,14 @@ func (c *client) List() ([]*Key, error) {
// Sign has the agent sign the data using a protocol 2 key as defined
// in [PROTOCOL.agent] section 2.6.2.
func (c *client) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
+ return c.SignWithFlags(key, data, 0)
+}
+
+func (c *client) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) {
req := ssh.Marshal(signRequestAgentMsg{
KeyBlob: key.Marshal(),
Data: data,
+ Flags: uint32(flags),
})
msg, err := c.call(req)
@@ -681,3 +746,44 @@ func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature,
// The agent has its own entropy source, so the rand argument is ignored.
return s.agent.Sign(s.pub, data)
}
+
+func (s *agentKeyringSigner) SignWithOpts(rand io.Reader, data []byte, opts crypto.SignerOpts) (*ssh.Signature, error) {
+ var flags SignatureFlags
+ if opts != nil {
+ switch opts.HashFunc() {
+ case crypto.SHA256:
+ flags = SignatureFlagRsaSha256
+ case crypto.SHA512:
+ flags = SignatureFlagRsaSha512
+ }
+ }
+ return s.agent.SignWithFlags(s.pub, data, flags)
+}
+
+// Calls an extension method. It is up to the agent implementation as to whether or not
+// any particular extension is supported and may always return an error. Because the
+// type of the response is up to the implementation, this returns the bytes of the
+// response and does not attempt any type of unmarshalling.
+func (c *client) Extension(extensionType string, contents []byte) ([]byte, error) {
+ req := ssh.Marshal(extensionAgentMsg{
+ ExtensionType: extensionType,
+ Contents: contents,
+ })
+ buf, err := c.callRaw(req)
+ if err != nil {
+ return nil, err
+ }
+ if len(buf) == 0 {
+ return nil, errors.New("agent: failure; empty response")
+ }
+ // [PROTOCOL.agent] section 4.7 indicates that an SSH_AGENT_FAILURE message
+ // represents an agent that does not support the extension
+ if buf[0] == agentFailure {
+ return nil, ErrExtensionUnsupported
+ }
+ if buf[0] == agentExtensionFailure {
+ return nil, errors.New("agent: generic extension failure")
+ }
+
+ return buf, nil
+}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/client_test.go b/vendor/golang.org/x/crypto/ssh/agent/client_test.go
index 266fd6d4..2f798f94 100644
--- a/vendor/golang.org/x/crypto/ssh/agent/client_test.go
+++ b/vendor/golang.org/x/crypto/ssh/agent/client_test.go
@@ -20,7 +20,7 @@ import (
)
// startOpenSSHAgent executes ssh-agent, and returns an Agent interface to it.
-func startOpenSSHAgent(t *testing.T) (client Agent, socket string, cleanup func()) {
+func startOpenSSHAgent(t *testing.T) (client ExtendedAgent, socket string, cleanup func()) {
if testing.Short() {
// ssh-agent is not always available, and the key
// types supported vary by platform.
@@ -79,13 +79,12 @@ func startOpenSSHAgent(t *testing.T) (client Agent, socket string, cleanup func(
}
}
-// startKeyringAgent uses Keyring to simulate a ssh-agent Server and returns a client.
-func startKeyringAgent(t *testing.T) (client Agent, cleanup func()) {
+func startAgent(t *testing.T, agent Agent) (client ExtendedAgent, cleanup func()) {
c1, c2, err := netPipe()
if err != nil {
t.Fatalf("netPipe: %v", err)
}
- go ServeAgent(NewKeyring(), c2)
+ go ServeAgent(agent, c2)
return NewClient(c1), func() {
c1.Close()
@@ -93,6 +92,11 @@ func startKeyringAgent(t *testing.T) (client Agent, cleanup func()) {
}
}
+// startKeyringAgent uses Keyring to simulate a ssh-agent Server and returns a client.
+func startKeyringAgent(t *testing.T) (client ExtendedAgent, cleanup func()) {
+ return startAgent(t, NewKeyring())
+}
+
func testOpenSSHAgent(t *testing.T, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) {
agent, _, cleanup := startOpenSSHAgent(t)
defer cleanup()
@@ -107,7 +111,7 @@ func testKeyringAgent(t *testing.T, key interface{}, cert *ssh.Certificate, life
testAgentInterface(t, agent, key, cert, lifetimeSecs)
}
-func testAgentInterface(t *testing.T, agent Agent, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) {
+func testAgentInterface(t *testing.T, agent ExtendedAgent, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) {
signer, err := ssh.NewSignerFromKey(key)
if err != nil {
t.Fatalf("NewSignerFromKey(%T): %v", key, err)
@@ -159,6 +163,25 @@ func testAgentInterface(t *testing.T, agent Agent, key interface{}, cert *ssh.Ce
t.Fatalf("Verify(%s): %v", pubKey.Type(), err)
}
+ // For tests on RSA keys, try signing with SHA-256 and SHA-512 flags
+ if pubKey.Type() == "ssh-rsa" {
+ sshFlagTest := func(flag SignatureFlags, expectedSigFormat string) {
+ sig, err = agent.SignWithFlags(pubKey, data, flag)
+ if err != nil {
+ t.Fatalf("SignWithFlags(%s): %v", pubKey.Type(), err)
+ }
+ if sig.Format != expectedSigFormat {
+ t.Fatalf("Signature format didn't match expected value: %s != %s", sig.Format, expectedSigFormat)
+ }
+ if err := pubKey.Verify(data, sig); err != nil {
+ t.Fatalf("Verify(%s): %v", pubKey.Type(), err)
+ }
+ }
+ sshFlagTest(0, ssh.SigAlgoRSA)
+ sshFlagTest(SignatureFlagRsaSha256, ssh.SigAlgoRSASHA2256)
+ sshFlagTest(SignatureFlagRsaSha512, ssh.SigAlgoRSASHA2512)
+ }
+
// If the key has a lifetime, is it removed when it should be?
if lifetimeSecs > 0 {
time.Sleep(time.Second*time.Duration(lifetimeSecs) + 100*time.Millisecond)
@@ -218,6 +241,35 @@ func netPipe() (net.Conn, net.Conn, error) {
return c1, c2, nil
}
+func TestServerResponseTooLarge(t *testing.T) {
+ a, b, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+
+ defer a.Close()
+ defer b.Close()
+
+ var response identitiesAnswerAgentMsg
+ response.NumKeys = 1
+ response.Keys = make([]byte, maxAgentResponseBytes+1)
+
+ agent := NewClient(a)
+ go func() {
+ n, _ := b.Write(ssh.Marshal(response))
+ if n < 4 {
+ t.Fatalf("At least 4 bytes (the response size) should have been successfully written: %d < 4", n)
+ }
+ }()
+ _, err = agent.List()
+ if err == nil {
+ t.Fatal("Did not get error result")
+ }
+ if err.Error() != "agent: client error: response too large" {
+ t.Fatal("Did not get expected error result")
+ }
+}
+
func TestAuth(t *testing.T) {
agent, _, cleanup := startOpenSSHAgent(t)
defer cleanup()
@@ -377,3 +429,38 @@ func testAgentLifetime(t *testing.T, agent Agent) {
t.Errorf("Want 0 keys, got %v", len(keys))
}
}
+
+type keyringExtended struct {
+ *keyring
+}
+
+func (r *keyringExtended) Extension(extensionType string, contents []byte) ([]byte, error) {
+ if extensionType != "my-extension@example.com" {
+ return []byte{agentExtensionFailure}, nil
+ }
+ return append([]byte{agentSuccess}, contents...), nil
+}
+
+func TestAgentExtensions(t *testing.T) {
+ agent, _, cleanup := startOpenSSHAgent(t)
+ defer cleanup()
+ result, err := agent.Extension("my-extension@example.com", []byte{0x00, 0x01, 0x02})
+ if err == nil {
+ t.Fatal("should have gotten agent extension failure")
+ }
+
+ agent, cleanup = startAgent(t, &keyringExtended{})
+ defer cleanup()
+ result, err = agent.Extension("my-extension@example.com", []byte{0x00, 0x01, 0x02})
+ if err != nil {
+ t.Fatalf("agent extension failure: %v", err)
+ }
+ if len(result) != 4 || !bytes.Equal(result, []byte{agentSuccess, 0x00, 0x01, 0x02}) {
+ t.Fatalf("agent extension result invalid: %v", result)
+ }
+
+ result, err = agent.Extension("bad-extension@example.com", []byte{0x00, 0x01, 0x02})
+ if err == nil {
+ t.Fatal("should have gotten agent extension failure")
+ }
+}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go
index 1a516327..c9d97943 100644
--- a/vendor/golang.org/x/crypto/ssh/agent/keyring.go
+++ b/vendor/golang.org/x/crypto/ssh/agent/keyring.go
@@ -182,6 +182,10 @@ func (r *keyring) Add(key AddedKey) error {
// Sign returns a signature for the data.
func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
+ return r.SignWithFlags(key, data, 0)
+}
+
+func (r *keyring) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.locked {
@@ -192,7 +196,24 @@ func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
wanted := key.Marshal()
for _, k := range r.keys {
if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) {
- return k.signer.Sign(rand.Reader, data)
+ if flags == 0 {
+ return k.signer.Sign(rand.Reader, data)
+ } else {
+ if algorithmSigner, ok := k.signer.(ssh.AlgorithmSigner); !ok {
+ return nil, fmt.Errorf("agent: signature does not support non-default signature algorithm: %T", k.signer)
+ } else {
+ var algorithm string
+ switch flags {
+ case SignatureFlagRsaSha256:
+ algorithm = ssh.SigAlgoRSASHA2256
+ case SignatureFlagRsaSha512:
+ algorithm = ssh.SigAlgoRSASHA2512
+ default:
+ return nil, fmt.Errorf("agent: unsupported signature flags: %d", flags)
+ }
+ return algorithmSigner.SignWithAlgorithm(rand.Reader, data, algorithm)
+ }
+ }
}
}
return nil, errors.New("not found")
@@ -213,3 +234,8 @@ func (r *keyring) Signers() ([]ssh.Signer, error) {
}
return s, nil
}
+
+// The keyring does not support any extensions
+func (r *keyring) Extension(extensionType string, contents []byte) ([]byte, error) {
+ return nil, ErrExtensionUnsupported
+}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/server.go b/vendor/golang.org/x/crypto/ssh/agent/server.go
index 2e4692cb..a1949762 100644
--- a/vendor/golang.org/x/crypto/ssh/agent/server.go
+++ b/vendor/golang.org/x/crypto/ssh/agent/server.go
@@ -128,7 +128,14 @@ func (s *server) processRequest(data []byte) (interface{}, error) {
Blob: req.KeyBlob,
}
- sig, err := s.agent.Sign(k, req.Data) // TODO(hanwen): flags.
+ var sig *ssh.Signature
+ var err error
+ if extendedAgent, ok := s.agent.(ExtendedAgent); ok {
+ sig, err = extendedAgent.SignWithFlags(k, req.Data, SignatureFlags(req.Flags))
+ } else {
+ sig, err = s.agent.Sign(k, req.Data)
+ }
+
if err != nil {
return nil, err
}
@@ -150,6 +157,43 @@ func (s *server) processRequest(data []byte) (interface{}, error) {
case agentAddIDConstrained, agentAddIdentity:
return nil, s.insertIdentity(data)
+
+ case agentExtension:
+ // Return a stub object where the whole contents of the response gets marshaled.
+ var responseStub struct {
+ Rest []byte `ssh:"rest"`
+ }
+
+ if extendedAgent, ok := s.agent.(ExtendedAgent); !ok {
+ // If this agent doesn't implement extensions, [PROTOCOL.agent] section 4.7
+ // requires that we return a standard SSH_AGENT_FAILURE message.
+ responseStub.Rest = []byte{agentFailure}
+ } else {
+ var req extensionAgentMsg
+ if err := ssh.Unmarshal(data, &req); err != nil {
+ return nil, err
+ }
+ res, err := extendedAgent.Extension(req.ExtensionType, req.Contents)
+ if err != nil {
+ // If agent extensions are unsupported, return a standard SSH_AGENT_FAILURE
+ // message as required by [PROTOCOL.agent] section 4.7.
+ if err == ErrExtensionUnsupported {
+ responseStub.Rest = []byte{agentFailure}
+ } else {
+ // As the result of any other error processing an extension request,
+ // [PROTOCOL.agent] section 4.7 requires that we return a
+ // SSH_AGENT_EXTENSION_FAILURE code.
+ responseStub.Rest = []byte{agentExtensionFailure}
+ }
+ } else {
+ if len(res) == 0 {
+ return nil, nil
+ }
+ responseStub.Rest = res
+ }
+ }
+
+ return responseStub, nil
}
return nil, fmt.Errorf("unknown opcode %d", data[0])
diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go
index 42106f3f..00ed9923 100644
--- a/vendor/golang.org/x/crypto/ssh/certs.go
+++ b/vendor/golang.org/x/crypto/ssh/certs.go
@@ -222,6 +222,11 @@ type openSSHCertSigner struct {
signer Signer
}
+type algorithmOpenSSHCertSigner struct {
+ *openSSHCertSigner
+ algorithmSigner AlgorithmSigner
+}
+
// NewCertSigner returns a Signer that signs with the given Certificate, whose
// private key is held by signer. It returns an error if the public key in cert
// doesn't match the key used by signer.
@@ -230,7 +235,12 @@ func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) {
return nil, errors.New("ssh: signer and cert have different public key")
}
- return &openSSHCertSigner{cert, signer}, nil
+ if algorithmSigner, ok := signer.(AlgorithmSigner); ok {
+ return &algorithmOpenSSHCertSigner{
+ &openSSHCertSigner{cert, signer}, algorithmSigner}, nil
+ } else {
+ return &openSSHCertSigner{cert, signer}, nil
+ }
}
func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
@@ -241,6 +251,10 @@ func (s *openSSHCertSigner) PublicKey() PublicKey {
return s.pub
}
+func (s *algorithmOpenSSHCertSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
+ return s.algorithmSigner.SignWithAlgorithm(rand, data, algorithm)
+}
+
const sourceAddressCriticalOption = "source-address"
// CertChecker does the work of verifying a certificate. Its methods
diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go
index ae6ca775..7b00bff1 100644
--- a/vendor/golang.org/x/crypto/ssh/client.go
+++ b/vendor/golang.org/x/crypto/ssh/client.go
@@ -185,7 +185,7 @@ func Dial(network, addr string, config *ClientConfig) (*Client, error) {
// keys. A HostKeyCallback must return nil if the host key is OK, or
// an error to reject it. It receives the hostname as passed to Dial
// or NewClientConn. The remote address is the RemoteAddr of the
-// net.Conn underlying the the SSH connection.
+// net.Conn underlying the SSH connection.
type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
// BannerCallback is the function type used for treat the banner sent by
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth_test.go b/vendor/golang.org/x/crypto/ssh/client_auth_test.go
index 5fbb20d8..026d137e 100644
--- a/vendor/golang.org/x/crypto/ssh/client_auth_test.go
+++ b/vendor/golang.org/x/crypto/ssh/client_auth_test.go
@@ -9,6 +9,7 @@ import (
"crypto/rand"
"errors"
"fmt"
+ "io"
"os"
"strings"
"testing"
@@ -28,8 +29,14 @@ func (cr keyboardInteractive) Challenge(user string, instruction string, questio
var clientPassword = "tiger"
// tryAuth runs a handshake with a given config against an SSH server
-// with config serverConfig
+// with config serverConfig. Returns both client and server side errors.
func tryAuth(t *testing.T, config *ClientConfig) error {
+ err, _ := tryAuthBothSides(t, config)
+ return err
+}
+
+// tryAuthBothSides runs the handshake and returns the resulting errors from both sides of the connection.
+func tryAuthBothSides(t *testing.T, config *ClientConfig) (clientError error, serverAuthErrors []error) {
c1, c2, err := netPipe()
if err != nil {
t.Fatalf("netPipe: %v", err)
@@ -79,9 +86,13 @@ func tryAuth(t *testing.T, config *ClientConfig) error {
}
serverConfig.AddHostKey(testSigners["rsa"])
+ serverConfig.AuthLogCallback = func(conn ConnMetadata, method string, err error) {
+ serverAuthErrors = append(serverAuthErrors, err)
+ }
+
go newServer(c1, serverConfig)
_, _, _, err = NewClientConn(c2, "", config)
- return err
+ return err, serverAuthErrors
}
func TestClientAuthPublicKey(t *testing.T) {
@@ -213,6 +224,45 @@ func TestAuthMethodRSAandDSA(t *testing.T) {
}
}
+type invalidAlgSigner struct {
+ Signer
+}
+
+func (s *invalidAlgSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
+ sig, err := s.Signer.Sign(rand, data)
+ if sig != nil {
+ sig.Format = "invalid"
+ }
+ return sig, err
+}
+
+func TestMethodInvalidAlgorithm(t *testing.T) {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ PublicKeys(&invalidAlgSigner{testSigners["rsa"]}),
+ },
+ HostKeyCallback: InsecureIgnoreHostKey(),
+ }
+
+ err, serverErrors := tryAuthBothSides(t, config)
+ if err == nil {
+ t.Fatalf("login succeeded")
+ }
+
+ found := false
+ want := "algorithm \"invalid\""
+
+ var errStrings []string
+ for _, err := range serverErrors {
+ found = found || (err != nil && strings.Contains(err.Error(), want))
+ errStrings = append(errStrings, err.Error())
+ }
+ if !found {
+ t.Errorf("server got error %q, want substring %q", errStrings, want)
+ }
+}
+
func TestClientHMAC(t *testing.T) {
for _, mac := range supportedMACs {
config := &ClientConfig{
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
index 2261dc38..96980479 100644
--- a/vendor/golang.org/x/crypto/ssh/keys.go
+++ b/vendor/golang.org/x/crypto/ssh/keys.go
@@ -38,6 +38,16 @@ const (
KeyAlgoED25519 = "ssh-ed25519"
)
+// These constants represent non-default signature algorithms that are supported
+// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See
+// [PROTOCOL.agent] section 4.5.1 and
+// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10
+const (
+ SigAlgoRSA = "ssh-rsa"
+ SigAlgoRSASHA2256 = "rsa-sha2-256"
+ SigAlgoRSASHA2512 = "rsa-sha2-512"
+)
+
// parsePubKey parses a public key of the given algorithm.
// Use ParsePublicKey for keys with prepended algorithm.
func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) {
@@ -301,6 +311,19 @@ type Signer interface {
Sign(rand io.Reader, data []byte) (*Signature, error)
}
+// A AlgorithmSigner is a Signer that also supports specifying a specific
+// algorithm to use for signing.
+type AlgorithmSigner interface {
+ Signer
+
+ // SignWithAlgorithm is like Signer.Sign, but allows specification of a
+ // non-default signing algorithm. See the SigAlgo* constants in this
+ // package for signature algorithms supported by this package. Callers may
+ // pass an empty string for the algorithm in which case the AlgorithmSigner
+ // will use its default algorithm.
+ SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error)
+}
+
type rsaPublicKey rsa.PublicKey
func (r *rsaPublicKey) Type() string {
@@ -349,13 +372,21 @@ func (r *rsaPublicKey) Marshal() []byte {
}
func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
- if sig.Format != r.Type() {
+ var hash crypto.Hash
+ switch sig.Format {
+ case SigAlgoRSA:
+ hash = crypto.SHA1
+ case SigAlgoRSASHA2256:
+ hash = crypto.SHA256
+ case SigAlgoRSASHA2512:
+ hash = crypto.SHA512
+ default:
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
}
- h := crypto.SHA1.New()
+ h := hash.New()
h.Write(data)
digest := h.Sum(nil)
- return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob)
+ return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, sig.Blob)
}
func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
@@ -459,6 +490,14 @@ func (k *dsaPrivateKey) PublicKey() PublicKey {
}
func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) {
+ return k.SignWithAlgorithm(rand, data, "")
+}
+
+func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
+ if algorithm != "" && algorithm != k.PublicKey().Type() {
+ return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
+ }
+
h := crypto.SHA1.New()
h.Write(data)
digest := h.Sum(nil)
@@ -691,16 +730,42 @@ func (s *wrappedSigner) PublicKey() PublicKey {
}
func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
+ return s.SignWithAlgorithm(rand, data, "")
+}
+
+func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
var hashFunc crypto.Hash
- switch key := s.pubKey.(type) {
- case *rsaPublicKey, *dsaPublicKey:
- hashFunc = crypto.SHA1
- case *ecdsaPublicKey:
- hashFunc = ecHash(key.Curve)
- case ed25519PublicKey:
- default:
- return nil, fmt.Errorf("ssh: unsupported key type %T", key)
+ if _, ok := s.pubKey.(*rsaPublicKey); ok {
+ // RSA keys support a few hash functions determined by the requested signature algorithm
+ switch algorithm {
+ case "", SigAlgoRSA:
+ algorithm = SigAlgoRSA
+ hashFunc = crypto.SHA1
+ case SigAlgoRSASHA2256:
+ hashFunc = crypto.SHA256
+ case SigAlgoRSASHA2512:
+ hashFunc = crypto.SHA512
+ default:
+ return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
+ }
+ } else {
+ // The only supported algorithm for all other key types is the same as the type of the key
+ if algorithm == "" {
+ algorithm = s.pubKey.Type()
+ } else if algorithm != s.pubKey.Type() {
+ return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
+ }
+
+ switch key := s.pubKey.(type) {
+ case *dsaPublicKey:
+ hashFunc = crypto.SHA1
+ case *ecdsaPublicKey:
+ hashFunc = ecHash(key.Curve)
+ case ed25519PublicKey:
+ default:
+ return nil, fmt.Errorf("ssh: unsupported key type %T", key)
+ }
}
var digest []byte
@@ -745,7 +810,7 @@ func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
}
return &Signature{
- Format: s.pubKey.Type(),
+ Format: algorithm,
Blob: signature,
}, nil
}
diff --git a/vendor/golang.org/x/crypto/ssh/keys_test.go b/vendor/golang.org/x/crypto/ssh/keys_test.go
index f28725f1..3847b3bf 100644
--- a/vendor/golang.org/x/crypto/ssh/keys_test.go
+++ b/vendor/golang.org/x/crypto/ssh/keys_test.go
@@ -109,6 +109,49 @@ func TestKeySignVerify(t *testing.T) {
}
}
+func TestKeySignWithAlgorithmVerify(t *testing.T) {
+ for _, priv := range testSigners {
+ if algorithmSigner, ok := priv.(AlgorithmSigner); !ok {
+ t.Errorf("Signers constructed by ssh package should always implement the AlgorithmSigner interface: %T", priv)
+ } else {
+ pub := priv.PublicKey()
+ data := []byte("sign me")
+
+ signWithAlgTestCase := func(algorithm string, expectedAlg string) {
+ sig, err := algorithmSigner.SignWithAlgorithm(rand.Reader, data, algorithm)
+ if err != nil {
+ t.Fatalf("Sign(%T): %v", priv, err)
+ }
+ if sig.Format != expectedAlg {
+ t.Errorf("signature format did not match requested signature algorithm: %s != %s", sig.Format, expectedAlg)
+ }
+
+ if err := pub.Verify(data, sig); err != nil {
+ t.Errorf("publicKey.Verify(%T): %v", priv, err)
+ }
+ sig.Blob[5]++
+ if err := pub.Verify(data, sig); err == nil {
+ t.Errorf("publicKey.Verify on broken sig did not fail")
+ }
+ }
+
+ // Using the empty string as the algorithm name should result in the same signature format as the algorithm-free Sign method.
+ defaultSig, err := priv.Sign(rand.Reader, data)
+ if err != nil {
+ t.Fatalf("Sign(%T): %v", priv, err)
+ }
+ signWithAlgTestCase("", defaultSig.Format)
+
+ // RSA keys are the only ones which currently support more than one signing algorithm
+ if pub.Type() == KeyAlgoRSA {
+ for _, algorithm := range []string{SigAlgoRSA, SigAlgoRSASHA2256, SigAlgoRSASHA2512} {
+ signWithAlgTestCase(algorithm, algorithm)
+ }
+ }
+ }
+ }
+}
+
func TestParseRSAPrivateKey(t *testing.T) {
key := testPrivateKeys["rsa"]
diff --git a/vendor/golang.org/x/crypto/ssh/mux_test.go b/vendor/golang.org/x/crypto/ssh/mux_test.go
index d88b64e4..94596ec2 100644
--- a/vendor/golang.org/x/crypto/ssh/mux_test.go
+++ b/vendor/golang.org/x/crypto/ssh/mux_test.go
@@ -20,7 +20,7 @@ func muxPair() (*mux, *mux) {
return s, c
}
-// Returns both ends of a channel, and the mux for the the 2nd
+// Returns both ends of a channel, and the mux for the 2nd
// channel.
func channelPair(t *testing.T) (*channel, *channel, *mux) {
c, s := muxPair()
diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
index d0f48253..e86e8966 100644
--- a/vendor/golang.org/x/crypto/ssh/server.go
+++ b/vendor/golang.org/x/crypto/ssh/server.go
@@ -404,7 +404,7 @@ userAuthLoop:
perms, authErr = config.PasswordCallback(s, password)
case "keyboard-interactive":
if config.KeyboardInteractiveCallback == nil {
- authErr = errors.New("ssh: keyboard-interactive auth not configubred")
+ authErr = errors.New("ssh: keyboard-interactive auth not configured")
break
}
@@ -484,6 +484,7 @@ userAuthLoop:
// sig.Format. This is usually the same, but
// for certs, the names differ.
if !isAcceptableAlgo(sig.Format) {
+ authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format)
break
}
signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData)
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go
index d9b77c1c..5e5d33b1 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd windows plan9 solaris
+// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd windows plan9 solaris
package terminal
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go
index 731c89a2..39110408 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/util.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd
+// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd
// Package terminal provides support functions for dealing with terminals, as
// commonly found on UNIX systems.
@@ -25,7 +25,7 @@ type State struct {
termios unix.Termios
}
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
return err == nil
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go b/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go
new file mode 100644
index 00000000..dfcd6278
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go
@@ -0,0 +1,12 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix
+
+package terminal
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+const ioctlWriteTermios = unix.TCSETS
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
index 799f049f..9317ac7e 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
@@ -21,7 +21,7 @@ import (
type State struct{}
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
return false
}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
index 9e41b9f4..3d5f06a9 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
@@ -17,7 +17,7 @@ type State struct {
termios unix.Termios
}
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
_, err := unix.IoctlGetTermio(fd, unix.TCGETA)
return err == nil
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
index 8618955d..6cb8a950 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
@@ -26,7 +26,7 @@ type State struct {
mode uint32
}
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
var st uint32
err := windows.GetConsoleMode(windows.Handle(fd), &st)
diff --git a/vendor/golang.org/x/crypto/ssh/testdata/keys.go b/vendor/golang.org/x/crypto/ssh/testdata/keys.go
index bdaa9cbc..bfae85fe 100644
--- a/vendor/golang.org/x/crypto/ssh/testdata/keys.go
+++ b/vendor/golang.org/x/crypto/ssh/testdata/keys.go
@@ -155,7 +155,7 @@ var SSHCertificates = map[string][]byte{
// Generated by the following commands:
//
// 1. Assumes "rsa" key above in file named "rsa", write out the public key to "rsa.pub":
- // ssh-keygen -y -f rsa > rsa.pu
+ // ssh-keygen -y -f rsa > rsa.pub
//
// 2. Assumes "ca" key above in file named "ca", sign a cert for "rsa.pub":
// ssh-keygen -s ca -h -n host.example.com -V +500w -I host.example.com-key rsa.pub