diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 9a61d3932..c03fa06c7 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -2,6 +2,7 @@
# Each line is a file pattern followed by one or more owners.
accounts/usbwallet @karalabe
+accounts/abi @gballet
consensus @karalabe
core/ @karalabe @holiman
eth/ @karalabe
@@ -9,27 +10,4 @@ les/ @zsfelfoldi
light/ @zsfelfoldi
mobile/ @karalabe
p2p/ @fjl @zsfelfoldi
-p2p/simulations @lmars
-p2p/protocols @zelig
-swarm/api/http @justelad
-swarm/bmt @zelig
-swarm/dev @lmars
-swarm/fuse @jmozah @holisticode
-swarm/grafana_dashboards @nonsense
-swarm/metrics @nonsense @holisticode
-swarm/multihash @nolash
-swarm/network/bitvector @zelig @janos
-swarm/network/priorityqueue @zelig @janos
-swarm/network/simulations @zelig @janos
-swarm/network/stream @janos @zelig @holisticode @justelad
-swarm/network/stream/intervals @janos
-swarm/network/stream/testing @zelig
-swarm/pot @zelig
-swarm/pss @nolash @zelig @nonsense
-swarm/services @zelig
-swarm/state @justelad
-swarm/storage/encryption @zelig @nagydani
-swarm/storage/mock @janos
-swarm/storage/feed @nolash @jpeletier
-swarm/testutil @lmars
whisper/ @gballet @gluk256
diff --git a/.github/no-response.yml b/.github/no-response.yml
index b6e96efdc..903d4ce85 100644
--- a/.github/no-response.yml
+++ b/.github/no-response.yml
@@ -1,7 +1,7 @@
# Number of days of inactivity before an Issue is closed for lack of response
daysUntilClose: 30
# Label requiring a response
-responseRequiredLabel: more-information-needed
+responseRequiredLabel: "need:more-information"
# Comment to post when closing an Issue for lack of response. Set to `false` to disable
closeComment: >
This issue has been automatically closed because there has been no response
diff --git a/.github/stale.yml b/.github/stale.yml
index c621939c3..6d921cc79 100644
--- a/.github/stale.yml
+++ b/.github/stale.yml
@@ -7,7 +7,7 @@ exemptLabels:
- pinned
- security
# Label to use when marking an issue as stale
-staleLabel: stale
+staleLabel: "status:inactive"
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
diff --git a/.travis.yml b/.travis.yml
index c1cc7c4aa..b41277085 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -29,6 +29,14 @@ matrix:
- os: osx
go: 1.11.x
script:
+ - echo "Increase the maximum number of open file descriptors on macOS"
+ - NOFILE=20480
+ - sudo sysctl -w kern.maxfiles=$NOFILE
+ - sudo sysctl -w kern.maxfilesperproc=$NOFILE
+ - sudo launchctl limit maxfiles $NOFILE $NOFILE
+ - sudo launchctl limit maxfiles
+ - ulimit -S -n $NOFILE
+ - ulimit -n
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
@@ -148,7 +156,7 @@ matrix:
git:
submodules: false # avoid cloning ethereum/tests
before_install:
- - curl https://storage.googleapis.com/golang/go1.11.2.linux-amd64.tar.gz | tar -xz
+ - curl https://storage.googleapis.com/golang/go1.11.4.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
diff --git a/README.md b/README.md
index 4b62bfde0..7593dd090 100644
--- a/README.md
+++ b/README.md
@@ -168,7 +168,7 @@ HTTP based JSON-RPC API options:
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
You'll need to use your own programming environments' capabilities (libraries, tools, etc) to connect
-via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](http://www.jsonrpc.org/specification)
+via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](https://www.jsonrpc.org/specification)
on all transports. You can reuse the same connection for multiple requests!
**Note: Please understand the security implications of opening up an HTTP/WS based transport before
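
[annotation] As a minimal sketch of speaking JSON-RPC from Go over one of the transports described above, go-ethereum's own rpc client can be used; the endpoint URL is an assumption for illustration, not part of this change:

    package main

    import (
        "fmt"
        "log"

        "github.com/ethereum/go-ethereum/rpc"
    )

    func main() {
        // Assumes a Geth node serving HTTP JSON-RPC on localhost:8545.
        client, err := rpc.Dial("http://localhost:8545")
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        var blockNumber string // hex-encoded quantity, per the JSON-RPC spec
        if err := client.Call(&blockNumber, "eth_blockNumber"); err != nil {
            log.Fatal(err)
        }
        fmt.Println("latest block:", blockNumber)
    }
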
diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go
index 535e5d78b..08d5db979 100644
--- a/accounts/abi/abi.go
+++ b/accounts/abi/abi.go
@@ -58,13 +58,11 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
return nil, err
}
return arguments, nil
-
}
method, exist := abi.Methods[name]
if !exist {
return nil, fmt.Errorf("method '%s' not found", name)
}
-
arguments, err := method.Inputs.Pack(args...)
if err != nil {
return nil, err
@@ -82,7 +80,7 @@ func (abi ABI) Unpack(v interface{}, name string, output []byte) (err error) {
// we need to decide whether we're calling a method or an event
if method, ok := abi.Methods[name]; ok {
if len(output)%32 != 0 {
- return fmt.Errorf("abi: improperly formatted output")
+ return fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(output), output)
}
return method.Outputs.Unpack(v, output)
} else if event, ok := abi.Events[name]; ok {
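
[annotation] The Unpack path touched above now echoes the offending payload when the output is not a multiple of 32 bytes. A minimal sketch of the call site, with a hypothetical single-output method definition and hand-made return data:

    package main

    import (
        "fmt"
        "log"
        "math/big"
        "strings"

        "github.com/ethereum/go-ethereum/accounts/abi"
        "github.com/ethereum/go-ethereum/common"
    )

    func main() {
        // Hypothetical method returning a single uint256.
        const def = `[{"type":"function","name":"balance","constant":true,"outputs":[{"name":"","type":"uint256"}]}]`
        parsed, err := abi.JSON(strings.NewReader(def))
        if err != nil {
            log.Fatal(err)
        }
        // A 32-byte payload encoding the value 1; anything that is not a
        // multiple of 32 bytes now fails with the raw bytes in the error.
        data := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")
        var balance *big.Int
        if err := parsed.Unpack(&balance, "balance", data); err != nil {
            log.Fatal(err)
        }
        fmt.Println(balance) // 1
    }
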
diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go
index 59ba79cb6..b9444f9f0 100644
--- a/accounts/abi/abi_test.go
+++ b/accounts/abi/abi_test.go
@@ -22,11 +22,10 @@ import (
"fmt"
"log"
"math/big"
+ "reflect"
"strings"
"testing"
- "reflect"
-
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
@@ -52,11 +51,14 @@ const jsondata2 = `
{ "type" : "function", "name" : "slice", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
{ "type" : "function", "name" : "slice256", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] },
{ "type" : "function", "name" : "sliceAddress", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] },
- { "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] }
+ { "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] },
+ { "type" : "function", "name" : "nestedArray", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint256[2][2]" }, { "name" : "b", "type" : "address[]" } ] },
+ { "type" : "function", "name" : "nestedArray2", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][2]" } ] },
+ { "type" : "function", "name" : "nestedSlice", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] }
]`
func TestReader(t *testing.T) {
- Uint256, _ := NewType("uint256")
+ Uint256, _ := NewType("uint256", nil)
exp := ABI{
Methods: map[string]Method{
"balance": {
@@ -177,7 +179,7 @@ func TestTestSlice(t *testing.T) {
}
func TestMethodSignature(t *testing.T) {
- String, _ := NewType("string")
+ String, _ := NewType("string", nil)
m := Method{"foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil}
exp := "foo(string,string)"
if m.Sig() != exp {
@@ -189,12 +191,31 @@ func TestMethodSignature(t *testing.T) {
t.Errorf("expected ids to match %x != %x", m.Id(), idexp)
}
- uintt, _ := NewType("uint256")
+ uintt, _ := NewType("uint256", nil)
m = Method{"foo", false, []Argument{{"bar", uintt, false}}, nil}
exp = "foo(uint256)"
if m.Sig() != exp {
t.Error("signature mismatch", exp, "!=", m.Sig())
}
+
+ // Method with tuple arguments
+ s, _ := NewType("tuple", []ArgumentMarshaling{
+ {Name: "a", Type: "int256"},
+ {Name: "b", Type: "int256[]"},
+ {Name: "c", Type: "tuple[]", Components: []ArgumentMarshaling{
+ {Name: "x", Type: "int256"},
+ {Name: "y", Type: "int256"},
+ }},
+ {Name: "d", Type: "tuple[2]", Components: []ArgumentMarshaling{
+ {Name: "x", Type: "int256"},
+ {Name: "y", Type: "int256"},
+ }},
+ })
+ m = Method{"foo", false, []Argument{{"s", s, false}, {"bar", String, false}}, nil}
+ exp = "foo((int256,int256[],(int256,int256)[],(int256,int256)[2]),string)"
+ if m.Sig() != exp {
+ t.Error("signature mismatch", exp, "!=", m.Sig())
+ }
}
func TestMultiPack(t *testing.T) {
@@ -564,11 +585,13 @@ func TestBareEvents(t *testing.T) {
const definition = `[
{ "type" : "event", "name" : "balance" },
{ "type" : "event", "name" : "anon", "anonymous" : true},
- { "type" : "event", "name" : "args", "inputs" : [{ "indexed":false, "name":"arg0", "type":"uint256" }, { "indexed":true, "name":"arg1", "type":"address" }] }
+ { "type" : "event", "name" : "args", "inputs" : [{ "indexed":false, "name":"arg0", "type":"uint256" }, { "indexed":true, "name":"arg1", "type":"address" }] },
+ { "type" : "event", "name" : "tuple", "inputs" : [{ "indexed":false, "name":"t", "type":"tuple", "components":[{"name":"a", "type":"uint256"}] }, { "indexed":true, "name":"arg1", "type":"address" }] }
]`
- arg0, _ := NewType("uint256")
- arg1, _ := NewType("address")
+ arg0, _ := NewType("uint256", nil)
+ arg1, _ := NewType("address", nil)
+ tuple, _ := NewType("tuple", []ArgumentMarshaling{{Name: "a", Type: "uint256"}})
expectedEvents := map[string]struct {
Anonymous bool
@@ -580,6 +603,10 @@ func TestBareEvents(t *testing.T) {
{Name: "arg0", Type: arg0, Indexed: false},
{Name: "arg1", Type: arg1, Indexed: true},
}},
+ "tuple": {false, []Argument{
+ {Name: "t", Type: tuple, Indexed: false},
+ {Name: "arg1", Type: arg1, Indexed: true},
+ }},
}
abi, err := JSON(strings.NewReader(definition))
@@ -646,28 +673,24 @@ func TestUnpackEvent(t *testing.T) {
}
type ReceivedEvent struct {
- Address common.Address
- Amount *big.Int
- Memo []byte
+ Sender common.Address
+ Amount *big.Int
+ Memo []byte
}
var ev ReceivedEvent
err = abi.Unpack(&ev, "received", data)
if err != nil {
t.Error(err)
- } else {
- t.Logf("len(data): %d; received event: %+v", len(data), ev)
}
type ReceivedAddrEvent struct {
- Address common.Address
+ Sender common.Address
}
var receivedAddrEv ReceivedAddrEvent
err = abi.Unpack(&receivedAddrEv, "receivedAddr", data)
if err != nil {
t.Error(err)
- } else {
- t.Logf("len(data): %d; received event: %+v", len(data), receivedAddrEv)
}
}
diff --git a/accounts/abi/argument.go b/accounts/abi/argument.go
index 93b513c34..d0a6b035c 100644
--- a/accounts/abi/argument.go
+++ b/accounts/abi/argument.go
@@ -33,24 +33,27 @@ type Argument struct {
type Arguments []Argument
+type ArgumentMarshaling struct {
+ Name string
+ Type string
+ Components []ArgumentMarshaling
+ Indexed bool
+}
+
// UnmarshalJSON implements json.Unmarshaler interface
func (argument *Argument) UnmarshalJSON(data []byte) error {
- var extarg struct {
- Name string
- Type string
- Indexed bool
- }
- err := json.Unmarshal(data, &extarg)
+ var arg ArgumentMarshaling
+ err := json.Unmarshal(data, &arg)
if err != nil {
return fmt.Errorf("argument json err: %v", err)
}
- argument.Type, err = NewType(extarg.Type)
+ argument.Type, err = NewType(arg.Type, arg.Components)
if err != nil {
return err
}
- argument.Name = extarg.Name
- argument.Indexed = extarg.Indexed
+ argument.Name = arg.Name
+ argument.Indexed = arg.Indexed
return nil
}
@@ -85,7 +88,6 @@ func (arguments Arguments) isTuple() bool {
// Unpack performs the operation hexdata -> Go format
func (arguments Arguments) Unpack(v interface{}, data []byte) error {
-
// make sure the passed value is arguments pointer
if reflect.Ptr != reflect.ValueOf(v).Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
@@ -97,52 +99,134 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
if arguments.isTuple() {
return arguments.unpackTuple(v, marshalledValues)
}
- return arguments.unpackAtomic(v, marshalledValues)
+ return arguments.unpackAtomic(v, marshalledValues[0])
}
-func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
+// unpack sets the unmarshalled value in Go format.
+// Note that dst here must be settable.
+func unpack(t *Type, dst interface{}, src interface{}) error {
+ var (
+ dstVal = reflect.ValueOf(dst).Elem()
+ srcVal = reflect.ValueOf(src)
+ )
+ if t.T != TupleTy && !((t.T == SliceTy || t.T == ArrayTy) && t.Elem.T == TupleTy) {
+ return set(dstVal, srcVal)
+ }
+
+ switch t.T {
+ case TupleTy:
+ if dstVal.Kind() != reflect.Struct {
+ return fmt.Errorf("abi: invalid dst value for unpack, want struct, got %s", dstVal.Kind())
+ }
+ fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, dstVal)
+ if err != nil {
+ return err
+ }
+ for i, elem := range t.TupleElems {
+ fname := fieldmap[t.TupleRawNames[i]]
+ field := dstVal.FieldByName(fname)
+ if !field.IsValid() {
+ return fmt.Errorf("abi: field %s can't found in the given value", t.TupleRawNames[i])
+ }
+ if err := unpack(elem, field.Addr().Interface(), srcVal.Field(i).Interface()); err != nil {
+ return err
+ }
+ }
+ return nil
+ case SliceTy:
+ if dstVal.Kind() != reflect.Slice {
+ return fmt.Errorf("abi: invalid dst value for unpack, want slice, got %s", dstVal.Kind())
+ }
+ slice := reflect.MakeSlice(dstVal.Type(), srcVal.Len(), srcVal.Len())
+ for i := 0; i < slice.Len(); i++ {
+ if err := unpack(t.Elem, slice.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ dstVal.Set(slice)
+ case ArrayTy:
+ if dstVal.Kind() != reflect.Array {
+ return fmt.Errorf("abi: invalid dst value for unpack, want array, got %s", dstVal.Kind())
+ }
+ array := reflect.New(dstVal.Type()).Elem()
+ for i := 0; i < array.Len(); i++ {
+ if err := unpack(t.Elem, array.Index(i).Addr().Interface(), srcVal.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ dstVal.Set(array)
+ }
+ return nil
+}
+
+// unpackAtomic unpacks ( hexdata -> go ) a single value
+func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues interface{}) error {
+ if arguments.LengthNonIndexed() == 0 {
+ return nil
+ }
+ argument := arguments.NonIndexed()[0]
+ elem := reflect.ValueOf(v).Elem()
+
+ if elem.Kind() == reflect.Struct {
+ fieldmap, err := mapArgNamesToStructFields([]string{argument.Name}, elem)
+ if err != nil {
+ return err
+ }
+ field := elem.FieldByName(fieldmap[argument.Name])
+ if !field.IsValid() {
+ return fmt.Errorf("abi: field %s can't be found in the given value", argument.Name)
+ }
+ return unpack(&argument.Type, field.Addr().Interface(), marshalledValues)
+ }
+ return unpack(&argument.Type, elem.Addr().Interface(), marshalledValues)
+}
+
+// unpackTuple unpacks ( hexdata -> go ) a batch of values.
+func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
var (
value = reflect.ValueOf(v).Elem()
typ = value.Type()
kind = value.Kind()
)
-
if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
return err
}
// If the interface is a struct, get the abi->struct_field mapping
-
var abi2struct map[string]string
if kind == reflect.Struct {
- var err error
- abi2struct, err = mapAbiToStructFields(arguments, value)
+ var (
+ argNames []string
+ err error
+ )
+ for _, arg := range arguments.NonIndexed() {
+ argNames = append(argNames, arg.Name)
+ }
+ abi2struct, err = mapArgNamesToStructFields(argNames, value)
if err != nil {
return err
}
}
for i, arg := range arguments.NonIndexed() {
-
- reflectValue := reflect.ValueOf(marshalledValues[i])
-
switch kind {
case reflect.Struct:
- if structField, ok := abi2struct[arg.Name]; ok {
- if err := set(value.FieldByName(structField), reflectValue, arg); err != nil {
- return err
- }
+ field := value.FieldByName(abi2struct[arg.Name])
+ if !field.IsValid() {
+ return fmt.Errorf("abi: field %s can't be found in the given value", arg.Name)
+ }
+ if err := unpack(&arg.Type, field.Addr().Interface(), marshalledValues[i]); err != nil {
+ return err
}
case reflect.Slice, reflect.Array:
if value.Len() < i {
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(arguments), value.Len())
}
v := value.Index(i)
- if err := requireAssignable(v, reflectValue); err != nil {
+ if err := requireAssignable(v, reflect.ValueOf(marshalledValues[i])); err != nil {
return err
}
-
- if err := set(v.Elem(), reflectValue, arg); err != nil {
+ if err := unpack(&arg.Type, v.Addr().Interface(), marshalledValues[i]); err != nil {
return err
}
default:
@@ -150,48 +234,7 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa
}
}
return nil
-}
-// unpackAtomic unpacks ( hexdata -> go ) a single value
-func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues []interface{}) error {
- if len(marshalledValues) != 1 {
- return fmt.Errorf("abi: wrong length, expected single value, got %d", len(marshalledValues))
- }
-
- elem := reflect.ValueOf(v).Elem()
- kind := elem.Kind()
- reflectValue := reflect.ValueOf(marshalledValues[0])
-
- var abi2struct map[string]string
- if kind == reflect.Struct {
- var err error
- if abi2struct, err = mapAbiToStructFields(arguments, elem); err != nil {
- return err
- }
- arg := arguments.NonIndexed()[0]
- if structField, ok := abi2struct[arg.Name]; ok {
- return set(elem.FieldByName(structField), reflectValue, arg)
- }
- return nil
- }
-
- return set(elem, reflectValue, arguments.NonIndexed()[0])
-
-}
-
-// Computes the full size of an array;
-// i.e. counting nested arrays, which count towards size for unpacking.
-func getArraySize(arr *Type) int {
- size := arr.Size
- // Arrays can be nested, with each element being the same size
- arr = arr.Elem
- for arr.T == ArrayTy {
- // Keep multiplying by elem.Size while the elem is an array.
- size *= arr.Size
- arr = arr.Elem
- }
- // Now we have the full array size, including its children.
- return size
}
// UnpackValues can be used to unpack ABI-encoded hexdata according to the ABI-specification,
@@ -202,7 +245,7 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
virtualArgs := 0
for index, arg := range arguments.NonIndexed() {
marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
- if arg.Type.T == ArrayTy {
+ if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) {
// If we have a static array, like [3]uint256, these are coded as
// just like uint256,uint256,uint256.
// This means that we need to add two 'virtual' arguments when
@@ -213,7 +256,11 @@ func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
//
// Calculate the full array size to get the correct offset for the next argument.
// Decrement it by 1, as the normal index increment is still applied.
- virtualArgs += getArraySize(&arg.Type) - 1
+ virtualArgs += getTypeSize(arg.Type)/32 - 1
+ } else if arg.Type.T == TupleTy && !isDynamicType(arg.Type) {
+ // If we have a static tuple, like (uint256, bool, uint256), these are
+ // coded as just like uint256,bool,uint256
+ virtualArgs += getTypeSize(arg.Type)/32 - 1
}
if err != nil {
return nil, err
@@ -243,11 +290,7 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
// input offset is the bytes offset for packed output
inputOffset := 0
for _, abiArg := range abiArgs {
- if abiArg.Type.T == ArrayTy {
- inputOffset += 32 * abiArg.Type.Size
- } else {
- inputOffset += 32
- }
+ inputOffset += getTypeSize(abiArg.Type)
}
var ret []byte
for i, a := range args {
@@ -257,14 +300,13 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
if err != nil {
return nil, err
}
- // check for a slice type (string, bytes, slice)
- if input.Type.requiresLengthPrefix() {
- // calculate the offset
- offset := inputOffset + len(variableInput)
+ // check for dynamic types
+ if isDynamicType(input.Type) {
// set the offset
- ret = append(ret, packNum(reflect.ValueOf(offset))...)
- // Append the packed output to the variable input. The variable input
- // will be appended at the end of the input.
+ ret = append(ret, packNum(reflect.ValueOf(inputOffset))...)
+ // calculate next offset
+ inputOffset += len(packed)
+ // append to variable input
variableInput = append(variableInput, packed...)
} else {
// append the packed value to the input
@@ -277,14 +319,13 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
return ret, nil
}
-// capitalise makes the first character of a string upper case, also removing any
-// prefixing underscores from the variable names.
-func capitalise(input string) string {
- for len(input) > 0 && input[0] == '_' {
- input = input[1:]
+// ToCamelCase converts an underscore-separated string to a camel-case string
+func ToCamelCase(input string) string {
+ parts := strings.Split(input, "_")
+ for i, s := range parts {
+ if len(s) > 0 {
+ parts[i] = strings.ToUpper(s[:1]) + s[1:]
+ }
}
- if len(input) == 0 {
- return ""
- }
- return strings.ToUpper(input[:1]) + input[1:]
+ return strings.Join(parts, "")
}
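
[annotation] The previously unexported capitalise helper is replaced by the exported abi.ToCamelCase, which camel-cases every underscore-separated segment rather than only uppercasing the first character. A minimal sketch of the new behavior:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/accounts/abi"
    )

    func main() {
        fmt.Println(abi.ToCamelCase("_under_scored")) // UnderScored
        fmt.Println(abi.ToCamelCase("foo_bar"))       // FooBar
        fmt.Println(abi.ToCamelCase("balance"))       // Balance
    }
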
diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go
index 83ad1c8ae..c37bdf11d 100644
--- a/accounts/abi/bind/base.go
+++ b/accounts/abi/bind/base.go
@@ -36,10 +36,10 @@ type SignerFn func(types.Signer, common.Address, *types.Transaction) (*types.Tra
// CallOpts is the collection of options to fine tune a contract call request.
type CallOpts struct {
- Pending bool // Whether to operate on the pending state or the last known one
- From common.Address // Optional the sender address, otherwise the first account is used
-
- Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
+ Pending bool // Whether to operate on the pending state or the last known one
+ From common.Address // Optionally the sender address, otherwise the first account is used
+ BlockNumber *big.Int // Optionally the block number on which the call should be performed
+ Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}
// TransactOpts is the collection of authorization data required to create a
@@ -148,10 +148,10 @@ func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string,
}
}
} else {
- output, err = c.caller.CallContract(ctx, msg, nil)
+ output, err = c.caller.CallContract(ctx, msg, opts.BlockNumber)
if err == nil && len(output) == 0 {
// Make sure we have a contract to operate on, and bail out otherwise.
- if code, err = c.caller.CodeAt(ctx, c.address, nil); err != nil {
+ if code, err = c.caller.CodeAt(ctx, c.address, opts.BlockNumber); err != nil {
return err
} else if len(code) == 0 {
return ErrNoCode
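
[annotation] The new BlockNumber field lets read-only calls be pinned to a historical block instead of always querying the latest state. A minimal sketch against an abigen-generated binding; the `Token` type, its constant BalanceOf method, and `holder` are hypothetical, not part of this diff:

    func balanceAt(token *Token, holder common.Address) (*big.Int, error) {
        opts := &bind.CallOpts{
            // Leaving BlockNumber nil preserves the old behaviour of
            // querying the latest known state.
            BlockNumber: big.NewInt(6000000),
        }
        return token.BalanceOf(opts, holder)
    }
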
diff --git a/accounts/abi/bind/base_test.go b/accounts/abi/bind/base_test.go
new file mode 100644
index 000000000..8adff8b59
--- /dev/null
+++ b/accounts/abi/bind/base_test.go
@@ -0,0 +1,64 @@
+package bind_test
+
+import (
+ "context"
+ "math/big"
+ "testing"
+
+ ethereum "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+type mockCaller struct {
+ codeAtBlockNumber *big.Int
+ callContractBlockNumber *big.Int
+}
+
+func (mc *mockCaller) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
+ mc.codeAtBlockNumber = blockNumber
+ return []byte{1, 2, 3}, nil
+}
+
+func (mc *mockCaller) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
+ mc.callContractBlockNumber = blockNumber
+ return nil, nil
+}
+
+func TestPassingBlockNumber(t *testing.T) {
+
+ mc := &mockCaller{}
+
+ bc := bind.NewBoundContract(common.HexToAddress("0x0"), abi.ABI{
+ Methods: map[string]abi.Method{
+ "something": {
+ Name: "something",
+ Outputs: abi.Arguments{},
+ },
+ },
+ }, mc, nil, nil)
+ var ret string
+
+ blockNumber := big.NewInt(42)
+
+ bc.Call(&bind.CallOpts{BlockNumber: blockNumber}, &ret, "something")
+
+ if mc.callContractBlockNumber != blockNumber {
+ t.Fatalf("CallContract() was not passed the block number")
+ }
+
+ if mc.codeAtBlockNumber != blockNumber {
+ t.Fatalf("CodeAt() was not passed the block number")
+ }
+
+ bc.Call(&bind.CallOpts{}, &ret, "something")
+
+ if mc.callContractBlockNumber != nil {
+ t.Fatalf("CallContract() was passed a block number when it should not have been")
+ }
+
+ if mc.codeAtBlockNumber != nil {
+ t.Fatalf("CodeAt() was passed a block number when it should not have been")
+ }
+}
diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go
index 4dca4b4ea..5ee30d024 100644
--- a/accounts/abi/bind/bind.go
+++ b/accounts/abi/bind/bind.go
@@ -381,54 +381,23 @@ func namedTypeJava(javaKind string, solKind abi.Type) string {
// methodNormalizer is a name transformer that modifies Solidity method names to
// conform to target language naming conventions.
var methodNormalizer = map[Lang]func(string) string{
- LangGo: capitalise,
+ LangGo: abi.ToCamelCase,
LangJava: decapitalise,
}
// capitalise makes a camel-case string which starts with an upper case character.
func capitalise(input string) string {
- for len(input) > 0 && input[0] == '_' {
- input = input[1:]
- }
- if len(input) == 0 {
- return ""
- }
- return toCamelCase(strings.ToUpper(input[:1]) + input[1:])
+ return abi.ToCamelCase(input)
}
// decapitalise makes a camel-case string which starts with a lower case character.
func decapitalise(input string) string {
- for len(input) > 0 && input[0] == '_' {
- input = input[1:]
- }
if len(input) == 0 {
- return ""
+ return input
}
- return toCamelCase(strings.ToLower(input[:1]) + input[1:])
-}
-// toCamelCase converts an under-score string to a camel-case string
-func toCamelCase(input string) string {
- toupper := false
-
- result := ""
- for k, v := range input {
- switch {
- case k == 0:
- result = strings.ToUpper(string(input[0]))
-
- case toupper:
- result += strings.ToUpper(string(v))
- toupper = false
-
- case v == '_':
- toupper = true
-
- default:
- result += string(v)
- }
- }
- return result
+ goForm := abi.ToCamelCase(input)
+ return strings.ToLower(goForm[:1]) + goForm[1:]
}
// structured checks whether a list of ABI data types has enough information to
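
[annotation] After this change decapitalise camel-cases first and only then lowers the leading character, so a Solidity name like "_token_owner" maps to "tokenOwner" rather than "token_owner". A small sketch that mirrors the (unexported) helper to show the mapping; the local decapitalise below is a copy for illustration:

    package main

    import (
        "fmt"
        "strings"

        "github.com/ethereum/go-ethereum/accounts/abi"
    )

    // decapitalise mirrors the bind helper after this change: camel-case
    // first, then lower the leading character.
    func decapitalise(input string) string {
        if len(input) == 0 {
            return input
        }
        goForm := abi.ToCamelCase(input)
        return strings.ToLower(goForm[:1]) + goForm[1:]
    }

    func main() {
        fmt.Println(decapitalise("_token_owner")) // tokenOwner
        fmt.Println(decapitalise("TokenOwner"))   // tokenOwner
    }
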
diff --git a/accounts/abi/event.go b/accounts/abi/event.go
index a3f6be973..9392c1990 100644
--- a/accounts/abi/event.go
+++ b/accounts/abi/event.go
@@ -36,12 +36,12 @@ type Event struct {
func (e Event) String() string {
inputs := make([]string, len(e.Inputs))
for i, input := range e.Inputs {
- inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
+ inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
if input.Indexed {
- inputs[i] = fmt.Sprintf("%v indexed %v", input.Name, input.Type)
+ inputs[i] = fmt.Sprintf("%v indexed %v", input.Type, input.Name)
}
}
- return fmt.Sprintf("e %v(%v)", e.Name, strings.Join(inputs, ", "))
+ return fmt.Sprintf("event %v(%v)", e.Name, strings.Join(inputs, ", "))
}
// Id returns the canonical representation of the event's signature used by the
diff --git a/accounts/abi/event_test.go b/accounts/abi/event_test.go
index 3bfdd7c0a..e735cceb8 100644
--- a/accounts/abi/event_test.go
+++ b/accounts/abi/event_test.go
@@ -87,12 +87,12 @@ func TestEventId(t *testing.T) {
}{
{
definition: `[
- { "type" : "event", "name" : "balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
- { "type" : "event", "name" : "check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
+ { "type" : "event", "name" : "Balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
+ { "type" : "event", "name" : "Check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
]`,
expectations: map[string]common.Hash{
- "balance": crypto.Keccak256Hash([]byte("balance(uint256)")),
- "check": crypto.Keccak256Hash([]byte("check(address,uint256)")),
+ "Balance": crypto.Keccak256Hash([]byte("Balance(uint256)")),
+ "Check": crypto.Keccak256Hash([]byte("Check(address,uint256)")),
},
},
}
@@ -111,6 +111,39 @@ func TestEventId(t *testing.T) {
}
}
+func TestEventString(t *testing.T) {
+ var table = []struct {
+ definition string
+ expectations map[string]string
+ }{
+ {
+ definition: `[
+ { "type" : "event", "name" : "Balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
+ { "type" : "event", "name" : "Check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] },
+ { "type" : "event", "name" : "Transfer", "inputs": [{ "name": "from", "type": "address", "indexed": true }, { "name": "to", "type": "address", "indexed": true }, { "name": "value", "type": "uint256" }] }
+ ]`,
+ expectations: map[string]string{
+ "Balance": "event Balance(uint256 in)",
+ "Check": "event Check(address t, uint256 b)",
+ "Transfer": "event Transfer(address indexed from, address indexed to, uint256 value)",
+ },
+ },
+ }
+
+ for _, test := range table {
+ abi, err := JSON(strings.NewReader(test.definition))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for name, event := range abi.Events {
+ if event.String() != test.expectations[name] {
+ t.Errorf("expected string to be %s, got %s", test.expectations[name], event.String())
+ }
+ }
+ }
+}
+
// TestEventMultiValueWithArrayUnpack verifies that array fields will be counted after parsing array.
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
diff --git a/accounts/abi/method.go b/accounts/abi/method.go
index 583105765..2d8d3d658 100644
--- a/accounts/abi/method.go
+++ b/accounts/abi/method.go
@@ -56,14 +56,14 @@ func (method Method) Sig() string {
func (method Method) String() string {
inputs := make([]string, len(method.Inputs))
for i, input := range method.Inputs {
- inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
+ inputs[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
}
outputs := make([]string, len(method.Outputs))
for i, output := range method.Outputs {
+ outputs[i] = output.Type.String()
if len(output.Name) > 0 {
- outputs[i] = fmt.Sprintf("%v ", output.Name)
+ outputs[i] += fmt.Sprintf(" %v", output.Name)
}
- outputs[i] += output.Type.String()
}
constant := ""
if method.Const {
diff --git a/accounts/abi/method_test.go b/accounts/abi/method_test.go
new file mode 100644
index 000000000..a98f1cd31
--- /dev/null
+++ b/accounts/abi/method_test.go
@@ -0,0 +1,61 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package abi
+
+import (
+ "strings"
+ "testing"
+)
+
+const methoddata = `
+[
+ { "type" : "function", "name" : "balance", "constant" : true },
+ { "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
+ { "type" : "function", "name" : "transfer", "constant" : false, "inputs" : [ { "name" : "from", "type" : "address" }, { "name" : "to", "type" : "address" }, { "name" : "value", "type" : "uint256" } ], "outputs" : [ { "name" : "success", "type" : "bool" } ] }
+]`
+
+func TestMethodString(t *testing.T) {
+ var table = []struct {
+ method string
+ expectation string
+ }{
+ {
+ method: "balance",
+ expectation: "function balance() constant returns()",
+ },
+ {
+ method: "send",
+ expectation: "function send(uint256 amount) returns()",
+ },
+ {
+ method: "transfer",
+ expectation: "function transfer(address from, address to, uint256 value) returns(bool success)",
+ },
+ }
+
+ abi, err := JSON(strings.NewReader(methoddata))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, test := range table {
+ got := abi.Methods[test.method].String()
+ if got != test.expectation {
+ t.Errorf("expected string to be %s, got %s", test.expectation, got)
+ }
+ }
+}
diff --git a/accounts/abi/pack_test.go b/accounts/abi/pack_test.go
index 58a5b7a58..10cd3a396 100644
--- a/accounts/abi/pack_test.go
+++ b/accounts/abi/pack_test.go
@@ -29,314 +29,601 @@ import (
func TestPack(t *testing.T) {
for i, test := range []struct {
- typ string
-
- input interface{}
- output []byte
+ typ string
+ components []ArgumentMarshaling
+ input interface{}
+ output []byte
}{
{
"uint8",
+ nil,
uint8(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint8[]",
+ nil,
[]uint8{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint16",
+ nil,
uint16(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint16[]",
+ nil,
[]uint16{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint32",
+ nil,
uint32(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint32[]",
+ nil,
[]uint32{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint64",
+ nil,
uint64(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint64[]",
+ nil,
[]uint64{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint256",
+ nil,
big.NewInt(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint256[]",
+ nil,
[]*big.Int{big.NewInt(1), big.NewInt(2)},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int8",
+ nil,
int8(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int8[]",
+ nil,
[]int8{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int16",
+ nil,
int16(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int16[]",
+ nil,
[]int16{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int32",
+ nil,
int32(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int32[]",
+ nil,
[]int32{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int64",
+ nil,
int64(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int64[]",
+ nil,
[]int64{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int256",
+ nil,
big.NewInt(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int256[]",
+ nil,
[]*big.Int{big.NewInt(1), big.NewInt(2)},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"bytes1",
+ nil,
[1]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes2",
+ nil,
[2]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes3",
+ nil,
[3]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes4",
+ nil,
[4]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes5",
+ nil,
[5]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes6",
+ nil,
[6]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes7",
+ nil,
[7]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes8",
+ nil,
[8]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes9",
+ nil,
[9]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes10",
+ nil,
[10]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes11",
+ nil,
[11]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes12",
+ nil,
[12]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes13",
+ nil,
[13]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes14",
+ nil,
[14]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes15",
+ nil,
[15]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes16",
+ nil,
[16]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes17",
+ nil,
[17]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes18",
+ nil,
[18]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes19",
+ nil,
[19]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes20",
+ nil,
[20]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes21",
+ nil,
[21]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes22",
+ nil,
[22]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes23",
+ nil,
[23]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes24",
- [24]byte{1},
- common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
- },
- {
- "bytes24",
+ nil,
[24]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes25",
+ nil,
[25]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes26",
+ nil,
[26]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes27",
+ nil,
[27]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes28",
+ nil,
[28]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes29",
+ nil,
[29]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes30",
+ nil,
[30]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes31",
+ nil,
[31]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes32",
+ nil,
[32]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"uint32[2][3][4]",
+ nil,
[4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018"),
},
{
"address[]",
+ nil,
[]common.Address{{1}, {2}},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000"),
},
{
"bytes32[]",
+ nil,
[]common.Hash{{1}, {2}},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000201000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000"),
},
{
"function",
+ nil,
[24]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"string",
+ nil,
"foobar",
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"),
},
+ {
+ "string[]",
+ nil,
+ []string{"hello", "foobar"},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
+ "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
+ "0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
+ "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
+ "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
+ "666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
+ },
+ {
+ "string[2]",
+ nil,
+ []string{"hello", "foobar"},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0
+ "0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
+ "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
+ "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
+ "666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
+ },
+ {
+ "bytes32[][]",
+ nil,
+ [][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
+ "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
+ "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
+ "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
+ "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
+ "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
+ "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
+ "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
+ },
+
+ {
+ "bytes32[][2]",
+ nil,
+ [][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
+ "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
+ "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
+ "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
+ "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
+ "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
+ "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
+ },
+
+ {
+ "bytes32[3][2]",
+ nil,
+ [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}},
+ common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
+ "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2]
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
+ "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
+ "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
+ },
+ {
+ // static tuple
+ "tuple",
+ []ArgumentMarshaling{
+ {Name: "a", Type: "int64"},
+ {Name: "b", Type: "int256"},
+ {Name: "c", Type: "int256"},
+ {Name: "d", Type: "bool"},
+ {Name: "e", Type: "bytes32[3][2]"},
+ },
+ struct {
+ A int64
+ B *big.Int
+ C *big.Int
+ D bool
+ E [][]common.Hash
+ }{1, big.NewInt(1), big.NewInt(-1), true, [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}}},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001" + // struct[a]
+ "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c]
+ "0000000000000000000000000000000000000000000000000000000000000001" + // struct[d]
+ "0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0]
+ "0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1]
+ "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][2]
+ "0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][0]
+ "0400000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][1]
+ "0500000000000000000000000000000000000000000000000000000000000000"), // struct[e] array[1][2]
+ },
+ {
+ // dynamic tuple
+ "tuple",
+ []ArgumentMarshaling{
+ {Name: "a", Type: "string"},
+ {Name: "b", Type: "int64"},
+ {Name: "c", Type: "bytes"},
+ {Name: "d", Type: "string[]"},
+ {Name: "e", Type: "int256[]"},
+ {Name: "f", Type: "address[]"},
+ },
+ struct {
+ FieldA string `abi:"a"` // Test whether abi tag works
+ FieldB int64 `abi:"b"`
+ C []byte
+ D []string
+ E []*big.Int
+ F []common.Address
+ }{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}},
+ common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset
+ "0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
+ "0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset
+ "0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset
+ "0000000000000000000000000000000000000000000000000000000000000220" + // struct[e] offset
+ "0000000000000000000000000000000000000000000000000000000000000280" + // struct[f] offset
+ "0000000000000000000000000000000000000000000000000000000000000006" + // struct[a] length
+ "666f6f6261720000000000000000000000000000000000000000000000000000" + // struct[a] "foobar"
+ "0000000000000000000000000000000000000000000000000000000000000001" + // struct[c] length
+ "0100000000000000000000000000000000000000000000000000000000000000" + // []byte{1}
+ "0000000000000000000000000000000000000000000000000000000000000002" + // struct[d] length
+ "0000000000000000000000000000000000000000000000000000000000000040" + // foo offset
+ "0000000000000000000000000000000000000000000000000000000000000080" + // bar offset
+ "0000000000000000000000000000000000000000000000000000000000000003" + // foo length
+ "666f6f0000000000000000000000000000000000000000000000000000000000" + // foo
+ "0000000000000000000000000000000000000000000000000000000000000003" + // bar offset
+ "6261720000000000000000000000000000000000000000000000000000000000" + // bar
+ "0000000000000000000000000000000000000000000000000000000000000002" + // struct[e] length
+ "0000000000000000000000000000000000000000000000000000000000000001" + // 1
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // -1
+ "0000000000000000000000000000000000000000000000000000000000000002" + // struct[f] length
+ "0000000000000000000000000100000000000000000000000000000000000000" + // common.Address{1}
+ "0000000000000000000000000200000000000000000000000000000000000000"), // common.Address{2}
+ },
+ {
+ // nested tuple
+ "tuple",
+ []ArgumentMarshaling{
+ {Name: "a", Type: "tuple", Components: []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256[]"}}},
+ {Name: "b", Type: "int256[]"},
+ },
+ struct {
+ A struct {
+ FieldA *big.Int `abi:"a"`
+ B []*big.Int
+ }
+ B []*big.Int
+ }{
+ A: struct {
+ FieldA *big.Int `abi:"a"` // Test whether abi tag works for nested tuple
+ B []*big.Int
+ }{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
+ B: []*big.Int{big.NewInt(1), big.NewInt(0)}},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // a offset
+ "00000000000000000000000000000000000000000000000000000000000000e0" + // b offset
+ "0000000000000000000000000000000000000000000000000000000000000001" + // a.a value
+ "0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset
+ "0000000000000000000000000000000000000000000000000000000000000002" + // a.b length
+ "0000000000000000000000000000000000000000000000000000000000000001" + // a.b[0] value
+ "0000000000000000000000000000000000000000000000000000000000000000" + // a.b[1] value
+ "0000000000000000000000000000000000000000000000000000000000000002" + // b length
+ "0000000000000000000000000000000000000000000000000000000000000001" + // b[0] value
+ "0000000000000000000000000000000000000000000000000000000000000000"), // b[1] value
+ },
+ {
+ // tuple slice
+ "tuple[]",
+ []ArgumentMarshaling{
+ {Name: "a", Type: "int256"},
+ {Name: "b", Type: "int256[]"},
+ },
+ []struct {
+ A *big.Int
+ B []*big.Int
+ }{
+ {big.NewInt(-1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
+ {big.NewInt(1), []*big.Int{big.NewInt(2), big.NewInt(-1)}},
+ },
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // tuple length
+ "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
+ "00000000000000000000000000000000000000000000000000000000000000e0" + // tuple[1] offset
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A
+ "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0].B offset
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].B length
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].B[0] value
+ "0000000000000000000000000000000000000000000000000000000000000000" + // tuple[0].B[1] value
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A
+ "0000000000000000000000000000000000000000000000000000000000000040" + // tuple[1].B offset
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B length
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B[0] value
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].B[1] value
+ },
+ {
+ // static tuple array
+ "tuple[2]",
+ []ArgumentMarshaling{
+ {Name: "a", Type: "int256"},
+ {Name: "b", Type: "int256"},
+ },
+ [2]struct {
+ A *big.Int
+ B *big.Int
+ }{
+ {big.NewInt(-1), big.NewInt(1)},
+ {big.NewInt(1), big.NewInt(-1)},
+ },
+ common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].a
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].b
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].a
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].b
+ },
+ {
+ // dynamic tuple array
+ "tuple[2]",
+ []ArgumentMarshaling{
+ {Name: "a", Type: "int256[]"},
+ },
+ [2]struct {
+ A []*big.Int
+ }{
+ {[]*big.Int{big.NewInt(-1), big.NewInt(1)}},
+ {[]*big.Int{big.NewInt(1), big.NewInt(-1)}},
+ },
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
+ "00000000000000000000000000000000000000000000000000000000000000c0" + // tuple[1] offset
+ "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[0].A offset
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].A length
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A[0]
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].A[1]
+ "0000000000000000000000000000000000000000000000000000000000000020" + // tuple[1].A offset
+ "0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].A length
+ "0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A[0]
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].A[1]
+ },
} {
- typ, err := NewType(test.typ)
+ typ, err := NewType(test.typ, test.components)
if err != nil {
t.Fatalf("%v failed. Unexpected parse error: %v", i, err)
}
-
output, err := typ.pack(reflect.ValueOf(test.input))
if err != nil {
t.Fatalf("%v failed. Unexpected pack error: %v", i, err)
}
if !bytes.Equal(output, test.output) {
- t.Errorf("%d failed. Expected bytes: '%x' Got: '%x'", i, test.output, output)
+ t.Errorf("input %d for typ: %v failed. Expected bytes: '%x' Got: '%x'", i, typ.String(), test.output, output)
}
}
}
@@ -406,6 +693,59 @@ func TestMethodPack(t *testing.T) {
if !bytes.Equal(packed, sig) {
t.Errorf("expected %x got %x", sig, packed)
}
+
+ a := [2][2]*big.Int{{big.NewInt(1), big.NewInt(1)}, {big.NewInt(2), big.NewInt(0)}}
+ sig = abi.Methods["nestedArray"].Id()
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{0}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{0xa0}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+ sig = append(sig, common.LeftPadBytes(addrC[:], 32)...)
+ sig = append(sig, common.LeftPadBytes(addrD[:], 32)...)
+ packed, err = abi.Pack("nestedArray", a, []common.Address{addrC, addrD})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(packed, sig) {
+ t.Errorf("expected %x got %x", sig, packed)
+ }
+
+ sig = abi.Methods["nestedArray2"].Id()
+ sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{0x80}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ packed, err = abi.Pack("nestedArray2", [2][]uint8{{1}, {1}})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(packed, sig) {
+ t.Errorf("expected %x got %x", sig, packed)
+ }
+
+ sig = abi.Methods["nestedSlice"].Id()
+ sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{0x02}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{0xa0}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
+ sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
+ packed, err = abi.Pack("nestedSlice", [][]uint8{{1, 2}, {1, 2}})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(packed, sig) {
+ t.Errorf("expected %x got %x", sig, packed)
+ }
}
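+
+// Note (illustrative): in nestedArray above, the static uint256[2][2] matrix
+// fills four head words and the trailing address[] argument is dynamic, so its
+// head slot holds the offset 0xa0 (5 words * 32 bytes) to where the tail begins.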
func TestPackNumber(t *testing.T) {
diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go
index 0193517a4..1b0bb0049 100644
--- a/accounts/abi/reflect.go
+++ b/accounts/abi/reflect.go
@@ -71,22 +71,36 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
//
// set is a bit more lenient when it comes to assignment and doesn't enforce as
// strict a ruleset as bare `reflect` does.
-func set(dst, src reflect.Value, output Argument) error {
- dstType := dst.Type()
- srcType := src.Type()
+func set(dst, src reflect.Value) error {
+ dstType, srcType := dst.Type(), src.Type()
switch {
- case dstType.AssignableTo(srcType):
- dst.Set(src)
case dstType.Kind() == reflect.Interface:
+ return set(dst.Elem(), src)
+ case dstType.Kind() == reflect.Ptr && dstType.Elem() != derefbigT:
+ return set(dst.Elem(), src)
+ case srcType.AssignableTo(dstType) && dst.CanSet():
dst.Set(src)
- case dstType.Kind() == reflect.Ptr:
- return set(dst.Elem(), src, output)
+ case dstType.Kind() == reflect.Slice && srcType.Kind() == reflect.Slice:
+ return setSlice(dst, src)
default:
return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
}
return nil
}
+
+// setSlice attempts to assign src to dst when the slices are not assignable by default,
+// e.g. src: [][]byte -> dst: [][15]byte.
+func setSlice(dst, src reflect.Value) error {
+ slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len())
+ for i := 0; i < src.Len(); i++ {
+ v := src.Index(i)
+ reflect.Copy(slice.Index(i), v)
+ }
+
+ dst.Set(slice)
+ return nil
+}
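+
+// Illustrative sketch (package-internal usage, assumed): setSlice is what lets
+// set copy src: [][]byte into dst: [][15]byte, which bare reflection rejects:
+//
+//   var dst [][15]byte
+//   src := [][]byte{{0x01}, {0x02}}
+//   _ = set(reflect.ValueOf(&dst).Elem(), reflect.ValueOf(src))
+//   // dst[0][0] == 0x01, dst[1][0] == 0x02, remaining bytes stay zero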
+
// requireAssignable assures that `dst` is either a pointer or an interface.
func requireAssignable(dst, src reflect.Value) error {
if dst.Kind() != reflect.Ptr && dst.Kind() != reflect.Interface {
@@ -112,14 +126,14 @@ func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
return nil
}
-// mapAbiToStringField maps abi to struct fields.
+// mapArgNamesToStructFields maps a slice of argument names to struct fields.
// first round: for each exportable field that contains an `abi:""` tag
-// and this field name exists in the arguments, pair them together.
-// second round: for each argument field that has not been already linked,
+// and this field name exists in the given argument name list, pair them together.
+// second round: for each argument name that has not been already linked,
// find what variable is expected to be mapped into, if it exists and has not been
// used, pair them.
-func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]string, error) {
-
+// Note this function assumes the given value is a struct value.
+func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) {
typ := value.Type()
abi2struct := make(map[string]string)
@@ -133,45 +147,39 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
if structFieldName[:1] != strings.ToUpper(structFieldName[:1]) {
continue
}
-
// skip fields that have no abi:"" tag.
var ok bool
var tagName string
if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok {
continue
}
-
// check if tag is empty.
if tagName == "" {
return nil, fmt.Errorf("struct: abi tag in '%s' is empty", structFieldName)
}
-
// check which argument field matches with the abi tag.
found := false
- for _, abiField := range args.NonIndexed() {
- if abiField.Name == tagName {
- if abi2struct[abiField.Name] != "" {
+ for _, arg := range argNames {
+ if arg == tagName {
+ if abi2struct[arg] != "" {
return nil, fmt.Errorf("struct: abi tag in '%s' already mapped", structFieldName)
}
// pair them
- abi2struct[abiField.Name] = structFieldName
- struct2abi[structFieldName] = abiField.Name
+ abi2struct[arg] = structFieldName
+ struct2abi[structFieldName] = arg
found = true
}
}
-
// check if this tag has been mapped.
if !found {
return nil, fmt.Errorf("struct: abi tag '%s' defined but not found in abi", tagName)
}
-
}
// second round: pair any argument names not linked in the first round.
- for _, arg := range args {
+ for _, argName := range argNames {
- abiFieldName := arg.Name
- structFieldName := capitalise(abiFieldName)
+ structFieldName := ToCamelCase(argName)
if structFieldName == "" {
return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
@@ -181,11 +189,11 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
// struct field with the same field name. If so, raise an error:
// abi: [ { "name": "value" } ]
// struct { Value *big.Int , Value1 *big.Int `abi:"value"`}
- if abi2struct[abiFieldName] != "" {
- if abi2struct[abiFieldName] != structFieldName &&
+ if abi2struct[argName] != "" {
+ if abi2struct[argName] != structFieldName &&
struct2abi[structFieldName] == "" &&
value.FieldByName(structFieldName).IsValid() {
- return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", abiFieldName)
+ return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", argName)
}
continue
}
@@ -197,16 +205,14 @@ func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]strin
if value.FieldByName(structFieldName).IsValid() {
// pair them
- abi2struct[abiFieldName] = structFieldName
- struct2abi[structFieldName] = abiFieldName
+ abi2struct[argName] = structFieldName
+ struct2abi[structFieldName] = argName
} else {
// not paired, but annotate as used, to detect cases like
// abi : [ { "name": "value" }, { "name": "_value" } ]
// struct { Value *big.Int }
- struct2abi[structFieldName] = abiFieldName
+ struct2abi[structFieldName] = argName
}
-
}
-
return abi2struct, nil
}
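+
+// Illustrative sketch (package-internal usage, assumed): a tagged struct field
+// is paired with the matching argument name in the first round:
+//
+//   m, _ := mapArgNamesToStructFields([]string{"balance"}, reflect.ValueOf(struct {
+//       Balance *big.Int `abi:"balance"`
+//   }{}))
+//   // m == map[string]string{"balance": "Balance"}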
diff --git a/accounts/abi/reflect_test.go b/accounts/abi/reflect_test.go
new file mode 100644
index 000000000..c425e6e54
--- /dev/null
+++ b/accounts/abi/reflect_test.go
@@ -0,0 +1,191 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package abi
+
+import (
+ "reflect"
+ "testing"
+)
+
+type reflectTest struct {
+ name string
+ args []string
+ struc interface{}
+ want map[string]string
+ err string
+}
+
+var reflectTests = []reflectTest{
+ {
+ name: "OneToOneCorrespondance",
+ args: []string{"fieldA"},
+ struc: struct {
+ FieldA int `abi:"fieldA"`
+ }{},
+ want: map[string]string{
+ "fieldA": "FieldA",
+ },
+ },
+ {
+ name: "MissingFieldsInStruct",
+ args: []string{"fieldA", "fieldB"},
+ struc: struct {
+ FieldA int `abi:"fieldA"`
+ }{},
+ want: map[string]string{
+ "fieldA": "FieldA",
+ },
+ },
+ {
+ name: "MoreFieldsInStructThanArgs",
+ args: []string{"fieldA"},
+ struc: struct {
+ FieldA int `abi:"fieldA"`
+ FieldB int
+ }{},
+ want: map[string]string{
+ "fieldA": "FieldA",
+ },
+ },
+ {
+ name: "MissingFieldInArgs",
+ args: []string{"fieldA"},
+ struc: struct {
+ FieldA int `abi:"fieldA"`
+ FieldB int `abi:"fieldB"`
+ }{},
+ err: "struct: abi tag 'fieldB' defined but not found in abi",
+ },
+ {
+ name: "NoAbiDescriptor",
+ args: []string{"fieldA"},
+ struc: struct {
+ FieldA int
+ }{},
+ want: map[string]string{
+ "fieldA": "FieldA",
+ },
+ },
+ {
+ name: "NoArgs",
+ args: []string{},
+ struc: struct {
+ FieldA int `abi:"fieldA"`
+ }{},
+ err: "struct: abi tag 'fieldA' defined but not found in abi",
+ },
+ {
+ name: "DifferentName",
+ args: []string{"fieldB"},
+ struc: struct {
+ FieldA int `abi:"fieldB"`
+ }{},
+ want: map[string]string{
+ "fieldB": "FieldA",
+ },
+ },
+ {
+ name: "MultipleFields",
+ args: []string{"fieldA", "fieldB"},
+ struc: struct {
+ FieldA int `abi:"fieldA"`
+ FieldB int `abi:"fieldB"`
+ }{},
+ want: map[string]string{
+ "fieldA": "FieldA",
+ "fieldB": "FieldB",
+ },
+ },
+ {
+ name: "MultipleFieldsABIMissing",
+ args: []string{"fieldA", "fieldB"},
+ struc: struct {
+ FieldA int `abi:"fieldA"`
+ FieldB int
+ }{},
+ want: map[string]string{
+ "fieldA": "FieldA",
+ "fieldB": "FieldB",
+ },
+ },
+ {
+ name: "NameConflict",
+ args: []string{"fieldB"},
+ struc: struct {
+ FieldA int `abi:"fieldB"`
+ FieldB int
+ }{},
+ err: "abi: multiple variables maps to the same abi field 'fieldB'",
+ },
+ {
+ name: "Underscored",
+ args: []string{"_"},
+ struc: struct {
+ FieldA int
+ }{},
+ err: "abi: purely underscored output cannot unpack to struct",
+ },
+ {
+ name: "DoubleMapping",
+ args: []string{"fieldB", "fieldC", "fieldA"},
+ struc: struct {
+ FieldA int `abi:"fieldC"`
+ FieldB int
+ }{},
+ err: "abi: multiple outputs mapping to the same struct field 'FieldA'",
+ },
+ {
+ name: "AlreadyMapped",
+ args: []string{"fieldB", "fieldB"},
+ struc: struct {
+ FieldB int `abi:"fieldB"`
+ }{},
+ err: "struct: abi tag in 'FieldB' already mapped",
+ },
+}
+
+func TestReflectNameToStruct(t *testing.T) {
+ for _, test := range reflectTests {
+ t.Run(test.name, func(t *testing.T) {
+ m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))
+ if len(test.err) > 0 {
+ if err == nil || err.Error() != test.err {
+ t.Fatalf("Invalid error: expected %v, got %v", test.err, err)
+ }
+ } else {
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ for fname := range test.want {
+ if m[fname] != test.want[fname] {
+ t.Fatalf("Incorrect value for field %s: expected %v, got %v", fname, test.want[fname], m[fname])
+ }
+ }
+ }
+ })
+ }
+}
diff --git a/accounts/abi/type.go b/accounts/abi/type.go
index dce89d2b4..26151dbd3 100644
--- a/accounts/abi/type.go
+++ b/accounts/abi/type.go
@@ -17,6 +17,7 @@
package abi
import (
+ "errors"
"fmt"
"reflect"
"regexp"
@@ -32,6 +33,7 @@ const (
StringTy
SliceTy
ArrayTy
+ TupleTy
AddressTy
FixedBytesTy
BytesTy
@@ -43,13 +45,16 @@ const (
// Type is the reflection of the supported argument type
type Type struct {
Elem *Type
-
Kind reflect.Kind
Type reflect.Type
Size int
T byte // Our own type checking
stringKind string // holds the unparsed string for deriving signatures
+
+ // Tuple related fields
+ TupleElems []*Type // Type information of all tuple fields
+ TupleRawNames []string // Raw field names of all tuple fields
}
var (
@@ -58,7 +63,7 @@ var (
)
// NewType creates a new reflection type of abi type given in t.
-func NewType(t string) (typ Type, err error) {
+func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) {
// check that array brackets are equal if they exist
if strings.Count(t, "[") != strings.Count(t, "]") {
return Type{}, fmt.Errorf("invalid arg type in abi")
@@ -71,7 +76,7 @@ func NewType(t string) (typ Type, err error) {
if strings.Count(t, "[") != 0 {
i := strings.LastIndex(t, "[")
// recursively embed the type
- embeddedType, err := NewType(t[:i])
+ embeddedType, err := NewType(t[:i], components)
if err != nil {
return Type{}, err
}
@@ -87,6 +92,9 @@ func NewType(t string) (typ Type, err error) {
typ.Kind = reflect.Slice
typ.Elem = &embeddedType
typ.Type = reflect.SliceOf(embeddedType.Type)
+ if embeddedType.T == TupleTy {
+ typ.stringKind = embeddedType.stringKind + sliced
+ }
} else if len(intz) == 1 {
// is a array
typ.T = ArrayTy
@@ -97,6 +105,9 @@ func NewType(t string) (typ Type, err error) {
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
}
typ.Type = reflect.ArrayOf(typ.Size, embeddedType.Type)
+ if embeddedType.T == TupleTy {
+ typ.stringKind = embeddedType.stringKind + sliced
+ }
} else {
return Type{}, fmt.Errorf("invalid formatting of array type")
}
@@ -158,6 +169,40 @@ func NewType(t string) (typ Type, err error) {
typ.Size = varSize
typ.Type = reflect.ArrayOf(varSize, reflect.TypeOf(byte(0)))
}
+ case "tuple":
+ var (
+ fields []reflect.StructField
+ elems []*Type
+ names []string
+ expression string // canonical parameter expression
+ )
+ expression += "("
+ for idx, c := range components {
+ cType, err := NewType(c.Type, c.Components)
+ if err != nil {
+ return Type{}, err
+ }
+ if ToCamelCase(c.Name) == "" {
+ return Type{}, errors.New("abi: purely anonymous or underscored field is not supported")
+ }
+ fields = append(fields, reflect.StructField{
+ Name: ToCamelCase(c.Name), // reflect.StructOf will panic for any unexported field.
+ Type: cType.Type,
+ })
+ elems = append(elems, &cType)
+ names = append(names, c.Name)
+ expression += cType.stringKind
+ if idx != len(components)-1 {
+ expression += ","
+ }
+ }
+ expression += ")"
+ typ.Kind = reflect.Struct
+ typ.Type = reflect.StructOf(fields)
+ typ.TupleElems = elems
+ typ.TupleRawNames = names
+ typ.T = TupleTy
+ typ.stringKind = expression
case "function":
typ.Kind = reflect.Array
typ.T = FunctionTy
@@ -178,28 +223,82 @@ func (t Type) String() (out string) {
func (t Type) pack(v reflect.Value) ([]byte, error) {
// dereference pointer first if it's a pointer
v = indirect(v)
-
if err := typeCheck(t, v); err != nil {
return nil, err
}
- if t.T == SliceTy || t.T == ArrayTy {
- var packed []byte
+ switch t.T {
+ case SliceTy, ArrayTy:
+ var ret []byte
+ if t.requiresLengthPrefix() {
+ // append length
+ ret = append(ret, packNum(reflect.ValueOf(v.Len()))...)
+ }
+
+ // calculate offset if any
+ offset := 0
+ offsetReq := isDynamicType(*t.Elem)
+ if offsetReq {
+ offset = getTypeSize(*t.Elem) * v.Len()
+ }
+ var tail []byte
for i := 0; i < v.Len(); i++ {
val, err := t.Elem.pack(v.Index(i))
if err != nil {
return nil, err
}
- packed = append(packed, val...)
+ if !offsetReq {
+ ret = append(ret, val...)
+ continue
+ }
+ ret = append(ret, packNum(reflect.ValueOf(offset))...)
+ offset += len(val)
+ tail = append(tail, val...)
}
- if t.T == SliceTy {
- return packBytesSlice(packed, v.Len()), nil
- } else if t.T == ArrayTy {
- return packed, nil
+ return append(ret, tail...), nil
+ case TupleTy:
+ // (T1,...,Tk) for k >= 0 and any types T1, ..., Tk
+ // enc(X) = head(X(1)) ... head(X(k)) tail(X(1)) ... tail(X(k))
+ // where X = (X(1), ..., X(k)) and head and tail are defined for Ti being a static
+ // type as
+ // head(X(i)) = enc(X(i)) and tail(X(i)) = "" (the empty string)
+ // and as
+ // head(X(i)) = enc(len(head(X(1)) ... head(X(k)) tail(X(1)) ... tail(X(i-1))))
+ // tail(X(i)) = enc(X(i))
+ // otherwise, i.e. if Ti is a dynamic type.
+ fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, v)
+ if err != nil {
+ return nil, err
}
+ // Calculate prefix occupied size.
+ offset := 0
+ for _, elem := range t.TupleElems {
+ offset += getTypeSize(*elem)
+ }
+ var ret, tail []byte
+ for i, elem := range t.TupleElems {
+ field := v.FieldByName(fieldmap[t.TupleRawNames[i]])
+ if !field.IsValid() {
+ return nil, fmt.Errorf("field %s for tuple not found in the given struct", t.TupleRawNames[i])
+ }
+ val, err := elem.pack(field)
+ if err != nil {
+ return nil, err
+ }
+ if isDynamicType(*elem) {
+ ret = append(ret, packNum(reflect.ValueOf(offset))...)
+ tail = append(tail, val...)
+ offset += len(val)
+ } else {
+ ret = append(ret, val...)
+ }
+ }
+ return append(ret, tail...), nil
+
+ default:
+ return packElement(t, v), nil
}
- return packElement(t, v), nil
}
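+
+// Illustrative sketch (package-internal usage, assumed): packing a dynamic
+// tuple emits the static head first, then the dynamic tail:
+//
+//   ty, _ := NewType("tuple", []ArgumentMarshaling{
+//       {Name: "a", Type: "uint256"},
+//       {Name: "b", Type: "string"},
+//   })
+//   enc, _ := ty.pack(reflect.ValueOf(struct {
+//       A *big.Int
+//       B string
+//   }{big.NewInt(1), "hi"}))
+//   // enc = enc(1) | enc(64) | enc(2) | "hi" right-padded to 32 bytes,
+//   // i.e. the head holds the value of a and the offset of b's tail.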
// requiresLengthPrefix returns whether the type requires any sort of length prefixing.
@@ -207,3 +306,47 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
func (t Type) requiresLengthPrefix() bool {
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy
}
+
+// isDynamicType returns true if the type is dynamic.
+// The following types are called “dynamic”:
+// * bytes
+// * string
+// * T[] for any T
+// * T[k] for any dynamic T and any k >= 0
+// * (T1,...,Tk) if Ti is dynamic for some 1 <= i <= k
+func isDynamicType(t Type) bool {
+ if t.T == TupleTy {
+ for _, elem := range t.TupleElems {
+ if isDynamicType(*elem) {
+ return true
+ }
+ }
+ return false
+ }
+ return t.T == StringTy || t.T == BytesTy || t.T == SliceTy || (t.T == ArrayTy && isDynamicType(*t.Elem))
+}
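+
+// Illustrative sketch (package-internal usage, assumed):
+//
+//   tup, _ := NewType("tuple", []ArgumentMarshaling{
+//       {Name: "a", Type: "uint256"},
+//       {Name: "b", Type: "bytes"},
+//   })
+//   arr, _ := NewType("uint256[2]", nil)
+//   // isDynamicType(tup) == true  (a bytes field makes the tuple dynamic)
+//   // isDynamicType(arr) == false (fixed-size array of a static type)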
+
+// getTypeSize returns the size that this type needs to occupy.
+// We distinguish static and dynamic types. Static types are encoded in-place
+// and dynamic types are encoded at a separately allocated location after the
+// current block.
+// So for a static variable, the size returned represents the size that the
+// variable actually occupies.
+// For a dynamic variable, the returned size is a fixed 32 bytes, which is used
+// to store the location reference to the actual value storage.
+func getTypeSize(t Type) int {
+ if t.T == ArrayTy && !isDynamicType(*t.Elem) {
+ // Recursively calculate type size if it is a nested array
+ if t.Elem.T == ArrayTy {
+ return t.Size * getTypeSize(*t.Elem)
+ }
+ return t.Size * 32
+ } else if t.T == TupleTy && !isDynamicType(t) {
+ total := 0
+ for _, elem := range t.TupleElems {
+ total += getTypeSize(*elem)
+ }
+ return total
+ }
+ return 32
+}
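+
+// Worked example (illustrative): uint256[2][3] is a static nested array, so
+// getTypeSize recurses: 3 * getTypeSize(uint256[2]) = 3 * (2 * 32) = 192.
+// Any dynamic type yields 32, the size of its offset slot in the head.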
diff --git a/accounts/abi/type_test.go b/accounts/abi/type_test.go
index f6b36f18f..7ef47330d 100644
--- a/accounts/abi/type_test.go
+++ b/accounts/abi/type_test.go
@@ -32,72 +32,75 @@ type typeWithoutStringer Type
// Tests that all allowed types get recognized by the type parser.
func TestTypeRegexp(t *testing.T) {
tests := []struct {
- blob string
- kind Type
+ blob string
+ components []ArgumentMarshaling
+ kind Type
}{
- {"bool", Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}},
- {"bool[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}},
- {"bool[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}},
- {"bool[2][]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
- {"bool[][]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
- {"bool[][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
- {"bool[2][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
- {"bool[2][][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
- {"bool[2][2][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
- {"bool[][][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
- {"bool[][2][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
- {"int8", Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}},
- {"int16", Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}},
- {"int32", Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}},
- {"int64", Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}},
- {"int256", Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}},
- {"int8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
- {"int8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
- {"int16[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
- {"int16[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
- {"int32[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
- {"int32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
- {"int64[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
- {"int64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
- {"int256[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
- {"int256[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
- {"uint8", Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}},
- {"uint16", Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}},
- {"uint32", Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}},
- {"uint64", Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}},
- {"uint256", Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}},
- {"uint8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
- {"uint8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
- {"uint16[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
- {"uint16[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
- {"uint32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
- {"uint32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
- {"uint64[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
- {"uint64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
- {"uint256[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
- {"uint256[2]", Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
- {"bytes32", Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
- {"bytes[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
- {"bytes[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
- {"bytes32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
- {"bytes32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
- {"string", Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
- {"string[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
- {"string[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
- {"address", Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}},
- {"address[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
- {"address[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
+ {"bool", nil, Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}},
+ {"bool[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}},
+ {"bool[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}},
+ {"bool[2][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
+ {"bool[][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
+ {"bool[][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
+ {"bool[2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
+ {"bool[2][][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
+ {"bool[2][2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
+ {"bool[][][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
+ {"bool[][2][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
+ {"int8", nil, Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}},
+ {"int16", nil, Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}},
+ {"int32", nil, Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}},
+ {"int64", nil, Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}},
+ {"int256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}},
+ {"int8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
+ {"int8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
+ {"int16[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
+ {"int16[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
+ {"int32[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
+ {"int32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
+ {"int64[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
+ {"int64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
+ {"int256[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
+ {"int256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
+ {"uint8", nil, Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}},
+ {"uint16", nil, Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}},
+ {"uint32", nil, Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}},
+ {"uint64", nil, Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}},
+ {"uint256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}},
+ {"uint8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
+ {"uint8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
+ {"uint16[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
+ {"uint16[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
+ {"uint32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
+ {"uint32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
+ {"uint64[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
+ {"uint64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
+ {"uint256[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
+ {"uint256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
+ {"bytes32", nil, Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
+ {"bytes[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
+ {"bytes[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
+ {"bytes32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
+ {"bytes32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
+ {"string", nil, Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
+ {"string[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
+ {"string[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
+ {"address", nil, Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}},
+ {"address[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
+ {"address[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
// TODO when fixed types are implemented properly
- // {"fixed", Type{}},
- // {"fixed128x128", Type{}},
- // {"fixed[]", Type{}},
- // {"fixed[2]", Type{}},
- // {"fixed128x128[]", Type{}},
- // {"fixed128x128[2]", Type{}},
+ // {"fixed", nil, Type{}},
+ // {"fixed128x128", nil, Type{}},
+ // {"fixed[]", nil, Type{}},
+ // {"fixed[2]", nil, Type{}},
+ // {"fixed128x128[]", nil, Type{}},
+ // {"fixed128x128[2]", nil, Type{}},
+ {"tuple", []ArgumentMarshaling{{Name: "a", Type: "int64"}}, Type{Kind: reflect.Struct, T: TupleTy, Type: reflect.TypeOf(struct{ A int64 }{}), stringKind: "(int64)",
+ TupleElems: []*Type{{Kind: reflect.Int64, T: IntTy, Type: reflect.TypeOf(int64(0)), Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"a"}}},
}
for _, tt := range tests {
- typ, err := NewType(tt.blob)
+ typ, err := NewType(tt.blob, tt.components)
if err != nil {
t.Errorf("type %q: failed to parse type string: %v", tt.blob, err)
}
@@ -109,154 +112,170 @@ func TestTypeRegexp(t *testing.T) {
func TestTypeCheck(t *testing.T) {
for i, test := range []struct {
- typ string
- input interface{}
- err string
+ typ string
+ components []ArgumentMarshaling
+ input interface{}
+ err string
}{
- {"uint", big.NewInt(1), "unsupported arg type: uint"},
- {"int", big.NewInt(1), "unsupported arg type: int"},
- {"uint256", big.NewInt(1), ""},
- {"uint256[][3][]", [][3][]*big.Int{{{}}}, ""},
- {"uint256[][][3]", [3][][]*big.Int{{{}}}, ""},
- {"uint256[3][][]", [][][3]*big.Int{{{}}}, ""},
- {"uint256[3][3][3]", [3][3][3]*big.Int{{{}}}, ""},
- {"uint8[][]", [][]uint8{}, ""},
- {"int256", big.NewInt(1), ""},
- {"uint8", uint8(1), ""},
- {"uint16", uint16(1), ""},
- {"uint32", uint32(1), ""},
- {"uint64", uint64(1), ""},
- {"int8", int8(1), ""},
- {"int16", int16(1), ""},
- {"int32", int32(1), ""},
- {"int64", int64(1), ""},
- {"uint24", big.NewInt(1), ""},
- {"uint40", big.NewInt(1), ""},
- {"uint48", big.NewInt(1), ""},
- {"uint56", big.NewInt(1), ""},
- {"uint72", big.NewInt(1), ""},
- {"uint80", big.NewInt(1), ""},
- {"uint88", big.NewInt(1), ""},
- {"uint96", big.NewInt(1), ""},
- {"uint104", big.NewInt(1), ""},
- {"uint112", big.NewInt(1), ""},
- {"uint120", big.NewInt(1), ""},
- {"uint128", big.NewInt(1), ""},
- {"uint136", big.NewInt(1), ""},
- {"uint144", big.NewInt(1), ""},
- {"uint152", big.NewInt(1), ""},
- {"uint160", big.NewInt(1), ""},
- {"uint168", big.NewInt(1), ""},
- {"uint176", big.NewInt(1), ""},
- {"uint184", big.NewInt(1), ""},
- {"uint192", big.NewInt(1), ""},
- {"uint200", big.NewInt(1), ""},
- {"uint208", big.NewInt(1), ""},
- {"uint216", big.NewInt(1), ""},
- {"uint224", big.NewInt(1), ""},
- {"uint232", big.NewInt(1), ""},
- {"uint240", big.NewInt(1), ""},
- {"uint248", big.NewInt(1), ""},
- {"int24", big.NewInt(1), ""},
- {"int40", big.NewInt(1), ""},
- {"int48", big.NewInt(1), ""},
- {"int56", big.NewInt(1), ""},
- {"int72", big.NewInt(1), ""},
- {"int80", big.NewInt(1), ""},
- {"int88", big.NewInt(1), ""},
- {"int96", big.NewInt(1), ""},
- {"int104", big.NewInt(1), ""},
- {"int112", big.NewInt(1), ""},
- {"int120", big.NewInt(1), ""},
- {"int128", big.NewInt(1), ""},
- {"int136", big.NewInt(1), ""},
- {"int144", big.NewInt(1), ""},
- {"int152", big.NewInt(1), ""},
- {"int160", big.NewInt(1), ""},
- {"int168", big.NewInt(1), ""},
- {"int176", big.NewInt(1), ""},
- {"int184", big.NewInt(1), ""},
- {"int192", big.NewInt(1), ""},
- {"int200", big.NewInt(1), ""},
- {"int208", big.NewInt(1), ""},
- {"int216", big.NewInt(1), ""},
- {"int224", big.NewInt(1), ""},
- {"int232", big.NewInt(1), ""},
- {"int240", big.NewInt(1), ""},
- {"int248", big.NewInt(1), ""},
- {"uint30", uint8(1), "abi: cannot use uint8 as type ptr as argument"},
- {"uint8", uint16(1), "abi: cannot use uint16 as type uint8 as argument"},
- {"uint8", uint32(1), "abi: cannot use uint32 as type uint8 as argument"},
- {"uint8", uint64(1), "abi: cannot use uint64 as type uint8 as argument"},
- {"uint8", int8(1), "abi: cannot use int8 as type uint8 as argument"},
- {"uint8", int16(1), "abi: cannot use int16 as type uint8 as argument"},
- {"uint8", int32(1), "abi: cannot use int32 as type uint8 as argument"},
- {"uint8", int64(1), "abi: cannot use int64 as type uint8 as argument"},
- {"uint16", uint16(1), ""},
- {"uint16", uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
- {"uint16[]", []uint16{1, 2, 3}, ""},
- {"uint16[]", [3]uint16{1, 2, 3}, ""},
- {"uint16[]", []uint32{1, 2, 3}, "abi: cannot use []uint32 as type [0]uint16 as argument"},
- {"uint16[3]", [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
- {"uint16[3]", [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
- {"uint16[3]", []uint16{1, 2, 3}, ""},
- {"uint16[3]", []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
- {"address[]", []common.Address{{1}}, ""},
- {"address[1]", []common.Address{{1}}, ""},
- {"address[1]", [1]common.Address{{1}}, ""},
- {"address[2]", [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
- {"bytes32", [32]byte{}, ""},
- {"bytes31", [31]byte{}, ""},
- {"bytes30", [30]byte{}, ""},
- {"bytes29", [29]byte{}, ""},
- {"bytes28", [28]byte{}, ""},
- {"bytes27", [27]byte{}, ""},
- {"bytes26", [26]byte{}, ""},
- {"bytes25", [25]byte{}, ""},
- {"bytes24", [24]byte{}, ""},
- {"bytes23", [23]byte{}, ""},
- {"bytes22", [22]byte{}, ""},
- {"bytes21", [21]byte{}, ""},
- {"bytes20", [20]byte{}, ""},
- {"bytes19", [19]byte{}, ""},
- {"bytes18", [18]byte{}, ""},
- {"bytes17", [17]byte{}, ""},
- {"bytes16", [16]byte{}, ""},
- {"bytes15", [15]byte{}, ""},
- {"bytes14", [14]byte{}, ""},
- {"bytes13", [13]byte{}, ""},
- {"bytes12", [12]byte{}, ""},
- {"bytes11", [11]byte{}, ""},
- {"bytes10", [10]byte{}, ""},
- {"bytes9", [9]byte{}, ""},
- {"bytes8", [8]byte{}, ""},
- {"bytes7", [7]byte{}, ""},
- {"bytes6", [6]byte{}, ""},
- {"bytes5", [5]byte{}, ""},
- {"bytes4", [4]byte{}, ""},
- {"bytes3", [3]byte{}, ""},
- {"bytes2", [2]byte{}, ""},
- {"bytes1", [1]byte{}, ""},
- {"bytes32", [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
- {"bytes32", common.Hash{1}, ""},
- {"bytes31", common.Hash{1}, "abi: cannot use common.Hash as type [31]uint8 as argument"},
- {"bytes31", [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
- {"bytes", []byte{0, 1}, ""},
- {"bytes", [2]byte{0, 1}, "abi: cannot use array as type slice as argument"},
- {"bytes", common.Hash{1}, "abi: cannot use array as type slice as argument"},
- {"string", "hello world", ""},
- {"string", string(""), ""},
- {"string", []byte{}, "abi: cannot use slice as type string as argument"},
- {"bytes32[]", [][32]byte{{}}, ""},
- {"function", [24]byte{}, ""},
- {"bytes20", common.Address{}, ""},
- {"address", [20]byte{}, ""},
- {"address", common.Address{}, ""},
- {"bytes32[]]", "", "invalid arg type in abi"},
- {"invalidType", "", "unsupported arg type: invalidType"},
- {"invalidSlice[]", "", "unsupported arg type: invalidSlice"},
+ {"uint", nil, big.NewInt(1), "unsupported arg type: uint"},
+ {"int", nil, big.NewInt(1), "unsupported arg type: int"},
+ {"uint256", nil, big.NewInt(1), ""},
+ {"uint256[][3][]", nil, [][3][]*big.Int{{{}}}, ""},
+ {"uint256[][][3]", nil, [3][][]*big.Int{{{}}}, ""},
+ {"uint256[3][][]", nil, [][][3]*big.Int{{{}}}, ""},
+ {"uint256[3][3][3]", nil, [3][3][3]*big.Int{{{}}}, ""},
+ {"uint8[][]", nil, [][]uint8{}, ""},
+ {"int256", nil, big.NewInt(1), ""},
+ {"uint8", nil, uint8(1), ""},
+ {"uint16", nil, uint16(1), ""},
+ {"uint32", nil, uint32(1), ""},
+ {"uint64", nil, uint64(1), ""},
+ {"int8", nil, int8(1), ""},
+ {"int16", nil, int16(1), ""},
+ {"int32", nil, int32(1), ""},
+ {"int64", nil, int64(1), ""},
+ {"uint24", nil, big.NewInt(1), ""},
+ {"uint40", nil, big.NewInt(1), ""},
+ {"uint48", nil, big.NewInt(1), ""},
+ {"uint56", nil, big.NewInt(1), ""},
+ {"uint72", nil, big.NewInt(1), ""},
+ {"uint80", nil, big.NewInt(1), ""},
+ {"uint88", nil, big.NewInt(1), ""},
+ {"uint96", nil, big.NewInt(1), ""},
+ {"uint104", nil, big.NewInt(1), ""},
+ {"uint112", nil, big.NewInt(1), ""},
+ {"uint120", nil, big.NewInt(1), ""},
+ {"uint128", nil, big.NewInt(1), ""},
+ {"uint136", nil, big.NewInt(1), ""},
+ {"uint144", nil, big.NewInt(1), ""},
+ {"uint152", nil, big.NewInt(1), ""},
+ {"uint160", nil, big.NewInt(1), ""},
+ {"uint168", nil, big.NewInt(1), ""},
+ {"uint176", nil, big.NewInt(1), ""},
+ {"uint184", nil, big.NewInt(1), ""},
+ {"uint192", nil, big.NewInt(1), ""},
+ {"uint200", nil, big.NewInt(1), ""},
+ {"uint208", nil, big.NewInt(1), ""},
+ {"uint216", nil, big.NewInt(1), ""},
+ {"uint224", nil, big.NewInt(1), ""},
+ {"uint232", nil, big.NewInt(1), ""},
+ {"uint240", nil, big.NewInt(1), ""},
+ {"uint248", nil, big.NewInt(1), ""},
+ {"int24", nil, big.NewInt(1), ""},
+ {"int40", nil, big.NewInt(1), ""},
+ {"int48", nil, big.NewInt(1), ""},
+ {"int56", nil, big.NewInt(1), ""},
+ {"int72", nil, big.NewInt(1), ""},
+ {"int80", nil, big.NewInt(1), ""},
+ {"int88", nil, big.NewInt(1), ""},
+ {"int96", nil, big.NewInt(1), ""},
+ {"int104", nil, big.NewInt(1), ""},
+ {"int112", nil, big.NewInt(1), ""},
+ {"int120", nil, big.NewInt(1), ""},
+ {"int128", nil, big.NewInt(1), ""},
+ {"int136", nil, big.NewInt(1), ""},
+ {"int144", nil, big.NewInt(1), ""},
+ {"int152", nil, big.NewInt(1), ""},
+ {"int160", nil, big.NewInt(1), ""},
+ {"int168", nil, big.NewInt(1), ""},
+ {"int176", nil, big.NewInt(1), ""},
+ {"int184", nil, big.NewInt(1), ""},
+ {"int192", nil, big.NewInt(1), ""},
+ {"int200", nil, big.NewInt(1), ""},
+ {"int208", nil, big.NewInt(1), ""},
+ {"int216", nil, big.NewInt(1), ""},
+ {"int224", nil, big.NewInt(1), ""},
+ {"int232", nil, big.NewInt(1), ""},
+ {"int240", nil, big.NewInt(1), ""},
+ {"int248", nil, big.NewInt(1), ""},
+ {"uint30", nil, uint8(1), "abi: cannot use uint8 as type ptr as argument"},
+ {"uint8", nil, uint16(1), "abi: cannot use uint16 as type uint8 as argument"},
+ {"uint8", nil, uint32(1), "abi: cannot use uint32 as type uint8 as argument"},
+ {"uint8", nil, uint64(1), "abi: cannot use uint64 as type uint8 as argument"},
+ {"uint8", nil, int8(1), "abi: cannot use int8 as type uint8 as argument"},
+ {"uint8", nil, int16(1), "abi: cannot use int16 as type uint8 as argument"},
+ {"uint8", nil, int32(1), "abi: cannot use int32 as type uint8 as argument"},
+ {"uint8", nil, int64(1), "abi: cannot use int64 as type uint8 as argument"},
+ {"uint16", nil, uint16(1), ""},
+ {"uint16", nil, uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
+ {"uint16[]", nil, []uint16{1, 2, 3}, ""},
+ {"uint16[]", nil, [3]uint16{1, 2, 3}, ""},
+ {"uint16[]", nil, []uint32{1, 2, 3}, "abi: cannot use []uint32 as type [0]uint16 as argument"},
+ {"uint16[3]", nil, [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
+ {"uint16[3]", nil, [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
+ {"uint16[3]", nil, []uint16{1, 2, 3}, ""},
+ {"uint16[3]", nil, []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
+ {"address[]", nil, []common.Address{{1}}, ""},
+ {"address[1]", nil, []common.Address{{1}}, ""},
+ {"address[1]", nil, [1]common.Address{{1}}, ""},
+ {"address[2]", nil, [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
+ {"bytes32", nil, [32]byte{}, ""},
+ {"bytes31", nil, [31]byte{}, ""},
+ {"bytes30", nil, [30]byte{}, ""},
+ {"bytes29", nil, [29]byte{}, ""},
+ {"bytes28", nil, [28]byte{}, ""},
+ {"bytes27", nil, [27]byte{}, ""},
+ {"bytes26", nil, [26]byte{}, ""},
+ {"bytes25", nil, [25]byte{}, ""},
+ {"bytes24", nil, [24]byte{}, ""},
+ {"bytes23", nil, [23]byte{}, ""},
+ {"bytes22", nil, [22]byte{}, ""},
+ {"bytes21", nil, [21]byte{}, ""},
+ {"bytes20", nil, [20]byte{}, ""},
+ {"bytes19", nil, [19]byte{}, ""},
+ {"bytes18", nil, [18]byte{}, ""},
+ {"bytes17", nil, [17]byte{}, ""},
+ {"bytes16", nil, [16]byte{}, ""},
+ {"bytes15", nil, [15]byte{}, ""},
+ {"bytes14", nil, [14]byte{}, ""},
+ {"bytes13", nil, [13]byte{}, ""},
+ {"bytes12", nil, [12]byte{}, ""},
+ {"bytes11", nil, [11]byte{}, ""},
+ {"bytes10", nil, [10]byte{}, ""},
+ {"bytes9", nil, [9]byte{}, ""},
+ {"bytes8", nil, [8]byte{}, ""},
+ {"bytes7", nil, [7]byte{}, ""},
+ {"bytes6", nil, [6]byte{}, ""},
+ {"bytes5", nil, [5]byte{}, ""},
+ {"bytes4", nil, [4]byte{}, ""},
+ {"bytes3", nil, [3]byte{}, ""},
+ {"bytes2", nil, [2]byte{}, ""},
+ {"bytes1", nil, [1]byte{}, ""},
+ {"bytes32", nil, [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
+ {"bytes32", nil, common.Hash{1}, ""},
+ {"bytes31", nil, common.Hash{1}, "abi: cannot use common.Hash as type [31]uint8 as argument"},
+ {"bytes31", nil, [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
+ {"bytes", nil, []byte{0, 1}, ""},
+ {"bytes", nil, [2]byte{0, 1}, "abi: cannot use array as type slice as argument"},
+ {"bytes", nil, common.Hash{1}, "abi: cannot use array as type slice as argument"},
+ {"string", nil, "hello world", ""},
+ {"string", nil, string(""), ""},
+ {"string", nil, []byte{}, "abi: cannot use slice as type string as argument"},
+ {"bytes32[]", nil, [][32]byte{{}}, ""},
+ {"function", nil, [24]byte{}, ""},
+ {"bytes20", nil, common.Address{}, ""},
+ {"address", nil, [20]byte{}, ""},
+ {"address", nil, common.Address{}, ""},
+ {"bytes32[]]", nil, "", "invalid arg type in abi"},
+ {"invalidType", nil, "", "unsupported arg type: invalidType"},
+ {"invalidSlice[]", nil, "", "unsupported arg type: invalidSlice"},
+ // simple tuple
+ {"tuple", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, struct {
+ A *big.Int
+ B *big.Int
+ }{}, ""},
+ // tuple slice
+ {"tuple[]", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, []struct {
+ A *big.Int
+ B *big.Int
+ }{}, ""},
+ // tuple array
+ {"tuple[2]", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, []struct {
+ A *big.Int
+ B *big.Int
+ }{{big.NewInt(0), big.NewInt(0)}, {big.NewInt(0), big.NewInt(0)}}, ""},
} {
- typ, err := NewType(test.typ)
+ typ, err := NewType(test.typ, test.components)
if err != nil && len(test.err) == 0 {
t.Fatal("unexpected parse error:", err)
} else if err != nil && len(test.err) != 0 {
diff --git a/accounts/abi/unpack.go b/accounts/abi/unpack.go
index d5875140c..8406b09c8 100644
--- a/accounts/abi/unpack.go
+++ b/accounts/abi/unpack.go
@@ -115,17 +115,6 @@ func readFixedBytes(t Type, word []byte) (interface{}, error) {
}
-func getFullElemSize(elem *Type) int {
- //all other should be counted as 32 (slices have pointers to respective elements)
- size := 32
- //arrays wrap it, each element being the same size
- for elem.T == ArrayTy {
- size *= elem.Size
- elem = elem.Elem
- }
- return size
-}
-
// iteratively unpack elements
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
if size < 0 {
@@ -150,13 +139,9 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
// Arrays have packed elements, resulting in longer unpack steps.
// Slices have just 32 bytes per element (pointing to the contents).
- elemSize := 32
- if t.T == ArrayTy {
- elemSize = getFullElemSize(t.Elem)
- }
+ elemSize := getTypeSize(*t.Elem)
for i, j := start, 0; j < size; i, j = i+elemSize, j+1 {
-
inter, err := toGoType(i, *t.Elem, output)
if err != nil {
return nil, err
@@ -170,6 +155,36 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
return refSlice.Interface(), nil
}
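+
+// forTupleUnpack decodes the fields of a tuple from output, walking the tuple's
+// element types word by word and accounting for statically inlined arrays and tuples.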
+func forTupleUnpack(t Type, output []byte) (interface{}, error) {
+ retval := reflect.New(t.Type).Elem()
+ virtualArgs := 0
+ for index, elem := range t.TupleElems {
+ marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output)
+ if elem.T == ArrayTy && !isDynamicType(*elem) {
+ // If we have a static array, like [3]uint256, it is encoded
+ // just like uint256,uint256,uint256.
+ // This means that we need to add two 'virtual' arguments when
+ // we count the index from now on.
+ //
+ // Array values nested multiple levels deep are also encoded inline:
+ // [2][3]uint256: uint256,uint256,uint256,uint256,uint256,uint256
+ //
+ // Calculate the full array size to get the correct offset for the next argument.
+ // Decrement it by 1, as the normal index increment is still applied.
+ virtualArgs += getTypeSize(*elem)/32 - 1
+ } else if elem.T == TupleTy && !isDynamicType(*elem) {
+ // If we have a static tuple, like (uint256, bool, uint256), these are
+ // coded as just like uint256,bool,uint256
+ virtualArgs += getTypeSize(*elem)/32 - 1
+ }
+ if err != nil {
+ return nil, err
+ }
+ retval.Field(index).Set(reflect.ValueOf(marshalledValue))
+ }
+ return retval.Interface(), nil
+}
+
// toGoType parses the output bytes and recursively assigns the value of these bytes
// into a go type in accordance with the ABI spec.
func toGoType(index int, t Type, output []byte) (interface{}, error) {
@@ -178,14 +193,14 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
}
var (
- returnOutput []byte
- begin, end int
- err error
+ returnOutput []byte
+ begin, length int
+ err error
)
// if we require a length prefix, find the beginning word and size returned.
if t.requiresLengthPrefix() {
- begin, end, err = lengthPrefixPointsTo(index, output)
+ begin, length, err = lengthPrefixPointsTo(index, output)
if err != nil {
return nil, err
}
@@ -194,12 +209,26 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
}
switch t.T {
+ case TupleTy:
+ if isDynamicType(t) {
+ begin, err := tuplePointsTo(index, output)
+ if err != nil {
+ return nil, err
+ }
+ return forTupleUnpack(t, output[begin:])
+ } else {
+ return forTupleUnpack(t, output[index:])
+ }
case SliceTy:
- return forEachUnpack(t, output, begin, end)
+ return forEachUnpack(t, output[begin:], 0, length)
case ArrayTy:
- return forEachUnpack(t, output, index, t.Size)
+ if isDynamicType(*t.Elem) {
+ offset := int64(binary.BigEndian.Uint64(returnOutput[len(returnOutput)-8:]))
+ return forEachUnpack(t, output[offset:], 0, t.Size)
+ }
+ return forEachUnpack(t, output[index:], 0, t.Size)
case StringTy: // variable arrays are written at the end of the return bytes
- return string(output[begin : begin+end]), nil
+ return string(output[begin : begin+length]), nil
case IntTy, UintTy:
return readInteger(t.T, t.Kind, returnOutput), nil
case BoolTy:
@@ -209,7 +238,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
case HashTy:
return common.BytesToHash(returnOutput), nil
case BytesTy:
- return output[begin : begin+end], nil
+ return output[begin : begin+length], nil
case FixedBytesTy:
return readFixedBytes(t, returnOutput)
case FunctionTy:
@@ -250,3 +279,17 @@ func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err
length = int(lengthBig.Uint64())
return
}
+
+// tuplePointsTo resolves the location reference for a dynamic tuple.
+func tuplePointsTo(index int, output []byte) (start int, err error) {
+ offset := big.NewInt(0).SetBytes(output[index : index+32])
+ outputLen := big.NewInt(int64(len(output)))
+
+ if offset.Cmp(outputLen) > 0 {
+ return 0, fmt.Errorf("abi: cannot marshal into go slice: offset %v would go over slice boundary (len=%v)", offset, outputLen)
+ }
+ if offset.BitLen() > 63 {
+ return 0, fmt.Errorf("abi offset larger than int64: %v", offset)
+ }
+ return int(offset.Uint64()), nil
+}
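Both forEachUnpack and forTupleUnpack above rely on getTypeSize, which this change adds to accounts/abi/type.go but which is not part of this excerpt. A sketch of its shape, for context: static arrays and static tuples occupy their full flattened size, while anything dynamic takes a single 32-byte pointer word, which is exactly the arithmetic behind virtualArgs.

```go
// Illustrative sketch of getTypeSize (the real definition lives in
// accounts/abi/type.go within this same change; treat this as a paraphrase).
// A static [3]uint256 yields 96, a static (uint256,bool,uint256) tuple
// yields 96, and any dynamic type yields 32.
func getTypeSize(t Type) int {
	if t.T == ArrayTy && !isDynamicType(*t.Elem) {
		// Nested static arrays flatten recursively: [2][3]uint256 -> 6 words.
		if t.Elem.T == ArrayTy {
			return t.Size * getTypeSize(*t.Elem)
		}
		return t.Size * 32
	} else if t.T == TupleTy && !isDynamicType(t) {
		// Static tuples are the sum of their flattened element sizes.
		total := 0
		for _, elem := range t.TupleElems {
			total += getTypeSize(*elem)
		}
		return total
	}
	return 32
}
```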
diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go
index 97552b90c..ff88be3d3 100644
--- a/accounts/abi/unpack_test.go
+++ b/accounts/abi/unpack_test.go
@@ -173,9 +173,14 @@ var unpackTests = []unpackTest{
// multi dimensional, if these pass, all types that don't require length prefix should pass
{
def: `[{"type": "uint8[][]"}]`,
- enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000E0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
want: [][]uint8{{1, 2}, {1, 2}},
},
+ {
+ def: `[{"type": "uint8[][]"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
+ want: [][]uint8{{1, 2}, {1, 2, 3}},
+ },
{
def: `[{"type": "uint8[2][2]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@@ -183,7 +188,7 @@ var unpackTests = []unpackTest{
},
{
def: `[{"type": "uint8[][2]"}]`,
- enc: "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
+ enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001",
want: [2][]uint8{{1}, {1}},
},
{
@@ -191,6 +196,11 @@ var unpackTests = []unpackTest{
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
want: [][2]uint8{{1, 2}},
},
+ {
+ def: `[{"type": "uint8[2][]"}]`,
+ enc: "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: [][2]uint8{{1, 2}, {1, 2}},
+ },
{
def: `[{"type": "uint16[]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@@ -236,6 +246,26 @@ var unpackTests = []unpackTest{
enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
},
+ {
+ def: `[{"type": "string[4]"}]`,
+ enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000000548656c6c6f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005576f726c64000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b476f2d657468657265756d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000",
+ want: [4]string{"Hello", "World", "Go-ethereum", "Ethereum"},
+ },
+ {
+ def: `[{"type": "string[]"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b676f2d657468657265756d000000000000000000000000000000000000000000",
+ want: []string{"Ethereum", "go-ethereum"},
+ },
+ {
+ def: `[{"type": "bytes[]"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000003f0f0f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003f0f0f00000000000000000000000000000000000000000000000000000000000",
+ want: [][]byte{{0xf0, 0xf0, 0xf0}, {0xf0, 0xf0, 0xf0}},
+ },
+ {
+ def: `[{"type": "uint256[2][][]"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e80000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000003e8",
+ want: [][][2]*big.Int{{{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}, {{big.NewInt(1), big.NewInt(200)}, {big.NewInt(1), big.NewInt(1000)}}},
+ },
{
def: `[{"type": "int8[]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@@ -295,6 +325,53 @@ var unpackTests = []unpackTest{
Int2 *big.Int
}{big.NewInt(1), big.NewInt(2)},
},
+ {
+ def: `[{"name":"int_one","type":"int256"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: struct {
+ IntOne *big.Int
+ }{big.NewInt(1)},
+ },
+ {
+ def: `[{"name":"int__one","type":"int256"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: struct {
+ IntOne *big.Int
+ }{big.NewInt(1)},
+ },
+ {
+ def: `[{"name":"int_one_","type":"int256"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: struct {
+ IntOne *big.Int
+ }{big.NewInt(1)},
+ },
+ {
+ def: `[{"name":"int_one","type":"int256"}, {"name":"intone","type":"int256"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: struct {
+ IntOne *big.Int
+ Intone *big.Int
+ }{big.NewInt(1), big.NewInt(2)},
+ },
+ {
+ def: `[{"name":"___","type":"int256"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: struct {
+ IntOne *big.Int
+ Intone *big.Int
+ }{},
+ err: "abi: purely underscored output cannot unpack to struct",
+ },
+ {
+ def: `[{"name":"int_one","type":"int256"},{"name":"IntOne","type":"int256"}]`,
+ enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ want: struct {
+ Int1 *big.Int
+ Int2 *big.Int
+ }{},
+ err: "abi: multiple outputs mapping to the same struct field 'IntOne'",
+ },
{
def: `[{"name":"int","type":"int256"},{"name":"Int","type":"int256"}]`,
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@@ -359,6 +436,55 @@ func TestUnpack(t *testing.T) {
}
}
+func TestUnpackSetDynamicArrayOutput(t *testing.T) {
+ abi, err := JSON(strings.NewReader(`[{"constant":true,"inputs":[],"name":"testDynamicFixedBytes15","outputs":[{"name":"","type":"bytes15[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"testDynamicFixedBytes32","outputs":[{"name":"","type":"bytes32[]"}],"payable":false,"stateMutability":"view","type":"function"}]`))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var (
+ marshalledReturn32 = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000230783132333435363738393000000000000000000000000000000000000000003078303938373635343332310000000000000000000000000000000000000000")
+ marshalledReturn15 = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000230783031323334350000000000000000000000000000000000000000000000003078393837363534000000000000000000000000000000000000000000000000")
+
+ out32 [][32]byte
+ out15 [][15]byte
+ )
+
+ // test 32
+ err = abi.Unpack(&out32, "testDynamicFixedBytes32", marshalledReturn32)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(out32) != 2 {
+ t.Fatalf("expected array with 2 values, got %d", len(out32))
+ }
+ expected := common.Hex2Bytes("3078313233343536373839300000000000000000000000000000000000000000")
+ if !bytes.Equal(out32[0][:], expected) {
+ t.Errorf("expected %x, got %x\n", expected, out32[0])
+ }
+ expected = common.Hex2Bytes("3078303938373635343332310000000000000000000000000000000000000000")
+ if !bytes.Equal(out32[1][:], expected) {
+ t.Errorf("expected %x, got %x\n", expected, out32[1])
+ }
+
+ // test 15
+ err = abi.Unpack(&out15, "testDynamicFixedBytes15", marshalledReturn15)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(out15) != 2 {
+ t.Fatalf("expected array with 2 values, got %d", len(out15))
+ }
+ expected = common.Hex2Bytes("307830313233343500000000000000")
+ if !bytes.Equal(out15[0][:], expected) {
+ t.Errorf("expected %x, got %x\n", expected, out15[0])
+ }
+ expected = common.Hex2Bytes("307839383736353400000000000000")
+ if !bytes.Equal(out15[1][:], expected) {
+ t.Errorf("expected %x, got %x\n", expected, out15[1])
+ }
+}
+
type methodMultiOutput struct {
Int *big.Int
String string
@@ -462,6 +588,68 @@ func TestMultiReturnWithArray(t *testing.T) {
}
}
+func TestMultiReturnWithStringArray(t *testing.T) {
+ const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "uint256[3]"},{"name": "","type": "address"},{"name": "","type": "string[2]"},{"name": "","type": "bool"}]}]`
+ abi, err := JSON(strings.NewReader(definition))
+ if err != nil {
+ t.Fatal(err)
+ }
+ buff := new(bytes.Buffer)
+ buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000005c1b78ea0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000001a055690d9db80000000000000000000000000000ab1257528b3782fb40d7ed5f72e624b744dffb2f00000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008457468657265756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001048656c6c6f2c20457468657265756d2100000000000000000000000000000000"))
+ temp, _ := big.NewInt(0).SetString("30000000000000000000", 10)
+ ret1, ret1Exp := new([3]*big.Int), [3]*big.Int{big.NewInt(1545304298), big.NewInt(6), temp}
+ ret2, ret2Exp := new(common.Address), common.HexToAddress("ab1257528b3782fb40d7ed5f72e624b744dffb2f")
+ ret3, ret3Exp := new([2]string), [2]string{"Ethereum", "Hello, Ethereum!"}
+ ret4, ret4Exp := new(bool), false
+ if err := abi.Unpack(&[]interface{}{ret1, ret2, ret3, ret4}, "multi", buff.Bytes()); err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(*ret1, ret1Exp) {
+ t.Error("big.Int array result", *ret1, "!= Expected", ret1Exp)
+ }
+ if !reflect.DeepEqual(*ret2, ret2Exp) {
+ t.Error("address result", *ret2, "!= Expected", ret2Exp)
+ }
+ if !reflect.DeepEqual(*ret3, ret3Exp) {
+ t.Error("string array result", *ret3, "!= Expected", ret3Exp)
+ }
+ if !reflect.DeepEqual(*ret4, ret4Exp) {
+ t.Error("bool result", *ret4, "!= Expected", ret4Exp)
+ }
+}
+
+func TestMultiReturnWithStringSlice(t *testing.T) {
+ const definition = `[{"name" : "multi", "outputs": [{"name": "","type": "string[]"},{"name": "","type": "uint256[]"}]}]`
+ abi, err := JSON(strings.NewReader(definition))
+ if err != nil {
+ t.Fatal(err)
+ }
+ buff := new(bytes.Buffer)
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // output[0] offset
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000120")) // output[1] offset
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // output[0] length
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040")) // output[0][0] offset
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // output[0][1] offset
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000008")) // output[0][0] length
+ buff.Write(common.Hex2Bytes("657468657265756d000000000000000000000000000000000000000000000000")) // output[0][0] value
+ buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000b")) // output[0][1] length
+ buff.Write(common.Hex2Bytes("676f2d657468657265756d000000000000000000000000000000000000000000")) // output[0][1] value
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // output[1] length
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000064")) // output[1][0] value
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000065")) // output[1][1] value
+ ret1, ret1Exp := new([]string), []string{"ethereum", "go-ethereum"}
+ ret2, ret2Exp := new([]*big.Int), []*big.Int{big.NewInt(100), big.NewInt(101)}
+ if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(*ret1, ret1Exp) {
+ t.Error("string slice result", *ret1, "!= Expected", ret1Exp)
+ }
+ if !reflect.DeepEqual(*ret2, ret2Exp) {
+ t.Error("uint256 slice result", *ret2, "!= Expected", ret2Exp)
+ }
+}
+
func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
// Similar to TestMultiReturnWithArray, but with a special case in mind:
// values of nested static arrays count towards the size as well, and any element following
@@ -751,6 +939,108 @@ func TestUnmarshal(t *testing.T) {
}
}
+func TestUnpackTuple(t *testing.T) {
+ const simpleTuple = `[{"name":"tuple","constant":false,"outputs":[{"type":"tuple","name":"ret","components":[{"type":"int256","name":"a"},{"type":"int256","name":"b"}]}]}]`
+ abi, err := JSON(strings.NewReader(simpleTuple))
+ if err != nil {
+ t.Fatal(err)
+ }
+ buff := new(bytes.Buffer)
+
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // ret[a] = 1
+ buff.Write(common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) // ret[b] = -1
+
+ v := struct {
+ Ret struct {
+ A *big.Int
+ B *big.Int
+ }
+ }{Ret: struct {
+ A *big.Int
+ B *big.Int
+ }{new(big.Int), new(big.Int)}}
+
+ err = abi.Unpack(&v, "tuple", buff.Bytes())
+ if err != nil {
+ t.Error(err)
+ } else {
+ if v.Ret.A.Cmp(big.NewInt(1)) != 0 {
+ t.Errorf("unexpected value unpacked: want %x, got %x", 1, v.Ret.A)
+ }
+ if v.Ret.B.Cmp(big.NewInt(-1)) != 0 {
+ t.Errorf("unexpected value unpacked: want %x, got %x", -1, v.Ret.B)
+ }
+ }
+
+ // Test nested tuple
+ const nestedTuple = `[{"name":"tuple","constant":false,"outputs":[
+ {"type":"tuple","name":"s","components":[{"type":"uint256","name":"a"},{"type":"uint256[]","name":"b"},{"type":"tuple[]","name":"c","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]}]},
+ {"type":"tuple","name":"t","components":[{"name":"x", "type":"uint256"},{"name":"y","type":"uint256"}]},
+ {"type":"uint256","name":"a"}
+ ]}]`
+
+ abi, err = JSON(strings.NewReader(nestedTuple))
+ if err != nil {
+ t.Fatal(err)
+ }
+ buff.Reset()
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000080")) // s offset
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")) // t.X = 0
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // t.Y = 1
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // a = 1
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.A = 1
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000060")) // s.B offset
+ buff.Write(common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0")) // s.C offset
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.B length
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.B[0] = 1
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.B[1] = 2
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C length
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.C[0].X
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C[0].Y
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002")) // s.C[1].X
+ buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) // s.C[1].Y
+
+ type T struct {
+ X *big.Int `abi:"x"`
+ Z *big.Int `abi:"y"` // Test whether the abi tag works.
+ }
+
+ type S struct {
+ A *big.Int
+ B []*big.Int
+ C []T
+ }
+
+ type Ret struct {
+ FieldS S `abi:"s"`
+ FieldT T `abi:"t"`
+ A *big.Int
+ }
+ var ret Ret
+ var expected = Ret{
+ FieldS: S{
+ A: big.NewInt(1),
+ B: []*big.Int{big.NewInt(1), big.NewInt(2)},
+ C: []T{
+ {big.NewInt(1), big.NewInt(2)},
+ {big.NewInt(2), big.NewInt(1)},
+ },
+ },
+ FieldT: T{
+ big.NewInt(0), big.NewInt(1),
+ },
+ A: big.NewInt(1),
+ }
+
+ err = abi.Unpack(&ret, "tuple", buff.Bytes())
+ if err != nil {
+ t.Error(err)
+ }
+ if !reflect.DeepEqual(ret, expected) {
+ t.Error("unexpected unpack value")
+ }
+}
+
func TestOOMMaliciousInput(t *testing.T) {
oomTests := []unpackTest{
{
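The tuple tests above exercise the decoder through hand-assembled buffers. For orientation, here is the same flow in one piece through the public API; the method name get and its output shape are invented for illustration, assuming the patched accounts/abi package.

```go
// Hedged end-to-end sketch of dynamic-tuple unpacking; "get" and its
// (uint256 a, uint256[] b) output are hypothetical.
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	def := `[{"name":"get","outputs":[{"type":"tuple","name":"ret","components":[
		{"type":"uint256","name":"a"},{"type":"uint256[]","name":"b"}]}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// Head word points at the tuple body (0x20); inside the body, b's offset
	// (0x40) is relative to the body start, as exercised by TestUnpackTuple.
	data := common.Hex2Bytes(
		"0000000000000000000000000000000000000000000000000000000000000020" + // tuple offset
			"0000000000000000000000000000000000000000000000000000000000000001" + // a = 1
			"0000000000000000000000000000000000000000000000000000000000000040" + // b offset (body-relative)
			"0000000000000000000000000000000000000000000000000000000000000001" + // len(b) = 1
			"0000000000000000000000000000000000000000000000000000000000000002") // b[0] = 2
	var v struct {
		Ret struct {
			A *big.Int
			B []*big.Int
		}
	}
	if err := parsed.Unpack(&v, "get", data); err != nil {
		panic(err)
	}
	fmt.Println(v.Ret.A, v.Ret.B) // 1 [2]
}
```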
diff --git a/accounts/keystore/account_cache.go b/accounts/keystore/account_cache.go
index da3a46eb8..8f660e282 100644
--- a/accounts/keystore/account_cache.go
+++ b/accounts/keystore/account_cache.go
@@ -265,7 +265,10 @@ func (ac *accountCache) scanAccounts() error {
case (addr == common.Address{}):
log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address")
default:
- return &accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}}
+ return &accounts.Account{
+ Address: addr,
+ URL: accounts.URL{Scheme: KeyStoreScheme, Path: path},
+ }
}
return nil
}
diff --git a/accounts/keystore/key.go b/accounts/keystore/key.go
index 0564751c4..84d8df0c5 100644
--- a/accounts/keystore/key.go
+++ b/accounts/keystore/key.go
@@ -171,7 +171,10 @@ func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Accou
if err != nil {
return nil, accounts.Account{}, err
}
- a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))}}
+ a := accounts.Account{
+ Address: key.Address,
+ URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))},
+ }
if err := ks.StoreKey(a.URL.Path, key, auth); err != nil {
zeroKey(key.PrivateKey)
return nil, a, err
@@ -224,5 +227,6 @@ func toISO8601(t time.Time) string {
} else {
tz = fmt.Sprintf("%03d00", offset/3600)
}
- return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
+ return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s",
+ t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
}
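For context, toISO8601 uses dashes instead of colons in the time-of-day fields so its output is filename-safe (colons are illegal in Windows filenames); it feeds the keystore filename helper, sketched below. keyFileName is defined elsewhere in accounts/keystore and is untouched by this diff; the body here is a paraphrase.

```go
// Sketch of the consumer of toISO8601 (defined elsewhere in
// accounts/keystore; paraphrased, not part of this diff).
func keyFileName(keyAddr common.Address) string {
	ts := time.Now().UTC()
	return fmt.Sprintf("UTC--%s--%s", toISO8601(ts), hex.EncodeToString(keyAddr[:]))
}
```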
diff --git a/accounts/keystore/keystore_passphrase.go b/accounts/keystore/passphrase.go
similarity index 99%
rename from accounts/keystore/keystore_passphrase.go
rename to accounts/keystore/passphrase.go
index 9794f32fe..a0b6cf538 100644
--- a/accounts/keystore/keystore_passphrase.go
+++ b/accounts/keystore/passphrase.go
@@ -233,6 +233,7 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
PrivateKey: key,
}, nil
}
+
func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) {
if cryptoJson.Cipher != "aes-128-ctr" {
return nil, fmt.Errorf("Cipher not supported: %v", cryptoJson.Cipher)
diff --git a/accounts/keystore/keystore_passphrase_test.go b/accounts/keystore/passphrase_test.go
similarity index 100%
rename from accounts/keystore/keystore_passphrase_test.go
rename to accounts/keystore/passphrase_test.go
diff --git a/accounts/keystore/keystore_plain.go b/accounts/keystore/plain.go
similarity index 100%
rename from accounts/keystore/keystore_plain.go
rename to accounts/keystore/plain.go
diff --git a/accounts/keystore/keystore_plain_test.go b/accounts/keystore/plain_test.go
similarity index 100%
rename from accounts/keystore/keystore_plain_test.go
rename to accounts/keystore/plain_test.go
diff --git a/accounts/keystore/presale.go b/accounts/keystore/presale.go
index 1554294e1..03055245f 100644
--- a/accounts/keystore/presale.go
+++ b/accounts/keystore/presale.go
@@ -38,7 +38,13 @@ func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (accou
return accounts.Account{}, nil, err
}
key.Id = uuid.NewRandom()
- a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: keyStore.JoinPath(keyFileName(key.Address))}}
+ a := accounts.Account{
+ Address: key.Address,
+ URL: accounts.URL{
+ Scheme: KeyStoreScheme,
+ Path: keyStore.JoinPath(keyFileName(key.Address)),
+ },
+ }
err = keyStore.StoreKey(a.URL.Path, key, password)
return a, key, err
}
diff --git a/accounts/keystore/keystore_wallet.go b/accounts/keystore/wallet.go
similarity index 89%
rename from accounts/keystore/keystore_wallet.go
rename to accounts/keystore/wallet.go
index 758fdfe36..2f774cc94 100644
--- a/accounts/keystore/keystore_wallet.go
+++ b/accounts/keystore/wallet.go
@@ -52,8 +52,8 @@ func (w *keystoreWallet) Status() (string, error) {
// is no connection or decryption step necessary to access the list of accounts.
func (w *keystoreWallet) Open(passphrase string) error { return nil }
-// Close implements accounts.Wallet, but is a noop for plain wallets since is no
-// meaningful open operation.
+// Close implements accounts.Wallet, but is a noop for plain wallets since there
+// is no meaningful open operation.
func (w *keystoreWallet) Close() error { return nil }
// Accounts implements accounts.Wallet, returning an account list consisting of
@@ -84,10 +84,7 @@ func (w *keystoreWallet) SelfDerive(base accounts.DerivationPath, chain ethereum
// able to sign via our shared keystore backend).
func (w *keystoreWallet) SignHash(account accounts.Account, hash []byte) ([]byte, error) {
// Make sure the requested account is contained within
- if account.Address != w.account.Address {
- return nil, accounts.ErrUnknownAccount
- }
- if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
+ if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
@@ -100,10 +97,7 @@ func (w *keystoreWallet) SignHash(account accounts.Account, hash []byte) ([]byte
// be able to sign via our shared keystore backend).
func (w *keystoreWallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
// Make sure the requested account is contained within
- if account.Address != w.account.Address {
- return nil, accounts.ErrUnknownAccount
- }
- if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
+ if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
@@ -114,10 +108,7 @@ func (w *keystoreWallet) SignTx(account accounts.Account, tx *types.Transaction,
// given hash with the given account using passphrase as extra authentication.
func (w *keystoreWallet) SignHashWithPassphrase(account accounts.Account, passphrase string, hash []byte) ([]byte, error) {
// Make sure the requested account is contained within
- if account.Address != w.account.Address {
- return nil, accounts.ErrUnknownAccount
- }
- if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
+ if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
@@ -128,10 +119,7 @@ func (w *keystoreWallet) SignHashWithPassphrase(account accounts.Account, passph
// transaction with the given account using passphrase as extra authentication.
func (w *keystoreWallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
// Make sure the requested account is contained within
- if account.Address != w.account.Address {
- return nil, accounts.ErrUnknownAccount
- }
- if account.URL != (accounts.URL{}) && account.URL != w.account.URL {
+ if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
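The four signing methods above now delegate their membership check to Contains. For context, the reused implementation is essentially the condition that was deleted, folded into one predicate (a sketch, not verbatim):

```go
// Sketch of the Contains check the signing paths now share: the address must
// match, and the URL must either be unset or match as well.
func (w *keystoreWallet) Contains(account accounts.Account) bool {
	return account.Address == w.account.Address &&
		(account.URL == (accounts.URL{}) || account.URL == w.account.URL)
}
```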
diff --git a/accounts/usbwallet/ledger.go b/accounts/usbwallet/ledger.go
index 7d5f67908..c30903b5b 100644
--- a/accounts/usbwallet/ledger.go
+++ b/accounts/usbwallet/ledger.go
@@ -257,7 +257,9 @@ func (w *ledgerDriver) ledgerDerive(derivationPath []uint32) (common.Address, er
// Decode the hex string into an Ethereum address and return
var address common.Address
- hex.Decode(address[:], hexstr)
+ if _, err = hex.Decode(address[:], hexstr); err != nil {
+ return common.Address{}, err
+ }
return address, nil
}
diff --git a/appveyor.yml b/appveyor.yml
index e5126b252..defad29cd 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -23,8 +23,8 @@ environment:
install:
- git submodule update --init
- rmdir C:\go /s /q
- - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.2.windows-%GETH_ARCH%.zip
- - 7z x go1.11.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+ - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.4.windows-%GETH_ARCH%.zip
+ - 7z x go1.11.4.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version
diff --git a/build/update-license.go b/build/update-license.go
index 22e403342..e3e00d4cc 100644
--- a/build/update-license.go
+++ b/build/update-license.go
@@ -1,3 +1,19 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
// +build none
/*
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index 962fc021d..54b67ce10 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -89,7 +89,7 @@ func runCmd(ctx *cli.Context) error {
genesisConfig *core.Genesis
)
if ctx.GlobalBool(MachineFlag.Name) {
- tracer = NewJSONLogger(logconfig, os.Stdout)
+ tracer = vm.NewJSONLogger(logconfig, os.Stdout)
} else if ctx.GlobalBool(DebugFlag.Name) {
debugLogger = vm.NewStructLogger(logconfig)
tracer = debugLogger
@@ -206,6 +206,7 @@ func runCmd(ctx *cli.Context) error {
execTime := time.Since(tstart)
if ctx.GlobalBool(DumpFlag.Name) {
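+ // Commit the pending state changes first so the dump reflects the post-execution state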
+ statedb.Commit(true)
statedb.IntermediateRoot(true)
fmt.Println(string(statedb.Dump()))
}
diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go
index 06c9be380..b3c69d9b9 100644
--- a/cmd/evm/staterunner.go
+++ b/cmd/evm/staterunner.go
@@ -68,7 +68,7 @@ func stateTestCmd(ctx *cli.Context) error {
)
switch {
case ctx.GlobalBool(MachineFlag.Name):
- tracer = NewJSONLogger(config, os.Stderr)
+ tracer = vm.NewJSONLogger(config, os.Stderr)
case ctx.GlobalBool(DebugFlag.Name):
debugger = vm.NewStructLogger(config)
diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go
index 2ffe12276..a7c20db77 100644
--- a/cmd/faucet/faucet.go
+++ b/cmd/faucet/faucet.go
@@ -256,7 +256,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
}
for _, boot := range enodes {
old, err := enode.ParseV4(boot.String())
- if err != nil {
+ if err == nil {
stack.Server().AddPeer(old)
}
}
diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index 7fe43b74c..4e6b3a079 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -20,7 +20,7 @@ import (
"bufio"
"errors"
"fmt"
- "io"
+ "math/big"
"os"
"reflect"
"unicode"
@@ -152,7 +152,9 @@ func enableWhisper(ctx *cli.Context) bool {
func makeFullNode(ctx *cli.Context) *node.Node {
stack, cfg := makeConfigNode(ctx)
-
+ if ctx.GlobalIsSet(utils.ConstantinopleOverrideFlag.Name) {
+ cfg.Eth.ConstantinopleOverride = new(big.Int).SetUint64(ctx.GlobalUint64(utils.ConstantinopleOverrideFlag.Name))
+ }
utils.RegisterEthService(stack, &cfg.Eth)
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
@@ -199,7 +201,17 @@ func dumpConfig(ctx *cli.Context) error {
if err != nil {
return err
}
- io.WriteString(os.Stdout, comment)
- os.Stdout.Write(out)
+
+ dump := os.Stdout
+ if ctx.NArg() > 0 {
+ dump, err = os.OpenFile(ctx.Args().Get(0), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ return err
+ }
+ defer dump.Close()
+ }
+ dump.WriteString(comment)
+ dump.Write(out)
+
return nil
}
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index d154e5e94..ebf40de46 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -87,6 +87,7 @@ var (
utils.LightServFlag,
utils.LightPeersFlag,
utils.LightKDFFlag,
+ utils.WhitelistFlag,
utils.CacheFlag,
utils.CacheDatabaseFlag,
utils.CacheTrieFlag,
@@ -122,6 +123,7 @@ var (
utils.RinkebyFlag,
utils.VMEnableDebugFlag,
utils.NetworkIdFlag,
+ utils.ConstantinopleOverrideFlag,
utils.RPCCORSDomainFlag,
utils.RPCVirtualHostsFlag,
utils.EthStatsURLFlag,
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index 239423670..d7595e2b5 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -81,6 +81,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.LightServFlag,
utils.LightPeersFlag,
utils.LightKDFFlag,
+ utils.WhitelistFlag,
},
},
{
diff --git a/cmd/puppeth/genesis.go b/cmd/puppeth/genesis.go
index 5f39a889d..c95c81a6d 100644
--- a/cmd/puppeth/genesis.go
+++ b/cmd/puppeth/genesis.go
@@ -20,35 +20,41 @@ import (
"encoding/binary"
"errors"
"math"
+ "math/big"
+ "strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ math2 "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/params"
)
-// cppEthereumGenesisSpec represents the genesis specification format used by the
+// alethGenesisSpec represents the genesis specification format used by the
// C++ Ethereum implementation.
-type cppEthereumGenesisSpec struct {
+type alethGenesisSpec struct {
SealEngine string `json:"sealEngine"`
Params struct {
- AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
- HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
- EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
- EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
- ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
- ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
- NetworkID hexutil.Uint64 `json:"networkID"`
- ChainID hexutil.Uint64 `json:"chainID"`
- MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
- MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
- MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
- GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
- MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
- DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
- DurationLimit *hexutil.Big `json:"durationLimit"`
- BlockReward *hexutil.Big `json:"blockReward"`
+ AccountStartNonce math2.HexOrDecimal64 `json:"accountStartNonce"`
+ MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
+ HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
+ DaoHardforkBlock math2.HexOrDecimal64 `json:"daoHardforkBlock"`
+ EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
+ EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
+ ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
+ ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
+ MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
+ MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
+ TieBreakingGas bool `json:"tieBreakingGas"`
+ GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
+ MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
+ DifficultyBoundDivisor *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"`
+ DurationLimit *math2.HexOrDecimal256 `json:"durationLimit"`
+ BlockReward *hexutil.Big `json:"blockReward"`
+ NetworkID hexutil.Uint64 `json:"networkID"`
+ ChainID hexutil.Uint64 `json:"chainID"`
+ AllowFutureBlocks bool `json:"allowFutureBlocks"`
} `json:"params"`
Genesis struct {
@@ -62,57 +68,68 @@ type cppEthereumGenesisSpec struct {
GasLimit hexutil.Uint64 `json:"gasLimit"`
} `json:"genesis"`
- Accounts map[common.Address]*cppEthereumGenesisSpecAccount `json:"accounts"`
+ Accounts map[common.UnprefixedAddress]*alethGenesisSpecAccount `json:"accounts"`
}
-// cppEthereumGenesisSpecAccount is the prefunded genesis account and/or precompiled
+// alethGenesisSpecAccount is the prefunded genesis account and/or precompiled
// contract definition.
-type cppEthereumGenesisSpecAccount struct {
- Balance *hexutil.Big `json:"balance"`
- Nonce uint64 `json:"nonce,omitempty"`
- Precompiled *cppEthereumGenesisSpecBuiltin `json:"precompiled,omitempty"`
+type alethGenesisSpecAccount struct {
+ Balance *math2.HexOrDecimal256 `json:"balance"`
+ Nonce uint64 `json:"nonce,omitempty"`
+ Precompiled *alethGenesisSpecBuiltin `json:"precompiled,omitempty"`
}
-// cppEthereumGenesisSpecBuiltin is the precompiled contract definition.
-type cppEthereumGenesisSpecBuiltin struct {
- Name string `json:"name,omitempty"`
- StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
- Linear *cppEthereumGenesisSpecLinearPricing `json:"linear,omitempty"`
+// alethGenesisSpecBuiltin is the precompiled contract definition.
+type alethGenesisSpecBuiltin struct {
+ Name string `json:"name,omitempty"`
+ StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
+ Linear *alethGenesisSpecLinearPricing `json:"linear,omitempty"`
}
-type cppEthereumGenesisSpecLinearPricing struct {
+type alethGenesisSpecLinearPricing struct {
Base uint64 `json:"base"`
Word uint64 `json:"word"`
}
-// newCppEthereumGenesisSpec converts a go-ethereum genesis block into a Parity specific
+// newAlethGenesisSpec converts a go-ethereum genesis block into an Aleth-specific
// chain specification format.
-func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEthereumGenesisSpec, error) {
- // Only ethash is currently supported between go-ethereum and cpp-ethereum
+func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSpec, error) {
+ // Only ethash is currently supported between go-ethereum and aleth
if genesis.Config.Ethash == nil {
return nil, errors.New("unsupported consensus engine")
}
- // Reconstruct the chain spec in Parity's format
- spec := &cppEthereumGenesisSpec{
+ // Reconstruct the chain spec in Aleth format
+ spec := &alethGenesisSpec{
SealEngine: "Ethash",
}
+ // Some defaults
spec.Params.AccountStartNonce = 0
+ spec.Params.TieBreakingGas = false
+ spec.Params.AllowFutureBlocks = false
+ spec.Params.DaoHardforkBlock = 0
+
spec.Params.HomesteadForkBlock = (hexutil.Uint64)(genesis.Config.HomesteadBlock.Uint64())
spec.Params.EIP150ForkBlock = (hexutil.Uint64)(genesis.Config.EIP150Block.Uint64())
spec.Params.EIP158ForkBlock = (hexutil.Uint64)(genesis.Config.EIP158Block.Uint64())
- spec.Params.ByzantiumForkBlock = (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())
- spec.Params.ConstantinopleForkBlock = (hexutil.Uint64)(math.MaxUint64)
+
+ // Byzantium
+ if num := genesis.Config.ByzantiumBlock; num != nil {
+ spec.setByzantium(num)
+ }
+ // Constantinople
+ if num := genesis.Config.ConstantinopleBlock; num != nil {
+ spec.setConstantinople(num)
+ }
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
-
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
- spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxUint64)
+ spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxInt64)
spec.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
- spec.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
- spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
- spec.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
+ spec.Params.DifficultyBoundDivisor = (*math2.HexOrDecimal256)(params.DifficultyBoundDivisor)
+ spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
+ spec.Params.DurationLimit = (*math2.HexOrDecimal256)(params.DurationLimit)
spec.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
spec.Genesis.Nonce = (hexutil.Bytes)(make([]byte, 8))
@@ -126,77 +143,108 @@ func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEther
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
- spec.Accounts = make(map[common.Address]*cppEthereumGenesisSpecAccount)
for address, account := range genesis.Alloc {
- spec.Accounts[address] = &cppEthereumGenesisSpecAccount{
- Balance: (*hexutil.Big)(account.Balance),
- Nonce: account.Nonce,
- }
- }
- spec.Accounts[common.BytesToAddress([]byte{1})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "ecrecover", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 3000},
- }
- spec.Accounts[common.BytesToAddress([]byte{2})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "sha256", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 60, Word: 12},
- }
- spec.Accounts[common.BytesToAddress([]byte{3})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "ripemd160", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 600, Word: 120},
- }
- spec.Accounts[common.BytesToAddress([]byte{4})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "identity", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 15, Word: 3},
+ spec.setAccount(address, account)
}
+
+ spec.setPrecompile(1, &alethGenesisSpecBuiltin{Name: "ecrecover",
+ Linear: &alethGenesisSpecLinearPricing{Base: 3000}})
+ spec.setPrecompile(2, &alethGenesisSpecBuiltin{Name: "sha256",
+ Linear: &alethGenesisSpecLinearPricing{Base: 60, Word: 12}})
+ spec.setPrecompile(3, &alethGenesisSpecBuiltin{Name: "ripemd160",
+ Linear: &alethGenesisSpecLinearPricing{Base: 600, Word: 120}})
+ spec.setPrecompile(4, &alethGenesisSpecBuiltin{Name: "identity",
+ Linear: &alethGenesisSpecLinearPricing{Base: 15, Word: 3}})
if genesis.Config.ByzantiumBlock != nil {
- spec.Accounts[common.BytesToAddress([]byte{5})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "modexp", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
- }
- spec.Accounts[common.BytesToAddress([]byte{6})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "alt_bn128_G1_add", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 500},
- }
- spec.Accounts[common.BytesToAddress([]byte{7})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "alt_bn128_G1_mul", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 40000},
- }
- spec.Accounts[common.BytesToAddress([]byte{8})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "alt_bn128_pairing_product", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
- }
+ spec.setPrecompile(5, &alethGenesisSpecBuiltin{Name: "modexp",
+ StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
+ spec.setPrecompile(6, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_add",
+ StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
+ Linear: &alethGenesisSpecLinearPricing{Base: 500}})
+ spec.setPrecompile(7, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_mul",
+ StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
+ Linear: &alethGenesisSpecLinearPricing{Base: 40000}})
+ spec.setPrecompile(8, &alethGenesisSpecBuiltin{Name: "alt_bn128_pairing_product",
+ StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
}
return spec, nil
}
+func (spec *alethGenesisSpec) setPrecompile(address byte, data *alethGenesisSpecBuiltin) {
+ if spec.Accounts == nil {
+ spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
+ }
+ addr := common.UnprefixedAddress(common.BytesToAddress([]byte{address}))
+ if _, exist := spec.Accounts[addr]; !exist {
+ spec.Accounts[addr] = &alethGenesisSpecAccount{}
+ }
+ spec.Accounts[addr].Precompiled = data
+}
+
+func (spec *alethGenesisSpec) setAccount(address common.Address, account core.GenesisAccount) {
+ if spec.Accounts == nil {
+ spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
+ }
+
+ a, exist := spec.Accounts[common.UnprefixedAddress(address)]
+ if !exist {
+ a = &alethGenesisSpecAccount{}
+ spec.Accounts[common.UnprefixedAddress(address)] = a
+ }
+ a.Balance = (*math2.HexOrDecimal256)(account.Balance)
+ a.Nonce = account.Nonce
+}
+
+func (spec *alethGenesisSpec) setByzantium(num *big.Int) {
+ spec.Params.ByzantiumForkBlock = hexutil.Uint64(num.Uint64())
+}
+
+func (spec *alethGenesisSpec) setConstantinople(num *big.Int) {
+ spec.Params.ConstantinopleForkBlock = hexutil.Uint64(num.Uint64())
+}
+
// parityChainSpec is the chain specification format used by Parity.
type parityChainSpec struct {
- Name string `json:"name"`
- Engine struct {
+ Name string `json:"name"`
+ Datadir string `json:"dataDir"`
+ Engine struct {
Ethash struct {
Params struct {
- MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
- DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
- DurationLimit *hexutil.Big `json:"durationLimit"`
- BlockReward *hexutil.Big `json:"blockReward"`
- HomesteadTransition uint64 `json:"homesteadTransition"`
- EIP150Transition uint64 `json:"eip150Transition"`
- EIP160Transition uint64 `json:"eip160Transition"`
- EIP161abcTransition uint64 `json:"eip161abcTransition"`
- EIP161dTransition uint64 `json:"eip161dTransition"`
- EIP649Reward *hexutil.Big `json:"eip649Reward"`
- EIP100bTransition uint64 `json:"eip100bTransition"`
- EIP649Transition uint64 `json:"eip649Transition"`
+ MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
+ DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
+ DurationLimit *hexutil.Big `json:"durationLimit"`
+ BlockReward map[string]string `json:"blockReward"`
+ DifficultyBombDelays map[string]string `json:"difficultyBombDelays"`
+ HomesteadTransition hexutil.Uint64 `json:"homesteadTransition"`
+ EIP100bTransition hexutil.Uint64 `json:"eip100bTransition"`
} `json:"params"`
} `json:"Ethash"`
} `json:"engine"`
Params struct {
- MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
- MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
- GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
- NetworkID hexutil.Uint64 `json:"networkID"`
- MaxCodeSize uint64 `json:"maxCodeSize"`
- EIP155Transition uint64 `json:"eip155Transition"`
- EIP98Transition uint64 `json:"eip98Transition"`
- EIP86Transition uint64 `json:"eip86Transition"`
- EIP140Transition uint64 `json:"eip140Transition"`
- EIP211Transition uint64 `json:"eip211Transition"`
- EIP214Transition uint64 `json:"eip214Transition"`
- EIP658Transition uint64 `json:"eip658Transition"`
+ AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
+ MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
+ MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
+ GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
+ NetworkID hexutil.Uint64 `json:"networkID"`
+ ChainID hexutil.Uint64 `json:"chainID"`
+ MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"`
+ MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"`
+ EIP98Transition hexutil.Uint64 `json:"eip98Transition"`
+ EIP150Transition hexutil.Uint64 `json:"eip150Transition"`
+ EIP160Transition hexutil.Uint64 `json:"eip160Transition"`
+ EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"`
+ EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"`
+ EIP155Transition hexutil.Uint64 `json:"eip155Transition"`
+ EIP140Transition hexutil.Uint64 `json:"eip140Transition"`
+ EIP211Transition hexutil.Uint64 `json:"eip211Transition"`
+ EIP214Transition hexutil.Uint64 `json:"eip214Transition"`
+ EIP658Transition hexutil.Uint64 `json:"eip658Transition"`
+ EIP145Transition hexutil.Uint64 `json:"eip145Transition"`
+ EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"`
+ EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"`
+ EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"`
} `json:"params"`
Genesis struct {
@@ -215,22 +263,22 @@ type parityChainSpec struct {
GasLimit hexutil.Uint64 `json:"gasLimit"`
} `json:"genesis"`
- Nodes []string `json:"nodes"`
- Accounts map[common.Address]*parityChainSpecAccount `json:"accounts"`
+ Nodes []string `json:"nodes"`
+ Accounts map[common.UnprefixedAddress]*parityChainSpecAccount `json:"accounts"`
}
// parityChainSpecAccount is the prefunded genesis account and/or precompiled
// contract definition.
type parityChainSpecAccount struct {
- Balance *hexutil.Big `json:"balance"`
- Nonce uint64 `json:"nonce,omitempty"`
+ Balance math2.HexOrDecimal256 `json:"balance"`
+ Nonce math2.HexOrDecimal64 `json:"nonce,omitempty"`
Builtin *parityChainSpecBuiltin `json:"builtin,omitempty"`
}
// parityChainSpecBuiltin is the precompiled contract definition.
type parityChainSpecBuiltin struct {
Name string `json:"name,omitempty"`
- ActivateAt uint64 `json:"activate_at,omitempty"`
+ ActivateAt math2.HexOrDecimal64 `json:"activate_at,omitempty"`
Pricing *parityChainSpecPricing `json:"pricing,omitempty"`
}
@@ -265,34 +313,51 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
}
// Reconstruct the chain spec in Parity's format
spec := &parityChainSpec{
- Name: network,
- Nodes: bootnodes,
+ Name: network,
+ Nodes: bootnodes,
+ Datadir: strings.ToLower(network),
}
+ spec.Engine.Ethash.Params.BlockReward = make(map[string]string)
+ spec.Engine.Ethash.Params.DifficultyBombDelays = make(map[string]string)
+ // Frontier
spec.Engine.Ethash.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
spec.Engine.Ethash.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
spec.Engine.Ethash.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
- spec.Engine.Ethash.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
- spec.Engine.Ethash.Params.HomesteadTransition = genesis.Config.HomesteadBlock.Uint64()
- spec.Engine.Ethash.Params.EIP150Transition = genesis.Config.EIP150Block.Uint64()
- spec.Engine.Ethash.Params.EIP160Transition = genesis.Config.EIP155Block.Uint64()
- spec.Engine.Ethash.Params.EIP161abcTransition = genesis.Config.EIP158Block.Uint64()
- spec.Engine.Ethash.Params.EIP161dTransition = genesis.Config.EIP158Block.Uint64()
- spec.Engine.Ethash.Params.EIP649Reward = (*hexutil.Big)(ethash.ByzantiumBlockReward)
- spec.Engine.Ethash.Params.EIP100bTransition = genesis.Config.ByzantiumBlock.Uint64()
- spec.Engine.Ethash.Params.EIP649Transition = genesis.Config.ByzantiumBlock.Uint64()
+ spec.Engine.Ethash.Params.BlockReward["0x0"] = hexutil.EncodeBig(ethash.FrontierBlockReward)
+ // Homestead
+ spec.Engine.Ethash.Params.HomesteadTransition = hexutil.Uint64(genesis.Config.HomesteadBlock.Uint64())
+
+ // Tangerine Whistle: 150
+ // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-608.md
+ spec.Params.EIP150Transition = hexutil.Uint64(genesis.Config.EIP150Block.Uint64())
+
+ // Spurious Dragon: 155, 160, 161, 170
+ // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-607.md
+ spec.Params.EIP155Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
+ spec.Params.EIP160Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
+ spec.Params.EIP161abcTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
+ spec.Params.EIP161dTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
+
+ // Byzantium
+ if num := genesis.Config.ByzantiumBlock; num != nil {
+ spec.setByzantium(num)
+ }
+ // Constantinople
+ if num := genesis.Config.ConstantinopleBlock; num != nil {
+ spec.setConstantinople(num)
+ }
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
- spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
+ spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
+ spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.MaxCodeSize = params.MaxCodeSize
- spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
- spec.Params.EIP98Transition = math.MaxUint64
- spec.Params.EIP86Transition = math.MaxUint64
- spec.Params.EIP140Transition = genesis.Config.ByzantiumBlock.Uint64()
- spec.Params.EIP211Transition = genesis.Config.ByzantiumBlock.Uint64()
- spec.Params.EIP214Transition = genesis.Config.ByzantiumBlock.Uint64()
- spec.Params.EIP658Transition = genesis.Config.ByzantiumBlock.Uint64()
+ // geth enforces the max code size limit from genesis, so mirror that here
+ spec.Params.MaxCodeSizeTransition = 0
+
+ // Disable EIP98 by scheduling it at a block that will never be reached
+ spec.Params.EIP98Transition = math.MaxInt64
spec.Genesis.Seal.Ethereum.Nonce = (hexutil.Bytes)(make([]byte, 8))
binary.LittleEndian.PutUint64(spec.Genesis.Seal.Ethereum.Nonce[:], genesis.Nonce)
@@ -305,42 +370,77 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
- spec.Accounts = make(map[common.Address]*parityChainSpecAccount)
+ spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
for address, account := range genesis.Alloc {
- spec.Accounts[address] = &parityChainSpecAccount{
- Balance: (*hexutil.Big)(account.Balance),
- Nonce: account.Nonce,
+ bal := math2.HexOrDecimal256(*account.Balance)
+
+ spec.Accounts[common.UnprefixedAddress(address)] = &parityChainSpecAccount{
+ Balance: bal,
+ Nonce: math2.HexOrDecimal64(account.Nonce),
}
}
- spec.Accounts[common.BytesToAddress([]byte{1})].Builtin = &parityChainSpecBuiltin{
- Name: "ecrecover", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}},
- }
- spec.Accounts[common.BytesToAddress([]byte{2})].Builtin = &parityChainSpecBuiltin{
+ spec.setPrecompile(1, &parityChainSpecBuiltin{
+ Name: "ecrecover", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}},
+ })
+
+ spec.setPrecompile(2, &parityChainSpecBuiltin{
Name: "sha256", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 60, Word: 12}},
- }
- spec.Accounts[common.BytesToAddress([]byte{3})].Builtin = &parityChainSpecBuiltin{
+ })
+ spec.setPrecompile(3, &parityChainSpecBuiltin{
Name: "ripemd160", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 600, Word: 120}},
- }
- spec.Accounts[common.BytesToAddress([]byte{4})].Builtin = &parityChainSpecBuiltin{
+ })
+ spec.setPrecompile(4, &parityChainSpecBuiltin{
Name: "identity", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 15, Word: 3}},
- }
+ })
if genesis.Config.ByzantiumBlock != nil {
- spec.Accounts[common.BytesToAddress([]byte{5})].Builtin = &parityChainSpecBuiltin{
- Name: "modexp", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
- }
- spec.Accounts[common.BytesToAddress([]byte{6})].Builtin = &parityChainSpecBuiltin{
- Name: "alt_bn128_add", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
- }
- spec.Accounts[common.BytesToAddress([]byte{7})].Builtin = &parityChainSpecBuiltin{
- Name: "alt_bn128_mul", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
- }
- spec.Accounts[common.BytesToAddress([]byte{8})].Builtin = &parityChainSpecBuiltin{
- Name: "alt_bn128_pairing", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
- }
+ blnum := math2.HexOrDecimal64(genesis.Config.ByzantiumBlock.Uint64())
+ spec.setPrecompile(5, &parityChainSpecBuiltin{
+ Name: "modexp", ActivateAt: blnum, Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
+ })
+ spec.setPrecompile(6, &parityChainSpecBuiltin{
+ Name: "alt_bn128_add", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
+ })
+ spec.setPrecompile(7, &parityChainSpecBuiltin{
+ Name: "alt_bn128_mul", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
+ })
+ spec.setPrecompile(8, &parityChainSpecBuiltin{
+ Name: "alt_bn128_pairing", ActivateAt: blnum, Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
+ })
}
return spec, nil
}
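+
+// setPrecompile attaches a builtin precompile definition to the account at the
+// given one-byte address, creating the account entry if it doesn't exist yet.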
+func (spec *parityChainSpec) setPrecompile(address byte, data *parityChainSpecBuiltin) {
+ if spec.Accounts == nil {
+ spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
+ }
+ a := common.UnprefixedAddress(common.BytesToAddress([]byte{address}))
+ if _, exist := spec.Accounts[a]; !exist {
+ spec.Accounts[a] = &parityChainSpecAccount{}
+ }
+ spec.Accounts[a].Builtin = data
+}
+
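+// setByzantium activates the Byzantium fork in the Parity spec: the reduced
+// block reward, the 3M-block difficulty bomb delay and the associated EIPs.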
+func (spec *parityChainSpec) setByzantium(num *big.Int) {
+ spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ByzantiumBlockReward)
+ spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(3000000)
+ n := hexutil.Uint64(num.Uint64())
+ spec.Engine.Ethash.Params.EIP100bTransition = n
+ spec.Params.EIP140Transition = n
+ spec.Params.EIP211Transition = n
+ spec.Params.EIP214Transition = n
+ spec.Params.EIP658Transition = n
+}
+
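+// setConstantinople activates the Constantinople fork: a further reward
+// reduction, a 2M-block bomb delay and the EIP145/1014/1052/1283 transitions.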
+func (spec *parityChainSpec) setConstantinople(num *big.Int) {
+ spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ConstantinopleBlockReward)
+ spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(2000000)
+ n := hexutil.Uint64(num.Uint64())
+ spec.Params.EIP145Transition = n
+ spec.Params.EIP1014Transition = n
+ spec.Params.EIP1052Transition = n
+ spec.Params.EIP1283Transition = n
+}
+
// pyEthereumGenesisSpec represents the genesis specification format used by the
// Python Ethereum implementation.
type pyEthereumGenesisSpec struct {
diff --git a/cmd/puppeth/genesis_test.go b/cmd/puppeth/genesis_test.go
new file mode 100644
index 000000000..83e738360
--- /dev/null
+++ b/cmd/puppeth/genesis_test.go
@@ -0,0 +1,109 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/ethereum/go-ethereum/core"
+)
+
+// Tests the go-ethereum to Aleth chainspec conversion for the Stureby testnet.
+func TestAlethSturebyConverter(t *testing.T) {
+ blob, err := ioutil.ReadFile("testdata/stureby_geth.json")
+ if err != nil {
+ t.Fatalf("could not read file: %v", err)
+ }
+ var genesis core.Genesis
+ if err := json.Unmarshal(blob, &genesis); err != nil {
+ t.Fatalf("failed parsing genesis: %v", err)
+ }
+ spec, err := newAlethGenesisSpec("stureby", &genesis)
+ if err != nil {
+ t.Fatalf("failed creating chainspec: %v", err)
+ }
+
+ expBlob, err := ioutil.ReadFile("testdata/stureby_aleth.json")
+ if err != nil {
+ t.Fatalf("could not read file: %v", err)
+ }
+ expspec := &alethGenesisSpec{}
+ if err := json.Unmarshal(expBlob, expspec); err != nil {
+ t.Fatalf("failed parsing genesis: %v", err)
+ }
+ if !reflect.DeepEqual(expspec, spec) {
+ t.Errorf("chainspec mismatch")
+ c := spew.ConfigState{
+ DisablePointerAddresses: true,
+ SortKeys: true,
+ }
+ exp := strings.Split(c.Sdump(expspec), "\n")
+ got := strings.Split(c.Sdump(spec), "\n")
+ for i := 0; i < len(exp) && i < len(got); i++ {
+ if exp[i] != got[i] {
+ fmt.Printf("got: %v\nexp: %v\n", exp[i], got[i])
+ }
+ }
+ }
+}
+
+// Tests the go-ethereum to Parity chainspec conversion for the Stureby testnet.
+func TestParitySturebyConverter(t *testing.T) {
+ blob, err := ioutil.ReadFile("testdata/stureby_geth.json")
+ if err != nil {
+ t.Fatalf("could not read file: %v", err)
+ }
+ var genesis core.Genesis
+ if err := json.Unmarshal(blob, &genesis); err != nil {
+ t.Fatalf("failed parsing genesis: %v", err)
+ }
+ spec, err := newParityChainSpec("Stureby", &genesis, []string{})
+ if err != nil {
+ t.Fatalf("failed creating chainspec: %v", err)
+ }
+
+ expBlob, err := ioutil.ReadFile("testdata/stureby_parity.json")
+ if err != nil {
+ t.Fatalf("could not read file: %v", err)
+ }
+ expspec := &parityChainSpec{}
+ if err := json.Unmarshal(expBlob, expspec); err != nil {
+ t.Fatalf("failed parsing genesis: %v", err)
+ }
+ expspec.Nodes = []string{}
+
+ if !reflect.DeepEqual(expspec, spec) {
+ t.Errorf("chainspec mismatch")
+ c := spew.ConfigState{
+ DisablePointerAddresses: true,
+ SortKeys: true,
+ }
+ exp := strings.Split(c.Sdump(expspec), "\n")
+ got := strings.Split(c.Sdump(spec), "\n")
+ for i := 0; i < len(exp) && i < len(got); i++ {
+ if exp[i] != got[i] {
+ fmt.Printf("got: %v\nexp: %v\n", exp[i], got[i])
+ }
+ }
+ }
+}
diff --git a/cmd/puppeth/module_dashboard.go b/cmd/puppeth/module_dashboard.go
index d22bd8110..cb3ed6e71 100644
--- a/cmd/puppeth/module_dashboard.go
+++ b/cmd/puppeth/module_dashboard.go
@@ -640,7 +640,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
files[filepath.Join(workdir, network+".json")] = genesis
if conf.Genesis.Config.Ethash != nil {
- cppSpec, err := newCppEthereumGenesisSpec(network, conf.Genesis)
+ cppSpec, err := newAlethGenesisSpec(network, conf.Genesis)
if err != nil {
return nil, err
}
diff --git a/cmd/puppeth/module_ethstats.go b/cmd/puppeth/module_ethstats.go
index a7d99a297..58ecb8395 100644
--- a/cmd/puppeth/module_ethstats.go
+++ b/cmd/puppeth/module_ethstats.go
@@ -43,7 +43,8 @@ version: '2'
services:
ethstats:
build: .
- image: {{.Network}}/ethstats{{if not .VHost}}
+ image: {{.Network}}/ethstats
+ container_name: {{.Network}}_ethstats_1{{if not .VHost}}
ports:
- "{{.Port}}:3000"{{end}}
environment:
diff --git a/cmd/puppeth/module_explorer.go b/cmd/puppeth/module_explorer.go
index e916deaf6..e465fa04a 100644
--- a/cmd/puppeth/module_explorer.go
+++ b/cmd/puppeth/module_explorer.go
@@ -77,6 +77,7 @@ services:
explorer:
build: .
image: {{.Network}}/explorer
+ container_name: {{.Network}}_explorer_1
ports:
- "{{.NodePort}}:{{.NodePort}}"
- "{{.NodePort}}:{{.NodePort}}/udp"{{if not .VHost}}
diff --git a/cmd/puppeth/module_faucet.go b/cmd/puppeth/module_faucet.go
index 06c9fc0f5..3a06bf3c6 100644
--- a/cmd/puppeth/module_faucet.go
+++ b/cmd/puppeth/module_faucet.go
@@ -56,8 +56,10 @@ services:
faucet:
build: .
image: {{.Network}}/faucet
+ container_name: {{.Network}}_faucet_1
ports:
- - "{{.EthPort}}:{{.EthPort}}"{{if not .VHost}}
+ - "{{.EthPort}}:{{.EthPort}}"
+ - "{{.EthPort}}:{{.EthPort}}/udp"{{if not .VHost}}
- "{{.ApiPort}}:8080"{{end}}
volumes:
- {{.Datadir}}:/root/.faucet
diff --git a/cmd/puppeth/module_nginx.go b/cmd/puppeth/module_nginx.go
index 7f87661d3..1b1ae61ff 100644
--- a/cmd/puppeth/module_nginx.go
+++ b/cmd/puppeth/module_nginx.go
@@ -40,6 +40,7 @@ services:
nginx:
build: .
image: {{.Network}}/nginx
+ container_name: {{.Network}}_nginx_1
ports:
- "{{.Port}}:80"
volumes:
diff --git a/cmd/puppeth/module_node.go b/cmd/puppeth/module_node.go
index 069adfe4f..5d9ef4652 100644
--- a/cmd/puppeth/module_node.go
+++ b/cmd/puppeth/module_node.go
@@ -55,6 +55,7 @@ services:
{{.Type}}:
build: .
image: {{.Network}}/{{.Type}}
+ container_name: {{.Network}}_{{.Type}}_1
ports:
- "{{.Port}}:{{.Port}}"
- "{{.Port}}:{{.Port}}/udp"
diff --git a/cmd/puppeth/module_wallet.go b/cmd/puppeth/module_wallet.go
index 90812c4a0..ebaa5b6ae 100644
--- a/cmd/puppeth/module_wallet.go
+++ b/cmd/puppeth/module_wallet.go
@@ -57,6 +57,7 @@ services:
wallet:
build: .
image: {{.Network}}/wallet
+ container_name: {{.Network}}_wallet_1
ports:
- "{{.NodePort}}:{{.NodePort}}"
- "{{.NodePort}}:{{.NodePort}}/udp"
diff --git a/cmd/puppeth/puppeth.go b/cmd/puppeth/puppeth.go
index f9b8fe481..c3de5f936 100644
--- a/cmd/puppeth/puppeth.go
+++ b/cmd/puppeth/puppeth.go
@@ -43,18 +43,23 @@ func main() {
Usage: "log level to emit to the screen",
},
}
- app.Action = func(c *cli.Context) error {
+ app.Before = func(c *cli.Context) error {
// Set up the logger to print everything and the random generator
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(c.Int("loglevel")), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))
rand.Seed(time.Now().UnixNano())
- network := c.String("network")
- if strings.Contains(network, " ") || strings.Contains(network, "-") {
- log.Crit("No spaces or hyphens allowed in network name")
- }
- // Start the wizard and relinquish control
- makeWizard(c.String("network")).run()
return nil
}
+ app.Action = runWizard
app.Run(os.Args)
}
+
+// runWizard starts the wizard and relinquishes control to it.
+func runWizard(c *cli.Context) error {
+ network := c.String("network")
+ if strings.Contains(network, " ") || strings.Contains(network, "-") || strings.ToLower(network) != network {
+ log.Crit("No spaces, hyphens or capital letters allowed in network name")
+ }
+ makeWizard(c.String("network")).run()
+ return nil
+}
diff --git a/cmd/puppeth/testdata/stureby_aleth.json b/cmd/puppeth/testdata/stureby_aleth.json
new file mode 100644
index 000000000..1ef1d8ae1
--- /dev/null
+++ b/cmd/puppeth/testdata/stureby_aleth.json
@@ -0,0 +1,112 @@
+{
+ "sealEngine":"Ethash",
+ "params":{
+ "accountStartNonce":"0x00",
+ "maximumExtraDataSize":"0x20",
+ "homesteadForkBlock":"0x2710",
+ "daoHardforkBlock":"0x00",
+ "EIP150ForkBlock":"0x3a98",
+ "EIP158ForkBlock":"0x59d8",
+ "byzantiumForkBlock":"0x7530",
+ "constantinopleForkBlock":"0x9c40",
+ "minGasLimit":"0x1388",
+ "maxGasLimit":"0x7fffffffffffffff",
+ "tieBreakingGas":false,
+ "gasLimitBoundDivisor":"0x0400",
+ "minimumDifficulty":"0x20000",
+ "difficultyBoundDivisor":"0x0800",
+ "durationLimit":"0x0d",
+ "blockReward":"0x4563918244F40000",
+ "networkID":"0x4cb2e",
+ "chainID":"0x4cb2e",
+ "allowFutureBlocks":false
+ },
+ "genesis":{
+ "nonce":"0x0000000000000000",
+ "difficulty":"0x20000",
+ "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+ "author":"0x0000000000000000000000000000000000000000",
+ "timestamp":"0x59a4e76d",
+ "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
+ "gasLimit":"0x47b760"
+ },
+ "accounts":{
+ "0000000000000000000000000000000000000001":{
+ "balance":"1",
+ "precompiled":{
+ "name":"ecrecover",
+ "linear":{
+ "base":3000,
+ "word":0
+ }
+ }
+ },
+ "0000000000000000000000000000000000000002":{
+ "balance":"1",
+ "precompiled":{
+ "name":"sha256",
+ "linear":{
+ "base":60,
+ "word":12
+ }
+ }
+ },
+ "0000000000000000000000000000000000000003":{
+ "balance":"1",
+ "precompiled":{
+ "name":"ripemd160",
+ "linear":{
+ "base":600,
+ "word":120
+ }
+ }
+ },
+ "0000000000000000000000000000000000000004":{
+ "balance":"1",
+ "precompiled":{
+ "name":"identity",
+ "linear":{
+ "base":15,
+ "word":3
+ }
+ }
+ },
+ "0000000000000000000000000000000000000005":{
+ "balance":"1",
+ "precompiled":{
+ "name":"modexp",
+ "startingBlock":"0x7530"
+ }
+ },
+ "0000000000000000000000000000000000000006":{
+ "balance":"1",
+ "precompiled":{
+ "name":"alt_bn128_G1_add",
+ "startingBlock":"0x7530",
+ "linear":{
+ "base":500,
+ "word":0
+ }
+ }
+ },
+ "0000000000000000000000000000000000000007":{
+ "balance":"1",
+ "precompiled":{
+ "name":"alt_bn128_G1_mul",
+ "startingBlock":"0x7530",
+ "linear":{
+ "base":40000,
+ "word":0
+ }
+ }
+ },
+ "0000000000000000000000000000000000000008":{
+ "balance":"1",
+ "precompiled":{
+ "name":"alt_bn128_pairing_product",
+ "startingBlock":"0x7530"
+ }
+ }
+ }
+}
diff --git a/cmd/puppeth/testdata/stureby_geth.json b/cmd/puppeth/testdata/stureby_geth.json
new file mode 100644
index 000000000..c8c3b3c95
--- /dev/null
+++ b/cmd/puppeth/testdata/stureby_geth.json
@@ -0,0 +1,47 @@
+{
+ "config": {
+ "ethash":{},
+ "chainId": 314158,
+ "homesteadBlock": 10000,
+ "eip150Block": 15000,
+ "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "eip155Block": 23000,
+ "eip158Block": 23000,
+ "byzantiumBlock": 30000,
+ "constantinopleBlock": 40000
+ },
+ "nonce": "0x0",
+ "timestamp": "0x59a4e76d",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
+ "gasLimit": "0x47b760",
+ "difficulty": "0x20000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase": "0x0000000000000000000000000000000000000000",
+ "alloc": {
+ "0000000000000000000000000000000000000001": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000002": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000003": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000004": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000005": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000006": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000007": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000008": {
+ "balance": "0x01"
+ }
+ }
+}
diff --git a/cmd/puppeth/testdata/stureby_parity.json b/cmd/puppeth/testdata/stureby_parity.json
new file mode 100644
index 000000000..f3fa8386a
--- /dev/null
+++ b/cmd/puppeth/testdata/stureby_parity.json
@@ -0,0 +1,181 @@
+{
+ "name":"Stureby",
+ "dataDir":"stureby",
+ "engine":{
+ "Ethash":{
+ "params":{
+ "minimumDifficulty":"0x20000",
+ "difficultyBoundDivisor":"0x800",
+ "durationLimit":"0xd",
+ "blockReward":{
+ "0x0":"0x4563918244f40000",
+ "0x7530":"0x29a2241af62c0000",
+ "0x9c40":"0x1bc16d674ec80000"
+ },
+ "homesteadTransition":"0x2710",
+ "eip100bTransition":"0x7530",
+ "difficultyBombDelays":{
+ "0x7530":"0x2dc6c0",
+ "0x9c40":"0x1e8480"
+ }
+ }
+ }
+ },
+ "params":{
+ "accountStartNonce":"0x0",
+ "maximumExtraDataSize":"0x20",
+ "gasLimitBoundDivisor":"0x400",
+ "minGasLimit":"0x1388",
+ "networkID":"0x4cb2e",
+ "chainID":"0x4cb2e",
+ "maxCodeSize":"0x6000",
+ "maxCodeSizeTransition":"0x0",
+ "eip98Transition": "0x7fffffffffffffff",
+ "eip150Transition":"0x3a98",
+ "eip160Transition":"0x59d8",
+ "eip161abcTransition":"0x59d8",
+ "eip161dTransition":"0x59d8",
+ "eip155Transition":"0x59d8",
+ "eip140Transition":"0x7530",
+ "eip211Transition":"0x7530",
+ "eip214Transition":"0x7530",
+ "eip658Transition":"0x7530",
+ "eip145Transition":"0x9c40",
+ "eip1014Transition":"0x9c40",
+ "eip1052Transition":"0x9c40",
+ "eip1283Transition":"0x9c40"
+ },
+ "genesis":{
+ "seal":{
+ "ethereum":{
+ "nonce":"0x0000000000000000",
+ "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000"
+ }
+ },
+ "difficulty":"0x20000",
+ "author":"0x0000000000000000000000000000000000000000",
+ "timestamp":"0x59a4e76d",
+ "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
+ "gasLimit":"0x47b760"
+ },
+ "nodes":[
+ "enode://dfa7aca3f5b635fbfe7d0b20575f25e40d9e27b4bfbb3cf74364a42023ad9f25c1a4383bcc8cced86ee511a7d03415345a4df05be37f1dff040e4c780699f1c0@168.61.153.255:31303",
+ "enode://ef441b20dd70aeabf0eac35c3b8a2854e5ce04db0e30be9152ea9fd129359dcbb3f803993303ff5781c755dfd7223f3fe43505f583cccb740949407677412ba9@40.74.91.252:31303",
+ "enode://953b5ea1c8987cf46008232a0160324fd00d41320ecf00e23af86ec8f5396b19eb57ddab37c78141be56f62e9077de4f4dfa0747fa768ed8c8531bbfb1046237@40.70.214.166:31303",
+ "enode://276e613dd4b277a66591e565711e6c8bb107f0905248a9f8f8228c1a87992e156e5114bb9937c02824a9d9d25f76340442cf86e2028bf5293cae19904fb2b98e@35.178.251.52:30303",
+ "enode://064c820d41e52ed7d426ac64b60506c2998235bedc7e67cb497c6faf7bb4fc54fe56fc82d0add3180b747c0c4f40a1108a6f84d7d0629ed606d504528e61cc57@3.8.5.3:30303",
+ "enode://90069fdabcc5e684fa5d59430bebbb12755d9362dfe5006a1485b13d71a78a3812d36e74dd7d88e50b51add01e097ea80f16263aeaa4f0230db6c79e2a97e7ca@217.29.191.142:30303",
+ "enode://0aac74b7fd28726275e466acb5e03bc88a95927e9951eb66b5efb239b2f798ada0690853b2f2823fe4efa408f0f3d4dd258430bc952a5ff70677b8625b3e3b14@40.115.33.57:40404",
+ "enode://0b96415a10f835106d83e090a0528eed5e7887e5c802a6d084e9f1993a9d0fc713781e6e4101f6365e9b91259712f291acc0a9e6e667e22023050d602c36fbe2@40.115.33.57:40414"
+ ],
+ "accounts":{
+ "0000000000000000000000000000000000000001":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"ecrecover",
+ "pricing":{
+ "linear":{
+ "base":3000,
+ "word":0
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000002":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"sha256",
+ "pricing":{
+ "linear":{
+ "base":60,
+ "word":12
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000003":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"ripemd160",
+ "pricing":{
+ "linear":{
+ "base":600,
+ "word":120
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000004":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"identity",
+ "pricing":{
+ "linear":{
+ "base":15,
+ "word":3
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000005":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"modexp",
+ "activate_at":"0x7530",
+ "pricing":{
+ "modexp":{
+ "divisor":20
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000006":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"alt_bn128_add",
+ "activate_at":"0x7530",
+ "pricing":{
+ "linear":{
+ "base":500,
+ "word":0
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000007":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"alt_bn128_mul",
+ "activate_at":"0x7530",
+ "pricing":{
+ "linear":{
+ "base":40000,
+ "word":0
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000008":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"alt_bn128_pairing",
+ "activate_at":"0x7530",
+ "pricing":{
+ "alt_bn128_pairing":{
+ "base":100000,
+ "pair":80000
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/cmd/puppeth/wizard.go b/cmd/puppeth/wizard.go
index b88a61de7..83536506c 100644
--- a/cmd/puppeth/wizard.go
+++ b/cmd/puppeth/wizard.go
@@ -23,6 +23,7 @@ import (
"io/ioutil"
"math/big"
"net"
+ "net/url"
"os"
"path/filepath"
"sort"
@@ -118,6 +119,47 @@ func (w *wizard) readDefaultString(def string) string {
return def
}
+// readDefaultYesNo reads a single line from stdin, trimming it of spaces and
+// interpreting it as a 'yes' or a 'no'. If an empty line is entered, the default
+// value is returned.
+func (w *wizard) readDefaultYesNo(def bool) bool {
+ for {
+ fmt.Printf("> ")
+ text, err := w.in.ReadString('\n')
+ if err != nil {
+ log.Crit("Failed to read user input", "err", err)
+ }
+ if text = strings.ToLower(strings.TrimSpace(text)); text == "" {
+ return def
+ }
+ if text == "y" || text == "yes" {
+ return true
+ }
+ if text == "n" || text == "no" {
+ return false
+ }
+ log.Error("Invalid input, expected 'y', 'yes', 'n', 'no' or empty")
+ }
+}
+
+// readURL reads a single line from stdin, trimming it of spaces and trying to
+// interpret it as a URL (http, https or file).
+func (w *wizard) readURL() *url.URL {
+ for {
+ fmt.Printf("> ")
+ text, err := w.in.ReadString('\n')
+ if err != nil {
+ log.Crit("Failed to read user input", "err", err)
+ }
+ uri, err := url.Parse(strings.TrimSpace(text))
+ if err != nil {
+ log.Error("Invalid input, expected URL", "err", err)
+ continue
+ }
+ return uri
+ }
+}
+
// readInt reads a single line from stdin, trimming it of spaces, enforcing it
// to parse into an integer.
func (w *wizard) readInt() int {
diff --git a/cmd/puppeth/wizard_dashboard.go b/cmd/puppeth/wizard_dashboard.go
index 1a01631ff..8a8370845 100644
--- a/cmd/puppeth/wizard_dashboard.go
+++ b/cmd/puppeth/wizard_dashboard.go
@@ -137,14 +137,14 @@ func (w *wizard) deployDashboard() {
if w.conf.ethstats != "" {
fmt.Println()
fmt.Println("Include ethstats secret on dashboard (y/n)? (default = yes)")
- infos.trusted = w.readDefaultString("y") == "y"
+ infos.trusted = w.readDefaultYesNo(true)
}
// Try to deploy the dashboard container on the host
nocache := false
if existed {
fmt.Println()
fmt.Printf("Should the dashboard be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployDashboard(client, w.network, &w.conf, infos, nocache); err != nil {
log.Error("Failed to deploy dashboard container", "err", err)
diff --git a/cmd/puppeth/wizard_ethstats.go b/cmd/puppeth/wizard_ethstats.go
index fb2529c26..58ff3efbe 100644
--- a/cmd/puppeth/wizard_ethstats.go
+++ b/cmd/puppeth/wizard_ethstats.go
@@ -67,11 +67,11 @@ func (w *wizard) deployEthstats() {
if existed {
fmt.Println()
fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned)
- if w.readDefaultString("y") != "y" {
+ if !w.readDefaultYesNo(true) {
// The user might want to clear the entire list, although generally probably not
fmt.Println()
fmt.Printf("Clear out blacklist and start over (y/n)? (default = no)\n")
- if w.readDefaultString("n") != "n" {
+ if w.readDefaultYesNo(false) {
infos.banned = nil
}
// Offer the user to explicitly add/remove certain IP addresses
@@ -106,7 +106,7 @@ func (w *wizard) deployEthstats() {
if existed {
fmt.Println()
fmt.Printf("Should the ethstats be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
trusted := make([]string, 0, len(w.servers))
for _, client := range w.servers {
diff --git a/cmd/puppeth/wizard_explorer.go b/cmd/puppeth/wizard_explorer.go
index 413511c1c..a128fb9fb 100644
--- a/cmd/puppeth/wizard_explorer.go
+++ b/cmd/puppeth/wizard_explorer.go
@@ -100,7 +100,7 @@ func (w *wizard) deployExplorer() {
if existed {
fmt.Println()
fmt.Printf("Should the explorer be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployExplorer(client, w.network, chain, infos, nocache); err != nil {
log.Error("Failed to deploy explorer container", "err", err)
diff --git a/cmd/puppeth/wizard_faucet.go b/cmd/puppeth/wizard_faucet.go
index 6f0840894..9068c1d30 100644
--- a/cmd/puppeth/wizard_faucet.go
+++ b/cmd/puppeth/wizard_faucet.go
@@ -81,7 +81,7 @@ func (w *wizard) deployFaucet() {
if infos.captchaToken != "" {
fmt.Println()
fmt.Println("Reuse previous reCaptcha API authorization (y/n)? (default = yes)")
- if w.readDefaultString("y") != "y" {
+ if !w.readDefaultYesNo(true) {
infos.captchaToken, infos.captchaSecret = "", ""
}
}
@@ -89,7 +89,7 @@ func (w *wizard) deployFaucet() {
// No previous authorization (or old one discarded)
fmt.Println()
fmt.Println("Enable reCaptcha protection against robots (y/n)? (default = no)")
- if w.readDefaultString("n") == "n" {
+ if !w.readDefaultYesNo(false) {
log.Warn("Users will be able to requests funds via automated scripts")
} else {
// Captcha protection explicitly requested, read the site and secret keys
@@ -132,7 +132,7 @@ func (w *wizard) deployFaucet() {
} else {
fmt.Println()
fmt.Printf("Reuse previous (%s) funding account (y/n)? (default = yes)\n", key.Address.Hex())
- if w.readDefaultString("y") != "y" {
+ if !w.readDefaultYesNo(true) {
infos.node.keyJSON, infos.node.keyPass = "", ""
}
}
@@ -166,7 +166,7 @@ func (w *wizard) deployFaucet() {
if existed {
fmt.Println()
fmt.Printf("Should the faucet be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployFaucet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy faucet container", "err", err)
diff --git a/cmd/puppeth/wizard_genesis.go b/cmd/puppeth/wizard_genesis.go
index 6c4cd571f..95da5bd4f 100644
--- a/cmd/puppeth/wizard_genesis.go
+++ b/cmd/puppeth/wizard_genesis.go
@@ -20,9 +20,13 @@ import (
"bytes"
"encoding/json"
"fmt"
+ "io"
"io/ioutil"
"math/big"
"math/rand"
+ "net/http"
+ "os"
+ "path/filepath"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -40,11 +44,12 @@ func (w *wizard) makeGenesis() {
Difficulty: big.NewInt(524288),
Alloc: make(core.GenesisAlloc),
Config: &params.ChainConfig{
- HomesteadBlock: big.NewInt(1),
- EIP150Block: big.NewInt(2),
- EIP155Block: big.NewInt(3),
- EIP158Block: big.NewInt(3),
- ByzantiumBlock: big.NewInt(4),
+ HomesteadBlock: big.NewInt(1),
+ EIP150Block: big.NewInt(2),
+ EIP155Block: big.NewInt(3),
+ EIP158Block: big.NewInt(3),
+ ByzantiumBlock: big.NewInt(4),
+ ConstantinopleBlock: big.NewInt(5),
},
}
// Figure out which consensus engine to choose
@@ -114,9 +119,13 @@ func (w *wizard) makeGenesis() {
}
break
}
- // Add a batch of precompile balances to avoid them getting deleted
- for i := int64(0); i < 256; i++ {
- genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)}
+ fmt.Println()
+ fmt.Println("Should the precompile-addresses (0x1 .. 0xff) be pre-funded with 1 wei? (advisable yes)")
+ if w.readDefaultYesNo(true) {
+ // Add a batch of precompile balances to avoid them getting deleted
+ for i := int64(0); i < 256; i++ {
+ genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)}
+ }
}
// Query the user for some custom extras
fmt.Println()
@@ -130,53 +139,130 @@ func (w *wizard) makeGenesis() {
w.conf.flush()
}
+// importGenesis imports a Geth genesis spec into puppeth.
+func (w *wizard) importGenesis() {
+ // Request the genesis JSON spec URL from the user
+ fmt.Println()
+ fmt.Println("Where's the genesis file? (local file or http/https url)")
+ url := w.readURL()
+
+ // Convert the various allowed URLs to a reader stream
+ var reader io.Reader
+
+ switch url.Scheme {
+ case "http", "https":
+ // Remote web URL, retrieve it via an HTTP client
+ res, err := http.Get(url.String())
+ if err != nil {
+ log.Error("Failed to retrieve remote genesis", "err", err)
+ return
+ }
+ defer res.Body.Close()
+ reader = res.Body
+
+ case "":
+ // Schemaless URL, interpret as a local file
+ file, err := os.Open(url.String())
+ if err != nil {
+ log.Error("Failed to open local genesis", "err", err)
+ return
+ }
+ defer file.Close()
+ reader = file
+
+ default:
+ log.Error("Unsupported genesis URL scheme", "scheme", url.Scheme)
+ return
+ }
+ // Parse the genesis file and inject it if successful
+ var genesis core.Genesis
+ if err := json.NewDecoder(reader).Decode(&genesis); err != nil {
+ log.Error("Invalid genesis spec: %v", err)
+ return
+ }
+ log.Info("Imported genesis block")
+
+ w.conf.Genesis = &genesis
+ w.conf.flush()
+}
+
// manageGenesis permits the modification of chain configuration parameters in
// a genesis config and the export of the entire genesis spec.
func (w *wizard) manageGenesis() {
// Figure out whether to modify or export the genesis
fmt.Println()
fmt.Println(" 1. Modify existing fork rules")
- fmt.Println(" 2. Export genesis configuration")
+ fmt.Println(" 2. Export genesis configurations")
fmt.Println(" 3. Remove genesis configuration")
choice := w.read()
- switch {
- case choice == "1":
+ switch choice {
+ case "1":
// Fork rule updating requested, iterate over each fork
fmt.Println()
fmt.Printf("Which block should Homestead come into effect? (default = %v)\n", w.conf.Genesis.Config.HomesteadBlock)
w.conf.Genesis.Config.HomesteadBlock = w.readDefaultBigInt(w.conf.Genesis.Config.HomesteadBlock)
fmt.Println()
- fmt.Printf("Which block should EIP150 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block)
+ fmt.Printf("Which block should EIP150 (Tangerine Whistle) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block)
w.conf.Genesis.Config.EIP150Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP150Block)
fmt.Println()
- fmt.Printf("Which block should EIP155 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block)
+ fmt.Printf("Which block should EIP155 (Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block)
w.conf.Genesis.Config.EIP155Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP155Block)
fmt.Println()
- fmt.Printf("Which block should EIP158 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block)
+ fmt.Printf("Which block should EIP158/161 (also Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block)
w.conf.Genesis.Config.EIP158Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP158Block)
fmt.Println()
fmt.Printf("Which block should Byzantium come into effect? (default = %v)\n", w.conf.Genesis.Config.ByzantiumBlock)
w.conf.Genesis.Config.ByzantiumBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ByzantiumBlock)
+ fmt.Println()
+ fmt.Printf("Which block should Constantinople come into effect? (default = %v)\n", w.conf.Genesis.Config.ConstantinopleBlock)
+ w.conf.Genesis.Config.ConstantinopleBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ConstantinopleBlock)
+
out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", " ")
fmt.Printf("Chain configuration updated:\n\n%s\n", out)
- case choice == "2":
+ case "2":
// Save whatever genesis configuration we currently have
fmt.Println()
- fmt.Printf("Which file to save the genesis into? (default = %s.json)\n", w.network)
- out, _ := json.MarshalIndent(w.conf.Genesis, "", " ")
- if err := ioutil.WriteFile(w.readDefaultString(fmt.Sprintf("%s.json", w.network)), out, 0644); err != nil {
- log.Error("Failed to save genesis file", "err", err)
- }
- log.Info("Exported existing genesis block")
+ fmt.Printf("Which folder to save the genesis specs into? (default = current)\n")
+ fmt.Printf(" Will create %s.json, %s-aleth.json, %s-harmony.json, %s-parity.json\n", w.network, w.network, w.network, w.network)
- case choice == "3":
+ folder := w.readDefaultString(".")
+ if err := os.MkdirAll(folder, 0755); err != nil {
+ log.Error("Failed to create spec folder", "folder", folder, "err", err)
+ return
+ }
+ out, _ := json.MarshalIndent(w.conf.Genesis, "", " ")
+
+ // Export the native genesis spec used by puppeth and Geth
+ gethJson := filepath.Join(folder, fmt.Sprintf("%s.json", w.network))
+ if err := ioutil.WriteFile(gethJson, out, 0644); err != nil {
+ log.Error("Failed to save genesis file", "err", err)
+ return
+ }
+ log.Info("Saved native genesis chain spec", "path", gethJson)
+
+ // Export the genesis spec used by Aleth (formerly C++ Ethereum)
+ if spec, err := newAlethGenesisSpec(w.network, w.conf.Genesis); err != nil {
+ log.Error("Failed to create Aleth chain spec", "err", err)
+ } else {
+ saveGenesis(folder, w.network, "aleth", spec)
+ }
+ // Export the genesis spec used by Parity
+ if spec, err := newParityChainSpec(w.network, w.conf.Genesis, []string{}); err != nil {
+ log.Error("Failed to create Parity chain spec", "err", err)
+ } else {
+ saveGenesis(folder, w.network, "parity", spec)
+ }
+ // Export the genesis spec used by Harmony (formerly EthereumJ)
+ saveGenesis(folder, w.network, "harmony", w.conf.Genesis)
+
+ case "3":
// Make sure we don't have any services running
if len(w.conf.servers()) > 0 {
log.Error("Genesis reset requires all services and servers torn down")
@@ -186,8 +272,20 @@ func (w *wizard) manageGenesis() {
w.conf.Genesis = nil
w.conf.flush()
-
default:
log.Error("That's not something I can do")
+ return
}
}
+
+// saveGenesis JSON-encodes an arbitrary genesis spec and saves it to a file named after the network and client.
+func saveGenesis(folder, network, client string, spec interface{}) {
+ path := filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client))
+
+ out, _ := json.Marshal(spec)
+ if err := ioutil.WriteFile(path, out, 0644); err != nil {
+ log.Error("Failed to save genesis file", "client", client, "err", err)
+ return
+ }
+ log.Info("Saved genesis chain spec", "client", client, "path", path)
+}
diff --git a/cmd/puppeth/wizard_intro.go b/cmd/puppeth/wizard_intro.go
index 60aa0f7ff..75fb04b76 100644
--- a/cmd/puppeth/wizard_intro.go
+++ b/cmd/puppeth/wizard_intro.go
@@ -61,14 +61,14 @@ func (w *wizard) run() {
// Make sure we have a good network name to work with
fmt.Println()
// Docker accepts hyphens in image names, but doesn't like it for container names
if w.network == "" {
- fmt.Println("Please specify a network name to administer (no spaces or hyphens, please)")
+ fmt.Println("Please specify a network name to administer (no spaces, hyphens or capital letters please)")
for {
w.network = w.readString()
- if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") {
+ if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") && strings.ToLower(w.network) == w.network {
fmt.Printf("\nSweet, you can set this via --network=%s next time!\n\n", w.network)
break
}
- log.Error("I also like to live dangerously, still no spaces or hyphens")
+ log.Error("I also like to live dangerously, still no spaces, hyphens or capital letters")
}
}
log.Info("Administering Ethereum network", "name", w.network)
@@ -131,7 +131,20 @@ func (w *wizard) run() {
case choice == "2":
if w.conf.Genesis == nil {
- w.makeGenesis()
+ fmt.Println()
+ fmt.Println("What would you like to do? (default = create)")
+ fmt.Println(" 1. Create new genesis from scratch")
+ fmt.Println(" 2. Import already existing genesis")
+
+ choice := w.read()
+ switch {
+ case choice == "" || choice == "1":
+ w.makeGenesis()
+ case choice == "2":
+ w.importGenesis()
+ default:
+ log.Error("That's not something I can do")
+ }
} else {
w.manageGenesis()
}
@@ -149,7 +162,6 @@ func (w *wizard) run() {
} else {
w.manageComponents()
}
-
default:
log.Error("That's not something I can do")
}
diff --git a/cmd/puppeth/wizard_nginx.go b/cmd/puppeth/wizard_nginx.go
index 4eeae93a0..8397b7fd5 100644
--- a/cmd/puppeth/wizard_nginx.go
+++ b/cmd/puppeth/wizard_nginx.go
@@ -41,12 +41,12 @@ func (w *wizard) ensureVirtualHost(client *sshClient, port int, def string) (str
// Reverse proxy is not running, offer to deploy a new one
fmt.Println()
fmt.Println("Allow sharing the port with other services (y/n)? (default = yes)")
- if w.readDefaultString("y") == "y" {
+ if w.readDefaultYesNo(true) {
nocache := false
if proxy != nil {
fmt.Println()
fmt.Printf("Should the reverse-proxy be rebuilt from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployNginx(client, w.network, port, nocache); err != nil {
log.Error("Failed to deploy reverse-proxy", "err", err)
diff --git a/cmd/puppeth/wizard_node.go b/cmd/puppeth/wizard_node.go
index 49b10a023..e37297f6d 100644
--- a/cmd/puppeth/wizard_node.go
+++ b/cmd/puppeth/wizard_node.go
@@ -126,7 +126,7 @@ func (w *wizard) deployNode(boot bool) {
} else {
fmt.Println()
fmt.Printf("Reuse previous (%s) signing account (y/n)? (default = yes)\n", key.Address.Hex())
- if w.readDefaultString("y") != "y" {
+ if !w.readDefaultYesNo(true) {
infos.keyJSON, infos.keyPass = "", ""
}
}
@@ -165,7 +165,7 @@ func (w *wizard) deployNode(boot bool) {
if existed {
fmt.Println()
fmt.Printf("Should the node be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployNode(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy Ethereum node container", "err", err)
diff --git a/cmd/puppeth/wizard_wallet.go b/cmd/puppeth/wizard_wallet.go
index 7624d11e2..ca1ea5bd2 100644
--- a/cmd/puppeth/wizard_wallet.go
+++ b/cmd/puppeth/wizard_wallet.go
@@ -96,7 +96,7 @@ func (w *wizard) deployWallet() {
if existed {
fmt.Println()
fmt.Printf("Should the wallet be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployWallet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy wallet container", "err", err)
diff --git a/cmd/swarm/access_test.go b/cmd/swarm/access_test.go
index 9357c577e..967ef2742 100644
--- a/cmd/swarm/access_test.go
+++ b/cmd/swarm/access_test.go
@@ -33,11 +33,11 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
swarmapi "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/testutil"
+ "golang.org/x/crypto/sha3"
)
const (
@@ -598,7 +598,7 @@ func TestKeypairSanity(t *testing.T) {
t.Fatal(err)
}
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
hasher.Write(salt)
shared, err := hex.DecodeString(sharedSecret)
if err != nil {
diff --git a/cmd/swarm/config_test.go b/cmd/swarm/config_test.go
index 02198f878..18be316e5 100644
--- a/cmd/swarm/config_test.go
+++ b/cmd/swarm/config_test.go
@@ -26,14 +26,14 @@ import (
"testing"
"time"
+ "github.com/docker/docker/pkg/reexec"
+ "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm"
"github.com/ethereum/go-ethereum/swarm/api"
-
- "github.com/docker/docker/pkg/reexec"
)
-func TestDumpConfig(t *testing.T) {
+func TestConfigDump(t *testing.T) {
swarm := runSwarm(t, "dumpconfig")
defaultConf := api.NewConfig()
out, err := tomlSettings.Marshal(&defaultConf)
@@ -91,8 +91,8 @@ func TestConfigCmdLineOverrides(t *testing.T) {
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name),
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
- "--datadir", dir,
- "--ipcpath", conf.IPCPath,
+ fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
@@ -189,9 +189,9 @@ func TestConfigFileOverrides(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
- "--ens-api", "",
- "--ipcpath", conf.IPCPath,
- "--datadir", dir,
+ fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
+ fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
@@ -407,9 +407,9 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
- "--ens-api", "",
- "--datadir", dir,
- "--ipcpath", conf.IPCPath,
+ fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
+ fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
@@ -466,7 +466,7 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
node.Shutdown()
}
-func TestValidateConfig(t *testing.T) {
+func TestConfigValidate(t *testing.T) {
for _, c := range []struct {
cfg *api.Config
err string
diff --git a/cmd/swarm/feeds.go b/cmd/swarm/feeds.go
index f26a8cc7d..6cd971a92 100644
--- a/cmd/swarm/feeds.go
+++ b/cmd/swarm/feeds.go
@@ -169,7 +169,6 @@ func feedUpdate(ctx *cli.Context) {
query = new(feed.Query)
query.User = signer.Address()
query.Topic = getTopic(ctx)
-
}
// Retrieve a feed update request
@@ -178,6 +177,11 @@ func feedUpdate(ctx *cli.Context) {
utils.Fatalf("Error retrieving feed status: %s", err.Error())
}
+ // Check that the provided signer matches the request to sign
+ if updateRequest.User != signer.Address() {
+ utils.Fatalf("Signer address does not match the update request")
+ }
+
// set the new data
updateRequest.SetData(data)
diff --git a/cmd/swarm/feeds_test.go b/cmd/swarm/feeds_test.go
index a0cedf0d3..4c40f62a8 100644
--- a/cmd/swarm/feeds_test.go
+++ b/cmd/swarm/feeds_test.go
@@ -19,7 +19,6 @@ package main
import (
"bytes"
"encoding/json"
- "fmt"
"io/ioutil"
"os"
"testing"
@@ -69,7 +68,7 @@ func TestCLIFeedUpdate(t *testing.T) {
hexData}
// create an update and expect an exit without errors
- log.Info(fmt.Sprintf("updating a feed with 'swarm feed update'"))
+ log.Info("updating a feed with 'swarm feed update'")
cmd := runSwarm(t, flags...)
cmd.ExpectExit()
@@ -116,7 +115,7 @@ func TestCLIFeedUpdate(t *testing.T) {
"--user", address.Hex(),
}
- log.Info(fmt.Sprintf("getting feed info with 'swarm feed info'"))
+ log.Info("getting feed info with 'swarm feed info'")
cmd = runSwarm(t, flags...)
_, matches := cmd.ExpectRegexp(`.*`) // regex hack to extract stdout
cmd.ExpectExit()
@@ -141,9 +140,9 @@ func TestCLIFeedUpdate(t *testing.T) {
"--topic", topic.Hex(),
}
- log.Info(fmt.Sprintf("Publishing manifest with 'swarm feed create'"))
+ log.Info("Publishing manifest with 'swarm feed create'")
cmd = runSwarm(t, flags...)
- _, matches = cmd.ExpectRegexp(`[a-f\d]{64}`) // regex hack to extract stdout
+ _, matches = cmd.ExpectRegexp(`[a-f\d]{64}`)
cmd.ExpectExit()
manifestAddress := matches[0] // read the received feed manifest
@@ -162,4 +161,36 @@ func TestCLIFeedUpdate(t *testing.T) {
if !bytes.Equal(data, retrieved) {
t.Fatalf("Received %s, expected %s", retrieved, data)
}
+
+ // test publishing a manifest for a different user
+ flags = []string{
+ "--bzzapi", srv.URL,
+ "feed", "create",
+ "--topic", topic.Hex(),
+ "--user", "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // different user
+ }
+
+ log.Info("Publishing manifest with 'swarm feed create' for a different user")
+ cmd = runSwarm(t, flags...)
+ _, matches = cmd.ExpectRegexp(`[a-f\d]{64}`)
+ cmd.ExpectExit()
+
+ manifestAddress = matches[0] // read the received feed manifest
+
+ // now let's try to update that user's manifest which we don't have the private key for
+ flags = []string{
+ "--bzzapi", srv.URL,
+ "--bzzaccount", pkFileName,
+ "feed", "update",
+ "--manifest", manifestAddress,
+ hexData}
+
+ // create an update and expect an error given there is a user mismatch
+ log.Info("updating a feed with 'swarm feed update'")
+ cmd = runSwarm(t, flags...)
+ cmd.ExpectRegexp("Fatal:.*") // best way so far to detect a failure.
+ cmd.ExpectExit()
+ if cmd.ExitStatus() == 0 {
+ t.Fatal("Expected nonzero exit code when updating a manifest with the wrong user. Got 0.")
+ }
}
diff --git a/cmd/swarm/flags.go b/cmd/swarm/flags.go
index 0dedca674..12edc8cc9 100644
--- a/cmd/swarm/flags.go
+++ b/cmd/swarm/flags.go
@@ -164,10 +164,6 @@ var (
Name: "topic",
Usage: "User-defined topic this feed is tracking, hex encoded. Limited to 64 hexadecimal characters",
}
- SwarmFeedDataOnCreateFlag = cli.StringFlag{
- Name: "data",
- Usage: "Initializes the feed with the given hex-encoded data. Data must be prefixed by 0x",
- }
SwarmFeedManifestFlag = cli.StringFlag{
Name: "manifest",
Usage: "Refers to the feed through a manifest",
diff --git a/cmd/swarm/fs.go b/cmd/swarm/fs.go
index b970b2e8c..edeeddff8 100644
--- a/cmd/swarm/fs.go
+++ b/cmd/swarm/fs.go
@@ -24,7 +24,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
- "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/fuse"
"gopkg.in/urfave/cli.v1"
@@ -41,27 +41,24 @@ var fsCommand = cli.Command{
Action: mount,
CustomHelpTemplate: helpTemplate,
Name: "mount",
- Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "mount a swarm hash to a mount point",
- ArgsUsage: "swarm fs mount --ipcpath ",
+ ArgsUsage: "swarm fs mount ",
Description: "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
{
Action: unmount,
CustomHelpTemplate: helpTemplate,
Name: "unmount",
- Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "unmount a swarmfs mount",
- ArgsUsage: "swarm fs unmount --ipcpath ",
+ ArgsUsage: "swarm fs unmount ",
Description: "Unmounts a swarmfs mount residing at . This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
{
Action: listMounts,
CustomHelpTemplate: helpTemplate,
Name: "list",
- Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "list swarmfs mounts",
- ArgsUsage: "swarm fs list --ipcpath ",
+ ArgsUsage: "swarm fs list",
Description: "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
},
@@ -70,7 +67,7 @@ var fsCommand = cli.Command{
func mount(cliContext *cli.Context) {
args := cliContext.Args()
if len(args) < 2 {
- utils.Fatalf("Usage: swarm fs mount --ipcpath ")
+ utils.Fatalf("Usage: swarm fs mount ")
}
client, err := dialRPC(cliContext)
@@ -97,7 +94,7 @@ func unmount(cliContext *cli.Context) {
args := cliContext.Args()
if len(args) < 1 {
- utils.Fatalf("Usage: swarm fs unmount --ipcpath ")
+ utils.Fatalf("Usage: swarm fs unmount ")
}
client, err := dialRPC(cliContext)
if err != nil {
@@ -145,20 +142,21 @@ func listMounts(cliContext *cli.Context) {
}
func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
- var endpoint string
+ endpoint := getIPCEndpoint(ctx)
+ log.Info("IPC endpoint", "path", endpoint)
+ return rpc.Dial(endpoint)
+}
- if ctx.IsSet(utils.IPCPathFlag.Name) {
- endpoint = ctx.String(utils.IPCPathFlag.Name)
- } else {
- utils.Fatalf("swarm ipc endpoint not specified")
- }
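+// getIPCEndpoint resolves the IPC endpoint to dial from the command line flags
+// and the default node configuration.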
+func getIPCEndpoint(ctx *cli.Context) string {
+ cfg := defaultNodeConfig
+ utils.SetNodeConfig(ctx, &cfg)
- if endpoint == "" {
- endpoint = node.DefaultIPCEndpoint(clientIdentifier)
- } else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
+ endpoint := cfg.IPCEndpoint()
+
+ if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
// Backwards compatibility with geth < 1.5 which required
// these prefixes.
endpoint = endpoint[4:]
}
- return rpc.Dial(endpoint)
+ return endpoint
}
diff --git a/cmd/swarm/fs_test.go b/cmd/swarm/fs_test.go
index ac4223b66..5f58d6c0d 100644
--- a/cmd/swarm/fs_test.go
+++ b/cmd/swarm/fs_test.go
@@ -20,6 +20,7 @@ package main
import (
"bytes"
+ "fmt"
"io"
"io/ioutil"
"os"
@@ -28,6 +29,7 @@ import (
"testing"
"time"
+ "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/log"
)
@@ -36,6 +38,26 @@ type testFile struct {
content string
}
+// TestCLISwarmFsDefaultIPCPath tests if the most basic fs command, i.e., list
+// can find and correctly connect to a running Swarm node on the default
+// IPCPath.
+func TestCLISwarmFsDefaultIPCPath(t *testing.T) {
+ cluster := newTestCluster(t, 1)
+ defer cluster.Shutdown()
+
+ handlingNode := cluster.Nodes[0]
+ list := runSwarm(t, []string{
+ "--datadir", handlingNode.Dir,
+ "fs",
+ "list",
+ }...)
+
+ list.WaitExit()
+ if list.Err != nil {
+ t.Fatal(list.Err)
+ }
+}
+
// TestCLISwarmFs is a high-level test of swarmfs
//
// This test fails on travis for macOS as this executable exits with code 1
@@ -59,9 +81,9 @@ func TestCLISwarmFs(t *testing.T) {
log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
mount := runSwarm(t, []string{
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"mount",
- "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
mhash,
mountPoint,
}...)
@@ -101,9 +123,9 @@ func TestCLISwarmFs(t *testing.T) {
log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
unmount := runSwarm(t, []string{
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"unmount",
- "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
mountPoint,
}...)
_, matches := unmount.ExpectRegexp(hashRegexp)
@@ -136,9 +158,9 @@ func TestCLISwarmFs(t *testing.T) {
//remount, check files
newMount := runSwarm(t, []string{
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"mount",
- "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
hash, // the latest hash
secondMountPoint,
}...)
@@ -172,9 +194,9 @@ func TestCLISwarmFs(t *testing.T) {
log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
unmountSec := runSwarm(t, []string{
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"unmount",
- "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
secondMountPoint,
}...)
diff --git a/cmd/swarm/swarm-smoke/feed_upload_and_sync.go b/cmd/swarm/swarm-smoke/feed_upload_and_sync.go
index 1371d6654..2c5e3fd23 100644
--- a/cmd/swarm/swarm-smoke/feed_upload_and_sync.go
+++ b/cmd/swarm/swarm-smoke/feed_upload_and_sync.go
@@ -2,11 +2,13 @@ package main
import (
"bytes"
+ "context"
"crypto/md5"
"fmt"
"io"
"io/ioutil"
"net/http"
+ "net/http/httptrace"
"os"
"os/exec"
"strings"
@@ -16,9 +18,13 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/swarm/multihash"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/swarm/api/client"
+ "github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
+ "github.com/ethereum/go-ethereum/swarm/testutil"
colorable "github.com/mattn/go-colorable"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pborman/uuid"
cli "gopkg.in/urfave/cli.v1"
)
@@ -27,16 +33,34 @@ const (
feedRandomDataLength = 8
)
-// TODO: retrieve with manifest + extract repeating code
func cliFeedUploadAndSync(c *cli.Context) error {
-
+ metrics.GetOrRegisterCounter("feed-and-sync", nil).Inc(1)
log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))))
+ errc := make(chan error)
+ go func() {
+ errc <- feedUploadAndSync(c)
+ }()
+
+ select {
+ case err := <-errc:
+ if err != nil {
+ metrics.GetOrRegisterCounter("feed-and-sync.fail", nil).Inc(1)
+ }
+ return err
+ case <-time.After(time.Duration(timeout) * time.Second):
+ metrics.GetOrRegisterCounter("feed-and-sync.timeout", nil).Inc(1)
+ return fmt.Errorf("timeout after %v sec", timeout)
+ }
+}
+
+// TODO: retrieve with manifest + extract repeating code
+func feedUploadAndSync(c *cli.Context) error {
defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())
- generateEndpoints(scheme, cluster, from, to)
+ generateEndpoints(scheme, cluster, appName, from, to)
- log.Info("generating and uploading MRUs to " + endpoints[0] + " and syncing")
+ log.Info("generating and uploading feeds to " + endpoints[0] + " and syncing")
// create a random private key to sign updates with and derive the address
pkFile, err := ioutil.TempFile("", "swarm-feed-smoke-test")
@@ -205,12 +229,12 @@ func cliFeedUploadAndSync(c *cli.Context) error {
log.Info("all endpoints synced random data successfully")
// upload test file
- log.Info("uploading to " + endpoints[0] + " and syncing")
+ seed := int(time.Now().UnixNano() / 1e6)
+ log.Info("feed uploading to "+endpoints[0]+" and syncing", "seed", seed)
- f, cleanup := generateRandomFile(filesize * 1000)
- defer cleanup()
+ randomBytes := testutil.RandomBytes(seed, filesize*1000)
- hash, err := upload(f, endpoints[0])
+ hash, err := upload(&randomBytes, endpoints[0])
if err != nil {
return err
}
@@ -218,9 +242,8 @@ func cliFeedUploadAndSync(c *cli.Context) error {
if err != nil {
return err
}
- multihashHex := hexutil.Encode(multihash.ToMultihash(hashBytes))
-
- fileHash, err := digest(f)
+ multihashHex := hexutil.Encode(hashBytes)
+ fileHash, err := digest(bytes.NewReader(randomBytes))
if err != nil {
return err
}
@@ -286,14 +309,37 @@ func cliFeedUploadAndSync(c *cli.Context) error {
}
func fetchFeed(topic string, user string, endpoint string, original []byte, ruid string) error {
+ ctx, sp := spancontext.StartSpan(context.Background(), "feed-and-sync.fetch")
+ defer sp.Finish()
+
log.Trace("sleeping", "ruid", ruid)
time.Sleep(3 * time.Second)
log.Trace("http get request (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user)
- res, err := http.Get(endpoint + "/bzz-feed:/?topic=" + topic + "&user=" + user)
+
+ var tn time.Time
+ reqUri := endpoint + "/bzz-feed:/?topic=" + topic + "&user=" + user
+ req, _ := http.NewRequest("GET", reqUri, nil)
+
+ opentracing.GlobalTracer().Inject(
+ sp.Context(),
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(req.Header))
+
+ trace := client.GetClientTrace("feed-and-sync - http get", "feed-and-sync", ruid, &tn)
+
+ req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
+ transport := http.DefaultTransport
+
+ //transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
+
+ tn = time.Now()
+ res, err := transport.RoundTrip(req)
if err != nil {
+ log.Error(err.Error(), "ruid", ruid)
return err
}
+
log.Trace("http get response (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user, "code", res.StatusCode, "len", res.ContentLength)
if res.StatusCode != 200 {
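The fetch path now follows a common tracing recipe: start a span, inject its context into the outgoing request headers so the receiving node can join the trace, and attach an httptrace.ClientTrace for connection-level timings. A minimal sketch of that recipe, assuming a tracer has already been registered via opentracing.SetGlobalTracer (the swarm-specific spancontext and client.GetClientTrace helpers are omitted):

    package main

    import (
        "net/http"
        "net/http/httptrace"

        opentracing "github.com/opentracing/opentracing-go"
    )

    // tracedGet issues a GET whose headers carry the current span context.
    func tracedGet(url string) (*http.Response, error) {
        sp := opentracing.StartSpan("http-get")
        defer sp.Finish()

        req, err := http.NewRequest("GET", url, nil)
        if err != nil {
            return nil, err
        }
        // Serialize the span context into the outgoing HTTP headers.
        opentracing.GlobalTracer().Inject(
            sp.Context(),
            opentracing.HTTPHeaders,
            opentracing.HTTPHeadersCarrier(req.Header))

        // Hook low-level events (DNS lookup, connect, first response byte, ...).
        trace := &httptrace.ClientTrace{}
        req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

        return http.DefaultTransport.RoundTrip(req)
    }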
diff --git a/cmd/swarm/swarm-smoke/main.go b/cmd/swarm/swarm-smoke/main.go
index 4ff17fd5b..66cecdc5c 100644
--- a/cmd/swarm/swarm-smoke/main.go
+++ b/cmd/swarm/swarm-smoke/main.go
@@ -17,23 +17,38 @@
package main
import (
+ "fmt"
"os"
"sort"
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ gethmetrics "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/metrics/influxdb"
+ swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
+ "github.com/ethereum/go-ethereum/swarm/tracing"
+
"github.com/ethereum/go-ethereum/log"
cli "gopkg.in/urfave/cli.v1"
)
+var (
+ gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
+)
+
var (
endpoints []string
includeLocalhost bool
cluster string
+ appName string
scheme string
filesize int
+ syncDelay int
from int
to int
verbosity int
+ timeout int
+ single bool
)
func main() {
@@ -49,6 +64,12 @@ func main() {
Usage: "cluster to point to (prod or a given namespace)",
Destination: &cluster,
},
+ cli.StringFlag{
+ Name: "app",
+ Value: "swarm",
+ Usage: "application to point to (swarm or swarm-private)",
+ Destination: &appName,
+ },
cli.IntFlag{
Name: "cluster-from",
Value: 8501,
@@ -78,14 +99,42 @@ func main() {
Usage: "file size for generated random file in KB",
Destination: &filesize,
},
+ cli.IntFlag{
+ Name: "sync-delay",
+ Value: 5,
+ Usage: "duration of delay in seconds to wait for content to be synced",
+ Destination: &syncDelay,
+ },
cli.IntFlag{
Name: "verbosity",
Value: 1,
Usage: "verbosity",
Destination: &verbosity,
},
+ cli.IntFlag{
+ Name: "timeout",
+ Value: 120,
+ Usage: "timeout in seconds after which kill the process",
+ Destination: &timeout,
+ },
+ cli.BoolFlag{
+ Name: "single",
+ Usage: "whether to fetch content from a single node or from all nodes",
+ Destination: &single,
+ },
}
+ app.Flags = append(app.Flags, []cli.Flag{
+ utils.MetricsEnabledFlag,
+ swarmmetrics.MetricsInfluxDBEndpointFlag,
+ swarmmetrics.MetricsInfluxDBDatabaseFlag,
+ swarmmetrics.MetricsInfluxDBUsernameFlag,
+ swarmmetrics.MetricsInfluxDBPasswordFlag,
+ swarmmetrics.MetricsInfluxDBHostTagFlag,
+ }...)
+
+ app.Flags = append(app.Flags, tracing.Flags...)
+
app.Commands = []cli.Command{
{
Name: "upload_and_sync",
@@ -104,8 +153,38 @@ func main() {
sort.Sort(cli.FlagsByName(app.Flags))
sort.Sort(cli.CommandsByName(app.Commands))
+ app.Before = func(ctx *cli.Context) error {
+ tracing.Setup(ctx)
+ return nil
+ }
+
+ app.After = func(ctx *cli.Context) error {
+ return emitMetrics(ctx)
+ }
+
err := app.Run(os.Args)
if err != nil {
log.Error(err.Error())
+
+ os.Exit(1)
}
}
+
+func emitMetrics(ctx *cli.Context) error {
+ if gethmetrics.Enabled {
+ var (
+ endpoint = ctx.GlobalString(swarmmetrics.MetricsInfluxDBEndpointFlag.Name)
+ database = ctx.GlobalString(swarmmetrics.MetricsInfluxDBDatabaseFlag.Name)
+ username = ctx.GlobalString(swarmmetrics.MetricsInfluxDBUsernameFlag.Name)
+ password = ctx.GlobalString(swarmmetrics.MetricsInfluxDBPasswordFlag.Name)
+ hosttag = ctx.GlobalString(swarmmetrics.MetricsInfluxDBHostTagFlag.Name)
+ )
+ return influxdb.InfluxDBWithTagsOnce(gethmetrics.DefaultRegistry, endpoint, database, username, password, "swarm-smoke.", map[string]string{
+ "host": hosttag,
+ "version": gitCommit,
+ "filesize": fmt.Sprintf("%v", filesize),
+ })
+ }
+
+ return nil
+}
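emitMetrics runs in app.After, so this short-lived CLI pushes its counters exactly once on exit instead of running the periodic InfluxDB reporter a long-lived daemon would use. A hedged sketch of the one-shot flush, with placeholder endpoint and credentials:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/metrics"
        "github.com/ethereum/go-ethereum/metrics/influxdb"
    )

    func main() {
        metrics.Enabled = true
        metrics.GetOrRegisterCounter("smoke.runs", nil).Inc(1)

        // One-shot flush of the default registry; all values below are placeholders.
        err := influxdb.InfluxDBWithTagsOnce(metrics.DefaultRegistry,
            "http://localhost:8086", "metrics", "user", "pass",
            "swarm-smoke.", map[string]string{"host": "dev"})
        if err != nil {
            fmt.Println("flush failed:", err)
        }
    }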
diff --git a/cmd/swarm/swarm-smoke/upload_and_sync.go b/cmd/swarm/swarm-smoke/upload_and_sync.go
index 7872421d3..d605f79a3 100644
--- a/cmd/swarm/swarm-smoke/upload_and_sync.go
+++ b/cmd/swarm/swarm-smoke/upload_and_sync.go
@@ -18,35 +18,40 @@ package main
import (
"bytes"
+ "context"
"crypto/md5"
crand "crypto/rand"
- "crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
+ "math/rand"
"net/http"
+ "net/http/httptrace"
"os"
- "os/exec"
- "strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/log"
- colorable "github.com/mattn/go-colorable"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/swarm/api"
+ "github.com/ethereum/go-ethereum/swarm/api/client"
+ "github.com/ethereum/go-ethereum/swarm/spancontext"
+ "github.com/ethereum/go-ethereum/swarm/testutil"
+ opentracing "github.com/opentracing/opentracing-go"
"github.com/pborman/uuid"
cli "gopkg.in/urfave/cli.v1"
)
-func generateEndpoints(scheme string, cluster string, from int, to int) {
+func generateEndpoints(scheme string, cluster string, app string, from int, to int) {
if cluster == "prod" {
- for port := from; port <= to; port++ {
+ for port := from; port < to; port++ {
endpoints = append(endpoints, fmt.Sprintf("%s://%v.swarm-gateways.net", scheme, port))
}
} else {
- for port := from; port <= to; port++ {
- endpoints = append(endpoints, fmt.Sprintf("%s://swarm-%v-%s.stg.swarm-gateways.net", scheme, port, cluster))
+ for port := from; port < to; port++ {
+ endpoints = append(endpoints, fmt.Sprintf("%s://%s-%v-%s.stg.swarm-gateways.net", scheme, app, port, cluster))
}
}
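Two behavioural tweaks hide in this hunk: the port range became half-open (`from` inclusive, `to` exclusive), and the staging hostname now embeds the app name. A tiny sketch of what the staging branch produces for illustrative inputs:

    package main

    import "fmt"

    func main() {
        scheme, app, cluster := "http", "swarm-private", "test"
        for port := 8501; port < 8504; port++ { // half-open: 8504 itself is skipped
            fmt.Printf("%s://%s-%v-%s.stg.swarm-gateways.net\n", scheme, app, port, cluster)
        }
        // http://swarm-private-8501-test.stg.swarm-gateways.net
        // http://swarm-private-8502-test.stg.swarm-gateways.net
        // http://swarm-private-8503-test.stg.swarm-gateways.net
    }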
@@ -57,24 +62,50 @@ func generateEndpoints(scheme string, cluster string, from int, to int) {
func cliUploadAndSync(c *cli.Context) error {
log.PrintOrigins(true)
- log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
+ log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))
- defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())
+ metrics.GetOrRegisterCounter("upload-and-sync", nil).Inc(1)
- generateEndpoints(scheme, cluster, from, to)
+ errc := make(chan error)
+ go func() {
+ errc <- uploadAndSync(c)
+ }()
- log.Info("uploading to " + endpoints[0] + " and syncing")
+ select {
+ case err := <-errc:
+ if err != nil {
+ metrics.GetOrRegisterCounter("upload-and-sync.fail", nil).Inc(1)
+ }
+ return err
+ case <-time.After(time.Duration(timeout) * time.Second):
+ metrics.GetOrRegisterCounter("upload-and-sync.timeout", nil).Inc(1)
+ return fmt.Errorf("timeout after %v sec", timeout)
+ }
+}
- f, cleanup := generateRandomFile(filesize * 1000)
- defer cleanup()
+func uploadAndSync(c *cli.Context) error {
+ defer func(now time.Time) {
+ totalTime := time.Since(now)
- hash, err := upload(f, endpoints[0])
+ log.Info("total time", "time", totalTime, "kb", filesize)
+ metrics.GetOrRegisterCounter("upload-and-sync.total-time", nil).Inc(int64(totalTime))
+ }(time.Now())
+
+ generateEndpoints(scheme, cluster, appName, from, to)
+ seed := int(time.Now().UnixNano() / 1e6)
+ log.Info("uploading to "+endpoints[0]+" and syncing", "seed", seed)
+
+ randomBytes := testutil.RandomBytes(seed, filesize*1000)
+
+ t1 := time.Now()
+ hash, err := upload(&randomBytes, endpoints[0])
if err != nil {
log.Error(err.Error())
return err
}
+ metrics.GetOrRegisterCounter("upload-and-sync.upload-time", nil).Inc(int64(time.Since(t1)))
- fhash, err := digest(f)
+ fhash, err := digest(bytes.NewReader(randomBytes))
if err != nil {
log.Error(err.Error())
return err
@@ -82,23 +113,47 @@ func cliUploadAndSync(c *cli.Context) error {
log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))
- time.Sleep(3 * time.Second)
+ time.Sleep(time.Duration(syncDelay) * time.Second)
wg := sync.WaitGroup{}
- for _, endpoint := range endpoints {
+ if single {
+ rand.Seed(time.Now().UTC().UnixNano())
+ randIndex := 1 + rand.Intn(len(endpoints)-1)
ruid := uuid.New()[:8]
wg.Add(1)
go func(endpoint string, ruid string) {
for {
+ start := time.Now()
err := fetch(hash, endpoint, fhash, ruid)
+ fetchTime := time.Since(start)
if err != nil {
continue
}
+ metrics.GetOrRegisterMeter("upload-and-sync.single.fetch-time", nil).Mark(int64(fetchTime))
wg.Done()
return
}
- }(endpoint, ruid)
+ }(endpoints[randIndex], ruid)
+ } else {
+ for _, endpoint := range endpoints {
+ ruid := uuid.New()[:8]
+ wg.Add(1)
+ go func(endpoint string, ruid string) {
+ for {
+ start := time.Now()
+ err := fetch(hash, endpoint, fhash, ruid)
+ fetchTime := time.Since(start)
+ if err != nil {
+ continue
+ }
+
+ metrics.GetOrRegisterMeter("upload-and-sync.each.fetch-time", nil).Mark(int64(fetchTime))
+ wg.Done()
+ return
+ }
+ }(endpoint, ruid)
+ }
}
wg.Wait()
log.Info("all endpoints synced random file successfully")
@@ -108,16 +163,33 @@ func cliUploadAndSync(c *cli.Context) error {
// fetch retrieves the requested `hash` from the `endpoint` and compares it with the `original` file
func fetch(hash string, endpoint string, original []byte, ruid string) error {
+ ctx, sp := spancontext.StartSpan(context.Background(), "upload-and-sync.fetch")
+ defer sp.Finish()
+
log.Trace("sleeping", "ruid", ruid)
time.Sleep(3 * time.Second)
-
log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
- client := &http.Client{Transport: &http.Transport{
- TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
- }}
- res, err := client.Get(endpoint + "/bzz:/" + hash + "/")
+
+ var tn time.Time
+ reqUri := endpoint + "/bzz:/" + hash + "/"
+ req, _ := http.NewRequest("GET", reqUri, nil)
+
+ opentracing.GlobalTracer().Inject(
+ sp.Context(),
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(req.Header))
+
+ trace := client.GetClientTrace("upload-and-sync - http get", "upload-and-sync", ruid, &tn)
+
+ req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
+ transport := http.DefaultTransport
+
+ //transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
+
+ tn = time.Now()
+ res, err := transport.RoundTrip(req)
if err != nil {
- log.Warn(err.Error(), "ruid", ruid)
+ log.Error(err.Error(), "ruid", ruid)
return err
}
log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)
@@ -148,16 +220,19 @@ func fetch(hash string, endpoint string, original []byte, ruid string) error {
}
-// upload is uploading a file `f` to `endpoint` via the `swarm up` cmd
+// upload uploads the given data to `endpoint` using the swarm HTTP client
-func upload(f *os.File, endpoint string) (string, error) {
- var out bytes.Buffer
- cmd := exec.Command("swarm", "--bzzapi", endpoint, "up", f.Name())
- cmd.Stdout = &out
- err := cmd.Run()
- if err != nil {
- return "", err
+func upload(dataBytes *[]byte, endpoint string) (string, error) {
+ swarm := client.NewClient(endpoint)
+ f := &client.File{
+ ReadCloser: ioutil.NopCloser(bytes.NewReader(*dataBytes)),
+ ManifestEntry: api.ManifestEntry{
+ ContentType: "text/plain",
+ Mode: 0660,
+ Size: int64(len(*dataBytes)),
+ },
}
- hash := strings.TrimRight(out.String(), "\r\n")
- return hash, nil
+
+ // upload data to bzz:// and retrieve the content-addressed manifest hash, hex-encoded.
+ return swarm.Upload(f, "", false)
}
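Uploading now goes through the swarm HTTP client instead of shelling out to the swarm binary. A self-contained sketch of the same client call against a hypothetical local gateway:

    package main

    import (
        "bytes"
        "fmt"
        "io/ioutil"

        "github.com/ethereum/go-ethereum/swarm/api"
        "github.com/ethereum/go-ethereum/swarm/api/client"
    )

    func main() {
        data := []byte("hello swarm")
        swarm := client.NewClient("http://localhost:8500") // placeholder endpoint

        f := &client.File{
            ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
            ManifestEntry: api.ManifestEntry{
                ContentType: "text/plain",
                Mode:        0660,
                Size:        int64(len(data)),
            },
        }
        // Empty manifest path, no encryption: returns the manifest hash.
        hash, err := swarm.Upload(f, "", false)
        if err != nil {
            panic(err)
        }
        fmt.Println("manifest hash:", hash)
    }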
func digest(r io.Reader) ([]byte, error) {
@@ -180,27 +255,3 @@ func generateRandomData(datasize int) ([]byte, error) {
}
return b, nil
}
-
-// generateRandomFile is creating a temporary file with the requested byte size
-func generateRandomFile(size int) (f *os.File, teardown func()) {
- // create a tmp file
- tmp, err := ioutil.TempFile("", "swarm-test")
- if err != nil {
- panic(err)
- }
-
- // callback for tmp file cleanup
- teardown = func() {
- tmp.Close()
- os.Remove(tmp.Name())
- }
-
- buf := make([]byte, size)
- _, err = crand.Read(buf)
- if err != nil {
- panic(err)
- }
- ioutil.WriteFile(tmp.Name(), buf, 0755)
-
- return tmp, teardown
-}
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 299621bc7..d4ef50e0e 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -142,6 +142,10 @@ var (
Name: "rinkeby",
Usage: "Rinkeby network: pre-configured proof-of-authority test network",
}
+ ConstantinopleOverrideFlag = cli.Uint64Flag{
+ Name: "override.constantinople",
+ Usage: "Manually specify constantinople fork-block, overriding the bundled setting",
+ }
DeveloperFlag = cli.BoolFlag{
Name: "dev",
Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
@@ -184,6 +188,10 @@ var (
Name: "lightkdf",
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
}
+ WhitelistFlag = cli.StringFlag{
+ Name: "whitelist",
+ Usage: "Comma separated block number-to-hash mappings to enforce (=)",
+ }
// Dashboard settings
DashboardEnabledFlag = cli.BoolFlag{
Name: metrics.DashboardEnabledFlag,
@@ -843,17 +851,12 @@ func setIPC(ctx *cli.Context, cfg *node.Config) {
// makeDatabaseHandles raises the number of allowed file handles per process
// for Geth and returns half of the allowance to assign to the database.
func makeDatabaseHandles() int {
- limit, err := fdlimit.Current()
+ limit, err := fdlimit.Maximum()
if err != nil {
Fatalf("Failed to retrieve file descriptor allowance: %v", err)
}
- if limit < 2048 {
- if err := fdlimit.Raise(2048); err != nil {
- Fatalf("Failed to raise file descriptor allowance: %v", err)
- }
- }
- if limit > 2048 { // cap database file descriptors even if more is available
- limit = 2048
+ if err := fdlimit.Raise(uint64(limit)); err != nil {
+ Fatalf("Failed to raise file descriptor allowance: %v", err)
}
return limit / 2 // Leave half for networking and other stuff
}
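The fixed 2048-handle cap is gone: the process now queries the OS hard limit and raises its soft limit to match, keeping half for the database. A hedged sketch of the fdlimit calls as used here:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common/fdlimit"
    )

    func main() {
        max, err := fdlimit.Maximum() // hard limit reported by the OS
        if err != nil {
            panic(err)
        }
        if err := fdlimit.Raise(uint64(max)); err != nil { // lift the soft limit to it
            panic(err)
        }
        fmt.Println("database allowance:", max/2) // other half stays for networking
    }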
@@ -997,16 +1000,7 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
setWS(ctx, cfg)
setNodeUserIdent(ctx, cfg)
- switch {
- case ctx.GlobalIsSet(DataDirFlag.Name):
- cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
- case ctx.GlobalBool(DeveloperFlag.Name):
- cfg.DataDir = "" // unless explicitly requested, use memory databases
- case ctx.GlobalBool(TestnetFlag.Name):
- cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
- case ctx.GlobalBool(RinkebyFlag.Name):
- cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
- }
+ setDataDir(ctx, cfg)
if ctx.GlobalIsSet(KeyStoreDirFlag.Name) {
cfg.KeyStoreDir = ctx.GlobalString(KeyStoreDirFlag.Name)
@@ -1019,6 +1013,19 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
}
}
+func setDataDir(ctx *cli.Context, cfg *node.Config) {
+ switch {
+ case ctx.GlobalIsSet(DataDirFlag.Name):
+ cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
+ case ctx.GlobalBool(DeveloperFlag.Name):
+ cfg.DataDir = "" // unless explicitly requested, use memory databases
+ case ctx.GlobalBool(TestnetFlag.Name):
+ cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
+ case ctx.GlobalBool(RinkebyFlag.Name):
+ cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
+ }
+}
+
func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
if ctx.GlobalIsSet(GpoBlocksFlag.Name) {
cfg.Blocks = ctx.GlobalInt(GpoBlocksFlag.Name)
@@ -1092,6 +1099,29 @@ func setEthash(ctx *cli.Context, cfg *eth.Config) {
}
}
+func setWhitelist(ctx *cli.Context, cfg *eth.Config) {
+ whitelist := ctx.GlobalString(WhitelistFlag.Name)
+ if whitelist == "" {
+ return
+ }
+ cfg.Whitelist = make(map[uint64]common.Hash)
+ for _, entry := range strings.Split(whitelist, ",") {
+ parts := strings.Split(entry, "=")
+ if len(parts) != 2 {
+ Fatalf("Invalid whitelist entry: %s", entry)
+ }
+ number, err := strconv.ParseUint(parts[0], 0, 64)
+ if err != nil {
+ Fatalf("Invalid whitelist block number %s: %v", parts[0], err)
+ }
+ var hash common.Hash
+ if err = hash.UnmarshalText([]byte(parts[1])); err != nil {
+ Fatalf("Invalid whitelist hash %s: %v", parts[1], err)
+ }
+ cfg.Whitelist[number] = hash
+ }
+}
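The flag value is a comma-separated list of <block-number>=<hash> pairs; base-0 parsing means the number may be decimal or 0x-prefixed. A hedged sketch of the accepted format (the pair below is illustrative — it matches the mainnet DAO-fork block, but any checkpoint works):

    package main

    import (
        "fmt"
        "strconv"
        "strings"

        "github.com/ethereum/go-ethereum/common"
    )

    func main() {
        // Same shape as --whitelist 1920000=0x4985...,<more pairs>
        whitelist := "1920000=0x4985f5ca3d2afbec36529aa96f74de3cc10a2a4a6c44f2157a57d2c6059a11bb"

        entries := make(map[uint64]common.Hash)
        for _, entry := range strings.Split(whitelist, ",") {
            parts := strings.Split(entry, "=")
            number, err := strconv.ParseUint(parts[0], 0, 64)
            if err != nil {
                panic(err)
            }
            var hash common.Hash
            if err := hash.UnmarshalText([]byte(parts[1])); err != nil {
                panic(err)
            }
            entries[number] = hash
        }
        fmt.Println(entries)
    }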
+
// checkExclusive verifies that only a single instance of the provided flags was
// set by the user. Each flag might optionally be followed by a string type to
// specialize it further.
@@ -1157,6 +1187,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
setGPO(ctx, &cfg.GPO)
setTxPool(ctx, &cfg.TxPool)
setEthash(ctx, cfg)
+ setWhitelist(ctx, cfg)
if ctx.GlobalIsSet(SyncModeFlag.Name) {
cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
@@ -1170,7 +1201,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = ctx.GlobalUint64(NetworkIdFlag.Name)
}
-
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) {
cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
}
@@ -1423,7 +1453,6 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chainDb ethdb.Database) {
var err error
chainDb = MakeChainDatabase(ctx, stack)
-
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
if err != nil {
Fatalf("%v", err)
diff --git a/common/types.go b/common/types.go
index a4b999526..0f4892d28 100644
--- a/common/types.go
+++ b/common/types.go
@@ -27,7 +27,7 @@ import (
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/crypto/sha3"
+ "golang.org/x/crypto/sha3"
)
// Lengths of hashes and addresses in bytes.
@@ -196,7 +196,7 @@ func (a Address) Hash() Hash { return BytesToHash(a[:]) }
// Hex returns an EIP55-compliant hex string representation of the address.
func (a Address) Hex() string {
unchecksummed := hex.EncodeToString(a[:])
- sha := sha3.NewKeccak256()
+ sha := sha3.NewLegacyKeccak256()
sha.Write([]byte(unchecksummed))
hash := sha.Sum(nil)
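The vendored crypto/sha3 is replaced by golang.org/x/crypto/sha3 throughout; NewLegacyKeccak256 preserves Ethereum's original Keccak padding byte (0x01), which differs from the finalized SHA3 standard (0x06). Hashing the empty input shows the two are not interchangeable:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    func main() {
        legacy := sha3.NewLegacyKeccak256() // pre-NIST Keccak, what Ethereum uses
        legacy.Write(nil)
        fmt.Printf("keccak256(\"\") = %x\n", legacy.Sum(nil))
        // c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470

        std := sha3.New256() // standardized SHA3-256, different padding byte
        std.Write(nil)
        fmt.Printf("sha3-256(\"\")  = %x\n", std.Sum(nil))
        // a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a
    }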
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index 0cb72c35c..c79c30cae 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -33,13 +33,13 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
lru "github.com/hashicorp/golang-lru"
+ "golang.org/x/crypto/sha3"
)
const (
@@ -148,7 +148,7 @@ type SignerFn func(accounts.Account, []byte) ([]byte, error)
// panics. This is done to avoid accidentally using both forms (signature present
// or not), which could be abused to produce different hashes for the same header.
func sigHash(header *types.Header) (hash common.Hash) {
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
rlp.Encode(hasher, []interface{}{
header.ParentHash,
diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go
index f252a7f3a..d6c871092 100644
--- a/consensus/ethash/algorithm.go
+++ b/consensus/ethash/algorithm.go
@@ -30,8 +30,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
+ "golang.org/x/crypto/sha3"
)
const (
@@ -123,7 +123,7 @@ func seedHash(block uint64) []byte {
if block < epochLength {
return seed
}
- keccak256 := makeHasher(sha3.NewKeccak256())
+ keccak256 := makeHasher(sha3.NewLegacyKeccak256())
for i := 0; i < int(block/epochLength); i++ {
keccak256(seed, seed)
}
@@ -177,7 +177,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
}
}()
// Create a hasher to reuse between invocations
- keccak512 := makeHasher(sha3.NewKeccak512())
+ keccak512 := makeHasher(sha3.NewLegacyKeccak512())
// Sequentially produce the initial dataset
keccak512(cache, seed)
@@ -301,7 +301,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
defer pend.Done()
// Create a hasher to reuse between invocations
- keccak512 := makeHasher(sha3.NewKeccak512())
+ keccak512 := makeHasher(sha3.NewLegacyKeccak512())
// Calculate the data segment this thread should generate
batch := uint32((size + hashBytes*uint64(threads) - 1) / (hashBytes * uint64(threads)))
@@ -375,7 +375,7 @@ func hashimoto(hash []byte, nonce uint64, size uint64, lookup func(index uint32)
// in-memory cache) in order to produce our final value for a particular header
// hash and nonce.
func hashimotoLight(size uint64, cache []uint32, hash []byte, nonce uint64) ([]byte, []byte) {
- keccak512 := makeHasher(sha3.NewKeccak512())
+ keccak512 := makeHasher(sha3.NewLegacyKeccak512())
lookup := func(index uint32) []uint32 {
rawData := generateDatasetItem(cache, index, keccak512)
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index 548c57cd9..62e3f8fca 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -31,9 +31,9 @@ import (
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
// Ethash proof-of-work protocol constants.
@@ -575,7 +575,7 @@ func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header
// SealHash returns the hash of a block prior to it being sealed.
func (ethash *Ethash) SealHash(header *types.Header) (hash common.Hash) {
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
rlp.Encode(hasher, []interface{}{
header.ParentHash,
diff --git a/core/blockchain.go b/core/blockchain.go
index d173b2de2..49aedf669 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -47,7 +47,10 @@ import (
)
var (
- blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
+ blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
+ blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
+ blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
+ blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
ErrNoGenesis = errors.New("Genesis not found in chain")
)
@@ -62,7 +65,7 @@ const (
triesInMemory = 128
// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
- BlockChainVersion = 3
+ BlockChainVersion uint64 = 3
)
// CacheConfig contains the configuration values for the trie caching/pruning
@@ -207,6 +210,11 @@ func (bc *BlockChain) getProcInterrupt() bool {
return atomic.LoadInt32(&bc.procInterrupt) == 1
}
+// GetVMConfig returns the block chain VM config.
+func (bc *BlockChain) GetVMConfig() *vm.Config {
+ return &bc.vmConfig
+}
+
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
@@ -445,7 +453,11 @@ func (bc *BlockChain) repair(head **types.Block) error {
return nil
}
// Otherwise rewind one block and recheck state availability there
- (*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
+ block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
+ if block == nil {
+ return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
+ }
+ (*head) = block
}
}
@@ -1036,6 +1048,18 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
return status, nil
}
+// addFutureBlock checks if the block is within the max allowed window to get
+// accepted for future processing, and returns an error if the block is too far
+// ahead and was not added.
+func (bc *BlockChain) addFutureBlock(block *types.Block) error {
+ max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
+ if block.Time().Cmp(max) > 0 {
+ return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
+ }
+ bc.futureBlocks.Add(block.Hash(), block)
+ return nil
+}
+
// InsertChain attempts to insert the given batch of blocks into the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well as an error describing what went
@@ -1043,18 +1067,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
- n, events, logs, err := bc.insertChain(chain)
- bc.PostChainEvents(events, logs)
- return n, err
-}
-
-// insertChain will execute the actual chain insertion and event aggregation. The
-// only reason this method exists as a separate one is to make locking cleaner
-// with deferred statements.
-func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
// Sanity check that we have something meaningful to import
if len(chain) == 0 {
- return 0, nil, nil, nil
+ return 0, nil
}
// Do a sanity check that the provided chain is actually ordered and linked
for i := 1; i < len(chain); i++ {
@@ -1063,16 +1078,36 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
- return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
+ return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
}
}
// Pre-checks passed, start the full block imports
bc.wg.Add(1)
- defer bc.wg.Done()
-
bc.chainmu.Lock()
- defer bc.chainmu.Unlock()
+ n, events, logs, err := bc.insertChain(chain, true)
+ bc.chainmu.Unlock()
+ bc.wg.Done()
+
+ bc.PostChainEvents(events, logs)
+ return n, err
+}
+
+// insertChain is the internal implementation of InsertChain, which assumes that
+// 1) chains are contiguous, and 2) the chain mutex is held.
+//
+// This method is split out so that import batches that require re-injecting
+// historical blocks can do so without releasing the lock, which could lead to
+// racy behaviour. If a sidechain import is in progress, and the historic state
+// is imported, but then a new canon-head is added before the actual sidechain
+// completes, then the historic state could be pruned again.
+func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
+ // If the chain is terminating, don't even bother starting up
+ if atomic.LoadInt32(&bc.procInterrupt) == 1 {
+ return 0, nil, nil, nil
+ }
+ // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
+ senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
// A queued approach to delivering events. This is generally
// faster than direct delivery and requires much less mutex
@@ -1089,16 +1124,56 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
for i, block := range chain {
headers[i] = block.Header()
- seals[i] = true
+ seals[i] = verifySeals
}
abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
defer close(abort)
- // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
- senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
+ // Peek the error for the first block to decide which import path to take
+ it := newInsertIterator(chain, results, bc.Validator())
- // Iterate over the blocks and insert when the verifier permits
- for i, block := range chain {
+ block, err := it.next()
+ switch {
+ // First block is pruned, insert as sidechain and reorg only if TD grows enough
+ case err == consensus.ErrPrunedAncestor:
+ return bc.insertSidechain(it)
+
+ // First block is future, shove it (and all children) to the future queue (unknown ancestor)
+ case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
+ for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
+ if err := bc.addFutureBlock(block); err != nil {
+ return it.index, events, coalescedLogs, err
+ }
+ block, err = it.next()
+ }
+ stats.queued += it.processed()
+ stats.ignored += it.remaining()
+
+ // If there are any still remaining, mark as ignored
+ return it.index, events, coalescedLogs, err
+
+ // First block (and state) is known
+ // 1. We did a roll-back, and should now do a re-import
+ // 2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
+ // from the canonical chain, which has not been verified.
+ case err == ErrKnownBlock:
+ // Skip all known blocks that are behind us
+ current := bc.CurrentBlock().NumberU64()
+
+ for block != nil && err == ErrKnownBlock && current >= block.NumberU64() {
+ stats.ignored++
+ block, err = it.next()
+ }
+ // Falls through to the block import
+
+ // Some other error occurred, abort
+ case err != nil:
+ stats.ignored += len(it.chain)
+ bc.reportBlock(block, nil, err)
+ return it.index, events, coalescedLogs, err
+ }
+ // No validation errors for the first block (or chain prefix skipped)
+ for ; block != nil && err == nil; block, err = it.next() {
// If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
@@ -1107,115 +1182,53 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
// If the header is a banned one, straight out abort
if BadHashes[block.Hash()] {
bc.reportBlock(block, nil, ErrBlacklistedHash)
- return i, events, coalescedLogs, ErrBlacklistedHash
+ return it.index, events, coalescedLogs, ErrBlacklistedHash
}
- // Wait for the block's verification to complete
- bstart := time.Now()
+ // Retrieve the parent block and its state to execute on top
+ start := time.Now()
- err := <-results
- if err == nil {
- err = bc.Validator().ValidateBody(block)
- }
- switch {
- case err == ErrKnownBlock:
- // Block and state both already known. However if the current block is below
- // this number we did a rollback and we should reimport it nonetheless.
- if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
- stats.ignored++
- continue
- }
-
- case err == consensus.ErrFutureBlock:
- // Allow up to MaxFuture second in the future blocks. If this limit is exceeded
- // the chain is discarded and processed at a later time if given.
- max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
- if block.Time().Cmp(max) > 0 {
- return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
- }
- bc.futureBlocks.Add(block.Hash(), block)
- stats.queued++
- continue
-
- case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
- bc.futureBlocks.Add(block.Hash(), block)
- stats.queued++
- continue
-
- case err == consensus.ErrPrunedAncestor:
- // Block competing with the canonical chain, store in the db, but don't process
- // until the competitor TD goes above the canonical TD
- currentBlock := bc.CurrentBlock()
- localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
- externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
- if localTd.Cmp(externTd) > 0 {
- if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
- return i, events, coalescedLogs, err
- }
- continue
- }
- // Competitor chain beat canonical, gather all blocks from the common ancestor
- var winner []*types.Block
-
- parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
- for !bc.HasState(parent.Root()) {
- winner = append(winner, parent)
- parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
- }
- for j := 0; j < len(winner)/2; j++ {
- winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
- }
- // Import all the pruned blocks to make the state available
- bc.chainmu.Unlock()
- _, evs, logs, err := bc.insertChain(winner)
- bc.chainmu.Lock()
- events, coalescedLogs = evs, logs
-
- if err != nil {
- return i, events, coalescedLogs, err
- }
-
- case err != nil:
- bc.reportBlock(block, nil, err)
- return i, events, coalescedLogs, err
- }
- // Create a new statedb using the parent block and report an
- // error if it fails.
- var parent *types.Block
- if i == 0 {
+ parent := it.previous()
+ if parent == nil {
parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
- } else {
- parent = chain[i-1]
}
state, err := state.New(parent.Root(), bc.stateCache)
if err != nil {
- return i, events, coalescedLogs, err
+ return it.index, events, coalescedLogs, err
}
// Process block using the parent state as reference point.
+ t0 := time.Now()
receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
+ t1 := time.Now()
if err != nil {
bc.reportBlock(block, receipts, err)
- return i, events, coalescedLogs, err
+ return it.index, events, coalescedLogs, err
}
// Validate the state using the default validator
- err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
- if err != nil {
+ if err := bc.Validator().ValidateState(block, parent, state, receipts, usedGas); err != nil {
bc.reportBlock(block, receipts, err)
- return i, events, coalescedLogs, err
+ return it.index, events, coalescedLogs, err
}
- proctime := time.Since(bstart)
+ t2 := time.Now()
+ proctime := time.Since(start)
// Write the block to the chain and get the status.
status, err := bc.WriteBlockWithState(block, receipts, state)
+ t3 := time.Now()
if err != nil {
- return i, events, coalescedLogs, err
+ return it.index, events, coalescedLogs, err
}
+ blockInsertTimer.UpdateSince(start)
+ blockExecutionTimer.Update(t1.Sub(t0))
+ blockValidationTimer.Update(t2.Sub(t1))
+ blockWriteTimer.Update(t3.Sub(t2))
switch status {
case CanonStatTy:
- log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
- "txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
+ log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
+ "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
+ "elapsed", common.PrettyDuration(time.Since(start)),
+ "root", block.Root())
coalescedLogs = append(coalescedLogs, logs...)
- blockInsertTimer.UpdateSince(bstart)
events = append(events, ChainEvent{block, block.Hash(), logs})
lastCanon = block
@@ -1223,78 +1236,153 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
bc.gcproc += proctime
case SideStatTy:
- log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
- common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
-
- blockInsertTimer.UpdateSince(bstart)
+ log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
+ "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
+ "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
+ "root", block.Root())
events = append(events, ChainSideEvent{block})
}
+ blockInsertTimer.UpdateSince(start)
stats.processed++
stats.usedGas += usedGas
cache, _ := bc.stateCache.TrieDB().Size()
- stats.report(chain, i, cache)
+ stats.report(chain, it.index, cache)
}
+ // Any blocks remaining here? The only ones we care about are the future ones
+ if block != nil && err == consensus.ErrFutureBlock {
+ if err := bc.addFutureBlock(block); err != nil {
+ return it.index, events, coalescedLogs, err
+ }
+ block, err = it.next()
+
+ for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
+ if err := bc.addFutureBlock(block); err != nil {
+ return it.index, events, coalescedLogs, err
+ }
+ stats.queued++
+ }
+ }
+ stats.ignored += it.remaining()
+
// Append a single chain head event if we've progressed the chain
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
events = append(events, ChainHeadEvent{lastCanon})
}
- return 0, events, coalescedLogs, nil
+ return it.index, events, coalescedLogs, err
}
-// insertStats tracks and reports on block insertion.
-type insertStats struct {
- queued, processed, ignored int
- usedGas uint64
- lastIndex int
- startTime mclock.AbsTime
-}
-
-// statsReportLimit is the time limit during import and export after which we
-// always print out progress. This avoids the user wondering what's going on.
-const statsReportLimit = 8 * time.Second
-
-// report prints statistics if some number of blocks have been processed
-// or more than a few seconds have passed since the last message.
-func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
- // Fetch the timings for the batch
+// insertSidechain is called when an import batch hits upon a pruned ancestor
+// error, which happens when a sidechain with a sufficiently old fork-block is
+// found.
+//
+// The method writes all (header-and-body-valid) blocks to disk, then tries to
+// switch over to the new chain if the TD exceeds that of the current chain.
+func (bc *BlockChain) insertSidechain(it *insertIterator) (int, []interface{}, []*types.Log, error) {
var (
- now = mclock.Now()
- elapsed = time.Duration(now) - time.Duration(st.startTime)
+ externTd *big.Int
+ current = bc.CurrentBlock().NumberU64()
)
- // If we're at the last block of the batch or report period reached, log
- if index == len(chain)-1 || elapsed >= statsReportLimit {
- var (
- end = chain[index]
- txs = countTransactions(chain[st.lastIndex : index+1])
- )
- context := []interface{}{
- "blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
- "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
- "number", end.Number(), "hash", end.Hash(),
- }
- if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
- context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
- }
- context = append(context, []interface{}{"cache", cache}...)
+ // The first sidechain block error is already verified to be ErrPrunedAncestor.
+ // Since we don't import them here, we expect ErrUnknownAncestor for the remaining
+ // ones. Any other error means that the block is invalid, and should not be written
+ // to disk.
+ block, err := it.current(), consensus.ErrPrunedAncestor
+ for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
+ // Check the canonical state root for that number
+ if number := block.NumberU64(); current >= number {
+ if canonical := bc.GetBlockByNumber(number); canonical != nil && canonical.Root() == block.Root() {
+ // This is most likely a shadow-state attack. When a fork is imported into the
+ // database, and it eventually reaches a block height which is not pruned, we
+ // just found that the state already exists! This means that the sidechain block
+ // refers to a state which already exists in our canon chain.
+ //
+ // If left unchecked, we would now proceed importing the blocks, without actually
+ // having verified the state of the previous blocks.
+ log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
- if st.queued > 0 {
- context = append(context, []interface{}{"queued", st.queued}...)
+ // If someone legitimately side-mines blocks, they would still be imported as usual. However,
+ // we cannot risk writing unverified blocks to disk when they obviously target the pruning
+ // mechanism.
+ return it.index, nil, nil, errors.New("sidechain ghost-state attack")
+ }
}
- if st.ignored > 0 {
- context = append(context, []interface{}{"ignored", st.ignored}...)
+ if externTd == nil {
+ externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
}
- log.Info("Imported new chain segment", context...)
+ externTd = new(big.Int).Add(externTd, block.Difficulty())
- *st = insertStats{startTime: now, lastIndex: index + 1}
+ if !bc.HasBlock(block.Hash(), block.NumberU64()) {
+ start := time.Now()
+ if err := bc.WriteBlockWithoutState(block, externTd); err != nil {
+ return it.index, nil, nil, err
+ }
+ log.Debug("Inserted sidechain block", "number", block.Number(), "hash", block.Hash(),
+ "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
+ "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
+ "root", block.Root())
+ }
}
-}
-
-func countTransactions(chain []*types.Block) (c int) {
- for _, b := range chain {
- c += len(b.Transactions())
+ // At this point, we've written all sidechain blocks to the database. The loop
+ // ended either on some other error or because all blocks were processed. If
+ // there was some other error, we can ignore the rest of those blocks.
+ //
+ // If the externTd was larger than our local TD, we now need to reimport the previous
+ // blocks to regenerate the required state
+ localTd := bc.GetTd(bc.CurrentBlock().Hash(), current)
+ if localTd.Cmp(externTd) > 0 {
+ log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().NumberU64(), "sidetd", externTd, "localtd", localTd)
+ return it.index, nil, nil, err
}
- return c
+ // Gather all the sidechain hashes (full blocks may be memory heavy)
+ var (
+ hashes []common.Hash
+ numbers []uint64
+ )
+ parent := bc.GetHeader(it.previous().Hash(), it.previous().NumberU64())
+ for parent != nil && !bc.HasState(parent.Root) {
+ hashes = append(hashes, parent.Hash())
+ numbers = append(numbers, parent.Number.Uint64())
+
+ parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
+ }
+ if parent == nil {
+ return it.index, nil, nil, errors.New("missing parent")
+ }
+ // Import all the pruned blocks to make the state available
+ var (
+ blocks []*types.Block
+ memory common.StorageSize
+ )
+ for i := len(hashes) - 1; i >= 0; i-- {
+ // Append the next block to our batch
+ block := bc.GetBlock(hashes[i], numbers[i])
+
+ blocks = append(blocks, block)
+ memory += block.Size()
+
+ // If memory use grew too large, import and continue. Sadly we need to discard
+ // all raised events and logs from notifications since we're too heavy on the
+ // memory here.
+ if len(blocks) >= 2048 || memory > 64*1024*1024 {
+ log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
+ if _, _, _, err := bc.insertChain(blocks, false); err != nil {
+ return 0, nil, nil, err
+ }
+ blocks, memory = blocks[:0], 0
+
+ // If the chain is terminating, stop processing blocks
+ if atomic.LoadInt32(&bc.procInterrupt) == 1 {
+ log.Debug("Premature abort during blocks processing")
+ return 0, nil, nil, nil
+ }
+ }
+ }
+ if len(blocks) > 0 {
+ log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
+ return bc.insertChain(blocks, false)
+ }
+ return 0, nil, nil, nil
}
// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
@@ -1469,8 +1557,10 @@ func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, e
bc.addBadBlock(block)
var receiptString string
- for _, receipt := range receipts {
- receiptString += fmt.Sprintf("\t%v\n", receipt)
+ for i, receipt := range receipts {
+ receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
+ i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
+ receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
}
log.Error(fmt.Sprintf(`
########## BAD BLOCK #########
diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go
new file mode 100644
index 000000000..70bea3544
--- /dev/null
+++ b/core/blockchain_insert.go
@@ -0,0 +1,143 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// insertStats tracks and reports on block insertion.
+type insertStats struct {
+ queued, processed, ignored int
+ usedGas uint64
+ lastIndex int
+ startTime mclock.AbsTime
+}
+
+// statsReportLimit is the time limit during import and export after which we
+// always print out progress. This avoids the user wondering what's going on.
+const statsReportLimit = 8 * time.Second
+
+// report prints statistics if some number of blocks have been processed
+// or more than a few seconds have passed since the last message.
+func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
+ // Fetch the timings for the batch
+ var (
+ now = mclock.Now()
+ elapsed = time.Duration(now) - time.Duration(st.startTime)
+ )
+ // If we're at the last block of the batch or report period reached, log
+ if index == len(chain)-1 || elapsed >= statsReportLimit {
+ // Count the number of transactions in this segment
+ var txs int
+ for _, block := range chain[st.lastIndex : index+1] {
+ txs += len(block.Transactions())
+ }
+ end := chain[index]
+
+ // Assemble the log context and send it to the logger
+ context := []interface{}{
+ "blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
+ "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
+ "number", end.Number(), "hash", end.Hash(),
+ }
+ if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
+ context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
+ }
+ context = append(context, []interface{}{"cache", cache}...)
+
+ if st.queued > 0 {
+ context = append(context, []interface{}{"queued", st.queued}...)
+ }
+ if st.ignored > 0 {
+ context = append(context, []interface{}{"ignored", st.ignored}...)
+ }
+ log.Info("Imported new chain segment", context...)
+
+ // Bump the stats reported to the next section
+ *st = insertStats{startTime: now, lastIndex: index + 1}
+ }
+}
+
+// insertIterator is a helper to assist during chain import.
+type insertIterator struct {
+ chain types.Blocks
+ results <-chan error
+ index int
+ validator Validator
+}
+
+// newInsertIterator creates a new iterator based on the given blocks, which are
+// assumed to be a contiguous chain.
+func newInsertIterator(chain types.Blocks, results <-chan error, validator Validator) *insertIterator {
+ return &insertIterator{
+ chain: chain,
+ results: results,
+ index: -1,
+ validator: validator,
+ }
+}
+
+// next returns the next block in the iterator, along with any potential validation
+// error for that block. When the end is reached, it will return (nil, nil).
+func (it *insertIterator) next() (*types.Block, error) {
+ if it.index+1 >= len(it.chain) {
+ it.index = len(it.chain)
+ return nil, nil
+ }
+ it.index++
+ if err := <-it.results; err != nil {
+ return it.chain[it.index], err
+ }
+ return it.chain[it.index], it.validator.ValidateBody(it.chain[it.index])
+}
+
+// current returns the current block that's being processed.
+func (it *insertIterator) current() *types.Block {
+ if it.index < 0 || it.index >= len(it.chain) {
+ return nil
+ }
+ return it.chain[it.index]
+}
+
+// previous returns the previous block that was being processed, or nil
+func (it *insertIterator) previous() *types.Block {
+ if it.index < 1 {
+ return nil
+ }
+ return it.chain[it.index-1]
+}
+
+// first returns the first block in the iterator.
+func (it *insertIterator) first() *types.Block {
+ return it.chain[0]
+}
+
+// remaining returns the number of remaining blocks.
+func (it *insertIterator) remaining() int {
+ return len(it.chain) - it.index
+}
+
+// processed returns the number of processed blocks.
+func (it *insertIterator) processed() int {
+ return it.index + 1
+}
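The iterator's core trick is pairing each element of an in-order slice with the matching result from an asynchronous verification channel. A deliberately simplified, self-contained analogue of that pattern (strings instead of blocks, no validator):

    package main

    import (
        "errors"
        "fmt"
    )

    type iterator struct {
        items   []string
        results <-chan error // one result per item, delivered in order
        index   int
    }

    func (it *iterator) next() (string, error) {
        if it.index+1 >= len(it.items) {
            it.index = len(it.items)
            return "", nil
        }
        it.index++
        return it.items[it.index], <-it.results
    }

    func main() {
        results := make(chan error, 3)
        results <- nil
        results <- nil
        results <- errors.New("bad seal")

        it := &iterator{items: []string{"b1", "b2", "b3"}, results: results, index: -1}
        for item, err := it.next(); item != ""; item, err = it.next() {
            fmt.Println(item, err) // b1 <nil>, b2 <nil>, b3 bad seal
        }
    }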
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index aef810050..5ab29e205 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -579,11 +579,11 @@ func testInsertNonceError(t *testing.T, full bool) {
blockchain.hc.engine = blockchain.engine
failRes, err = blockchain.InsertHeaderChain(headers, 1)
}
- // Check that the returned error indicates the failure.
+ // Check that the returned error indicates the failure
if failRes != failAt {
- t.Errorf("test %d: failure index mismatch: have %d, want %d", i, failRes, failAt)
+ t.Errorf("test %d: failure (%v) index mismatch: have %d, want %d", i, err, failRes, failAt)
}
- // Check that all no blocks after the failing block have been inserted.
+ // Check that no blocks after the failing block have been inserted
for j := 0; j < i-failAt; j++ {
if full {
if block := blockchain.GetBlockByNumber(failNum + uint64(j)); block != nil {
@@ -1345,7 +1345,7 @@ func TestLargeReorgTrieGC(t *testing.T) {
t.Fatalf("failed to insert shared chain: %v", err)
}
if _, err := chain.InsertChain(original); err != nil {
- t.Fatalf("failed to insert shared chain: %v", err)
+ t.Fatalf("failed to insert original chain: %v", err)
}
// Ensure that the state associated with the forking point is pruned away
if node, _ := chain.stateCache.TrieDB().Node(shared[len(shared)-1].Root()); node != nil {
diff --git a/core/genesis.go b/core/genesis.go
index 6e71afd61..c96cb17a3 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -151,6 +151,9 @@ func (e *GenesisMismatchError) Error() string {
//
// The returned chain configuration is never nil.
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
+ return SetupGenesisBlockWithOverride(db, genesis, nil)
+}
+func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, constantinopleOverride *big.Int) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
@@ -178,6 +181,9 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
// Get the existing chain configuration.
newcfg := genesis.configOrDefault(stored)
+ if constantinopleOverride != nil {
+ newcfg.ConstantinopleBlock = constantinopleOverride
+ }
storedcfg := rawdb.ReadChainConfig(db, stored)
if storedcfg == nil {
log.Warn("Found genesis block without chain config")
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index 9ddae6e2b..fcc36dc2b 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -23,9 +23,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
// Tests block header storage and retrieval operations.
@@ -47,7 +47,7 @@ func TestHeaderStorage(t *testing.T) {
if entry := ReadHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil {
t.Fatalf("Stored header RLP not found")
} else {
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
hasher.Write(entry)
if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
@@ -68,7 +68,7 @@ func TestBodyStorage(t *testing.T) {
// Create a test body to move around the database and make sure it's really new
body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
rlp.Encode(hasher, body)
hash := common.BytesToHash(hasher.Sum(nil))
@@ -85,7 +85,7 @@ func TestBodyStorage(t *testing.T) {
if entry := ReadBodyRLP(db, hash, 0); entry == nil {
t.Fatalf("Stored body RLP not found")
} else {
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
hasher.Write(entry)
if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {
diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go
index 3b6e6548d..82e4bf045 100644
--- a/core/rawdb/accessors_metadata.go
+++ b/core/rawdb/accessors_metadata.go
@@ -26,19 +26,27 @@ import (
)
// ReadDatabaseVersion retrieves the version number of the database.
-func ReadDatabaseVersion(db DatabaseReader) int {
- var version int
+func ReadDatabaseVersion(db DatabaseReader) *uint64 {
+ var version uint64
enc, _ := db.Get(databaseVerisionKey)
- rlp.DecodeBytes(enc, &version)
+ if len(enc) == 0 {
+ return nil
+ }
+ if err := rlp.DecodeBytes(enc, &version); err != nil {
+ return nil
+ }
- return version
+ return &version
}
// WriteDatabaseVersion stores the version number of the database
-func WriteDatabaseVersion(db DatabaseWriter, version int) {
- enc, _ := rlp.EncodeToBytes(version)
- if err := db.Put(databaseVerisionKey, enc); err != nil {
+func WriteDatabaseVersion(db DatabaseWriter, version uint64) {
+ enc, err := rlp.EncodeToBytes(version)
+ if err != nil {
+ log.Crit("Failed to encode database version", "err", err)
+ }
+ if err = db.Put(databaseVerisionKey, enc); err != nil {
log.Crit("Failed to store the database version", "err", err)
}
}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 76e67d839..2230b10ef 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -468,9 +468,9 @@ func (self *StateDB) createObject(addr common.Address) (newobj, prev *stateObjec
//
// Carrying over the balance ensures that Ether doesn't disappear.
func (self *StateDB) CreateAccount(addr common.Address) {
- new, prev := self.createObject(addr)
+ newObj, prev := self.createObject(addr)
if prev != nil {
- new.setBalance(prev.data.Balance)
+ newObj.setBalance(prev.data.Balance)
}
}
diff --git a/core/tx_cacher.go b/core/tx_cacher.go
index bcaa5ead3..b1e5d676a 100644
--- a/core/tx_cacher.go
+++ b/core/tx_cacher.go
@@ -22,7 +22,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
)
-// senderCacher is a concurrent transaction sender recoverer anc cacher.
+// senderCacher is a concurrent transaction sender recoverer and cacher.
var senderCacher = newTxSenderCacher(runtime.NumCPU())
// txSenderCacherRequest is a request for recovering transaction senders with a
diff --git a/core/tx_pool.go b/core/tx_pool.go
index fc35d1f24..552d3692b 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -172,6 +172,26 @@ func (config *TxPoolConfig) sanitize() TxPoolConfig {
log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
conf.PriceBump = DefaultTxPoolConfig.PriceBump
}
+ if conf.AccountSlots < 1 {
+ log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots)
+ conf.AccountSlots = DefaultTxPoolConfig.AccountSlots
+ }
+ if conf.GlobalSlots < 1 {
+ log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots)
+ conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots
+ }
+ if conf.AccountQueue < 1 {
+ log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue)
+ conf.AccountQueue = DefaultTxPoolConfig.AccountQueue
+ }
+ if conf.GlobalQueue < 1 {
+ log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue)
+ conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue
+ }
+ if conf.Lifetime < 1 {
+ log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime)
+ conf.Lifetime = DefaultTxPoolConfig.Lifetime
+ }
return conf
}
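
Since sanitize is unexported and runs inside NewTxPool, callers only observe the clamping indirectly. A hedged sketch of the effect (chain stands in for a *core.BlockChain; imports assumed: core and params):

    cfg := core.DefaultTxPoolConfig
    cfg.GlobalSlots = 0 // invalid: no capacity for pending transactions at all
    pool := core.NewTxPool(cfg, params.MainnetChainConfig, chain) // sanitize() logs a warning and restores the default
    defer pool.Stop()
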
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go
index 5a5920544..6d3bd7a5a 100644
--- a/core/tx_pool_test.go
+++ b/core/tx_pool_test.go
@@ -1095,7 +1095,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
config := testTxPoolConfig
- config.GlobalSlots = 0
+ config.GlobalSlots = 1
pool := NewTxPool(config, params.TestChainConfig, blockchain)
defer pool.Stop()
diff --git a/core/types/block.go b/core/types/block.go
index 8a21bba1e..57905d8c7 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -28,8 +28,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
var (
@@ -81,8 +81,8 @@ type Header struct {
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Time *big.Int `json:"timestamp" gencodec:"required"`
Extra []byte `json:"extraData" gencodec:"required"`
- MixDigest common.Hash `json:"mixHash" gencodec:"required"`
- Nonce BlockNonce `json:"nonce" gencodec:"required"`
+ MixDigest common.Hash `json:"mixHash"`
+ Nonce BlockNonce `json:"nonce"`
}
// field type overrides for gencodec
@@ -109,7 +109,7 @@ func (h *Header) Size() common.StorageSize {
}
func rlpHash(x interface{}) (h common.Hash) {
- hw := sha3.NewKeccak256()
+ hw := sha3.NewLegacyKeccak256()
rlp.Encode(hw, x)
hw.Sum(h[:0])
return h
diff --git a/core/types/gen_header_json.go b/core/types/gen_header_json.go
index 1b92cd9cf..59a1c9c43 100644
--- a/core/types/gen_header_json.go
+++ b/core/types/gen_header_json.go
@@ -13,6 +13,7 @@ import (
var _ = (*headerMarshaling)(nil)
+// MarshalJSON marshals as JSON.
func (h Header) MarshalJSON() ([]byte, error) {
type Header struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
@@ -28,8 +29,8 @@ func (h Header) MarshalJSON() ([]byte, error) {
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time *hexutil.Big `json:"timestamp" gencodec:"required"`
Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
- MixDigest common.Hash `json:"mixHash" gencodec:"required"`
- Nonce BlockNonce `json:"nonce" gencodec:"required"`
+ MixDigest common.Hash `json:"mixHash"`
+ Nonce BlockNonce `json:"nonce"`
Hash common.Hash `json:"hash"`
}
var enc Header
@@ -52,6 +53,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
return json.Marshal(&enc)
}
+// UnmarshalJSON unmarshals from JSON.
func (h *Header) UnmarshalJSON(input []byte) error {
type Header struct {
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
@@ -67,8 +69,8 @@ func (h *Header) UnmarshalJSON(input []byte) error {
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time *hexutil.Big `json:"timestamp" gencodec:"required"`
Extra *hexutil.Bytes `json:"extraData" gencodec:"required"`
- MixDigest *common.Hash `json:"mixHash" gencodec:"required"`
- Nonce *BlockNonce `json:"nonce" gencodec:"required"`
+ MixDigest *common.Hash `json:"mixHash"`
+ Nonce *BlockNonce `json:"nonce"`
}
var dec Header
if err := json.Unmarshal(input, &dec); err != nil {
@@ -126,13 +128,11 @@ func (h *Header) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'extraData' for Header")
}
h.Extra = *dec.Extra
- if dec.MixDigest == nil {
- return errors.New("missing required field 'mixHash' for Header")
+ if dec.MixDigest != nil {
+ h.MixDigest = *dec.MixDigest
}
- h.MixDigest = *dec.MixDigest
- if dec.Nonce == nil {
- return errors.New("missing required field 'nonce' for Header")
+ if dec.Nonce != nil {
+ h.Nonce = *dec.Nonce
}
- h.Nonce = *dec.Nonce
return nil
}
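
With the gencodec:"required" tags dropped, header JSON that omits mixHash and nonce now decodes cleanly, the two fields simply keeping their zero values. A hedged sketch, assuming payload holds a header JSON blob with every field except those two (imports assumed: encoding/json, fmt, log, common, types):

    var h types.Header
    if err := json.Unmarshal(payload, &h); err != nil {
        log.Fatal(err) // previously: "missing required field 'mixHash' for Header"
    }
    fmt.Println(h.MixDigest == (common.Hash{}), h.Nonce == (types.BlockNonce{})) // true true
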
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 7b53cac2c..ba3d5de91 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -234,7 +234,7 @@ func (tx *Transaction) AsMessage(s Signer) (Message, error) {
}
// WithSignature returns a new transaction with the given signature.
-// This signature needs to be formatted as described in the yellow paper (v+27).
+// This signature needs to be in the [R || S || V] format where V is 0 or 1.
func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, error) {
r, s, v, err := signer.SignatureValues(tx, sig)
if err != nil {
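
The new wording matches what crypto.Sign already emits (65 bytes, R || S || V with V being 0 or 1), so a raw secp256k1 signature can be passed straight through. A hedged sketch; tx and key (*ecdsa.PrivateKey) are assumed to exist, and chain ID 1 is only for illustration:

    signer := types.NewEIP155Signer(big.NewInt(1))
    sig, err := crypto.Sign(signer.Hash(tx).Bytes(), key) // [R || S || V], V in {0, 1}
    if err != nil {
        log.Fatal(err)
    }
    signedTx, err := tx.WithSignature(signer, sig) // no +27 adjustment needed
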
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 968d2219e..ba4d1e9eb 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -339,6 +339,12 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
contract := NewContract(caller, to, new(big.Int), gas)
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
+ // We do an AddBalance of zero here, just in order to trigger a touch.
+ // This doesn't matter on Mainnet, where all empties are gone at the time of Byzantium,
+ // but is the correct thing to do and matters on other networks, in tests, and in
+ // potential future scenarios.
+ evm.StateDB.AddBalance(addr, bigZero)
+
// When an error was returned by the EVM or when setting the creation code
// above we revert to the snapshot and consume any gas remaining. Additionally
// when we're in Homestead this also counts for code storage gas errors.
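
A hedged sketch of why the zero-value AddBalance matters: it marks the account as touched, so under EIP-158 rules the empty object is deleted once the state is finalised (in-memory database purely for illustration; imports assumed: fmt, math/big, common, core/state, ethdb):

    statedb, _ := state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
    addr := common.HexToAddress("0xdeadbeef")
    statedb.AddBalance(addr, new(big.Int)) // zero-value touch, as in StaticCall above
    statedb.Finalise(true)                 // deleteEmptyObjects=true, i.e. EIP-158 rules
    fmt.Println(statedb.Exist(addr))       // false: the touched empty account was reaped
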
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 6696c6e3d..5195e716b 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -24,8 +24,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/params"
+ "golang.org/x/crypto/sha3"
)
var (
@@ -387,7 +387,7 @@ func opSha3(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory
data := memory.Get(offset.Int64(), size.Int64())
if interpreter.hasher == nil {
- interpreter.hasher = sha3.NewKeccak256().(keccakState)
+ interpreter.hasher = sha3.NewLegacyKeccak256().(keccakState)
} else {
interpreter.hasher.Reset()
}
diff --git a/cmd/evm/json_logger.go b/core/vm/logger_json.go
similarity index 81%
rename from cmd/evm/json_logger.go
rename to core/vm/logger_json.go
index 50cb4f0e4..ac3c40759 100644
--- a/cmd/evm/json_logger.go
+++ b/core/vm/logger_json.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
-package main
+package vm
import (
"encoding/json"
@@ -24,17 +24,16 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
- "github.com/ethereum/go-ethereum/core/vm"
)
type JSONLogger struct {
encoder *json.Encoder
- cfg *vm.LogConfig
+ cfg *LogConfig
}
// NewJSONLogger creates a new EVM tracer that prints execution steps as JSON objects
// into the provided stream.
-func NewJSONLogger(cfg *vm.LogConfig, writer io.Writer) *JSONLogger {
+func NewJSONLogger(cfg *LogConfig, writer io.Writer) *JSONLogger {
return &JSONLogger{json.NewEncoder(writer), cfg}
}
@@ -43,8 +42,8 @@ func (l *JSONLogger) CaptureStart(from common.Address, to common.Address, create
}
// CaptureState outputs state information on the logger.
-func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
- log := vm.StructLog{
+func (l *JSONLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
+ log := StructLog{
Pc: pc,
Op: op,
Gas: gas,
@@ -65,7 +64,7 @@ func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cos
}
// CaptureFault outputs state information on the logger.
-func (l *JSONLogger) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
+func (l *JSONLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
return nil
}
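
With the logger now living in core/vm, any embedder can stream structured traces without importing cmd/evm. A hedged sketch; vmctx, statedb and chainConfig are assumed to be prepared as in the tracing code later in this diff:

    cfg := vm.Config{
        Debug:  true,
        Tracer: vm.NewJSONLogger(&vm.LogConfig{}, os.Stdout), // one JSON object per EVM step
    }
    evm := vm.NewEVM(vmctx, statedb, chainConfig, cfg)
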
diff --git a/crypto/crypto.go b/crypto/crypto.go
index 9b3e76d40..4567fafc7 100644
--- a/crypto/crypto.go
+++ b/crypto/crypto.go
@@ -30,8 +30,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
var (
@@ -43,7 +43,7 @@ var errInvalidPubkey = errors.New("invalid secp256k1 public key")
// Keccak256 calculates and returns the Keccak256 hash of the input data.
func Keccak256(data ...[]byte) []byte {
- d := sha3.NewKeccak256()
+ d := sha3.NewLegacyKeccak256()
for _, b := range data {
d.Write(b)
}
@@ -53,7 +53,7 @@ func Keccak256(data ...[]byte) []byte {
// Keccak256Hash calculates and returns the Keccak256 hash of the input data,
// converting it to an internal Hash data structure.
func Keccak256Hash(data ...[]byte) (h common.Hash) {
- d := sha3.NewKeccak256()
+ d := sha3.NewLegacyKeccak256()
for _, b := range data {
d.Write(b)
}
@@ -63,7 +63,7 @@ func Keccak256Hash(data ...[]byte) (h common.Hash) {
// Keccak512 calculates and returns the Keccak512 hash of the input data.
func Keccak512(data ...[]byte) []byte {
- d := sha3.NewKeccak512()
+ d := sha3.NewLegacyKeccak512()
for _, b := range data {
d.Write(b)
}
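
The migration repeated throughout this diff, as one self-contained program: the vendored crypto/sha3 package is removed, and upstream golang.org/x/crypto/sha3 exposes the original pre-NIST padding under "legacy" constructors:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    func main() {
        d := sha3.NewLegacyKeccak256() // was: sha3.NewKeccak256() from the vendored package
        d.Write([]byte("hello"))
        fmt.Printf("%x\n", d.Sum(nil)) // 1c8aff950685c2ed4bc3174f3472287b56d9517b9c948127319a09a7a36deac8
    }
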
diff --git a/crypto/secp256k1/curve.go b/crypto/secp256k1/curve.go
index 56be235b3..5409ee1d2 100644
--- a/crypto/secp256k1/curve.go
+++ b/crypto/secp256k1/curve.go
@@ -310,7 +310,7 @@ var theCurve = new(BitCurve)
func init() {
// See SEC 2 section 2.7.1
// curve parameters taken from:
- // http://www.secg.org/collateral/sec2_final.pdf
+ // http://www.secg.org/sec2-v2.pdf
theCurve.P, _ = new(big.Int).SetString("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 0)
theCurve.N, _ = new(big.Int).SetString("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 0)
theCurve.B, _ = new(big.Int).SetString("0x0000000000000000000000000000000000000000000000000000000000000007", 0)
diff --git a/crypto/sha3/LICENSE b/crypto/sha3/LICENSE
deleted file mode 100644
index 6a66aea5e..000000000
--- a/crypto/sha3/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/crypto/sha3/PATENTS b/crypto/sha3/PATENTS
deleted file mode 100644
index 733099041..000000000
--- a/crypto/sha3/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/crypto/sha3/sha3_test.go b/crypto/sha3/sha3_test.go
deleted file mode 100644
index 0e33676ce..000000000
--- a/crypto/sha3/sha3_test.go
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// Tests include all the ShortMsgKATs provided by the Keccak team at
-// https://github.com/gvanas/KeccakCodePackage
-//
-// They only include the zero-bit case of the bitwise testvectors
-// published by NIST in the draft of FIPS-202.
-
-import (
- "bytes"
- "compress/flate"
- "encoding/hex"
- "encoding/json"
- "hash"
- "os"
- "strings"
- "testing"
-)
-
-const (
- testString = "brekeccakkeccak koax koax"
- katFilename = "testdata/keccakKats.json.deflate"
-)
-
-// Internal-use instances of SHAKE used to test against KATs.
-func newHashShake128() hash.Hash {
- return &state{rate: 168, dsbyte: 0x1f, outputLen: 512}
-}
-func newHashShake256() hash.Hash {
- return &state{rate: 136, dsbyte: 0x1f, outputLen: 512}
-}
-
-// testDigests contains functions returning hash.Hash instances
-// with output-length equal to the KAT length for both SHA-3 and
-// SHAKE instances.
-var testDigests = map[string]func() hash.Hash{
- "SHA3-224": New224,
- "SHA3-256": New256,
- "SHA3-384": New384,
- "SHA3-512": New512,
- "SHAKE128": newHashShake128,
- "SHAKE256": newHashShake256,
-}
-
-// testShakes contains functions that return ShakeHash instances for
-// testing the ShakeHash-specific interface.
-var testShakes = map[string]func() ShakeHash{
- "SHAKE128": NewShake128,
- "SHAKE256": NewShake256,
-}
-
-// structs used to marshal JSON test-cases.
-type KeccakKats struct {
- Kats map[string][]struct {
- Digest string `json:"digest"`
- Length int64 `json:"length"`
- Message string `json:"message"`
- }
-}
-
-func testUnalignedAndGeneric(t *testing.T, testf func(impl string)) {
- xorInOrig, copyOutOrig := xorIn, copyOut
- xorIn, copyOut = xorInGeneric, copyOutGeneric
- testf("generic")
- if xorImplementationUnaligned != "generic" {
- xorIn, copyOut = xorInUnaligned, copyOutUnaligned
- testf("unaligned")
- }
- xorIn, copyOut = xorInOrig, copyOutOrig
-}
-
-// TestKeccakKats tests the SHA-3 and Shake implementations against all the
-// ShortMsgKATs from https://github.com/gvanas/KeccakCodePackage
-// (The testvectors are stored in keccakKats.json.deflate due to their length.)
-func TestKeccakKats(t *testing.T) {
- testUnalignedAndGeneric(t, func(impl string) {
- // Read the KATs.
- deflated, err := os.Open(katFilename)
- if err != nil {
- t.Errorf("error opening %s: %s", katFilename, err)
- }
- file := flate.NewReader(deflated)
- dec := json.NewDecoder(file)
- var katSet KeccakKats
- err = dec.Decode(&katSet)
- if err != nil {
- t.Errorf("error decoding KATs: %s", err)
- }
-
- // Do the KATs.
- for functionName, kats := range katSet.Kats {
- d := testDigests[functionName]()
- for _, kat := range kats {
- d.Reset()
- in, err := hex.DecodeString(kat.Message)
- if err != nil {
- t.Errorf("error decoding KAT: %s", err)
- }
- d.Write(in[:kat.Length/8])
- got := strings.ToUpper(hex.EncodeToString(d.Sum(nil)))
- if got != kat.Digest {
- t.Errorf("function=%s, implementation=%s, length=%d\nmessage:\n %s\ngot:\n %s\nwanted:\n %s",
- functionName, impl, kat.Length, kat.Message, got, kat.Digest)
- t.Logf("wanted %+v", kat)
- t.FailNow()
- }
- continue
- }
- }
- })
-}
-
-// TestUnalignedWrite tests that writing data in an arbitrary pattern with
-// small input buffers.
-func TestUnalignedWrite(t *testing.T) {
- testUnalignedAndGeneric(t, func(impl string) {
- buf := sequentialBytes(0x10000)
- for alg, df := range testDigests {
- d := df()
- d.Reset()
- d.Write(buf)
- want := d.Sum(nil)
- d.Reset()
- for i := 0; i < len(buf); {
- // Cycle through offsets which make a 137 byte sequence.
- // Because 137 is prime this sequence should exercise all corner cases.
- offsets := [17]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1}
- for _, j := range offsets {
- if v := len(buf) - i; v < j {
- j = v
- }
- d.Write(buf[i : i+j])
- i += j
- }
- }
- got := d.Sum(nil)
- if !bytes.Equal(got, want) {
- t.Errorf("Unaligned writes, implementation=%s, alg=%s\ngot %q, want %q", impl, alg, got, want)
- }
- }
- })
-}
-
-// TestAppend checks that appending works when reallocation is necessary.
-func TestAppend(t *testing.T) {
- testUnalignedAndGeneric(t, func(impl string) {
- d := New224()
-
- for capacity := 2; capacity <= 66; capacity += 64 {
- // The first time around the loop, Sum will have to reallocate.
- // The second time, it will not.
- buf := make([]byte, 2, capacity)
- d.Reset()
- d.Write([]byte{0xcc})
- buf = d.Sum(buf)
- expected := "0000DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
- if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
- t.Errorf("got %s, want %s", got, expected)
- }
- }
- })
-}
-
-// TestAppendNoRealloc tests that appending works when no reallocation is necessary.
-func TestAppendNoRealloc(t *testing.T) {
- testUnalignedAndGeneric(t, func(impl string) {
- buf := make([]byte, 1, 200)
- d := New224()
- d.Write([]byte{0xcc})
- buf = d.Sum(buf)
- expected := "00DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
- if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
- t.Errorf("%s: got %s, want %s", impl, got, expected)
- }
- })
-}
-
-// TestSqueezing checks that squeezing the full output a single time produces
-// the same output as repeatedly squeezing the instance.
-func TestSqueezing(t *testing.T) {
- testUnalignedAndGeneric(t, func(impl string) {
- for functionName, newShakeHash := range testShakes {
- d0 := newShakeHash()
- d0.Write([]byte(testString))
- ref := make([]byte, 32)
- d0.Read(ref)
-
- d1 := newShakeHash()
- d1.Write([]byte(testString))
- var multiple []byte
- for range ref {
- one := make([]byte, 1)
- d1.Read(one)
- multiple = append(multiple, one...)
- }
- if !bytes.Equal(ref, multiple) {
- t.Errorf("%s (%s): squeezing %d bytes one at a time failed", functionName, impl, len(ref))
- }
- }
- })
-}
-
-// sequentialBytes produces a buffer of size consecutive bytes 0x00, 0x01, ..., used for testing.
-func sequentialBytes(size int) []byte {
- result := make([]byte, size)
- for i := range result {
- result[i] = byte(i)
- }
- return result
-}
-
-// BenchmarkPermutationFunction measures the speed of the permutation function
-// with no input data.
-func BenchmarkPermutationFunction(b *testing.B) {
- b.SetBytes(int64(200))
- var lanes [25]uint64
- for i := 0; i < b.N; i++ {
- keccakF1600(&lanes)
- }
-}
-
-// benchmarkHash tests the speed to hash num buffers of buflen each.
-func benchmarkHash(b *testing.B, h hash.Hash, size, num int) {
- b.StopTimer()
- h.Reset()
- data := sequentialBytes(size)
- b.SetBytes(int64(size * num))
- b.StartTimer()
-
- var state []byte
- for i := 0; i < b.N; i++ {
- for j := 0; j < num; j++ {
- h.Write(data)
- }
- state = h.Sum(state[:0])
- }
- b.StopTimer()
- h.Reset()
-}
-
-// benchmarkShake is specialized to the Shake instances, which don't
-// require a copy on reading output.
-func benchmarkShake(b *testing.B, h ShakeHash, size, num int) {
- b.StopTimer()
- h.Reset()
- data := sequentialBytes(size)
- d := make([]byte, 32)
-
- b.SetBytes(int64(size * num))
- b.StartTimer()
-
- for i := 0; i < b.N; i++ {
- h.Reset()
- for j := 0; j < num; j++ {
- h.Write(data)
- }
- h.Read(d)
- }
-}
-
-func BenchmarkSha3_512_MTU(b *testing.B) { benchmarkHash(b, New512(), 1350, 1) }
-func BenchmarkSha3_384_MTU(b *testing.B) { benchmarkHash(b, New384(), 1350, 1) }
-func BenchmarkSha3_256_MTU(b *testing.B) { benchmarkHash(b, New256(), 1350, 1) }
-func BenchmarkSha3_224_MTU(b *testing.B) { benchmarkHash(b, New224(), 1350, 1) }
-
-func BenchmarkShake128_MTU(b *testing.B) { benchmarkShake(b, NewShake128(), 1350, 1) }
-func BenchmarkShake256_MTU(b *testing.B) { benchmarkShake(b, NewShake256(), 1350, 1) }
-func BenchmarkShake256_16x(b *testing.B) { benchmarkShake(b, NewShake256(), 16, 1024) }
-func BenchmarkShake256_1MiB(b *testing.B) { benchmarkShake(b, NewShake256(), 1024, 1024) }
-
-func BenchmarkSha3_512_1MiB(b *testing.B) { benchmarkHash(b, New512(), 1024, 1024) }
-
-func Example_sum() {
- buf := []byte("some data to hash")
- // A hash needs to be 64 bytes long to have 256-bit collision resistance.
- h := make([]byte, 64)
- // Compute a 64-byte hash of buf and put it in h.
- ShakeSum256(h, buf)
-}
-
-func Example_mac() {
- k := []byte("this is a secret key; you should generate a strong random key that's at least 32 bytes long")
- buf := []byte("and this is some data to authenticate")
- // A MAC with 32 bytes of output has 256-bit security strength -- if you use at least a 32-byte-long key.
- h := make([]byte, 32)
- d := NewShake256()
- // Write the key into the hash.
- d.Write(k)
- // Now write the data.
- d.Write(buf)
- // Read 32 bytes of output from the hash into h.
- d.Read(h)
-}
diff --git a/crypto/sha3/testdata/keccakKats.json.deflate b/crypto/sha3/testdata/keccakKats.json.deflate
deleted file mode 100644
index 62e85ae24..000000000
Binary files a/crypto/sha3/testdata/keccakKats.json.deflate and /dev/null differ
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 8748d444f..a48815e0d 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -125,12 +125,12 @@ func (b *EthAPIBackend) GetTd(blockHash common.Hash) *big.Int {
return b.eth.blockchain.GetTdByHash(blockHash)
}
-func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmCfg vm.Config) (*vm.EVM, func() error, error) {
+func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) {
state.SetBalance(msg.From(), math.MaxBig256)
vmError := func() error { return nil }
context := core.NewEVMContext(msg, header, b.eth.BlockChain(), nil)
- return vm.NewEVM(context, state, b.eth.chainConfig, vmCfg), vmError, nil
+ return vm.NewEVM(context, state, b.eth.chainConfig, *b.eth.blockchain.GetVMConfig()), vmError, nil
}
func (b *EthAPIBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
diff --git a/eth/api_tracer.go b/eth/api_tracer.go
index 2ebbcc5fd..0b8f8aa00 100644
--- a/eth/api_tracer.go
+++ b/eth/api_tracer.go
@@ -17,11 +17,13 @@
package eth
import (
+ "bufio"
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
+ "os"
"runtime"
"sync"
"time"
@@ -60,6 +62,13 @@ type TraceConfig struct {
Reexec *uint64
}
+// StdTraceConfig holds extra parameters to standard-json trace functions.
+type StdTraceConfig struct {
+ *vm.LogConfig
+ Reexec *uint64
+ TxHash common.Hash
+}
+
// txTraceResult is the result of a single transaction trace.
type txTraceResult struct {
Result interface{} `json:"result,omitempty"` // Trace results produced by the tracer
@@ -366,7 +375,7 @@ func (api *PrivateDebugAPI) TraceBlockByNumber(ctx context.Context, number rpc.B
func (api *PrivateDebugAPI) TraceBlockByHash(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {
block := api.eth.blockchain.GetBlockByHash(hash)
if block == nil {
- return nil, fmt.Errorf("block #%x not found", hash)
+ return nil, fmt.Errorf("block %#x not found", hash)
}
return api.traceBlock(ctx, block, config)
}
@@ -391,13 +400,41 @@ func (api *PrivateDebugAPI) TraceBlockFromFile(ctx context.Context, file string,
return api.TraceBlock(ctx, blob, config)
}
-// TraceBadBlock returns the structured logs created during the execution of a block
-// within the blockchain 'badblocks' cache
-func (api *PrivateDebugAPI) TraceBadBlock(ctx context.Context, index int, config *TraceConfig) ([]*txTraceResult, error) {
- if blocks := api.eth.blockchain.BadBlocks(); index < len(blocks) {
- return api.traceBlock(ctx, blocks[index], config)
+// TraceBadBlock returns the structured logs created during the execution of the
+// EVM against a block pulled from the pool of bad ones and returns them as a JSON
+// object.
+func (api *PrivateDebugAPI) TraceBadBlock(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {
+ blocks := api.eth.blockchain.BadBlocks()
+ for _, block := range blocks {
+ if block.Hash() == hash {
+ return api.traceBlock(ctx, block, config)
+ }
}
- return nil, fmt.Errorf("index out of range")
+ return nil, fmt.Errorf("bad block %#x not found", hash)
+}
+
+// StandardTraceBlockToFile dumps the structured logs created during the
+// execution of the EVM to the local file system and returns a list of files
+// to the caller.
+func (api *PrivateDebugAPI) StandardTraceBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) {
+ block := api.eth.blockchain.GetBlockByHash(hash)
+ if block == nil {
+ return nil, fmt.Errorf("block %#x not found", hash)
+ }
+ return api.standardTraceBlockToFile(ctx, block, config)
+}
+
+// StandardTraceBadBlockToFile dumps the structured logs created during the
+// execution of the EVM against a block pulled from the pool of bad ones to the
+// local file system and returns a list of files to the caller.
+func (api *PrivateDebugAPI) StandardTraceBadBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) {
+ blocks := api.eth.blockchain.BadBlocks()
+ for _, block := range blocks {
+ if block.Hash() == hash {
+ return api.standardTraceBlockToFile(ctx, block, config)
+ }
+ }
+ return nil, fmt.Errorf("bad block %#x not found", hash)
}
// traceBlock configures a new tracer according to the provided configuration, and
@@ -410,7 +447,7 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
}
parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
if parent == nil {
- return nil, fmt.Errorf("parent %x not found", block.ParentHash())
+ return nil, fmt.Errorf("parent %#x not found", block.ParentHash())
}
reexec := defaultTraceReexec
if config != nil && config.Reexec != nil {
@@ -481,6 +518,106 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
return results, nil
}
+// standardTraceBlockToFile configures a new tracer which uses standard JSON output,
+// and traces either a full block or an individual transaction. The return value will
+// be one filename per transaction traced.
+func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block *types.Block, config *StdTraceConfig) ([]string, error) {
+ // If we're tracing a single transaction, make sure it's present
+ if config != nil && config.TxHash != (common.Hash{}) {
+ var exists bool
+ for _, tx := range block.Transactions() {
+ if exists = (tx.Hash() == config.TxHash); exists {
+ break
+ }
+ }
+ if !exists {
+ return nil, fmt.Errorf("transaction %#x not found in block", config.TxHash)
+ }
+ }
+ // Create the parent state database
+ if err := api.eth.engine.VerifyHeader(api.eth.blockchain, block.Header(), true); err != nil {
+ return nil, err
+ }
+ parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
+ if parent == nil {
+ return nil, fmt.Errorf("parent %#x not found", block.ParentHash())
+ }
+ reexec := defaultTraceReexec
+ if config != nil && config.Reexec != nil {
+ reexec = *config.Reexec
+ }
+ statedb, err := api.computeStateDB(parent, reexec)
+ if err != nil {
+ return nil, err
+ }
+ // Retrieve the tracing configurations, or use default values
+ var (
+ logConfig vm.LogConfig
+ txHash common.Hash
+ )
+ if config != nil {
+ if config.LogConfig != nil {
+ logConfig = *config.LogConfig
+ }
+ txHash = config.TxHash
+ }
+ logConfig.Debug = true
+
+ // Execute transaction, either tracing all or just the requested one
+ var (
+ signer = types.MakeSigner(api.config, block.Number())
+ dumps []string
+ )
+ for i, tx := range block.Transactions() {
+ // Prepare the transaction for un-traced execution
+ var (
+ msg, _ = tx.AsMessage(signer)
+ vmctx = core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil)
+
+ vmConf vm.Config
+ dump *os.File
+ err error
+ )
+ // If the transaction needs tracing, swap out the configs
+ if tx.Hash() == txHash || txHash == (common.Hash{}) {
+ // Generate a unique temporary file to dump it into
+ prefix := fmt.Sprintf("block_%#x-%d-%#x-", block.Hash().Bytes()[:4], i, tx.Hash().Bytes()[:4])
+
+ dump, err = ioutil.TempFile(os.TempDir(), prefix)
+ if err != nil {
+ return nil, err
+ }
+ dumps = append(dumps, dump.Name())
+
+ // Swap out the noop logger to the standard tracer
+ vmConf = vm.Config{
+ Debug: true,
+ Tracer: vm.NewJSONLogger(&logConfig, bufio.NewWriter(dump)),
+ EnablePreimageRecording: true,
+ }
+ }
+ // Execute the transaction and flush any traces to disk
+ vmenv := vm.NewEVM(vmctx, statedb, api.config, vmConf)
+ _, _, _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()))
+
+ if dump != nil {
+ dump.Close()
+ log.Info("Wrote standard trace", "file", dump.Name())
+ }
+ if err != nil {
+ return dumps, err
+ }
+ // Finalize the state so any modifications are written to the trie
+ statedb.Finalise(true)
+
+ // If we've traced the transaction we were looking for, abort
+ if tx.Hash() == txHash {
+ break
+ }
+ }
+ return dumps, nil
+}
+
// computeStateDB retrieves the state database associated with a certain block.
// If no state is locally available for the given block, a number of blocks are
// attempted to be reexecuted to generate the desired state.
@@ -506,7 +643,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
if err != nil {
switch err.(type) {
case *trie.MissingNodeError:
- return nil, errors.New("required historical state unavailable")
+ return nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec)
default:
return nil, err
}
@@ -520,7 +657,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
for block.NumberU64() < origin {
// Print progress logs if long enough time elapsed
if time.Since(logged) > 8*time.Second {
- log.Info("Regenerating historical state", "block", block.NumberU64()+1, "target", origin, "elapsed", time.Since(start))
+ log.Info("Regenerating historical state", "block", block.NumberU64()+1, "target", origin, "remaining", origin-block.NumberU64()-1, "elapsed", time.Since(start))
logged = time.Now()
}
// Retrieve the next block to regenerate and process it
@@ -529,15 +666,15 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
}
_, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, vm.Config{})
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("processing block %d failed: %v", block.NumberU64(), err)
}
// Finalize the state so any modifications are written to the trie
- root, err := statedb.Commit(true)
+ root, err := statedb.Commit(api.eth.blockchain.Config().IsEIP158(block.Number()))
if err != nil {
return nil, err
}
if err := statedb.Reset(root); err != nil {
- return nil, err
+ return nil, fmt.Errorf("state reset after block %d failed: %v", block.NumberU64(), err)
}
database.TrieDB().Reference(root, common.Hash{})
if proot != (common.Hash{}) {
@@ -556,7 +693,7 @@ func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, hash common.Ha
// Retrieve the transaction and assemble its EVM context
tx, blockHash, _, index := rawdb.ReadTransaction(api.eth.ChainDb(), hash)
if tx == nil {
- return nil, fmt.Errorf("transaction %x not found", hash)
+ return nil, fmt.Errorf("transaction %#x not found", hash)
}
reexec := defaultTraceReexec
if config != nil && config.Reexec != nil {
@@ -636,11 +773,11 @@ func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, ree
// Create the parent state database
block := api.eth.blockchain.GetBlockByHash(blockHash)
if block == nil {
- return nil, vm.Context{}, nil, fmt.Errorf("block %x not found", blockHash)
+ return nil, vm.Context{}, nil, fmt.Errorf("block %#x not found", blockHash)
}
parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
if parent == nil {
- return nil, vm.Context{}, nil, fmt.Errorf("parent %x not found", block.ParentHash())
+ return nil, vm.Context{}, nil, fmt.Errorf("parent %#x not found", block.ParentHash())
}
statedb, err := api.computeStateDB(parent, reexec)
if err != nil {
@@ -659,10 +796,10 @@ func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, ree
// Not yet the searched for transaction, execute on top of the current state
vmenv := vm.NewEVM(context, statedb, api.config, vm.Config{})
if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
- return nil, vm.Context{}, nil, fmt.Errorf("tx %x failed: %v", tx.Hash(), err)
+ return nil, vm.Context{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
}
// Ensure any modifications are committed to the state
statedb.Finalise(true)
}
- return nil, vm.Context{}, nil, fmt.Errorf("tx index %d out of range for block %x", txIndex, blockHash)
+ return nil, vm.Context{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, blockHash)
}
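
A hedged sketch of driving the new tracer over RPC from Go. The endpoint and blockHash are assumptions, and the lower-case config keys rely on encoding/json's case-insensitive field matching, since StdTraceConfig carries no JSON tags:

    client, err := rpc.Dial("ws://127.0.0.1:8546") // assumed node with the debug API exposed
    if err != nil {
        log.Fatal(err)
    }
    var files []string // one temp-file path per traced transaction
    err = client.Call(&files, "debug_standardTraceBlockToFile",
        blockHash, map[string]interface{}{"disableMemory": true})
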
diff --git a/eth/backend.go b/eth/backend.go
index 0de0a1980..7900eb474 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -118,7 +118,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if err != nil {
return nil, err
}
- chainConfig, genesisHash, genesisErr := core.SetupGenesisBlock(chainDb, config.Genesis)
+ chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.ConstantinopleOverride)
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
return nil, genesisErr
}
@@ -143,8 +143,10 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if !config.SkipBcVersionCheck {
bcVersion := rawdb.ReadDatabaseVersion(chainDb)
- if bcVersion != core.BlockChainVersion && bcVersion != 0 {
- return nil, fmt.Errorf("Blockchain DB version mismatch (%d / %d).\n", bcVersion, core.BlockChainVersion)
+ if bcVersion != nil && *bcVersion > core.BlockChainVersion {
+ return nil, fmt.Errorf("database version is v%d, Geth %s only supports v%d", *bcVersion, params.VersionWithMeta, core.BlockChainVersion)
+ } else if bcVersion != nil && *bcVersion < core.BlockChainVersion {
+ log.Warn("Upgrade blockchain database version", "from", *bcVersion, "to", core.BlockChainVersion)
}
rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion)
}
@@ -178,7 +180,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
}
eth.txPool = core.NewTxPool(config.TxPool, eth.chainConfig, eth.blockchain)
- if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.SyncMode, config.NetworkId, eth.eventMux, eth.txPool, eth.engine, eth.blockchain, chainDb); err != nil {
+ if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.SyncMode, config.NetworkId, eth.eventMux, eth.txPool, eth.engine, eth.blockchain, chainDb, config.Whitelist); err != nil {
return nil, err
}
diff --git a/eth/config.go b/eth/config.go
index 601f4735e..7c041d1af 100644
--- a/eth/config.go
+++ b/eth/config.go
@@ -87,6 +87,9 @@ type Config struct {
SyncMode downloader.SyncMode
NoPruning bool
+ // Whitelist of required block number -> hash values to accept
+ Whitelist map[uint64]common.Hash `toml:"-"`
+
// Light client options
LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests
LightPeers int `toml:",omitempty"` // Maximum number of LES client peers
@@ -126,8 +129,12 @@ type Config struct {
// Type of the EWASM interpreter ("" for default)
EWASMInterpreter string
+
// Type of the EVM interpreter ("" for default)
EVMInterpreter string
+
+ // Constantinople block override (TODO: remove after the fork)
+ ConstantinopleOverride *big.Int
}
type configMarshaling struct {
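
A hedged sketch of populating the new field from "number=hash" pairs, the shape the accompanying --whitelist CLI flag parses elsewhere in this release; whitelistFlag is an assumed comma-separated input string (imports assumed: strings, strconv, log, common, eth):

    whitelist := make(map[uint64]common.Hash)
    for _, entry := range strings.Split(whitelistFlag, ",") {
        parts := strings.SplitN(entry, "=", 2)
        number, err := strconv.ParseUint(parts[0], 10, 64)
        if err != nil || len(parts) != 2 {
            log.Fatalf("invalid whitelist entry %q", entry)
        }
        whitelist[number] = common.HexToHash(parts[1])
    }
    cfg := eth.Config{Whitelist: whitelist}
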
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index f81a5cbac..4db689f73 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -99,6 +99,7 @@ type Downloader struct {
mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle)
mux *event.TypeMux // Event multiplexer to announce sync operation events
+ genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT)
queue *queue // Scheduler for selecting the hashes to download
peers *peerSet // Set of active peers from which download can proceed
stateDB ethdb.Database
@@ -664,7 +665,28 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header)
}
p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
if localHeight >= MaxForkAncestry {
+ // We're above the max reorg threshold; find the earliest fork point
floor = int64(localHeight - MaxForkAncestry)
+
+ // If we're doing a light sync, ensure the floor doesn't go below the CHT, as
+ // all headers before that point will be missing.
+ if d.mode == LightSync {
+ // If we dont know the current CHT position, find it
+ if d.genesis == 0 {
+ header := d.lightchain.CurrentHeader()
+ for header != nil {
+ d.genesis = header.Number.Uint64()
+ if floor >= int64(d.genesis)-1 {
+ break
+ }
+ header = d.lightchain.GetHeaderByHash(header.ParentHash)
+ }
+ }
+ // We already know the "genesis" block number, cap floor to that
+ if floor < int64(d.genesis)-1 {
+ floor = int64(d.genesis) - 1
+ }
+ }
}
from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
@@ -1466,7 +1488,15 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error {
blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
}
if index, err := d.blockchain.InsertChain(blocks); err != nil {
- log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
+ if index < len(results) {
+ log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
+ } else {
+ // The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
+ // when it needs to preprocess blocks to import a sidechain.
+ // The importer will put together a new list of blocks to import, which is a superset
+ // of the blocks delivered from the downloader, and the indexing will be off.
+ log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
+ }
return errInvalidChain
}
return nil
diff --git a/eth/downloader/statesync.go b/eth/downloader/statesync.go
index 29d5ee4dd..0675a91cd 100644
--- a/eth/downloader/statesync.go
+++ b/eth/downloader/statesync.go
@@ -25,10 +25,10 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
+ "golang.org/x/crypto/sha3"
)
// stateReq represents a batch of state fetch requests grouped together into
@@ -152,7 +152,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
finished = append(finished, req)
delete(active, pack.PeerId())
- // Handle dropped peer connections:
+ // Handle dropped peer connections:
case p := <-peerDrop:
// Skip if no request is currently pending
req := active[p.id]
@@ -240,7 +240,7 @@ func newStateSync(d *Downloader, root common.Hash) *stateSync {
return &stateSync{
d: d,
sched: state.NewStateSync(root, d.stateDB),
- keccak: sha3.NewKeccak256(),
+ keccak: sha3.NewLegacyKeccak256(),
tasks: make(map[common.Hash]*stateTask),
deliver: make(chan *stateReq),
cancel: make(chan struct{}),
@@ -398,9 +398,8 @@ func (s *stateSync) fillTasks(n int, req *stateReq) {
// process iterates over a batch of delivered state data, injecting each item
// into a running state sync, re-queuing any items that were requested but not
-// delivered.
-// Returns whether the peer actually managed to deliver anything of value,
-// and any error that occurred
+// delivered. Returns whether the peer actually managed to deliver anything of
+// value, and any error that occurred.
func (s *stateSync) process(req *stateReq) (int, error) {
// Collect processing stats and update progress if valid data was received
duplicate, unexpected, successful := 0, 0, 0
@@ -412,14 +411,12 @@ func (s *stateSync) process(req *stateReq) (int, error) {
}(time.Now())
// Iterate over all the delivered data and inject one-by-one into the trie
- progress := false
for _, blob := range req.response {
- prog, hash, err := s.processNodeData(blob)
+ _, hash, err := s.processNodeData(blob)
switch err {
case nil:
s.numUncommitted++
s.bytesUncommitted += len(blob)
- progress = progress || prog
successful++
case trie.ErrNotRequested:
unexpected++
diff --git a/eth/handler.go b/eth/handler.go
index 741fc9d5a..b42612a56 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -88,6 +88,8 @@ type ProtocolManager struct {
txsSub event.Subscription
minedBlockSub *event.TypeMuxSubscription
+ whitelist map[uint64]common.Hash
+
// channels for fetcher, syncer, txsyncLoop
newPeerCh chan *peer
txsyncCh chan *txsync
@@ -101,7 +103,7 @@ type ProtocolManager struct {
// NewProtocolManager returns a new Ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// with the Ethereum network.
-func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkID uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
+func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkID uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database, whitelist map[uint64]common.Hash) (*ProtocolManager, error) {
// Create the protocol manager with the base fields
manager := &ProtocolManager{
networkID: networkID,
@@ -110,6 +112,7 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
blockchain: blockchain,
chainconfig: config,
peers: newPeerSet(),
+ whitelist: whitelist,
newPeerCh: make(chan *peer),
noMorePeers: make(chan struct{}),
txsyncCh: make(chan *txsync),
@@ -307,7 +310,13 @@ func (pm *ProtocolManager) handle(p *peer) error {
}
}()
}
- // main loop. handle incoming messages.
+ // If we have any explicit whitelist block hashes, request them
+ for number := range pm.whitelist {
+ if err := p.RequestHeadersByNumber(number, 1, 0, false); err != nil {
+ return err
+ }
+ }
+ // Handle incoming messages until the connection is torn down
for {
if err := pm.handleMsg(p); err != nil {
p.Log().Debug("Ethereum message handling failed", "err", err)
@@ -466,6 +475,14 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
p.Log().Debug("Verified to be on the same side of the DAO fork")
return nil
}
+ // Otherwise if it's a whitelisted block, validate against the set
+ if want, ok := pm.whitelist[headers[0].Number.Uint64()]; ok {
+ if hash := headers[0].Hash(); want != hash {
+ p.Log().Info("Whitelist mismatch, dropping peer", "number", headers[0].Number.Uint64(), "hash", hash, "want", want)
+ return errors.New("whitelist block mismatch")
+ }
+ p.Log().Debug("Whitelist block verified", "number", headers[0].Number.Uint64(), "hash", want)
+ }
// Irrelevant of the fork checks, send the header to the fetcher just in case
headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
}
diff --git a/eth/handler_test.go b/eth/handler_test.go
index 7811cd480..9fffd9581 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -478,7 +478,7 @@ func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool
if err != nil {
t.Fatalf("failed to create new blockchain: %v", err)
}
- pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db)
+ pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db, nil)
if err != nil {
t.Fatalf("failed to start test protocol manager: %v", err)
}
@@ -559,7 +559,7 @@ func testBroadcastBlock(t *testing.T, totalPeers, broadcastExpected int) {
if err != nil {
t.Fatalf("failed to create new blockchain: %v", err)
}
- pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db)
+ pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db, nil)
if err != nil {
t.Fatalf("failed to start test protocol manager: %v", err)
}
@@ -585,7 +585,7 @@ func testBroadcastBlock(t *testing.T, totalPeers, broadcastExpected int) {
}
}(peer)
}
- timeoutCh := time.NewTimer(time.Millisecond * 100).C
+ timeout := time.After(300 * time.Millisecond)
var receivedCount int
outer:
for {
@@ -597,7 +597,7 @@ outer:
if receivedCount == totalPeers {
break outer
}
- case <-timeoutCh:
+ case <-timeout:
break outer
}
}
diff --git a/eth/helper_test.go b/eth/helper_test.go
index 4e38a129e..b18a02baf 100644
--- a/eth/helper_test.go
+++ b/eth/helper_test.go
@@ -66,7 +66,7 @@ func newTestProtocolManager(mode downloader.SyncMode, blocks int, generator func
panic(err)
}
- pm, err := NewProtocolManager(gspec.Config, mode, DefaultConfig.NetworkId, evmux, &testTxPool{added: newtx}, engine, blockchain, db)
+ pm, err := NewProtocolManager(gspec.Config, mode, DefaultConfig.NetworkId, evmux, &testTxPool{added: newtx}, engine, blockchain, db, nil)
if err != nil {
return nil, nil, err
}
diff --git a/eth/tracers/internal/tracers/assets.go b/eth/tracers/internal/tracers/assets.go
index 04dd6fe89..d0a0bf7c1 100644
--- a/eth/tracers/internal/tracers/assets.go
+++ b/eth/tracers/internal/tracers/assets.go
@@ -1,14 +1,14 @@
// Code generated by go-bindata. DO NOT EDIT.
// sources:
-// 4byte_tracer.js
-// bigram_tracer.js
-// call_tracer.js
-// evmdis_tracer.js
-// noop_tracer.js
-// opcount_tracer.js
-// prestate_tracer.js
-// trigram_tracer.js
-// unigram_tracer.js
+// 4byte_tracer.js (2.933kB)
+// bigram_tracer.js (1.712kB)
+// call_tracer.js (8.643kB)
+// evmdis_tracer.js (4.194kB)
+// noop_tracer.js (1.271kB)
+// opcount_tracer.js (1.372kB)
+// prestate_tracer.js (4.234kB)
+// trigram_tracer.js (1.788kB)
+// unigram_tracer.js (1.51kB)
package tracers
@@ -28,7 +28,7 @@ import (
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
- return nil, fmt.Errorf("Read %q: %v", name, err)
+ return nil, fmt.Errorf("read %q: %v", name, err)
}
var buf bytes.Buffer
@@ -36,7 +36,7 @@ func bindataRead(data []byte, name string) ([]byte, error) {
clErr := gz.Close()
if err != nil {
- return nil, fmt.Errorf("Read %q: %v", name, err)
+ return nil, fmt.Errorf("read %q: %v", name, err)
}
if clErr != nil {
return nil, err
@@ -117,7 +117,7 @@ func bigram_tracerJs() (*asset, error) {
return a, nil
}
-var _call_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x59\xdf\x6f\x1b\x37\xf2\x7f\x96\xfe\x8a\x49\x1e\x6a\x09\x51\x24\x27\xe9\xb7\x5f\xc0\xae\x7a\xd0\x39\x4a\x6a\xc0\x8d\x03\x5b\x69\x10\x04\x79\xa0\x76\x67\x25\xd6\x5c\x72\x4b\x72\x2d\xef\xa5\xfe\xdf\x0f\x33\xe4\xae\x56\x3f\xec\xe8\x7a\xb8\x43\xef\x45\xd0\x2e\x67\x86\xc3\x99\xcf\xfc\xe2\x8e\x46\x70\x66\x8a\xca\xca\xc5\xd2\xc3\xcb\xe3\x17\xff\x0f\xb3\x25\xc2\xc2\x3c\x47\xbf\x44\x8b\x65\x0e\x93\xd2\x2f\x8d\x75\xdd\xd1\x08\x66\x4b\xe9\x20\x93\x0a\x41\x3a\x28\x84\xf5\x60\x32\xf0\x5b\xf4\x4a\xce\xad\xb0\xd5\xb0\x3b\x1a\x05\x9e\xbd\xcb\x24\x21\xb3\x88\xe0\x4c\xe6\x57\xc2\xe2\x09\x54\xa6\x84\x44\x68\xb0\x98\x4a\xe7\xad\x9c\x97\x1e\x41\x7a\x10\x3a\x1d\x19\x0b\xb9\x49\x65\x56\x91\x48\xe9\xa1\xd4\x29\x5a\xde\xda\xa3\xcd\x5d\xad\xc7\xdb\x77\x1f\xe0\x02\x9d\x43\x0b\x6f\x51\xa3\x15\x0a\xde\x97\x73\x25\x13\xb8\x90\x09\x6a\x87\x20\x1c\x14\xf4\xc6\x2d\x31\x85\x39\x8b\x23\xc6\x37\xa4\xca\x75\x54\x05\xde\x98\x52\xa7\xc2\x4b\xa3\x07\x80\x92\x34\x87\x5b\xb4\x4e\x1a\x0d\xaf\xea\xad\xa2\xc0\x01\x18\x4b\x42\x7a\xc2\xd3\x01\x2c\x98\x82\xf8\xfa\x20\x74\x05\x4a\xf8\x35\xeb\x01\x06\x59\x9f\x3b\x05\xa9\x79\x9b\xa5\x29\x10\xfc\x52\x78\x3a\xf5\x4a\x2a\x05\x73\x84\xd2\x61\x56\xaa\x01\x49\x9b\x97\x1e\x3e\x9e\xcf\x7e\xbe\xfc\x30\x83\xc9\xbb\x4f\xf0\x71\x72\x75\x35\x79\x37\xfb\x74\x0a\x2b\xe9\x97\xa6\xf4\x80\xb7\x18\x44\xc9\xbc\x50\x12\x53\x58\x09\x6b\x85\xf6\x15\x98\x8c\x24\xfc\x32\xbd\x3a\xfb\x79\xf2\x6e\x36\xf9\xfb\xf9\xc5\xf9\xec\x13\x18\x0b\x6f\xce\x67\xef\xa6\xd7\xd7\xf0\xe6\xf2\x0a\x26\xf0\x7e\x72\x35\x3b\x3f\xfb\x70\x31\xb9\x82\xf7\x1f\xae\xde\x5f\x5e\x4f\x87\x70\x8d\xa4\x15\x12\xff\xb7\x6d\x9e\xb1\xf7\x2c\x42\x8a\x5e\x48\xe5\x6a\x4b\x7c\x32\x25\xb8\xa5\x29\x55\x0a\x4b\x71\x8b\x60\x31\x41\x79\x8b\x29\x08\x48\x4c\x51\x1d\xec\x54\x92\x25\x94\xd1\x0b\x3e\xf3\x83\x80\x84\xf3\x0c\xb4\xf1\x03\x70\x88\xf0\xe3\xd2\xfb\xe2\x64\x34\x5a\xad\x56\xc3\x85\x2e\x87\xc6\x2e\x46\x2a\x88\x73\xa3\x9f\x86\x5d\x92\x99\x08\xa5\x66\x56\x24\x68\xc9\x39\x02\xb2\x92\xcc\xaf\xcc\x4a\x83\xb7\x42\x3b\x91\x90\xab\xe9\x7f\xc2\x60\x14\x1e\xf0\x8e\x9e\xbc\x23\xd0\x82\xc5\xc2\x58\xfa\xaf\x54\x8d\x33\xa9\x3d\x5a\x2d\x14\xcb\x76\x90\x8b\x14\x61\x5e\x81\x68\x0b\x1c\xb4\x0f\x43\x30\x0a\xee\x06\xa9\x33\x63\x73\x86\xe5\xb0\xfb\xb5\xdb\x89\x1a\x3a\x2f\x92\x1b\x52\x90\xe4\x27\xa5\xb5\xa8\x3d\x99\xb2\xb4\x4e\xde\x22\x93\x40\xa0\x89\xf6\x9c\xfe\xfa\x0b\xe0\x1d\x26\x65\x90\xd4\x69\x84\x9c\xc0\xe7\xaf\xf7\x5f\x06\x5d\x16\x9d\xa2\x4b\x50\xa7\x98\xf2\xf9\x6e\x1c\xac\x96\x6c\x51\x58\xe1\xd1\x2d\xc2\x6f\xa5\xf3\x2d\x9a\xcc\x9a\x1c\x84\x06\x53\x12\xe2\xdb\xd6\x91\xda\x1b\x16\x28\xe8\xbf\x46\xcb\x1a\x0d\xbb\x9d\x86\xf9\x04\x32\xa1\x1c\xc6\x7d\x9d\xc7\x82\x4e\x23\xf5\xad\xb9\x21\xc9\xc6\x12\x84\x6d\x05\xa6\x48\x4c\x1a\x83\x81\xce\xd1\x1c\x03\xdd\xb0\xdb\x21\xbe\x13\xc8\x4a\xcd\xdb\xf6\x94\x59\x0c\x20\x9d\xf7\xe1\x6b\xb7\x43\x62\xcf\x44\xe1\x4b\x8b\x6c\x4f\xb4\xd6\x58\x07\x32\xcf\x31\x95\xc2\xa3\xaa\xba\x9d\xce\xad\xb0\x61\x01\xc6\xa0\xcc\x62\xb8\x40\x3f\xa5\xc7\x5e\xff\xb4\xdb\xe9\xc8\x0c\x7a\x61\xf5\xc9\x78\xcc\xd9\x27\x93\x1a\xd3\x20\xbe\xe3\x97\xd2\x0d\x33\x51\x2a\xdf\xec\x4b\x4c\x1d\x8b\xbe\xb4\x9a\xfe\xde\x07\x2d\x3e\x22\x18\xad\x2a\x48\x28\xcb\x88\x39\x85\xa7\xab\x9c\xc7\x3c\x1e\xce\x0d\x20\x13\x8e\x4c\x28\x33\x58\x21\x14\x16\x9f\x27\x4b\x24\xdf\xe9\x04\xa3\x96\xae\x72\xec\xd4\x31\xd0\x6e\x43\x53\x0c\xbd\x79\x57\xe6\x73\xb4\xbd\x3e\x7c\x07\xc7\x77\xd9\x71\x1f\xc6\x63\xfe\x53\xeb\x1e\x79\xa2\xbe\x24\xc5\x14\xf1\xa0\xcc\x7f\xed\xad\xd4\x8b\x70\xd6\xa8\xeb\x79\x06\x02\x34\xae\x20\x31\x9a\x41\x4d\
x5e\x99\xa3\xd4\x0b\x48\x2c\x0a\x8f\xe9\x00\x44\x9a\x82\x37\x01\x79\x0d\xce\x36\xb7\x84\xef\xbe\xe3\xbd\xc6\x70\x74\x76\x35\x9d\xcc\xa6\x47\x2d\x25\xa4\xbe\xcc\xb2\xa8\x07\xf3\x0e\x0b\xc4\x9b\xde\x8b\xfe\xf0\x56\xa8\x12\x2f\xb3\xa0\x51\xa4\x9d\xea\x14\xc6\x91\xe7\xd9\x36\xcf\xcb\x0d\x1e\x62\x1a\x8d\x60\xe2\x1c\xe6\x73\x85\xbb\xb1\x17\x83\x93\xe3\xd4\x79\x4a\x4e\x04\xb4\xc4\xe4\x85\x42\x02\x50\xbd\x6b\xb4\x34\x6b\xdc\xf1\x55\x81\x27\x00\x00\xa6\x18\xf0\x0b\x82\x3d\xbf\xf0\xe6\x67\xbc\x63\x77\xd4\xd6\x22\x00\x4d\xd2\xd4\xa2\x73\xbd\x7e\x3f\x90\x4b\x5d\x94\xfe\x64\x83\x3c\xc7\xdc\xd8\x6a\xe8\x28\xf7\xf4\xf8\x68\x83\x70\xd2\x9a\x67\x21\xdc\xb9\x26\x9e\x08\xca\xb7\xc2\xf5\xd6\x4b\x67\xc6\xf9\x93\x7a\x89\x1e\xea\x35\xb6\x05\xb1\x1d\x1d\xdf\x1d\xed\x5a\xeb\xb8\xbf\x76\xfa\x8b\x1f\xfa\xc4\x72\x7f\xda\x40\xb9\xc9\x08\xc3\xa2\x74\xcb\x1e\x23\x67\xbd\xba\x8e\xfa\x31\x78\x5b\xe2\x5e\xa4\x33\x7a\x76\x91\xe3\x50\x65\x94\x36\xbc\x2d\x13\x46\xd0\x42\x70\x52\xe1\xa0\x16\x94\x64\x5d\x39\x67\x9b\x7b\x63\x1e\x04\xd2\xf5\xf4\xe2\xcd\xeb\xe9\xf5\xec\xea\xc3\xd9\xac\x0d\x27\x85\x99\x27\xa5\x36\xcf\xa0\x50\x2f\xfc\x92\xf5\x27\x71\x9b\xab\x9f\x89\xe7\xf9\x8b\x2f\xe1\x0d\x8c\xf7\x44\x77\xe7\x71\x0e\xf8\xfc\x85\x65\xdf\xef\x9a\x6f\x93\x34\x18\xf3\x6b\x00\x91\x29\xee\xdb\x39\x62\x4f\xd8\xe5\xe8\x97\x26\xe5\x3c\x98\x88\x90\x4a\x6b\x2b\xa6\x46\xe3\xc1\xc1\xd7\xab\xa3\x6f\x72\x71\x71\x04\x7f\xfc\x01\xad\xe7\xb3\xcb\xd7\xd3\xf6\xbb\xd7\xd3\x8b\xe9\xdb\xc9\x6c\xba\x4d\x7b\x3d\x9b\xcc\xce\xcf\xf8\x6d\x3f\x5a\x65\x34\x82\xeb\x1b\x59\x70\x42\xe5\x34\x65\xf2\x82\x3b\xc3\x46\x5f\x37\x00\xbf\x34\xd4\x73\xd9\x58\x2f\x32\xa1\x93\x3a\x8f\xbb\xda\x69\xde\x90\xcb\x4c\x1d\x2b\xbb\xa9\xa0\x0d\xd4\x7e\xe3\x46\xe9\xde\x5b\x8c\x9b\xa6\x3d\x6f\x6a\xbd\xd6\x06\x0d\x1e\xe1\x5c\xc7\x49\xa6\x77\xf8\x21\xe1\x6f\x70\x0c\x27\xf0\x22\x66\x92\x47\x52\xd5\x4b\x78\x46\xe2\xff\x44\xc2\x7a\xb5\x87\xf3\xaf\x99\xb6\xbc\x61\xe2\x9a\xdc\x9b\xff\x7e\x3a\x33\xa5\xbf\xcc\xb2\x13\xd8\x36\xe2\xf7\x3b\x46\x6c\xe8\x2f\x50\xef\xd2\xff\xdf\x0e\xfd\x3a\xf5\x11\xaa\x4c\x01\x4f\x76\x20\x12\x12\xcf\x93\xad\x38\x88\xc6\xe5\x6e\x86\xa5\xc1\xf8\x81\x64\xfb\x72\x13\xc3\x0f\x65\x8b\x7f\x2b\xd9\xee\xed\xca\xa8\xf7\xda\xec\xbb\x06\x60\xd1\x5b\x89\xb7\x34\x59\x1d\x39\x16\x49\xfd\xa9\x59\x09\x9d\xe0\x10\x3e\x62\x90\xa8\x11\x39\xb9\xc4\x7e\x96\xda\x11\x6e\xf1\xa8\x27\x8d\x93\x09\x43\x4c\x70\xdb\x69\x11\x72\x51\xd1\x64\x92\x95\xfa\xa6\x82\x85\x70\x90\x56\x5a\xe4\x32\x71\x41\x1e\xf7\xb2\x16\x17\xc2\xb2\x58\x8b\xbf\x97\xe8\x68\xcc\x21\x20\x8b\xc4\x97\x42\xa9\x0a\x16\x92\x66\x15\xe2\xee\xbd\x7c\x75\x7c\x0c\xce\xcb\x02\x75\x3a\x80\x1f\x5e\x8d\x7e\xf8\x1e\x6c\xa9\xb0\x3f\xec\xb6\xd2\x78\x73\xd4\xe8\x0d\x5a\x88\xe8\x79\x8d\x85\x5f\xf6\xfa\xf0\xd3\x03\xf5\xe0\x81\xe4\xbe\x97\x16\x9e\xc3\x8b\x2f\x43\xd2\x6b\xbc\x81\xdb\xe0\x49\x40\xe5\x30\x4a\xa3\xf9\xee\xf2\xf5\x65\xef\x46\x58\xa1\xc4\x1c\xfb\x27\x3c\xef\xb1\xad\x56\x22\x36\xfc\xe4\x14\x28\x94\x90\x1a\x44\x92\x98\x52\x7b\x32\x7c\xdd\xbb\xab\x8a\xf2\xfb\x91\xaf\xe5\xf1\x68\x24\x92\x04\x9d\xab\xd3\x3d\x7b\x8d\xd4\x11\x39\x71\x83\xd4\x4e\xa6\xd8\xf2\x0a\x65\x07\xc3\xa9\x39\x52\xd0\xe4\x58\x0b\xcc\x8d\xa3\x4d\xe6\x08\x2b\x4b\x73\x86\x93\x3a\xe1\x41\x3b\x45\xb2\xb6\x03\xa3\x41\x80\x32\x3c\xdd\x73\x8c\x83\xb0\x0b\x37\x0c\xf9\x9e\xb6\xa5\x9c\xa3\xcd\x6a\xb8\x09\xe4\x36\x54\xb9\xa3\xdf\x6a\x07\x34\xe0\x9d\x74\x9e\x1b\x48\xd2\x52\x3a\x08\x48\x96\x7a\x31\x80\xc2\x14\x9c\xa7\x0f\xec\x25\xaf\xa6\xbf\x4e\xaf\x9a\xe2\x7f\xb8\x13\xeb\x16\xff\x69\x33\x01\x81\xa5\xf1\xc2\x63\xfa\x74\x4f\xcf\xbe\x07\x50\xe3\x07\x00\x45\xf2\xd7\xb5\xf1\x7d\xeb\x38\x4a\x38
\xbf\x76\xcc\x02\xc3\xf8\xd2\x56\xc0\x95\xca\xbb\xad\xdc\xbd\x9d\x1c\x4c\x51\x57\x08\x52\x8a\xd3\x0e\x25\xf6\x3d\x9d\x75\x34\xb8\x6f\x03\x4f\x40\xa0\x69\x25\x00\x5e\xaf\x3b\x34\x11\x72\x3e\x6b\x68\x4a\x4f\x4e\xa7\x2a\xbd\x4e\x71\x0b\xe1\x3e\x38\xf6\x6d\x4c\x72\x73\xb9\x38\xd7\xbe\x57\x2f\x9e\x6b\x78\x0e\xf5\x03\xa5\x6e\x78\xbe\x11\x2b\x7b\x72\x60\x27\x45\x85\x1e\x61\x2d\xe2\x14\xb6\x5e\x91\xa0\x70\x68\x36\x8d\x45\xbf\x5b\x82\x8f\xa3\x34\x32\xcb\x13\x8b\x7e\x88\xbf\x97\x42\xb9\xde\x71\xd3\x12\x84\x13\x78\xc3\x45\x6c\xdc\x94\xb1\xba\xce\x11\xcf\x46\x93\x11\x05\x06\xb6\x68\x8d\x9a\x2d\x9d\x87\xda\x94\xe2\xa3\x12\xa2\x88\x98\x1c\x1a\x8f\x45\xf8\xed\xeb\x32\x3b\x6d\x02\x78\xda\x94\xfd\x4c\x48\x55\x5a\x7c\x7a\x0a\x7b\x92\x8b\x2b\x6d\x26\x12\xf6\xa5\x43\xe0\x11\xd4\x81\x33\x39\x2e\xcd\x2a\x28\xb0\x2f\x45\xed\x82\xa3\xc1\xc1\x56\x91\xe0\xbb\x14\xe1\xa0\x74\x62\x81\x2d\x70\x34\x06\xaf\x1d\xb5\x77\x2e\xfe\xd3\xd0\x79\xd6\x3c\x7e\x03\x45\x61\x97\x6f\x42\xe3\x31\x6c\xec\xf5\xf2\x4e\x2f\x53\x13\x71\x47\xd3\x7a\xa8\x55\x0d\x0d\x47\x83\x9c\x7f\xc5\xef\xff\x19\xc7\x07\xcf\xc7\xdf\x43\x03\x6d\x9b\x36\x9c\x71\x93\x38\x9c\x74\xdd\xc4\x7c\x1b\x05\xcd\xea\x43\x00\x78\xa8\x3f\x22\xa8\xea\xdf\x30\xf1\x6b\xb8\x72\x4b\x43\x4f\x85\xc5\x5b\x69\x4a\xaa\x56\xf8\xbf\x34\xff\x35\xfd\xdd\x7d\xb7\x73\x1f\xef\xbc\xd8\x7d\xed\x4b\xaf\xd5\x32\xde\xd9\x86\xd6\xa8\x55\x2b\x0c\x17\xd2\x78\x15\x96\x85\xdb\xd4\x0e\xf3\x3f\x72\xf9\x15\xe3\xdd\x9b\x82\x6a\x7f\x2c\x45\xca\xa2\x48\xab\xa6\xfa\x0d\x42\xd7\x01\x4b\xa1\xd3\x38\x79\x88\x34\x95\x24\x8f\xb1\x48\x1a\x8a\x85\x90\xba\xbb\xd7\x8c\xdf\x2c\xb9\xfb\x90\xb1\xd3\xc8\xb6\xab\x66\x9c\x18\x69\xbc\x63\x8d\xbb\x07\x54\xc7\xad\x58\xda\xbe\xc7\x8b\x57\x81\x46\xbb\x32\xe7\xb6\x17\xc4\xad\x90\x4a\xd0\xa8\xc5\xed\x94\x4e\x21\x51\x28\x74\xb8\xbd\xc7\xcc\x9b\x5b\xb4\xae\x7b\x00\xc8\xff\x0c\xc6\xb7\x92\x63\xfd\x18\xcd\x71\x78\xcc\x1e\x1a\xb1\xe1\xf8\x6f\x94\xf0\x3e\xc2\xab\x65\xde\x10\x59\xd2\xf3\x87\x1d\xd4\xbe\x7b\x58\x48\x71\x83\x44\x34\x3f\xc1\x71\xab\x09\xff\xab\x04\xd9\x2e\xc4\x2e\x9a\x66\x2c\x1e\xde\x1b\x33\x00\x85\x82\x47\xa2\xfa\xb3\x4b\xdd\x7c\x3e\x36\xa1\xd5\xd1\x1b\xda\xb7\x9d\xf0\xe5\x4b\xac\x25\xd6\xd7\x1d\xa1\x8f\x9f\x23\x6a\x90\x1e\xad\xa0\xe1\x87\xd0\x15\xbf\x14\x90\x96\x8e\xc5\xb1\x5f\x24\x05\x5d\x14\x1c\xaf\xed\xa9\x3e\x4b\xbd\x18\x76\x3b\xe1\x7d\x2b\xde\x13\x7f\xb7\x8e\xf7\x50\x0c\x99\x33\x5e\x00\x34\xf3\x7f\xe2\xef\xb8\x67\xe4\x19\x79\xeb\x12\x80\xd6\xe8\x55\x18\xa0\xb7\x46\x7e\x66\x8c\x63\xff\xf6\xcd\x22\xad\xf1\xbb\x0d\x80\x33\xe9\x42\xb8\x20\x66\x2b\x24\xfc\xdd\x6e\x44\xd4\x0c\x14\x0c\x27\xfb\x19\x68\x69\x0f\xd3\xd6\x35\x04\x11\xf3\xab\xb0\x1a\x0a\xfb\x49\x7b\x35\xbc\x8a\x07\x95\x79\xcb\x36\x32\x67\xdb\xdc\x9f\xee\x4f\x72\xc7\x35\x1e\xf7\x27\x33\xb2\x79\x03\xd8\x07\x58\xdb\x83\xc5\x2e\xc9\x63\xa9\x92\xa5\xd7\x99\xed\x01\x56\x96\xde\x6a\x3d\xfc\xdd\xe1\x22\x1b\xe2\xb6\x8a\x1b\x34\xfb\x84\xc4\x3c\x13\xe9\x82\x65\x6b\x01\x01\xd5\x41\x57\x46\xb4\xfc\x07\x46\x89\xed\xf8\xa9\x97\xc0\x62\xf8\xb0\xc0\x0d\x29\x85\x8f\x99\x73\xf1\x2f\x1d\xcd\x8c\xeb\xb8\x48\xd1\x49\x8b\x29\x64\x12\x55\x0a\x26\x45\xcb\x13\xe9\x6f\xce\xe8\xf0\x09\x09\xad\x24\x89\xe1\x53\x59\xf8\x6a\xcd\x1f\xf0\xb4\x4c\xd0\x57\x90\xa1\xe0\x6f\x41\xde\x40\x21\x9c\x83\x1c\x05\xcd\xa0\x59\xa9\x54\x05\xc6\xa6\x48\xc2\x9b\xa1\x8c\x42\xd2\x40\xe9\xd0\x3a\x58\x2d\x4d\x2c\x93\xdc\xa5\x15\xd4\x74\x4a\x3f\x88\xf7\x2e\xd2\x15\x4a\x54\x20\x3d\x95\xe4\x78\xa8\x76\x94\x36\x1f\x60\xf8\x2b\x8e\xa1\xaa\xbb\x1b\xa2\xf5\x5c\xb7\x19\xa3\xfc\x9a\x9e\x36\xa3\x33\xce\x35\x9b\x71\xb9\xbe\x91\xda\x0c\xc2\xba\x6c\x6c\x46\x5a\xbb\x08\x6d\x86\x1
3\xaf\xf0\xd3\x66\x20\xb5\xfa\x65\x5e\x60\x70\x34\x0c\xfc\xb4\x15\x5a\xac\x65\x8c\xad\xf0\xb9\xb1\x21\xe7\xa7\x41\x04\x0c\x79\xb1\x47\xc6\xb9\xc1\x8a\x32\x71\xb0\x51\xab\xac\x84\x17\x9f\x6f\xb0\xfa\xb2\xbf\x8a\x44\x38\xb6\xe8\x9a\xb2\x51\x43\x3a\xac\x3d\x12\xc8\x8d\x16\x72\x7c\x7c\x0a\xf2\xc7\x36\x43\x5d\xf9\x40\x3e\x7b\x56\xef\xd9\x5e\xff\x2c\xbf\xd4\xd1\xd9\x20\x7e\x6b\xbd\xbf\xa1\x51\x8c\x91\x40\x43\x41\xd1\xbd\xef\xfe\x33\x00\x00\xff\xff\xb5\x25\x8b\x4d\x94\x21\x00\x00")
+var _call_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x59\x5f\x6f\x1b\xb7\xb2\x7f\x96\x3e\xc5\x24\x0f\xb5\x84\x28\x92\x93\xf4\xf6\x02\x76\xd5\x0b\x5d\x47\x49\x0d\xb8\x71\x60\x2b\x0d\x82\x20\x0f\xd4\xee\xac\xc4\x9a\x4b\x6e\x49\xae\xe4\x3d\xa9\xbf\xfb\xc1\x0c\xb9\xab\xd5\x1f\x3b\x6e\x0f\xce\x41\xcf\x8b\xa0\x5d\xce\x0c\x87\x33\xbf\xf9\xc7\x1d\x8d\xe0\xcc\x14\x95\x95\x8b\xa5\x87\x97\xc7\x2f\xfe\x17\x66\x4b\x84\x85\x79\x8e\x7e\x89\x16\xcb\x1c\x26\xa5\x5f\x1a\xeb\xba\xa3\x11\xcc\x96\xd2\x41\x26\x15\x82\x74\x50\x08\xeb\xc1\x64\xe0\x77\xe8\x95\x9c\x5b\x61\xab\x61\x77\x34\x0a\x3c\x07\x97\x49\x42\x66\x11\xc1\x99\xcc\xaf\x85\xc5\x13\xa8\x4c\x09\x89\xd0\x60\x31\x95\xce\x5b\x39\x2f\x3d\x82\xf4\x20\x74\x3a\x32\x16\x72\x93\xca\xac\x22\x91\xd2\x43\xa9\x53\xb4\xbc\xb5\x47\x9b\xbb\x5a\x8f\xb7\xef\x3e\xc0\x05\x3a\x87\x16\xde\xa2\x46\x2b\x14\xbc\x2f\xe7\x4a\x26\x70\x21\x13\xd4\x0e\x41\x38\x28\xe8\x8d\x5b\x62\x0a\x73\x16\x47\x8c\x6f\x48\x95\xeb\xa8\x0a\xbc\x31\xa5\x4e\x85\x97\x46\x0f\x00\x25\x69\x0e\x2b\xb4\x4e\x1a\x0d\xaf\xea\xad\xa2\xc0\x01\x18\x4b\x42\x7a\xc2\xd3\x01\x2c\x98\x82\xf8\xfa\x20\x74\x05\x4a\xf8\x0d\xeb\x23\x0c\xb2\x39\x77\x0a\x52\xf3\x36\x4b\x53\x20\xf8\xa5\xf0\x74\xea\xb5\x54\x0a\xe6\x08\xa5\xc3\xac\x54\x03\x92\x36\x2f\x3d\x7c\x3c\x9f\xfd\x7c\xf9\x61\x06\x93\x77\x9f\xe0\xe3\xe4\xea\x6a\xf2\x6e\xf6\xe9\x14\xd6\xd2\x2f\x4d\xe9\x01\x57\x18\x44\xc9\xbc\x50\x12\x53\x58\x0b\x6b\x85\xf6\x15\x98\x8c\x24\xfc\x32\xbd\x3a\xfb\x79\xf2\x6e\x36\xf9\xff\xf3\x8b\xf3\xd9\x27\x30\x16\xde\x9c\xcf\xde\x4d\xaf\xaf\xe1\xcd\xe5\x15\x4c\xe0\xfd\xe4\x6a\x76\x7e\xf6\xe1\x62\x72\x05\xef\x3f\x5c\xbd\xbf\xbc\x9e\x0e\xe1\x1a\x49\x2b\x24\xfe\x6f\xdb\x3c\x63\xef\x59\x84\x14\xbd\x90\xca\xd5\x96\xf8\x64\x4a\x70\x4b\x53\xaa\x14\x96\x62\x85\x60\x31\x41\xb9\xc2\x14\x04\x24\xa6\xa8\x1e\xed\x54\x92\x25\x94\xd1\x0b\x3e\xf3\xbd\x80\x84\xf3\x0c\xb4\xf1\x03\x70\x88\xf0\xe3\xd2\xfb\xe2\x64\x34\x5a\xaf\xd7\xc3\x85\x2e\x87\xc6\x2e\x46\x2a\x88\x73\xa3\x9f\x86\x5d\x92\x99\x08\xa5\x66\x56\x24\x68\xc9\x39\x02\xb2\x92\xcc\xaf\xcc\x5a\x83\xb7\x42\x3b\x91\x90\xab\xe9\x7f\xc2\x60\x14\x1e\xf0\x96\x9e\xbc\x23\xd0\x82\xc5\xc2\x58\xfa\xaf\x54\x8d\x33\xa9\x3d\x5a\x2d\x14\xcb\x76\x90\x8b\x14\x61\x5e\x81\x68\x0b\x1c\xb4\x0f\x43\x30\x0a\xee\x06\xa9\x33\x63\x73\x86\xe5\xb0\xfb\xb5\xdb\x89\x1a\x3a\x2f\x92\x1b\x52\x90\xe4\x27\xa5\xb5\xa8\x3d\x99\xb2\xb4\x4e\xae\x90\x49\x20\xd0\x44\x7b\x4e\x7f\xfd\x05\xf0\x16\x93\x32\x48\xea\x34\x42\x4e\xe0\xf3\xd7\xbb\x2f\x83\x2e\x8b\x4e\xd1\x25\xa8\x53\x4c\xf9\x7c\x37\x0e\xd6\x4b\xb6\x28\xac\xf1\x68\x85\xf0\x5b\xe9\x7c\x8b\x26\xb3\x26\x07\xa1\xc1\x94\x84\xf8\xb6\x75\xa4\xf6\x86\x05\x0a\xfa\xaf\xd1\xb2\x46\xc3\x6e\xa7\x61\x3e\x81\x4c\x28\x87\x71\x5f\xe7\xb1\xa0\xd3\x48\xbd\x32\x37\x24\xd9\x58\x82\xb0\xad\xc0\x14\x89\x49\x63\x30\xd0\x39\x9a\x63\xa0\x1b\x76\x3b\xc4\x77\x02\x59\xa9\x79\xdb\x9e\x32\x8b\x01\xa4\xf3\x3e\x7c\xed\x76\x48\xec\x99\x28\x7c\x69\x91\xed\x89\xd6\x1a\xeb\x40\xe6\x39\xa6\x52\x78\x54\x55\xb7\xd3\x59\x09\x1b\x16\x60\x0c\xca\x2c\x86\x0b\xf4\x53\x7a\xec\xf5\x4f\xbb\x9d\x8e\xcc\xa0\x17\x56\x9f\x8c\xc7\x9c\x7d\x32\xa9\x31\x0d\xe2\x3b\x7e\x29\xdd\x30\x13\xa5\xf2\xcd\xbe\xc4\xd4\xb1\xe8\x4b\xab\xe9\xef\x5d\xd0\xe2\x23\x82\xd1\xaa\x82\x84\xb2\x8c\x98\x53\x78\xba\xca\x79\xcc\xe3\xe1\xdc\x00\x32\xe1\xc8\x84\x32\x83\x35\x42\x61\xf1\x79\xb2\x44\xf2\x9d\x4e\x30\x6a\xe9\x2a\xc7\x4e\x1d\x03\xed\x36\x34\xc5\xd0\x9b\x77\x65\x3e\x47\xdb\xeb\xc3\x77\x70\x7c\x9b\x1d\xf7\x61\x3c\xe6\x3f\xb5\xee\x91\x27\xea\x4b\x52\x4c\x11\x0f\xca\xfc\xd7\xde\x4a\xbd\x08\x67\x8d\xba\x9e\x67\x20\x40\xe3\x1a\x12\xa3\x19\xd4\xe4\
x95\x39\x4a\xbd\x80\xc4\xa2\xf0\x98\x0e\x40\xa4\x29\x78\x13\x90\xd7\xe0\x6c\x7b\x4b\xf8\xee\x3b\xe8\xd1\x66\x63\x38\x3a\xbb\x9a\x4e\x66\xd3\x23\xf8\xe3\x0f\x08\x6f\x9e\x86\x37\x2f\x9f\xf6\x5b\x9a\x49\x7d\x99\x65\x51\x39\x16\x38\x2c\x10\x6f\x7a\x2f\xfa\xc3\x95\x50\x25\x5e\x66\x41\xcd\x48\x3b\xd5\x29\x8c\x23\xcf\xb3\x5d\x9e\x97\x5b\x3c\xc4\x34\x1a\xc1\xc4\x39\xcc\xe7\x0a\xf7\x03\x32\x46\x2c\x07\xaf\xf3\x94\xb1\x08\x7d\x89\xc9\x0b\x85\x84\xaa\x7a\xd7\x68\x7e\xd6\xb8\xe3\xab\x02\x4f\x00\x00\x4c\x31\xe0\x17\x14\x0b\xfc\xc2\x9b\x9f\xf1\x96\x7d\x54\x9b\x90\x50\x35\x49\x53\x8b\xce\xf5\xfa\xfd\x40\x2e\x75\x51\xfa\x93\x2d\xf2\x1c\x73\x63\xab\xa1\xa3\x84\xd4\xe3\xa3\x0d\xc2\x49\x6b\x9e\x85\x70\xe7\x9a\x78\x22\x52\xdf\x0a\xd7\xdb\x2c\x9d\x19\xe7\x4f\xea\x25\x7a\xa8\xd7\xd8\x16\xc4\x76\x74\x7c\x7b\xb4\x6f\xad\xe3\xfe\x06\x09\x2f\x7e\xe8\x13\xcb\xdd\x69\x83\xef\x26\x4d\x0c\x8b\xd2\x2d\x7b\x0c\xa7\xcd\xea\x26\x15\x8c\xc1\xdb\x12\x0f\xc2\x9f\x21\xb5\x0f\x27\x87\x2a\xa3\x5c\xe2\x6d\x99\x30\xac\x16\x82\x33\x0d\x47\xba\xa0\xcc\xeb\xca\x39\xdb\xdc\x1b\xb3\x8f\xae\x08\xae\xeb\xe9\xc5\x9b\xd7\xd3\xeb\xd9\xd5\x87\xb3\xd9\x51\x0b\x4e\x0a\x33\x4f\x4a\x6d\x9f\x41\xa1\x5e\xf8\x25\xeb\x4f\xe2\xb6\x57\x3f\x13\xcf\xf3\x17\x5f\xc2\x1b\x18\x1f\x08\xf9\xce\xc3\x1c\xf0\xf9\x0b\xcb\xbe\xdb\x37\xdf\x36\x69\x30\xe6\xd7\x00\x22\x53\xdc\xb5\x13\xc7\x81\x58\xcc\xd1\x2f\x4d\xca\xc9\x31\x11\x21\xbf\xd6\x56\x4c\x8d\xc6\x3f\x1f\x91\x93\x8b\x8b\x56\x3c\xf2\xf3\xd9\xe5\xeb\x76\x8c\x1e\xbd\x9e\x5e\x4c\xdf\x4e\x66\xd3\x5d\xda\xeb\xd9\x64\x76\x7e\xc6\x6f\xeb\xf0\x1d\x8d\xe0\xfa\x46\x16\x9c\x65\x39\x77\x99\xbc\xe0\x76\xb1\xd1\xd7\x0d\xc0\x2f\x0d\x35\x62\x36\x16\x91\x4c\xe8\xa4\x4e\xee\xae\x76\x9a\x37\xe4\x32\x53\xc7\xca\x7e\x2a\x68\x03\xb5\xdf\xb8\x51\xba\xf7\x16\xe3\xa6\x69\xcf\x9b\x5a\xaf\x8d\x41\x83\x47\x38\x01\x72\x92\xe9\x3d\xfe\x90\xf0\x7f\x70\x0c\x27\xf0\x22\x66\x92\x07\x52\xd5\x4b\x78\x46\xe2\xff\x42\xc2\x7a\x75\x80\xf3\xef\x99\xb6\xbc\x61\xe2\x9a\xdc\x9b\xff\x7c\x3a\x33\xa5\xbf\xcc\xb2\x13\xd8\x35\xe2\xf7\x7b\x46\x6c\xe8\x2f\x50\xef\xd3\xff\xcf\x1e\xfd\x26\xf5\x11\xaa\x4c\x01\x4f\xf6\x20\x12\x12\xcf\x93\x9d\x38\x88\xc6\xe5\x16\x87\xa5\xc1\xf8\x9e\x64\xfb\x72\x1b\xc3\xf7\x65\x8b\x7f\x29\xd9\x1e\x6c\xd5\xa8\x21\xdb\x6e\xc6\x06\x60\xd1\x5b\x89\x2b\x1a\xb7\x8e\x1c\x8b\xa4\xa6\xd5\xac\x85\x4e\x70\x08\x1f\x31\x48\xd4\x88\x9c\x5c\x62\x93\x4b\x3d\x0a\xf7\x7d\xd4\xa8\xc6\x71\x85\x21\x26\xb8\x17\xb5\x08\xb9\xa8\x68\x5c\xc9\x4a\x7d\x53\xc1\x42\x38\x48\x2b\x2d\x72\x99\xb8\x20\x8f\x1b\x5c\x8b\x0b\x61\x59\xac\xc5\xdf\x4b\x74\x34\xfb\x10\x90\x45\xe2\x4b\xa1\x54\x05\x0b\x49\x03\x0c\x71\xf7\x5e\xbe\x3a\x3e\x06\xe7\x65\x81\x3a\x1d\xc0\x0f\xaf\x46\x3f\x7c\x0f\xb6\x54\xd8\x1f\x76\x5b\x69\xbc\x39\x6a\xf4\x06\x2d\x44\xf4\xbc\xc6\xc2\x2f\x7b\x7d\xf8\xe9\x9e\x7a\x70\x4f\x72\x3f\x48\x0b\xcf\xe1\xc5\x97\x21\xe9\x35\xde\xc2\x6d\xf0\x24\xa0\x72\x18\xa5\xd1\xd0\x77\xf9\xfa\xb2\x77\x23\xac\x50\x62\x8e\xfd\x13\x1e\x02\xd9\x56\x6b\x11\xa7\x00\x72\x0a\x14\x4a\x48\x0d\x22\x49\x4c\xa9\x3d\x19\xbe\x6e\xe8\x55\x45\xf9\xfd\xc8\xd7\xf2\x78\x5e\x12\x49\x82\xce\xd5\xe9\x9e\xbd\x46\xea\x88\x9c\xb8\x41\x6a\x27\x53\x6c\x79\x85\xb2\x83\xe1\xd4\x1c\x29\x68\x9c\xac\x05\xe6\xc6\xd1\x26\x73\x84\xb5\xa5\xe1\xc3\x49\x9d\xf0\xf4\x9d\x22\x59\xdb\x81\xd1\x20\x40\x19\x1e\xf9\x39\xc6\x41\xd8\x85\x1b\x86\x7c\x4f\xdb\x52\xce\xd1\x66\x3d\xdc\x06\x72\x1b\xaa\xdc\xe6\xef\xb4\x03\x1a\xf0\x56\x3a\xcf\x5d\x25\x69\x29\x1d\x04\x24\x4b\xbd\x18\x40\x61\x0a\xce\xd3\xdf\x2a\x67\x31\x59\x5f\x4d\x7f\x9d\x5e\x35\xc5\xff\xf1\x4e\xac\xfb\xfe\xa7\xcd\x58\x04\x96\x66\x0e\x8f\xe9\xd3\x03\x8d\xfc\x01\x40\x8d
\xef\x01\x14\xc9\xdf\xd4\xc6\xf7\xad\xe3\x28\xe1\xfc\xc6\x31\x0b\x0c\x33\x4d\x5b\x01\x57\x2a\xef\x76\x72\xf7\x6e\x72\x30\x45\x5d\x21\x48\x29\x4e\x3b\x94\xd8\x77\xbb\xed\xad\x85\x4d\xd3\xbd\xc1\xe7\x79\xcb\xc6\x6b\x6e\xb9\x02\x51\x2b\x35\xf0\x7a\xdd\xbb\x89\x50\x0d\x58\x77\x53\x7a\x82\x03\xd5\xef\x4d\xf2\x5b\x08\xf7\xc1\xb1\xd7\x63\xfa\x9b\xcb\xc5\xb9\xf6\xbd\x7a\xf1\x5c\xc3\x73\xa8\x1f\x28\xa9\xc3\xf3\xad\x28\x3a\x90\x1d\x3b\x29\x2a\xf4\x08\x1b\x11\xa7\xb0\xf3\x8a\x04\x05\x73\xb0\xd1\x2c\xfa\xfd\xe2\x7c\x1c\xa5\x91\xc1\x9e\x58\xf4\x43\xfc\xbd\x14\xca\xf5\x8e\x9b\x66\x21\x9c\xc0\x1b\x2e\x6f\xe3\xa6\xc0\xd5\x15\x90\x78\xb6\xda\x8f\x28\x30\xb0\x45\x6b\xd4\x6c\xe9\x3c\x54\xad\x14\x1f\x94\x10\x45\xc4\xb4\xd1\xf8\x32\x02\xf3\x50\xff\xd9\x69\x13\xc0\xd3\xa6\x21\xc8\x84\x54\xa5\xc5\xa7\xa7\x70\x20\xed\xb8\xd2\x66\x22\x61\x5f\x3a\x04\x9e\x58\x1d\x38\x93\xe3\xd2\xac\x83\x02\x87\x92\xd7\x3e\x38\x1a\x1c\xec\x94\x0f\xbe\x7a\x11\x0e\x4a\x27\x16\xd8\x02\x47\x63\xf0\xda\x51\x07\xc7\xe8\xbf\x0c\x9d\x67\xcd\xe3\x37\x50\x14\x76\xf9\x26\x34\x1e\xc2\xc6\x41\x2f\xef\x75\x39\x35\x11\xf7\x3a\xad\x87\x5a\xd5\xd0\x8a\x34\xc8\xf9\x33\x7e\xff\xf7\x38\x3e\x78\x3e\xfe\x3e\x36\xd0\x76\x69\xc3\x19\xb7\x89\xc3\x49\x37\xed\xcd\xb7\x51\xd0\xac\xde\x07\x80\xfb\x3a\x27\x82\xaa\xfe\x0d\x13\xbf\x81\x2b\x37\x3b\xf4\x54\x58\x5c\x49\x53\x52\x1d\xc3\xff\xa6\xc9\xb0\xe9\xfc\xee\xba\x9d\xbb\x78\x45\xc6\xee\x6b\xdf\x91\xad\x97\xf1\x8a\x37\x34\x4d\xad\x2a\x62\xb8\xc4\xc6\x9b\xb3\x2c\x5c\xbe\x76\x98\xff\x81\xbb\xb2\x18\xef\xde\x14\xd4\x15\xc4\x22\xa5\x2c\x8a\xb4\x6a\xea\xe2\x20\xf4\x23\xb0\x14\x3a\x8d\x33\x89\x48\x53\x49\xf2\x18\x8b\xa4\xa1\x58\x08\xa9\xbb\x07\xcd\xf8\xcd\x62\x7c\x08\x19\x7b\x2d\x6e\xbb\x9e\xc6\x59\x92\x06\x3f\xd6\xb8\xfb\x88\xba\xb9\x13\x4b\xbb\xd7\x7e\xf1\xe6\xd0\x68\x57\xe6\xdc\x10\x83\x58\x09\xa9\x04\x0d\x61\xdc\x68\xe9\x14\x12\x85\x42\x87\xcb\x7e\xcc\xbc\x59\xa1\x75\xdd\x47\x80\xfc\xaf\x60\x7c\x27\x39\xd6\x8f\xd1\x1c\x8f\x8f\xd9\xc7\x46\x6c\x38\xfe\x1b\x25\xbc\x8f\xf0\x6a\x99\x37\x44\x96\xf4\xfc\x1d\x08\xb5\xef\x3e\x2e\xa4\xb8\x75\x22\x9a\x9f\xe0\xb8\xd5\x9e\xff\x5d\x82\x6c\x1f\x62\x17\x4d\x9b\x16\x0f\xef\x8d\x19\x80\x42\xc1\xc3\x52\xfd\x95\xa6\x6e\x4b\x1f\x9a\xdd\xea\xe8\x0d\x8d\xdd\x5e\xf8\xf2\xf5\xd6\x12\xeb\x8b\x90\xd0\xe1\xcf\x11\x35\x48\x8f\x56\xd0\x58\x44\xe8\x8a\x1f\x16\x48\x4b\xc7\xe2\xd8\x2f\x92\x82\x2e\x0a\x8e\xb7\xfc\x54\x9f\xa5\x5e\x0c\xbb\x9d\xf0\xbe\x15\xef\x89\xbf\xdd\xc4\x7b\x28\x86\xcc\x19\xaf\x06\x9a\x9b\x81\xc4\xdf\x72\xd3\xc8\xd3\xf3\xce\xf5\x00\xad\xd1\xab\x30\x5a\xef\x5c\x06\x30\x63\xbc\x10\xd8\xbd\x73\xa4\x35\x7e\xb7\x05\x70\x26\x5d\x08\x17\xc4\xec\x84\x84\xbf\xdd\x8f\x88\x9a\x81\x82\xe1\xe4\x30\x03\x2d\x1d\x60\xda\xb9\xa0\x20\x62\x7e\x15\x56\x43\x61\x3f\x69\xaf\x86\x57\xf1\xa0\x32\x6f\xd9\x46\xe6\x6c\x9b\xbb\xd3\xc3\x49\xee\xb8\xc6\xe3\xe1\x64\x46\x36\x6f\x00\x7b\x0f\x6b\x7b\xe4\xd8\x27\x79\x28\x55\xb2\xf4\x3a\xb3\xdd\xc3\xca\xd2\x5b\xad\x87\xbf\x7d\xbc\xc8\x86\xb8\xad\xe2\x16\xcd\x21\x21\x31\xcf\x44\xba\x60\xd9\x5a\x40\x40\x75\xd0\x95\x11\x2d\xff\x81\x51\x62\x3b\x7e\xea\x25\xb0\x18\xbe\x43\x70\x43\x4a\xe1\x63\xe6\x5c\xfc\x4b\x47\xd3\xe4\x26\x2e\x52\x74\xd2\x62\x0a\x99\x44\x95\x82\x49\xd1\xf2\xac\xfa\x9b\x33\x3a\x7c\x71\x42\x2b\x49\x62\xf8\xb2\x16\x3e\x72\xf3\xf7\x3e\x2d\x13\xf4\x15\x64\x28\xf8\xd3\x91\x37\x50\x08\xe7\x20\x47\x41\xd3\x69\x56\x2a\x55\x81\xb1\x29\x92\xf0\x66\x5c\xa3\x90\x34\x50\x3a\xb4\x0e\xd6\x4b\x13\xcb\x24\x77\x69\x05\x35\x9d\xd2\x0f\xe2\x8d\x8c\x74\x85\x12\x15\x48\x4f\x25\x39\x1e\xaa\x1d\xa5\xcd\xf7\x1a\xfe\xe8\x63\xa8\xea\xee\x87\x68\x3d\xd8\x6d\xc7\x28\xbf\xa6\xa7\xed\xe
8\x8c\x73\xcd\x76\x5c\x6e\xee\xaa\xb6\x83\xb0\x2e\x1b\xdb\x91\xd6\x2e\x42\xdb\xe1\xc4\x2b\xfc\xb4\x1d\x48\xad\x7e\x99\x17\x18\x1c\x0d\x03\x3f\xed\x84\x16\x6b\x19\x63\x2b\x7c\x9d\x6c\xc8\xf9\x69\x10\x01\x43\x5e\xec\x91\x71\x6e\xb0\xa2\x4c\x1c\x6c\xd4\x2a\x2b\xe1\xc5\xe7\x1b\xac\xbe\x1c\xae\x22\x11\x8e\x2d\xba\xa6\x6c\xd4\x90\x0e\x6b\x0f\x04\x72\xa3\x85\x1c\x1f\x9f\x82\xfc\xb1\xcd\x50\x57\x3e\x90\xcf\x9e\xd5\x7b\xb6\xd7\x3f\xcb\x2f\x75\x74\x36\x88\xdf\x59\xef\x6f\x69\x14\x63\x24\xd0\x50\x50\x74\xef\xba\xff\x0c\x00\x00\xff\xff\x00\x24\x55\x1f\xc3\x21\x00\x00")
func call_tracerJsBytes() ([]byte, error) {
return bindataRead(
@@ -133,7 +133,7 @@ func call_tracerJs() (*asset, error) {
}
info := bindataFileInfo{name: "call_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
- a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0xb3, 0xb6, 0xe8, 0x19, 0xc3, 0xa, 0xce, 0xfd, 0x50, 0x84, 0xf7, 0x8a, 0xc5, 0x99, 0x10, 0x58, 0xc4, 0x69, 0xfb, 0x8, 0xad, 0x67, 0xea, 0x12, 0x38, 0xcb, 0xd, 0x2a, 0x94, 0xa1, 0x70}}
+ a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe9, 0xef, 0x68, 0xda, 0xd8, 0x9, 0xf5, 0xd5, 0x71, 0xa8, 0x8a, 0xfb, 0x30, 0xe8, 0xf0, 0x72, 0x14, 0x36, 0x6b, 0x62, 0x5a, 0x4e, 0xff, 0x16, 0xdc, 0xd3, 0x2c, 0x68, 0x7b, 0x79, 0x9f, 0xd3}}
return a, nil
}
@@ -197,7 +197,7 @@ func opcount_tracerJs() (*asset, error) {
return a, nil
}
-var _prestate_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x57\xdd\x6f\x1b\xb9\x11\x7f\xde\xfd\x2b\xa6\x7e\x91\x84\x53\x56\xce\x15\xb8\x02\x72\x5d\x60\xa3\x28\x89\x00\x9d\x6d\x48\x4a\x5d\xf7\x70\x0f\x5c\x72\x76\xc5\x13\x45\x2e\x48\xae\x3e\x10\xf8\x7f\x2f\x86\xfb\x21\xcb\x67\x27\x6e\xeb\x27\x2f\x39\xfc\xcd\xf7\x6f\x46\xa3\x11\x4c\x4c\x79\xb4\xb2\x58\x7b\xf8\xf9\xf2\xfd\xdf\x60\xb5\x46\x28\xcc\x3b\xf4\x6b\xb4\x58\x6d\x21\xad\xfc\xda\x58\x17\x8f\x46\xb0\x5a\x4b\x07\xb9\x54\x08\xd2\x41\xc9\xac\x07\x93\x83\x7f\x26\xaf\x64\x66\x99\x3d\x26\xf1\x68\x54\xbf\x79\xf1\x9a\x10\x72\x8b\x08\xce\xe4\x7e\xcf\x2c\x8e\xe1\x68\x2a\xe0\x4c\x83\x45\x21\x9d\xb7\x32\xab\x3c\x82\xf4\xc0\xb4\x18\x19\x0b\x5b\x23\x64\x7e\x24\x48\xe9\xa1\xd2\x02\x6d\x50\xed\xd1\x6e\x5d\x6b\xc7\xe7\x9b\xaf\x30\x47\xe7\xd0\xc2\x67\xd4\x68\x99\x82\xbb\x2a\x53\x92\xc3\x5c\x72\xd4\x0e\x81\x39\x28\xe9\xc4\xad\x51\x40\x16\xe0\xe8\xe1\x27\x32\x65\xd9\x98\x02\x9f\x4c\xa5\x05\xf3\xd2\xe8\x21\xa0\x24\xcb\x61\x87\xd6\x49\xa3\xe1\xaf\xad\xaa\x06\x70\x08\xc6\x12\x48\x9f\x79\x72\xc0\x82\x29\xe9\xdd\x00\x98\x3e\x82\x62\xfe\xf4\xf4\x0d\x01\x39\xf9\x2d\x40\xea\xa0\x66\x6d\x4a\x04\xbf\x66\x9e\xbc\xde\x4b\xa5\x20\x43\xa8\x1c\xe6\x95\x1a\x12\x5a\x56\x79\xb8\x9f\xad\xbe\xdc\x7e\x5d\x41\x7a\xf3\x00\xf7\xe9\x62\x91\xde\xac\x1e\xae\x60\x2f\xfd\xda\x54\x1e\x70\x87\x35\x94\xdc\x96\x4a\xa2\x80\x3d\xb3\x96\x69\x7f\x04\x93\x13\xc2\xaf\xd3\xc5\xe4\x4b\x7a\xb3\x4a\x3f\xcc\xe6\xb3\xd5\x03\x18\x0b\x9f\x66\xab\x9b\xe9\x72\x09\x9f\x6e\x17\x90\xc2\x5d\xba\x58\xcd\x26\x5f\xe7\xe9\x02\xee\xbe\x2e\xee\x6e\x97\xd3\x04\x96\x48\x56\x21\xbd\xff\x71\xcc\xf3\x90\x3d\x8b\x20\xd0\x33\xa9\x5c\x1b\x89\x07\x53\x81\x5b\x9b\x4a\x09\x58\xb3\x1d\x82\x45\x8e\x72\x87\x02\x18\x70\x53\x1e\xdf\x9c\x54\xc2\x62\xca\xe8\x22\xf8\xfc\x6a\x41\xc2\x2c\x07\x6d\xfc\x10\x1c\x22\xfc\x7d\xed\x7d\x39\x1e\x8d\xf6\xfb\x7d\x52\xe8\x2a\x31\xb6\x18\xa9\x1a\xce\x8d\xfe\x91\xc4\x84\x59\x5a\x74\x9e\x79\x5c\x59\xc6\xd1\x82\xa9\x7c\x59\x79\x07\xae\xca\x73\xc9\x25\x6a\x0f\x52\xe7\xc6\x6e\x43\xa5\x80\x37\xc0\x2d\x32\x8f\xc0\x40\x19\xce\x14\xe0\x01\x79\x15\xee\xea\x48\x87\x72\xb5\x4c\x3b\xc6\xc3\x69\x6e\xcd\x96\x7c\xad\x9c\xa7\x7f\x9c\xc3\x6d\xa6\x50\x40\x81\x1a\x9d\x74\x90\x29\xc3\x37\x49\xfc\x2d\x8e\x9e\x18\x43\x75\x12\x3c\x6c\x84\x42\x6d\xec\xb1\x67\x11\xb2\x4a\x2a\x21\x75\x91\xc4\x51\x2b\x3d\x06\x5d\x29\x35\x8c\x03\x84\x32\x66\x53\x95\x29\xe7\xa6\x0a\xb6\xff\x81\xdc\xd7\x60\xae\x44\x2e\x73\x2a\x0e\xd6\xdd\x7a\x13\xae\x3a\xbd\x26\x23\xf9\x24\x8e\xce\x60\xc6\x90\x57\x3a\xb8\xd3\x67\x42\xd8\x21\x88\x6c\xf0\x2d\x8e\xa2\x1d\xb3\x84\x05\xd7\xe0\xcd\x17\x3c\x84\xcb\xc1\x55\x1c\x45\x32\x87\xbe\x5f\x4b\x97\xb4\xc0\xbf\x31\xce\x7f\x87\xeb\xeb\xeb\xd0\xd4\xb9\xd4\x28\x06\x40\x10\xd1\x4b\x62\xf5\x4d\x94\x31\xc5\x34\xc7\x31\xf4\x2e\x0f\x3d\xf8\x09\x44\x96\x14\xe8\x3f\xd4\xa7\xb5\xb2\xc4\x9b\xa5\xb7\x52\x17\xfd\xf7\xbf\x0c\x86\xe1\x95\x36\xe1\x0d\x34\xe2\x37\xa6\x13\xae\xef\xb9\x11\xe1\xba\xb1\xb9\x96\x9a\x18\xd1\x08\x35\x52\xce\x1b\xcb\x0a\x1c\xc3\xb7\x47\xfa\x7e\x24\xaf\x1e\xe3\xe8\xf1\x2c\xca\xcb\x5a\xe8\x95\x28\x37\x10\x80\xda\xdb\xae\xce\x0b\x49\x9d\xfa\x34\x01\x01\xef\x7b\x49\x58\xb6\xa6\x3c\x4b\xc2\x06\x8f\x3f\xce\x04\x5d\x48\x71\xe8\x2e\x36\x78\x1c\x5c\xc5\xaf\xa6\x28\x69\x8c\xfe\x4d\x8a\xc3\xcb\xf9\x22\xc0\x1d\x53\x1d\x60\x1d\xbf\x25\x21\x9c\xec\x1a\x04\xdd\x41\x07\xc9\xfe\xe5\x1a\x2e\x2e\x0f\x97\xff\xe7\xdf\x45\x63\xc1\x0b\x25\xf3\xcc\xec\x37\x98\xf6\x78\x9e\x4f\x8b\xae\x52\x9e\xda\x4e\xea\x9d\xd9\x10\x81\xae\x29\x4f\x4a\x85\xd4\x98\x92\xaa\xc6\xd5\x0c\x96\x21\x6a\x90\x1e\x2d\x23\x0a\x37\x3b\xb4\x34\
xbd\xc0\xa2\xaf\xac\x76\x5d\x3a\x73\xa9\x99\x6a\x81\x9b\xec\x7b\xcb\x78\xdd\xbb\xf5\xf9\x93\x9c\x72\x7f\x08\xd9\x0c\x3e\x8e\x46\x90\x7a\x20\x3f\xa1\x34\x52\xfb\x21\xec\x11\x34\xa2\x20\x02\x12\x28\x2a\xee\x03\x5e\x6f\xc7\x54\x85\xbd\x9a\x64\x88\xaa\xc3\x53\x53\xd1\x44\x7a\x42\x42\xc3\x60\xe0\xd6\xec\xc2\xa8\xcd\x18\xdf\x40\xd3\xf8\xc6\xca\x42\xea\xb8\x89\xe9\x59\xd3\x93\x45\x09\x01\x07\xb3\x42\xcd\x50\xee\xe9\xe4\x43\xc8\x7f\x26\x8b\x99\xf6\xcf\x8a\xa8\x8e\x7c\xfb\x74\xf0\x7b\xd2\x34\x71\xe2\x88\x78\xfb\x3f\x0f\x86\xf0\xfe\x97\xae\x32\xbd\x21\x28\xf8\x31\x98\x37\xaf\x43\xc5\xcf\x2b\xe2\xe5\x67\x41\x0d\x31\xc9\x4f\x41\x6b\xe2\xaa\x8c\xd2\x51\xfb\x19\xe2\x78\xce\x26\x57\xdf\xc1\x3d\xf7\xad\xc5\x6d\x42\x93\x30\x21\x5e\x07\xad\x53\xf4\x11\xb9\xc5\x2d\x4d\x17\xca\x02\x67\x4a\xa1\xed\x39\x08\xdc\x35\x6c\xca\x29\xe4\x0b\xb7\xa5\x3f\xb6\x33\xc7\x33\x5b\xa0\x77\x3f\x36\x2c\xe0\xbc\x7b\xd7\x52\x71\x08\xc5\xb1\x44\xb8\xbe\x86\xde\x64\x31\x4d\x57\xd3\x5e\xd3\x4c\xa3\x11\xdc\x63\xd8\xc8\x32\x25\x33\xa1\x8e\x20\x50\xa1\xc7\xda\x2e\xa3\x43\x88\x3a\x6a\x1a\xd2\x6a\x45\x4b\x0f\x1e\xa4\xf3\x52\x17\x50\x33\xd6\x9e\xe6\x7b\x03\x17\x7a\x84\xb3\xca\x51\xb5\x3e\x1b\x86\xde\xd0\x66\x63\x91\xf8\x8d\xe6\x50\x68\x37\xa6\x64\xb7\x09\xe5\xd2\x3a\x0f\xa5\x62\x1c\x13\xc2\xeb\x8c\x79\x3d\xbf\x0d\x33\x93\xea\x45\x68\xc1\x00\x74\x1a\xb4\x4c\xd1\xa0\x26\xf5\x0e\xfa\x2d\xc6\x20\x8e\x22\xdb\x4a\x3f\xc1\xbe\x3a\x51\x82\xf3\x58\x3e\x25\x04\x5a\x70\x70\x87\x44\xe5\x81\x0d\xea\xa1\x4c\xba\xfe\xf9\x6b\xb3\x05\xa0\x4b\xe2\x88\xde\x3d\xe9\x6b\x65\x8a\xf3\xbe\x16\x75\x58\x78\x65\x2d\xe5\xbf\x1b\x05\x39\xf5\xf8\x1f\x95\xf3\x14\x53\x4b\xe1\x69\xd8\xe2\x25\xb2\x0e\xd4\x4c\x53\x7f\xf0\xe7\x21\x4a\xf3\x33\xcc\x2b\x52\xd7\x4c\xcb\x7a\xab\x2c\x8d\x47\xed\x25\x53\xea\x48\x79\xd8\x5b\x5a\xa7\x68\x81\x1a\x82\x93\x24\x15\x18\x27\x88\x4a\xcd\x55\x25\xea\x32\x08\x75\xdc\xe0\xb9\x60\xf3\xf9\x1e\xb6\x45\xe7\x58\x81\x09\x55\x52\x2e\x0f\xcd\x26\xab\xa1\x57\x93\x5c\x7f\xd0\x4b\x3a\x23\xcf\x29\x46\x99\x22\x69\x8b\x8c\xb8\x3a\x15\xc2\xa2\x73\xfd\x41\xc3\x39\x5d\x66\xef\xd7\xa8\x29\xf8\xa0\x71\x0f\xdd\x8a\xc4\x38\xa7\x95\x51\x0c\x81\x09\x41\xd4\xf6\x6c\x9d\x89\xa3\xc8\xed\xa5\xe7\x6b\x08\x9a\x4c\x79\xea\xc5\x41\x53\xff\x9c\x39\x84\x8b\xe9\xbf\x56\x93\xdb\x8f\xd3\xc9\xed\xdd\xc3\xc5\x18\xce\xce\x96\xb3\x7f\x4f\xbb\xb3\x0f\xe9\x3c\xbd\x99\x4c\x2f\xc6\xa7\x39\x74\xee\x90\x37\xad\x0b\xa4\xd0\x79\xc6\x37\x49\x89\xb8\xe9\x5f\x9e\xf3\xc0\xc9\xc1\x28\xca\x2c\xb2\xcd\xd5\xc9\x98\xba\x41\x1b\x1d\x2d\xe5\xc2\x35\xbc\x1a\xac\xab\xd7\xad\x99\x34\xf2\xfd\x96\xc8\x4f\x2b\x51\xa0\x8a\xef\xda\x91\xce\xe7\x9d\xe7\xf4\x41\xe1\xe8\x0e\x3e\x4e\xe7\xd3\xcf\xe9\x6a\x7a\x26\xb5\x5c\xa5\xab\xd9\xa4\x3e\xfa\xaf\x43\xf4\xfe\xcd\x21\xea\x2d\x97\xab\xdb\xc5\xb4\x37\x6e\xbe\xe6\xb7\xe9\xc7\xde\x9f\x14\x36\x7b\xd3\xf7\x8a\xcc\x9b\x7b\x63\xc5\xff\x92\xab\x27\xbb\x43\xce\x5e\x5a\x1d\x02\x09\x71\x5f\x3d\xfb\x89\x00\x4c\xb7\xfc\x91\xd7\x3f\x93\xa2\xf0\xfe\x45\xc6\x78\x8c\x1f\xe3\xff\x04\x00\x00\xff\xff\xb5\x44\x89\xaf\xbc\x0f\x00\x00")
+var _prestate_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\xdd\x6f\xdb\x38\x12\x7f\x96\xfe\x8a\x41\x5f\x6c\xa3\xae\xdc\x64\x81\x3d\xc0\xb9\x1c\xa0\xba\x6e\x1b\x20\x9b\x04\xb6\x7b\xb9\xdc\x62\x1f\x28\x72\x24\x73\x4d\x93\x02\x49\xd9\xf1\x15\xf9\xdf\x0f\x43\x7d\xf8\xa3\x49\xd3\xdd\x37\x9b\x1c\xfe\xe6\xfb\x37\xa3\xd1\x08\x26\xa6\xdc\x59\x59\x2c\x3d\x9c\xbf\x3f\xfb\x07\x2c\x96\x08\x85\x79\x87\x7e\x89\x16\xab\x35\xa4\x95\x5f\x1a\xeb\xe2\xd1\x08\x16\x4b\xe9\x20\x97\x0a\x41\x3a\x28\x99\xf5\x60\x72\xf0\x27\xf2\x4a\x66\x96\xd9\x5d\x12\x8f\x46\xf5\x9b\x67\xaf\x09\x21\xb7\x88\xe0\x4c\xee\xb7\xcc\xe2\x18\x76\xa6\x02\xce\x34\x58\x14\xd2\x79\x2b\xb3\xca\x23\x48\x0f\x4c\x8b\x91\xb1\xb0\x36\x42\xe6\x3b\x82\x94\x1e\x2a\x2d\xd0\x06\xd5\x1e\xed\xda\xb5\x76\x7c\xbe\xf9\x0a\xd7\xe8\x1c\x5a\xf8\x8c\x1a\x2d\x53\x70\x57\x65\x4a\x72\xb8\x96\x1c\xb5\x43\x60\x0e\x4a\x3a\x71\x4b\x14\x90\x05\x38\x7a\xf8\x89\x4c\x99\x37\xa6\xc0\x27\x53\x69\xc1\xbc\x34\x7a\x08\x28\xc9\x72\xd8\xa0\x75\xd2\x68\xf8\xa5\x55\xd5\x00\x0e\xc1\x58\x02\xe9\x33\x4f\x0e\x58\x30\x25\xbd\x1b\x00\xd3\x3b\x50\xcc\xef\x9f\xfe\x44\x40\xf6\x7e\x0b\x90\x3a\xa8\x59\x9a\x12\xc1\x2f\x99\x27\xaf\xb7\x52\x29\xc8\x10\x2a\x87\x79\xa5\x86\x84\x96\x55\x1e\xee\xaf\x16\x5f\x6e\xbf\x2e\x20\xbd\x79\x80\xfb\x74\x36\x4b\x6f\x16\x0f\x17\xb0\x95\x7e\x69\x2a\x0f\xb8\xc1\x1a\x4a\xae\x4b\x25\x51\xc0\x96\x59\xcb\xb4\xdf\x81\xc9\x09\xe1\xb7\xe9\x6c\xf2\x25\xbd\x59\xa4\x1f\xae\xae\xaf\x16\x0f\x60\x2c\x7c\xba\x5a\xdc\x4c\xe7\x73\xf8\x74\x3b\x83\x14\xee\xd2\xd9\xe2\x6a\xf2\xf5\x3a\x9d\xc1\xdd\xd7\xd9\xdd\xed\x7c\x9a\xc0\x1c\xc9\x2a\xa4\xf7\xaf\xc7\x3c\x0f\xd9\xb3\x08\x02\x3d\x93\xca\xb5\x91\x78\x30\x15\xb8\xa5\xa9\x94\x80\x25\xdb\x20\x58\xe4\x28\x37\x28\x80\x01\x37\xe5\xee\xa7\x93\x4a\x58\x4c\x19\x5d\x04\x9f\x5f\x2c\x48\xb8\xca\x41\x1b\x3f\x04\x87\x08\xff\x5c\x7a\x5f\x8e\x47\xa3\xed\x76\x9b\x14\xba\x4a\x8c\x2d\x46\xaa\x86\x73\xa3\x7f\x25\x31\x61\x96\x16\x9d\x67\x1e\x17\x96\x71\xb4\x60\x2a\x5f\x56\xde\x81\xab\xf2\x5c\x72\x89\xda\x83\xd4\xb9\xb1\xeb\x50\x29\xe0\x0d\x70\x8b\xcc\x23\x30\x50\x86\x33\x05\xf8\x88\xbc\x0a\x77\x75\xa4\x43\xb9\x5a\xa6\x1d\xe3\xe1\x34\xb7\x66\x4d\xbe\x56\xce\xd3\x0f\xe7\x70\x9d\x29\x14\x50\xa0\x46\x27\x1d\x64\xca\xf0\x55\x12\x7f\x8b\xa3\x03\x63\xa8\x4e\x82\x87\x8d\x50\xa8\x8d\x2d\xf6\x2c\x42\x56\x49\x25\xa4\x2e\x92\x38\x6a\xa5\xc7\xa0\x2b\xa5\x86\x71\x80\x50\xc6\xac\xaa\x32\xe5\xdc\x54\xc1\xf6\x3f\x91\xfb\x1a\xcc\x95\xc8\x65\x4e\xc5\xc1\xba\x5b\x6f\xc2\x55\xa7\xd7\x64\x24\x9f\xc4\xd1\x11\xcc\x18\xf2\x4a\x07\x77\xfa\x4c\x08\x3b\x04\x91\x0d\xbe\xc5\x51\xb4\x61\x96\xb0\xe0\x12\xbc\xf9\x82\x8f\xe1\x72\x70\x11\x47\x91\xcc\xa1\xef\x97\xd2\x25\x2d\xf0\xef\x8c\xf3\x3f\xe0\xf2\xf2\x32\x34\x75\x2e\x35\x8a\x01\x10\x44\xf4\x9c\x58\x7d\x13\x65\x4c\x31\xcd\x71\x0c\xbd\xf7\x8f\x3d\x78\x0b\x22\x4b\x0a\xf4\x1f\xea\xd3\x5a\x59\xe2\xcd\xdc\x5b\xa9\x8b\xfe\xd9\xaf\x83\x61\x78\xa5\x4d\x78\x03\x8d\xf8\x8d\xe9\x84\xeb\x7b\x6e\x44\xb8\x6e\x6c\xae\xa5\x26\x46\x34\x42\x8d\x94\xf3\xc6\xb2\x02\xc7\xf0\xed\x89\xfe\x3f\x91\x57\x4f\x71\xf4\x74\x14\xe5\x79\x2d\xf4\x42\x94\x1b\x08\x40\xed\x6d\x57\xe7\x85\xa4\x4e\x3d\x4c\x40\xc0\xfb\x51\x12\xe6\xad\x29\x27\x49\x58\xe1\xee\xf5\x4c\xd0\x85\x14\x8f\xdd\xc5\x0a\x77\x83\x8b\xf8\xc5\x14\x25\x8d\xd1\xbf\x4b\xf1\xf8\xb3\xf9\x3a\x79\x73\x14\xd7\x39\x49\xed\xed\x1d\x0c\x4e\xe2\x68\xd1\x55\xca\x53\xb9\x4b\xbd\x31\x2b\x22\xae\x25\xc5\x47\xa9\x10\x12\x53\x52\xb6\x5c\xcd\x1c\x19\xa2\x06\xe9\xd1\x32\xa2\x4e\xb3\x41\x4b\x53\x03\x2c\xfa\xca\x6a\xd7\x85\x31\x97\x9a\xa9\x16\xb8\x89\xba\xb7\x8c\xd7\x3d\x53\x9f\x1f\xc4\x92\xfb\xc7\x10\xc5\
xe0\xdd\x68\x04\xa9\x07\x72\x11\x4a\x23\xb5\x1f\xc2\x16\x41\x23\x0a\x6a\x7c\x81\xa2\xe2\x3e\xe0\xf5\x36\x4c\x55\xd8\xab\x9b\x9b\x28\x32\x3c\x35\x15\x4d\x82\x83\xe6\x1f\x06\x03\xd7\x66\x13\x46\x5c\xc6\xf8\x0a\x9a\x86\x33\x56\x16\x52\xc7\x4d\x38\x8f\x9a\x8d\x2c\x4a\x08\x38\x98\x15\x72\x45\x49\xa4\x93\x0f\x4c\xc1\x25\x64\xb2\xb8\xd2\xfe\x24\x79\x75\xd0\xdb\xa7\x83\x3f\x92\xa6\x79\x12\x47\x84\xd7\x3f\x1f\x0c\xe1\xec\xd7\xae\x22\xbc\x21\x28\x78\x1d\xcc\x9b\x97\xa1\xe2\xd3\x62\x78\xfe\x59\x50\x43\x1d\xfc\x36\x68\x4d\x5c\x95\x51\x3a\x6a\x3f\x43\x1c\x8f\xbb\xf8\xe2\x07\xb8\xc7\xbe\xb5\xb8\x4d\x68\x12\x26\xc4\xcb\xa0\x75\x8a\x3e\x22\xb7\xb8\x26\x56\xa7\x2c\x70\xa6\x14\xda\x9e\x83\xc0\x19\xc3\xa6\x9c\x42\xbe\x70\x5d\xfa\x5d\xcb\xf5\x9e\xd9\x02\xbd\x7b\xdd\xb0\x80\xf3\xee\x5d\x4b\x81\x21\x14\xbb\x12\xe1\xf2\x12\x7a\x93\xd9\x34\x5d\x4c\x7b\x4d\x1b\x8d\x46\x70\x8f\x61\x13\xca\x94\xcc\x84\xda\x81\x40\x85\x1e\x6b\xbb\x8c\x0e\x21\xea\x28\x61\x48\x2b\x0d\x2d\x1b\xf8\x28\x9d\x97\xba\x80\x9a\x29\xb6\x34\x57\x1b\xb8\xd0\x23\x9c\x55\x8e\xaa\xf5\x64\x08\x79\x43\x1b\x85\x45\xe2\x15\xe2\xff\xd0\x6e\x4c\xc9\x6e\x03\xc9\xa5\x75\x1e\x4a\xc5\x38\x26\x84\xd7\x19\xf3\x72\x7e\x9b\x4e\x26\xd5\xb3\xd0\x82\x01\x68\x3f\xe0\x98\xa2\x01\x49\xea\x1d\xf4\x5b\x8c\x41\x1c\x45\xb6\x95\x3e\xc0\xbe\xd8\x53\x82\xf3\x58\x1e\x12\x02\x2d\x16\xb8\x41\xa2\xd0\xc0\x06\xf5\x30\x24\x5d\xff\xfe\xad\x99\xbe\xe8\x92\x38\xa2\x77\x07\x7d\xad\x4c\x71\xdc\xd7\xa2\x0e\x0b\xaf\xac\xa5\xfc\x77\x14\x9c\x53\x8f\xff\x59\x39\x4f\x31\xb5\x14\x9e\x86\x2d\x9e\x23\xc9\x40\x89\x34\x6d\x07\xdf\x93\x21\xcd\xad\x30\x27\x48\x5d\x33\xa5\xea\x6d\xae\x34\x1e\xb5\x97\x4c\xa9\x1d\xe5\x61\x6b\x69\x8d\xa1\xc5\x65\x08\x4e\x92\x54\x60\x9c\x20\x2a\x35\x57\x95\xa8\xcb\x20\xd4\x71\x83\xe7\x82\xcd\xc7\xfb\xcf\x1a\x9d\x63\x05\x26\x54\x49\xb9\x7c\x6c\x36\x48\x0d\xbd\x9a\xe4\xfa\x83\x5e\xd2\x19\x79\x4c\x31\xca\x14\x49\x5b\x64\x44\xd3\xa9\x10\x16\x9d\xeb\x0f\x1a\xce\xe9\x32\x7b\xbf\x44\x4d\xc1\x07\x8d\x5b\xe8\x56\x13\xc6\x39\xad\x6a\x62\x08\x4c\x08\xa2\xb6\x93\x35\x22\x8e\x22\xb7\x95\x9e\x2f\x21\x68\x32\xe5\xbe\x17\x07\x4d\xfd\x73\xe6\x10\xde\x4c\xff\xb3\x98\xdc\x7e\x9c\x4e\x6e\xef\x1e\xde\x8c\xe1\xe8\x6c\x7e\xf5\xdf\x69\x77\xf6\x21\xbd\x4e\x6f\x26\xd3\x37\xe3\x30\x9b\x9f\x71\xc8\x9b\xd6\x05\x52\xe8\x3c\xe3\xab\xa4\x44\x5c\xf5\xdf\x1f\xf3\xc0\xde\xc1\x28\xca\x2c\xb2\xd5\xc5\xde\x98\xba\x41\x1b\x1d\x2d\xe5\xc2\x25\xbc\x18\xac\x8b\x97\xad\x99\x34\xf2\xfd\x96\xc8\xf7\xab\x48\xa0\x8a\xd7\xed\x38\xff\xcb\x86\x84\xde\x61\x7c\x35\x06\xc7\x14\x6d\xc0\xf2\x7f\xf4\xe5\x92\xe7\x0e\xfd\x10\x50\x0b\xb3\x25\xe6\xeb\x50\xeb\x9b\x06\xf7\x20\x64\x67\x83\x9a\x41\x6f\xf3\xfe\xa0\x13\x26\xb0\xef\x45\xcf\x9f\x13\x45\x2d\xe0\xb2\x45\x7f\x1b\x5e\xbe\x1e\xa8\xf3\x26\x52\x27\x0a\x7e\x39\xd9\xf0\xc2\xfd\x1a\xd7\xc6\xee\x9a\x71\x74\xe0\xdf\x8f\xa3\x9a\x5e\x5f\x77\xf5\x44\x7f\xa8\xc8\xba\x83\x8f\xd3\xeb\xe9\xe7\x74\x31\x3d\x92\x9a\x2f\xd2\xc5\xd5\xa4\x3e\xfa\xcb\x85\x77\xf6\xd3\x85\xd7\x9b\xcf\x17\xb7\xb3\x69\x6f\xdc\xfc\xbb\xbe\x4d\x3f\xf6\xbe\x53\xd8\x6c\x81\x3f\x6a\x5d\x6f\xee\x8d\x15\x7f\xa7\x03\x0e\x36\xb2\x9c\x3d\xb7\x90\x05\x6a\xe7\xbe\x3a\xf9\xe0\x01\xa6\x5b\x56\xce\xeb\x8f\xbe\x28\xbc\x7f\x96\x87\x9f\xe2\xa7\xf8\xff\x01\x00\x00\xff\xff\xb1\x28\x85\x2a\x8a\x10\x00\x00")
func prestate_tracerJsBytes() ([]byte, error) {
return bindataRead(
@@ -213,7 +213,7 @@ func prestate_tracerJs() (*asset, error) {
}
info := bindataFileInfo{name: "prestate_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
- a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd0, 0xd5, 0x5, 0x92, 0xed, 0xf4, 0x69, 0x2e, 0x14, 0x48, 0x35, 0x67, 0xcc, 0xf2, 0x3e, 0xc7, 0xf, 0x18, 0x22, 0x7a, 0x4d, 0x6f, 0x31, 0xad, 0x3c, 0x92, 0x77, 0xb4, 0x1, 0x2a, 0xd3, 0x7c}}
+ a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe9, 0x79, 0x70, 0x4f, 0xc5, 0x78, 0x57, 0x63, 0x6f, 0x5, 0x31, 0xce, 0x3e, 0x5d, 0xbd, 0x71, 0x4, 0x46, 0x78, 0xcd, 0x1d, 0xcd, 0xb9, 0xd8, 0x10, 0xff, 0xe6, 0xc5, 0x59, 0xb9, 0x25, 0x6e}}
return a, nil
}
diff --git a/eth/tracers/internal/tracers/call_tracer.js b/eth/tracers/internal/tracers/call_tracer.js
index 83495b157..f8b383cd9 100644
--- a/eth/tracers/internal/tracers/call_tracer.js
+++ b/eth/tracers/internal/tracers/call_tracer.js
@@ -38,7 +38,7 @@
var op = log.op.toString();
}
// If a new contract is being created, add to the call stack
- if (syscall && op == 'CREATE') {
+ if (syscall && (op == 'CREATE' || op == 'CREATE2')) {
var inOff = log.stack.peek(1).valueOf();
var inEnd = inOff + log.stack.peek(2).valueOf();
@@ -116,7 +116,7 @@
// Pop off the last call and get the execution results
var call = this.callstack.pop();
- if (call.type == 'CREATE') {
+ if (call.type == 'CREATE' || call.type == 'CREATE2') {
// If the call was a CREATE, retrieve the contract address and output code
call.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost - log.getGas()).toString(16);
delete call.gasIn; delete call.gasCost;
diff --git a/eth/tracers/internal/tracers/prestate_tracer.js b/eth/tracers/internal/tracers/prestate_tracer.js
index 99f71d2c3..e0a22bf15 100644
--- a/eth/tracers/internal/tracers/prestate_tracer.js
+++ b/eth/tracers/internal/tracers/prestate_tracer.js
@@ -40,10 +40,7 @@
var idx = toHex(key);
if (this.prestate[acc].storage[idx] === undefined) {
- var val = toHex(db.getState(addr, key));
- if (val != "0x0000000000000000000000000000000000000000000000000000000000000000") {
- this.prestate[acc].storage[idx] = toHex(db.getState(addr, key));
- }
+ this.prestate[acc].storage[idx] = toHex(db.getState(addr, key));
}
},
@@ -89,6 +86,14 @@
var from = log.contract.getAddress();
this.lookupAccount(toContract(from, db.getNonce(from)), db);
break;
+ case "CREATE2":
+ var from = log.contract.getAddress();
+ // stack: salt, size, offset, endowment (the endowment sits on top, i.e. peek(0))
+ var offset = log.stack.peek(1).valueOf();
+ var size = log.stack.peek(2).valueOf();
+ var end = offset + size;
+ this.lookupAccount(toContract2(from, log.stack.peek(3).toString(16), log.memory.slice(offset, end)), db);
+ break;
case "CALL": case "CALLCODE": case "DELEGATECALL": case "STATICCALL":
this.lookupAccount(toAddress(log.stack.peek(1).toString(16)), db);
break;
diff --git a/eth/tracers/internal/tracers/tracers.go b/eth/tracers/internal/tracers/tracers.go
index dcf0d49da..2e40975bb 100644
--- a/eth/tracers/internal/tracers/tracers.go
+++ b/eth/tracers/internal/tracers/tracers.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-//go:generate go-bindata -nometadata -o assets.go -pkg tracers -ignore ((tracers)|(assets)).go ./...
+//go:generate go-bindata -nometadata -o assets.go -pkg tracers -ignore tracers.go -ignore assets.go ./...
//go:generate gofmt -s -w assets.go
// Package tracers contains the actual JavaScript tracer assets.
diff --git a/eth/tracers/tracer.go b/eth/tracers/tracer.go
index 3533a831f..9d6701868 100644
--- a/eth/tracers/tracer.go
+++ b/eth/tracers/tracer.go
@@ -367,6 +367,28 @@ func New(code string) (*Tracer, error) {
copy(makeSlice(ctx.PushFixedBuffer(20), 20), contract[:])
return 1
})
+ tracer.vm.PushGlobalGoFunction("toContract2", func(ctx *duktape.Context) int {
+ var from common.Address
+ if ptr, size := ctx.GetBuffer(-3); ptr != nil {
+ from = common.BytesToAddress(makeSlice(ptr, size))
+ } else {
+ from = common.HexToAddress(ctx.GetString(-3))
+ }
+ // Retrieve the salt hex string from the JS stack
+ salt := common.HexToHash(ctx.GetString(-2))
+ // Retrieve the code slice from the JS stack
+ var code []byte
+ if ptr, size := ctx.GetBuffer(-1); ptr != nil {
+ code = common.CopyBytes(makeSlice(ptr, size))
+ } else {
+ code = common.FromHex(ctx.GetString(-1))
+ }
+ codeHash := crypto.Keccak256(code)
+ ctx.Pop3()
+ contract := crypto.CreateAddress2(from, salt, codeHash)
+ copy(makeSlice(ctx.PushFixedBuffer(20), 20), contract[:])
+ return 1
+ })
tracer.vm.PushGlobalGoFunction("isPrecompiled", func(ctx *duktape.Context) int {
_, ok := vm.PrecompiledContractsByzantium[common.BytesToAddress(popSlice(ctx))]
ctx.PushBoolean(ok)
diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go
index d25fc459a..b435b1694 100644
--- a/eth/tracers/tracers_test.go
+++ b/eth/tracers/tracers_test.go
@@ -17,6 +17,8 @@
package tracers
import (
+ "crypto/ecdsa"
+ "crypto/rand"
"encoding/json"
"io/ioutil"
"math/big"
@@ -31,7 +33,9 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/tests"
)
@@ -116,6 +120,83 @@ type callTracerTest struct {
Result *callTrace `json:"result"`
}
+func TestPrestateTracerCreate2(t *testing.T) {
+ unsignedTx := types.NewTransaction(1, common.HexToAddress("0x00000000000000000000000000000000deadbeef"),
+ new(big.Int), 5000000, big.NewInt(1), []byte{})
+
+ privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader)
+ if err != nil {
+ t.Fatalf("err %v", err)
+ }
+ signer := types.NewEIP155Signer(big.NewInt(1))
+ tx, err := types.SignTx(unsignedTx, signer, privateKeyECDSA)
+ if err != nil {
+ t.Fatalf("err %v", err)
+ }
+ /**
+ This comes from one of the test vectors of the Skinny CREATE2 EIP (EIP-1014):
+
+ address 0x00000000000000000000000000000000deadbeef
+ salt 0x00000000000000000000000000000000000000000000000000000000cafebabe
+ init_code 0xdeadbeef
+ gas (assuming no memory expansion): 32006
+ result: 0x60f3f640a8508fC6a86d45DF051962668E1e8AC7
+ */
+ origin, _ := signer.Sender(tx)
+ context := vm.Context{
+ CanTransfer: core.CanTransfer,
+ Transfer: core.Transfer,
+ Origin: origin,
+ Coinbase: common.Address{},
+ BlockNumber: new(big.Int).SetUint64(8000000),
+ Time: new(big.Int).SetUint64(5),
+ Difficulty: big.NewInt(0x30000),
+ GasLimit: uint64(6000000),
+ GasPrice: big.NewInt(1),
+ }
+ alloc := core.GenesisAlloc{}
+ // The contract code pushes 'deadbeef' into memory, pushes the remaining CREATE2
+ // parameters, invokes CREATE2 and finally returns the created contract address
+ alloc[common.HexToAddress("0x00000000000000000000000000000000deadbeef")] = core.GenesisAccount{
+ Nonce: 1,
+ Code: hexutil.MustDecode("0x63deadbeef60005263cafebabe6004601c6000F560005260206000F3"),
+ Balance: big.NewInt(1),
+ }
+ alloc[origin] = core.GenesisAccount{
+ Nonce: 1,
+ Code: []byte{},
+ Balance: big.NewInt(500000000000000),
+ }
+ statedb := tests.MakePreState(ethdb.NewMemDatabase(), alloc)
+ // Create the tracer, the EVM environment and run it
+ tracer, err := New("prestateTracer")
+ if err != nil {
+ t.Fatalf("failed to create call tracer: %v", err)
+ }
+ evm := vm.NewEVM(context, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer})
+
+ msg, err := tx.AsMessage(signer)
+ if err != nil {
+ t.Fatalf("failed to prepare transaction for tracing: %v", err)
+ }
+ st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
+ if _, _, _, err = st.TransitionDb(); err != nil {
+ t.Fatalf("failed to execute transaction: %v", err)
+ }
+ // Retrieve the trace result and compare it against the expected result
+ res, err := tracer.GetResult()
+ if err != nil {
+ t.Fatalf("failed to retrieve trace result: %v", err)
+ }
+ ret := make(map[string]interface{})
+ if err := json.Unmarshal(res, &ret); err != nil {
+ t.Fatalf("failed to unmarshal trace result: %v", err)
+ }
+ if _, has := ret["0x60f3f640a8508fc6a86d45df051962668e1e8ac7"]; !has {
+ t.Fatalf("Expected 0x60f3f640a8508fc6a86d45df051962668e1e8ac7 in result")
+ }
+}
+
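For reference, the address asserted in the test above follows the Skinny CREATE2 rule of EIP-1014: keccak256(0xff ++ sender ++ salt ++ keccak256(init_code))[12:]. A minimal standalone sketch reproducing that test vector with the same crypto.CreateAddress2 helper the tracer change uses:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/crypto"
    )

    func main() {
        // Inputs taken from the EIP-1014 test vector exercised in the test above.
        from := common.HexToAddress("0x00000000000000000000000000000000deadbeef")
        salt := common.HexToHash("0x00000000000000000000000000000000000000000000000000000000cafebabe")
        initCode := common.FromHex("0xdeadbeef")

        // CreateAddress2 implements keccak256(0xff ++ from ++ salt ++ keccak256(initCode))[12:].
        addr := crypto.CreateAddress2(from, salt, crypto.Keccak256(initCode))
        fmt.Println(addr.Hex()) // 0x60f3f640a8508fC6a86d45DF051962668E1e8AC7
    }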
// Iterates over all the input-output datasets in the tracer test harness and
// runs the JavaScript tracers against them.
func TestCallTracer(t *testing.T) {
@@ -185,8 +266,9 @@ func TestCallTracer(t *testing.T) {
if err := json.Unmarshal(res, ret); err != nil {
t.Fatalf("failed to unmarshal trace result: %v", err)
}
+
if !reflect.DeepEqual(ret, test.Result) {
- t.Fatalf("trace mismatch: have %+v, want %+v", ret, test.Result)
+ t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result)
}
})
}
diff --git a/interfaces.go b/interfaces.go
index 26b0fcbc1..be7834406 100644
--- a/interfaces.go
+++ b/interfaces.go
@@ -146,7 +146,7 @@ type FilterQuery struct {
// {{A}} matches topic A in first position
// {{}, {B}} matches any topic in first position, B in second position
// {{A}, {B}} matches topic A in first position, B in second position
- // {{A, B}}, {C, D}} matches topic (A OR B) in first position, (C OR D) in second position
+ // {{A, B}, {C, D}} matches topic (A OR B) in first position, (C OR D) in second position
Topics [][]common.Hash
}
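To illustrate the corrected position-wise OR semantics, here is a small sketch; the hash parameters are hypothetical event topic hashes:

    import (
        ethereum "github.com/ethereum/go-ethereum"
        "github.com/ethereum/go-ethereum/common"
    )

    // newTopicQuery matches (a OR b) in the first topic position and
    // (c OR d) in the second, mirroring the corrected comment above.
    func newTopicQuery(a, b, c, d common.Hash) ethereum.FilterQuery {
        return ethereum.FilterQuery{
            Topics: [][]common.Hash{{a, b}, {c, d}},
        }
    }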
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 43a33e992..73b629bd9 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -683,7 +683,7 @@ type CallArgs struct {
Data hexutil.Bytes `json:"data"`
}
-func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config, timeout time.Duration) ([]byte, uint64, bool, error) {
+func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, timeout time.Duration) ([]byte, uint64, bool, error) {
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
@@ -724,7 +724,7 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr
defer cancel()
// Get a new instance of the EVM.
- evm, vmError, err := s.b.GetEVM(ctx, msg, state, header, vmCfg)
+ evm, vmError, err := s.b.GetEVM(ctx, msg, state, header)
if err != nil {
return nil, 0, false, err
}
@@ -748,7 +748,7 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr
// Call executes the given transaction on the state for the given block number.
// It doesn't make any changes to the state/blockchain and is useful for executing calls and retrieving values.
func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (hexutil.Bytes, error) {
- result, _, _, err := s.doCall(ctx, args, blockNr, vm.Config{}, 5*time.Second)
+ result, _, _, err := s.doCall(ctx, args, blockNr, 5*time.Second)
return (hexutil.Bytes)(result), err
}
@@ -777,7 +777,7 @@ func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (h
executable := func(gas uint64) bool {
args.Gas = hexutil.Uint64(gas)
- _, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, vm.Config{}, 0)
+ _, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, 0)
if err != nil || failed {
return false
}
@@ -1074,6 +1074,15 @@ func (s *PublicTransactionPoolAPI) GetRawTransactionByBlockHashAndIndex(ctx cont
// GetTransactionCount returns the number of transactions the given address has sent for the given block number
func (s *PublicTransactionPoolAPI) GetTransactionCount(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (*hexutil.Uint64, error) {
+ // Ask transaction pool for the nonce which includes pending transactions
+ if blockNr == rpc.PendingBlockNumber {
+ nonce, err := s.b.GetPoolNonce(ctx, address)
+ if err != nil {
+ return nil, err
+ }
+ return (*hexutil.Uint64)(&nonce), nil
+ }
+ // Resolve block number and use its state to ask for the nonce
state, _, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
if state == nil || err != nil {
return nil, err
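From a client's perspective, the change above means that querying the nonce at the pending block now also counts transactions still queued in the pool. A sketch of the difference using ethclient (the endpoint URL is a placeholder):

    import (
        "context"
        "fmt"
        "log"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/ethclient"
    )

    func printNonces(addr common.Address) {
        client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
        if err != nil {
            log.Fatal(err)
        }
        // NonceAt with a nil block number resolves against the latest block's state...
        latest, _ := client.NonceAt(context.Background(), addr, nil)
        // ...while PendingNonceAt now also accounts for pool transactions.
        pending, _ := client.PendingNonceAt(context.Background(), addr)
        fmt.Println("latest:", latest, "pending:", pending)
    }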
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index c9ffe230c..e23ee03b1 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -53,7 +53,7 @@ type Backend interface {
GetBlock(ctx context.Context, blockHash common.Hash) (*types.Block, error)
GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
GetTd(blockHash common.Hash) *big.Int
- GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmCfg vm.Config) (*vm.EVM, func() error, error)
+ GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error)
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index a5f319653..6b98c8b7e 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -18,6 +18,7 @@
package web3ext
var Modules = map[string]string{
+ "accounting": Accounting_JS,
"admin": Admin_JS,
"chequebook": Chequebook_JS,
"clique": Clique_JS,
@@ -384,6 +385,18 @@ web3._extend({
params: 1,
inputFormatter: [null]
}),
+ new web3._extend.Method({
+ name: 'standardTraceBadBlockToFile',
+ call: 'debug_standardTraceBadBlockToFile',
+ params: 2,
+ inputFormatter: [null, null]
+ }),
+ new web3._extend.Method({
+ name: 'standardTraceBlockToFile',
+ call: 'debug_standardTraceBlockToFile',
+ params: 2,
+ inputFormatter: [null, null]
+ }),
new web3._extend.Method({
name: 'traceBlockByNumber',
call: 'debug_traceBlockByNumber',
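The two new methods registered above forward to the debug namespace. A hedged sketch of invoking one over raw RPC; the IPC path is a placeholder, the second argument is the optional trace config, and the []string result type is an assumption about the server writing trace files and replying with their paths:

    import (
        "context"
        "fmt"
        "log"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/rpc"
    )

    func standardTraceBlock(hash common.Hash) {
        client, err := rpc.Dial("/tmp/geth.ipc") // placeholder endpoint
        if err != nil {
            log.Fatal(err)
        }
        var files []string // assumed result shape: paths of the written trace files
        if err := client.CallContext(context.Background(), &files,
            "debug_standardTraceBlockToFile", hash, nil); err != nil {
            log.Fatal(err)
        }
        fmt.Println(files)
    }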
@@ -692,3 +705,47 @@ web3._extend({
]
});
`
+
+const Accounting_JS = `
+web3._extend({
+ property: 'accounting',
+ methods: [
+ new web3._extend.Property({
+ name: 'balance',
+ getter: 'account_balance'
+ }),
+ new web3._extend.Property({
+ name: 'balanceCredit',
+ getter: 'account_balanceCredit'
+ }),
+ new web3._extend.Property({
+ name: 'balanceDebit',
+ getter: 'account_balanceDebit'
+ }),
+ new web3._extend.Property({
+ name: 'bytesCredit',
+ getter: 'account_bytesCredit'
+ }),
+ new web3._extend.Property({
+ name: 'bytesDebit',
+ getter: 'account_bytesDebit'
+ }),
+ new web3._extend.Property({
+ name: 'msgCredit',
+ getter: 'account_msgCredit'
+ }),
+ new web3._extend.Property({
+ name: 'msgDebit',
+ getter: 'account_msgDebit'
+ }),
+ new web3._extend.Property({
+ name: 'peerDrops',
+ getter: 'account_peerDrops'
+ }),
+ new web3._extend.Property({
+ name: 'selfDrops',
+ getter: 'account_selfDrops'
+ }),
+ ]
+});
+`
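Each property above is a thin getter over an account_* RPC endpoint, so the same values are reachable without the console. A sketch, assuming the attached node actually exposes the accounting API and that the balance is numeric:

    import (
        "fmt"
        "log"

        "github.com/ethereum/go-ethereum/rpc"
    )

    func printAccountingBalance() {
        client, err := rpc.Dial("/tmp/bzzd.ipc") // placeholder endpoint
        if err != nil {
            log.Fatal(err)
        }
        var balance int64 // the numeric result type is an assumption
        if err := client.Call(&balance, "account_balance"); err != nil {
            log.Fatal(err)
        }
        fmt.Println("accounting balance:", balance)
    }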
diff --git a/les/api_backend.go b/les/api_backend.go
index aa748a4ea..753139623 100644
--- a/les/api_backend.go
+++ b/les/api_backend.go
@@ -105,10 +105,10 @@ func (b *LesApiBackend) GetTd(hash common.Hash) *big.Int {
return b.eth.blockchain.GetTdByHash(hash)
}
-func (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmCfg vm.Config) (*vm.EVM, func() error, error) {
+func (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) {
state.SetBalance(msg.From(), math.MaxBig256)
context := core.NewEVMContext(msg, header, b.eth.blockchain, nil)
- return vm.NewEVM(context, state, b.eth.chainConfig, vmCfg), state.Error, nil
+ return vm.NewEVM(context, state, b.eth.chainConfig, vm.Config{}), state.Error, nil
}
func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
diff --git a/les/backend.go b/les/backend.go
index a3474a683..d0db71019 100644
--- a/les/backend.go
+++ b/les/backend.go
@@ -82,7 +82,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
if err != nil {
return nil, err
}
- chainConfig, genesisHash, genesisErr := core.SetupGenesisBlock(chainDb, config.Genesis)
+ chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.ConstantinopleOverride)
if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat {
return nil, genesisErr
}
diff --git a/les/fetcher.go b/les/fetcher.go
index f0d3b188d..2615f69df 100644
--- a/les/fetcher.go
+++ b/les/fetcher.go
@@ -141,36 +141,39 @@ func (f *lightFetcher) syncLoop() {
s := requesting
requesting = false
var (
- rq *distReq
- reqID uint64
+ rq *distReq
+ reqID uint64
+ syncing bool
)
if !f.syncing && !(newAnnounce && s) {
- rq, reqID = f.nextRequest()
+ rq, reqID, syncing = f.nextRequest()
}
- syncing := f.syncing
f.lock.Unlock()
if rq != nil {
requesting = true
- _, ok := <-f.pm.reqDist.queue(rq)
- if !ok {
+ if _, ok := <-f.pm.reqDist.queue(rq); ok {
+ if syncing {
+ f.lock.Lock()
+ f.syncing = true
+ f.lock.Unlock()
+ } else {
+ go func() {
+ time.Sleep(softRequestTimeout)
+ f.reqMu.Lock()
+ req, ok := f.requested[reqID]
+ if ok {
+ req.timeout = true
+ f.requested[reqID] = req
+ }
+ f.reqMu.Unlock()
+ // keep starting new requests while possible
+ f.requestChn <- false
+ }()
+ }
+ } else {
f.requestChn <- false
}
-
- if !syncing {
- go func() {
- time.Sleep(softRequestTimeout)
- f.reqMu.Lock()
- req, ok := f.requested[reqID]
- if ok {
- req.timeout = true
- f.requested[reqID] = req
- }
- f.reqMu.Unlock()
- // keep starting new requests while possible
- f.requestChn <- false
- }()
- }
}
case reqID := <-f.timeoutChn:
f.reqMu.Lock()
@@ -209,6 +212,7 @@ func (f *lightFetcher) syncLoop() {
f.checkSyncedHeaders(p)
f.syncing = false
f.lock.Unlock()
+ f.requestChn <- false
}
}
}
@@ -405,7 +409,7 @@ func (f *lightFetcher) requestedID(reqID uint64) bool {
// nextRequest selects the peer and announced head to be requested next, amount
// to be downloaded starting from the head backwards is also returned
-func (f *lightFetcher) nextRequest() (*distReq, uint64) {
+func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) {
var (
bestHash common.Hash
bestAmount uint64
@@ -427,14 +431,12 @@ func (f *lightFetcher) nextRequest() (*distReq, uint64) {
}
}
if bestTd == f.maxConfirmedTd {
- return nil, 0
+ return nil, 0, false
}
- f.syncing = bestSyncing
-
var rq *distReq
reqID := genReqID()
- if f.syncing {
+ if bestSyncing {
rq = &distReq{
getCost: func(dp distPeer) uint64 {
return 0
@@ -500,7 +502,7 @@ func (f *lightFetcher) nextRequest() (*distReq, uint64) {
},
}
}
- return rq, reqID
+ return rq, reqID, bestSyncing
}
// deliverHeaders delivers header download request responses for processing
diff --git a/les/flowcontrol/control.go b/les/flowcontrol/control.go
index d50eb809c..8ef4ba511 100644
--- a/les/flowcontrol/control.go
+++ b/les/flowcontrol/control.go
@@ -82,7 +82,6 @@ func (peer *ClientNode) RequestProcessed(cost uint64) (bv, realCost uint64) {
time := mclock.Now()
peer.recalcBV(time)
peer.bufValue -= cost
- peer.recalcBV(time)
rcValue, rcost := peer.cm.processed(peer.cmNode, time)
if rcValue < peer.params.BufLimit {
bv := peer.params.BufLimit - rcValue
diff --git a/light/trie.go b/light/trie.go
index c07e99461..ab4e18b43 100644
--- a/light/trie.go
+++ b/light/trie.go
@@ -108,7 +108,7 @@ func (t *odrTrie) TryGet(key []byte) ([]byte, error) {
func (t *odrTrie) TryUpdate(key, value []byte) error {
key = crypto.Keccak256(key)
return t.do(key, func() error {
- return t.trie.TryDelete(key)
+ return t.trie.TryUpdate(key, value)
})
}
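The one-line fix above is the whole change: TryUpdate previously delegated to TryDelete, so every write through the light-client trie wrapper erased the key instead of storing the value. A minimal regression-style sketch of the expected round trip on a plain trie (the ODR wrapper additionally Keccak-hashes keys before delegating):

    import (
        "bytes"
        "testing"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/ethdb"
        "github.com/ethereum/go-ethereum/trie"
    )

    func TestUpdateRoundTrip(t *testing.T) {
        tr, err := trie.New(common.Hash{}, trie.NewDatabase(ethdb.NewMemDatabase()))
        if err != nil {
            t.Fatal(err)
        }
        if err := tr.TryUpdate([]byte("key"), []byte("value")); err != nil {
            t.Fatal(err)
        }
        // Before the fix, the equivalent read through odrTrie returned nothing.
        got, err := tr.TryGet([]byte("key"))
        if err != nil || !bytes.Equal(got, []byte("value")) {
            t.Fatalf("update lost: got %q, err %v", got, err)
        }
    }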
diff --git a/light/trie_test.go b/light/trie_test.go
index 51ce9017a..5b5fce31d 100644
--- a/light/trie_test.go
+++ b/light/trie_test.go
@@ -64,7 +64,7 @@ func diffTries(t1, t2 state.Trie) error {
spew.Dump(i2)
return fmt.Errorf("tries have different keys %x, %x", i1.Key, i2.Key)
}
- if !bytes.Equal(i2.Value, i2.Value) {
+ if !bytes.Equal(i1.Value, i2.Value) {
return fmt.Errorf("tries differ at key %x", i1.Key)
}
}
diff --git a/metrics/influxdb/influxdb.go b/metrics/influxdb/influxdb.go
index 31a5c21b5..c4ef92723 100644
--- a/metrics/influxdb/influxdb.go
+++ b/metrics/influxdb/influxdb.go
@@ -58,6 +58,34 @@ func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, userna
rep.run()
}
+// InfluxDBWithTagsOnce runs an InfluxDB reporter once, posting the given metrics.Registry with the specified tags
+func InfluxDBWithTagsOnce(r metrics.Registry, url, database, username, password, namespace string, tags map[string]string) error {
+ u, err := uurl.Parse(url)
+ if err != nil {
+ return fmt.Errorf("Unable to parse InfluxDB. url: %s, err: %v", url, err)
+ }
+
+ rep := &reporter{
+ reg: r,
+ url: *u,
+ database: database,
+ username: username,
+ password: password,
+ namespace: namespace,
+ tags: tags,
+ cache: make(map[string]int64),
+ }
+ if err := rep.makeClient(); err != nil {
+ return fmt.Errorf("Unable to make InfluxDB client. err: %v", err)
+ }
+
+ if err := rep.send(); err != nil {
+ return fmt.Errorf("Unable to send to InfluxDB. err: %v", err)
+ }
+
+ return nil
+}
+
func (r *reporter) makeClient() (err error) {
r.client, err = client.NewClient(client.Config{
URL: r.url,
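One plausible use of the new one-shot reporter is flushing a final metrics snapshot at process shutdown instead of running the periodic loop. All connection parameters below are placeholders:

    import (
        "log"

        "github.com/ethereum/go-ethereum/metrics"
        "github.com/ethereum/go-ethereum/metrics/influxdb"
    )

    func flushMetricsOnce() {
        // Placeholder URL, database, credentials, namespace and tags.
        err := influxdb.InfluxDBWithTagsOnce(
            metrics.DefaultRegistry,
            "http://localhost:8086", "geth", "user", "pass", "geth.",
            map[string]string{"host": "node-1"},
        )
        if err != nil {
            log.Printf("metrics flush failed: %v", err)
        }
    }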
diff --git a/miner/worker.go b/miner/worker.go
index 8579c5c84..48473796b 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -31,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
@@ -692,7 +691,7 @@ func (w *worker) updateSnapshot() {
func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) {
snap := w.current.state.Snapshot()
- receipt, _, err := core.ApplyTransaction(w.config, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, vm.Config{})
+ receipt, _, err := core.ApplyTransaction(w.config, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig())
if err != nil {
w.current.state.RevertToSnapshot(snap)
return nil, err
diff --git a/mobile/big.go b/mobile/big.go
index dd7b15878..86ea93245 100644
--- a/mobile/big.go
+++ b/mobile/big.go
@@ -84,6 +84,13 @@ func (bi *BigInt) SetString(x string, base int) {
// BigInts represents a slice of big ints.
type BigInts struct{ bigints []*big.Int }
+// NewBigInts creates a slice of uninitialized big numbers.
+func NewBigInts(size int) *BigInts {
+ return &BigInts{
+ bigints: make([]*big.Int, size),
+ }
+}
+
// Size returns the number of big ints in the slice.
func (bi *BigInts) Size() int {
return len(bi.bigints)
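The new constructor returns a slice whose entries are nil until populated. A usage sketch, assuming a Set accessor on BigInts from the mobile bindings (hypothetical here if the surrounding API differs):

    import geth "github.com/ethereum/go-ethereum/mobile"

    func buildBigInts() (*geth.BigInts, error) {
        nums := geth.NewBigInts(2) // two uninitialized entries
        if err := nums.Set(0, geth.NewBigInt(42)); err != nil { // Set assumed from the mobile API
            return nil, err
        }
        return nums, nil
    }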
diff --git a/node/config.go b/node/config.go
index 8f10f4f61..7b32a5908 100644
--- a/node/config.go
+++ b/node/config.go
@@ -24,6 +24,7 @@ import (
"path/filepath"
"runtime"
"strings"
+ "sync"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
@@ -152,6 +153,10 @@ type Config struct {
// Logger is a custom logger to use with the p2p.Server.
Logger log.Logger `toml:",omitempty"`
+
+ staticNodesWarning bool
+ trustedNodesWarning bool
+ oldGethResourceWarning bool
}
// IPCEndpoint resolves an IPC endpoint based on a configured value, taking into
@@ -263,8 +268,8 @@ var isOldGethResource = map[string]bool{
"chaindata": true,
"nodes": true,
"nodekey": true,
- "static-nodes.json": true,
- "trusted-nodes.json": true,
+ "static-nodes.json": false, // no warning for these because they have their
+ "trusted-nodes.json": false, // own separate warning.
}
// ResolvePath resolves path in the instance directory.
@@ -277,13 +282,15 @@ func (c *Config) ResolvePath(path string) string {
}
// Backwards-compatibility: ensure that data directory files created
// by geth 1.4 are used if they exist.
- if c.name() == "geth" && isOldGethResource[path] {
+ if warn, isOld := isOldGethResource[path]; isOld {
oldpath := ""
- if c.Name == "geth" {
+ if c.name() == "geth" {
oldpath = filepath.Join(c.DataDir, path)
}
if oldpath != "" && common.FileExist(oldpath) {
- // TODO: print warning
+ if warn {
+ c.warnOnce(&c.oldGethResourceWarning, "Using deprecated resource file %s, please move this file to the 'geth' subdirectory of datadir.", oldpath)
+ }
return oldpath
}
}
@@ -337,17 +344,17 @@ func (c *Config) NodeKey() *ecdsa.PrivateKey {
// StaticNodes returns a list of node enode URLs configured as static nodes.
func (c *Config) StaticNodes() []*enode.Node {
- return c.parsePersistentNodes(c.ResolvePath(datadirStaticNodes))
+ return c.parsePersistentNodes(&c.staticNodesWarning, c.ResolvePath(datadirStaticNodes))
}
// TrustedNodes returns a list of node enode URLs configured as trusted nodes.
func (c *Config) TrustedNodes() []*enode.Node {
- return c.parsePersistentNodes(c.ResolvePath(datadirTrustedNodes))
+ return c.parsePersistentNodes(&c.trustedNodesWarning, c.ResolvePath(datadirTrustedNodes))
}
// parsePersistentNodes parses a list of discovery node URLs loaded from a .json
// file from within the data directory.
-func (c *Config) parsePersistentNodes(path string) []*enode.Node {
+func (c *Config) parsePersistentNodes(w *bool, path string) []*enode.Node {
// Short circuit if no node config is present
if c.DataDir == "" {
return nil
@@ -355,10 +362,12 @@ func (c *Config) parsePersistentNodes(path string) []*enode.Node {
if _, err := os.Stat(path); err != nil {
return nil
}
+ c.warnOnce(w, "Found deprecated node list file %s, please use the TOML config file instead.", path)
+
// Load the nodes from the config file.
var nodelist []string
if err := common.LoadJSON(path, &nodelist); err != nil {
- log.Error(fmt.Sprintf("Can't load node file %s: %v", path, err))
+ log.Error(fmt.Sprintf("Can't load node list file: %v", err))
return nil
}
// Interpret the list as a discovery node array
@@ -440,3 +449,20 @@ func makeAccountManager(conf *Config) (*accounts.Manager, string, error) {
}
return accounts.NewManager(backends...), ephemeral, nil
}
+
+var warnLock sync.Mutex
+
+func (c *Config) warnOnce(w *bool, format string, args ...interface{}) {
+ warnLock.Lock()
+ defer warnLock.Unlock()
+
+ if *w {
+ return
+ }
+ l := c.Logger
+ if l == nil {
+ l = log.Root()
+ }
+ l.Warn(fmt.Sprintf(format, args...))
+ *w = true
+}
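The boolean flags added to Config give each deprecation warning at-most-once semantics under the shared warnLock. The same pattern in isolation looks like this (a standalone sketch, not the geth type):

    import (
        "fmt"
        "sync"
    )

    // onceWarner emits each distinct warning a single time, guarded by a mutex
    // so that concurrent callers cannot double-log. Mirrors Config.warnOnce above.
    type onceWarner struct {
        mu   sync.Mutex
        seen map[string]bool
    }

    func (w *onceWarner) warn(msg string) {
        w.mu.Lock()
        defer w.mu.Unlock()
        if w.seen == nil {
            w.seen = make(map[string]bool)
        }
        if w.seen[msg] {
            return // already warned once
        }
        w.seen[msg] = true
        fmt.Println("WARN:", msg)
    }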
diff --git a/node/node.go b/node/node.go
index 85299dba7..c35a50972 100644
--- a/node/node.go
+++ b/node/node.go
@@ -287,7 +287,7 @@ func (n *Node) startInProc(apis []rpc.API) error {
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
return err
}
- n.log.Debug("InProc registered", "service", api.Service, "namespace", api.Namespace)
+ n.log.Debug("InProc registered", "namespace", api.Namespace)
}
n.inprocHandler = handler
return nil
@@ -322,7 +322,7 @@ func (n *Node) stopIPC() {
n.ipcListener.Close()
n.ipcListener = nil
- n.log.Info("IPC endpoint closed", "endpoint", n.ipcEndpoint)
+ n.log.Info("IPC endpoint closed", "url", n.ipcEndpoint)
}
if n.ipcHandler != nil {
n.ipcHandler.Stop()
diff --git a/p2p/discover/table.go b/p2p/discover/table.go
index afd4c9a27..9f7f1d41b 100644
--- a/p2p/discover/table.go
+++ b/p2p/discover/table.go
@@ -434,7 +434,7 @@ func (tab *Table) loadSeedNodes() {
for i := range seeds {
seed := seeds[i]
age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID())) }}
- log.Debug("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
+ log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
tab.add(seed)
}
}
diff --git a/p2p/discv5/net.go b/p2p/discv5/net.go
index a6cabf080..de7d8de6a 100644
--- a/p2p/discv5/net.go
+++ b/p2p/discv5/net.go
@@ -27,10 +27,10 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
var (
@@ -567,12 +567,11 @@ loop:
net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node, topic Topic) []byte {
if n.state != nil && n.state.canQuery {
return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
- } else {
- if n.state == unknown {
- net.ping(n, n.addr())
- }
- return nil
}
+ if n.state == unknown {
+ net.ping(n, n.addr())
+ }
+ return nil
})
case <-statsDump.C:
@@ -801,7 +800,7 @@ func (n *nodeNetGuts) startNextQuery(net *Network) {
func (q *findnodeQuery) start(net *Network) bool {
// Satisfy queries against the local node directly.
if q.remote == net.tab.self {
- closest := net.tab.closest(crypto.Keccak256Hash(q.target[:]), bucketSize)
+ closest := net.tab.closest(q.target, bucketSize)
q.reply <- closest.entries
return true
}
@@ -1235,7 +1234,7 @@ func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) {
}
func rlpHash(x interface{}) (h common.Hash) {
- hw := sha3.NewKeccak256()
+ hw := sha3.NewLegacyKeccak256()
rlp.Encode(hw, x)
hw.Sum(h[:0])
return h
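The migration to golang.org/x/crypto/sha3 is hash-for-hash compatible: NewLegacyKeccak256 implements the pre-NIST Keccak padding that Ethereum has always used, as opposed to standardised SHA3-256. A quick self-contained check:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    func main() {
        h := sha3.NewLegacyKeccak256() // original Keccak-256, as used by Ethereum
        h.Write([]byte(""))
        // keccak256("") = c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470,
        // which differs from SHA3-256("") because of the different domain padding.
        fmt.Printf("%x\n", h.Sum(nil))
    }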
diff --git a/p2p/enode/idscheme.go b/p2p/enode/idscheme.go
index 9b495fd4f..c1834f069 100644
--- a/p2p/enode/idscheme.go
+++ b/p2p/enode/idscheme.go
@@ -23,9 +23,9 @@ import (
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
// List of known secure identity schemes.
@@ -48,7 +48,7 @@ func SignV4(r *enr.Record, privkey *ecdsa.PrivateKey) error {
cpy.Set(enr.ID("v4"))
cpy.Set(Secp256k1(privkey.PublicKey))
- h := sha3.NewKeccak256()
+ h := sha3.NewLegacyKeccak256()
rlp.Encode(h, cpy.AppendElements(nil))
sig, err := crypto.Sign(h.Sum(nil), privkey)
if err != nil {
@@ -69,7 +69,7 @@ func (V4ID) Verify(r *enr.Record, sig []byte) error {
return fmt.Errorf("invalid public key")
}
- h := sha3.NewKeccak256()
+ h := sha3.NewLegacyKeccak256()
rlp.Encode(h, r.AppendElements(nil))
if !crypto.VerifySignature(entry, h.Sum(nil), sig) {
return enr.ErrInvalidSig
diff --git a/p2p/protocols/accounting.go b/p2p/protocols/accounting.go
index 06a1a5845..bdc490e59 100644
--- a/p2p/protocols/accounting.go
+++ b/p2p/protocols/accounting.go
@@ -16,34 +16,39 @@
package protocols
-import "github.com/ethereum/go-ethereum/metrics"
+import (
+ "time"
-//define some metrics
-var (
- //NOTE: these metrics just define the interfaces and are currently *NOT persisted* over sessions
- //All metrics are cumulative
-
- //total amount of units credited
- mBalanceCredit = metrics.NewRegisteredCounterForced("account.balance.credit", nil)
- //total amount of units debited
- mBalanceDebit = metrics.NewRegisteredCounterForced("account.balance.debit", nil)
- //total amount of bytes credited
- mBytesCredit = metrics.NewRegisteredCounterForced("account.bytes.credit", nil)
- //total amount of bytes debited
- mBytesDebit = metrics.NewRegisteredCounterForced("account.bytes.debit", nil)
- //total amount of credited messages
- mMsgCredit = metrics.NewRegisteredCounterForced("account.msg.credit", nil)
- //total amount of debited messages
- mMsgDebit = metrics.NewRegisteredCounterForced("account.msg.debit", nil)
- //how many times local node had to drop remote peers
- mPeerDrops = metrics.NewRegisteredCounterForced("account.peerdrops", nil)
- //how many times local node overdrafted and dropped
- mSelfDrops = metrics.NewRegisteredCounterForced("account.selfdrops", nil)
+ "github.com/ethereum/go-ethereum/metrics"
)
-//Prices defines how prices are being passed on to the accounting instance
+// define some metrics
+var (
+ // All metrics are cumulative
+
+ // total amount of units credited
+ mBalanceCredit metrics.Counter
+ // total amount of units debited
+ mBalanceDebit metrics.Counter
+ // total amount of bytes credited
+ mBytesCredit metrics.Counter
+ // total amount of bytes debited
+ mBytesDebit metrics.Counter
+ // total amount of credited messages
+ mMsgCredit metrics.Counter
+ // total amount of debited messages
+ mMsgDebit metrics.Counter
+ // how many times local node had to drop remote peers
+ mPeerDrops metrics.Counter
+ // how many times local node overdrafted and dropped
+ mSelfDrops metrics.Counter
+
+ MetricsRegistry metrics.Registry
+)
+
+// Prices defines how prices are being passed on to the accounting instance
type Prices interface {
- //Return the Price for a message
+ // Return the Price for a message
Price(interface{}) *Price
}
@@ -54,20 +59,20 @@ const (
Receiver = Payer(false)
)
-//Price represents the costs of a message
+// Price represents the costs of a message
type Price struct {
- Value uint64 //
- PerByte bool //True if the price is per byte or for unit
+ Value uint64
+ PerByte bool // True if the price is per byte, false if per message
Payer Payer
}
-//For gives back the price for a message
-//A protocol provides the message price in absolute value
-//This method then returns the correct signed amount,
-//depending on who pays, which is identified by the `payer` argument:
-//`Send` will pass a `Sender` payer, `Receive` will pass the `Receiver` argument.
-//Thus: If Sending and sender pays, amount positive, otherwise negative
-//If Receiving, and receiver pays, amount positive, otherwise negative
+// For gives back the price for a message
+// A protocol provides the message price in absolute value
+// This method then returns the correct signed amount,
+// depending on who pays, which is identified by the `payer` argument:
+// `Send` will pass a `Sender` payer, `Receive` will pass the `Receiver` argument.
+// Thus: If Sending and sender pays, amount positive, otherwise negative
+// If Receiving, and receiver pays, amount positive, otherwise negative
func (p *Price) For(payer Payer, size uint32) int64 {
price := p.Value
if p.PerByte {
@@ -79,22 +84,22 @@ func (p *Price) For(payer Payer, size uint32) int64 {
return int64(price)
}
-//Balance is the actual accounting instance
-//Balance defines the operations needed for accounting
-//Implementations internally maintain the balance for every peer
+// Balance is the actual accounting instance
+// Balance defines the operations needed for accounting
+// Implementations internally maintain the balance for every peer
type Balance interface {
- //Adds amount to the local balance with remote node `peer`;
- //positive amount = credit local node
- //negative amount = debit local node
+ // Adds amount to the local balance with remote node `peer`;
+ // positive amount = credit local node
+ // negative amount = debit local node
Add(amount int64, peer *Peer) error
}
-//Accounting implements the Hook interface
-//It interfaces to the balances through the Balance interface,
-//while interfacing with protocols and its prices through the Prices interface
+// Accounting implements the Hook interface
+// It interfaces to the balances through the Balance interface,
+// while interfacing with protocols and its prices through the Prices interface
type Accounting struct {
- Balance //interface to accounting logic
- Prices //interface to prices logic
+ Balance // interface to accounting logic
+ Prices // interface to prices logic
}
func NewAccounting(balance Balance, po Prices) *Accounting {
@@ -105,59 +110,77 @@ func NewAccounting(balance Balance, po Prices) *Accounting {
return ah
}
-//Implement Hook.Send
+// SetupAccountingMetrics creates a separate registry for p2p accounting metrics;
+// this registry should be independent of any other metrics as it persists at different endpoints.
+// It also instantiates the given metrics and starts the persisting goroutine, which
+// writes the metrics to a LevelDB at the passed interval
+func SetupAccountingMetrics(reportInterval time.Duration, path string) *AccountingMetrics {
+ // create an empty registry
+ MetricsRegistry = metrics.NewRegistry()
+ // instantiate the metrics
+ mBalanceCredit = metrics.NewRegisteredCounterForced("account.balance.credit", MetricsRegistry)
+ mBalanceDebit = metrics.NewRegisteredCounterForced("account.balance.debit", MetricsRegistry)
+ mBytesCredit = metrics.NewRegisteredCounterForced("account.bytes.credit", MetricsRegistry)
+ mBytesDebit = metrics.NewRegisteredCounterForced("account.bytes.debit", MetricsRegistry)
+ mMsgCredit = metrics.NewRegisteredCounterForced("account.msg.credit", MetricsRegistry)
+ mMsgDebit = metrics.NewRegisteredCounterForced("account.msg.debit", MetricsRegistry)
+ mPeerDrops = metrics.NewRegisteredCounterForced("account.peerdrops", MetricsRegistry)
+ mSelfDrops = metrics.NewRegisteredCounterForced("account.selfdrops", MetricsRegistry)
+ // create the DB and start persisting
+ return NewAccountingMetrics(MetricsRegistry, reportInterval, path)
+}
+
// Send takes a peer, a size and a msg and
-// - calculates the cost for the local node sending a msg of size to peer using the Prices interface
-// - credits/debits local node using balance interface
+// - calculates the cost for the local node sending a msg of size to peer using the Prices interface
+// - credits/debits local node using balance interface
func (ah *Accounting) Send(peer *Peer, size uint32, msg interface{}) error {
- //get the price for a message (through the protocol spec)
+ // get the price for a message (through the protocol spec)
price := ah.Price(msg)
- //this message doesn't need accounting
+ // this message doesn't need accounting
if price == nil {
return nil
}
- //evaluate the price for sending messages
+ // evaluate the price for sending messages
costToLocalNode := price.For(Sender, size)
- //do the accounting
+ // do the accounting
err := ah.Add(costToLocalNode, peer)
- //record metrics: just increase counters for user-facing metrics
+ // record metrics: just increase counters for user-facing metrics
ah.doMetrics(costToLocalNode, size, err)
return err
}
-//Implement Hook.Receive
// Receive takes a peer, a size and a msg and
-// - calculates the cost for the local node receiving a msg of size from peer using the Prices interface
-// - credits/debits local node using balance interface
+// - calculates the cost for the local node receiving a msg of size from peer using the Prices interface
+// - credits/debits local node using balance interface
func (ah *Accounting) Receive(peer *Peer, size uint32, msg interface{}) error {
- //get the price for a message (through the protocol spec)
+ // get the price for a message (through the protocol spec)
price := ah.Price(msg)
- //this message doesn't need accounting
+ // this message doesn't need accounting
if price == nil {
return nil
}
- //evaluate the price for receiving messages
+ // evaluate the price for receiving messages
costToLocalNode := price.For(Receiver, size)
- //do the accounting
+ // do the accounting
err := ah.Add(costToLocalNode, peer)
- //record metrics: just increase counters for user-facing metrics
+ // record metrics: just increase counters for user-facing metrics
ah.doMetrics(costToLocalNode, size, err)
return err
}
-//record some metrics
-//this is not an error handling. `err` is returned by both `Send` and `Receive`
-//`err` will only be non-nil if a limit has been violated (overdraft), in which case the peer has been dropped.
-//if the limit has been violated and `err` is thus not nil:
-// * if the price is positive, local node has been credited; thus `err` implicitly signals the REMOTE has been dropped
-// * if the price is negative, local node has been debited, thus `err` implicitly signals LOCAL node "overdraft"
+// record some metrics
+// this is not error handling. `err` is returned by both `Send` and `Receive`
+// `err` will only be non-nil if a limit has been violated (overdraft), in which case the peer has been dropped.
+// if the limit has been violated and `err` is thus not nil:
+// * if the price is positive, local node has been credited; thus `err` implicitly signals the REMOTE has been dropped
+// * if the price is negative, local node has been debited, thus `err` implicitly signals LOCAL node "overdraft"
func (ah *Accounting) doMetrics(price int64, size uint32, err error) {
if price > 0 {
mBalanceCredit.Inc(price)
mBytesCredit.Inc(int64(size))
mMsgCredit.Inc(1)
if err != nil {
- //increase the number of times a remote node has been dropped due to "overdraft"
+ // increase the number of times a remote node has been dropped due to "overdraft"
mPeerDrops.Inc(1)
}
} else {
@@ -165,7 +188,7 @@ func (ah *Accounting) doMetrics(price int64, size uint32, err error) {
mBytesDebit.Inc(int64(size))
mMsgDebit.Inc(1)
if err != nil {
- //increase the number of times the local node has done an "overdraft" in respect to other nodes
+ // increase the number of times the local node has done an "overdraft" in respect to other nodes
mSelfDrops.Inc(1)
}
}
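
The Balance interface above is all a protocol has to supply to opt into accounting; swarm provides the production implementation. As an illustration of the contract only (the map storage and overdraft limit here are invented for the sketch; imports errors and sync assumed), an in-memory Balance could look like:

// illustrative sketch, in package protocols
type memBalance struct {
	mu       sync.Mutex // guards balances
	balances map[*Peer]int64
	limit    int64 // hypothetical overdraft limit
}

// Add implements the Balance interface: positive amounts credit the local
// node, negative amounts debit it. A non-nil error signals an overdraft,
// which doMetrics then counts via the drop counters.
func (b *memBalance) Add(amount int64, peer *Peer) error {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.balances == nil {
		b.balances = make(map[*Peer]int64)
	}
	next := b.balances[peer] + amount
	if next < -b.limit {
		return errors.New("overdraft")
	}
	b.balances[peer] = next
	return nil
}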
diff --git a/p2p/protocols/accounting_api.go b/p2p/protocols/accounting_api.go
new file mode 100644
index 000000000..48e2af9fe
--- /dev/null
+++ b/p2p/protocols/accounting_api.go
@@ -0,0 +1,94 @@
+package protocols
+
+import (
+ "errors"
+)
+
+// Textual version number of accounting API
+const AccountingVersion = "1.0"
+
+var errNoAccountingMetrics = errors.New("accounting metrics not enabled")
+
+// AccountingApi provides an API to access account-related information
+type AccountingApi struct {
+ metrics *AccountingMetrics
+}
+
+// NewAccountingApi creates a new AccountingApi
+// m will be used to check if accounting metrics are enabled
+func NewAccountingApi(m *AccountingMetrics) *AccountingApi {
+ return &AccountingApi{m}
+}
+
+// Balance returns local node balance (units credited - units debited)
+func (self *AccountingApi) Balance() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ balance := mBalanceCredit.Count() - mBalanceDebit.Count()
+ return balance, nil
+}
+
+// BalanceCredit returns total amount of units credited by local node
+func (self *AccountingApi) BalanceCredit() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mBalanceCredit.Count(), nil
+}
+
+// BalanceDebit returns total amount of units debited by local node
+func (self *AccountingApi) BalanceDebit() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mBalanceDebit.Count(), nil
+}
+
+// BytesCredit returns total amount of bytes credited by local node
+func (self *AccountingApi) BytesCredit() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mBytesCredit.Count(), nil
+}
+
+// BytesDebit returns total amount of bytes debited by local node
+func (self *AccountingApi) BytesDebit() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mBytesDebit.Count(), nil
+}
+
+// MsgCredit returns total amount of messages credited by local node
+func (self *AccountingApi) MsgCredit() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mMsgCredit.Count(), nil
+}
+
+// MsgDebit returns total amount of messages debited by local node
+func (self *AccountingApi) MsgDebit() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mMsgDebit.Count(), nil
+}
+
+// PeerDrops returns number of times when local node had to drop remote peers
+func (self *AccountingApi) PeerDrops() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mPeerDrops.Count(), nil
+}
+
+// SelfDrops returns number of times when local node was overdrafted and dropped
+func (self *AccountingApi) SelfDrops() (int64, error) {
+ if self.metrics == nil {
+ return 0, errNoAccountingMetrics
+ }
+ return mSelfDrops.Count(), nil
+}
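
Wiring sketch for the new API; the directory and report interval are placeholders, while SetupAccountingMetrics and NewAccountingApi are the functions added in this patch:

package main

import (
	"fmt"
	"path/filepath"
	"time"

	"github.com/ethereum/go-ethereum/p2p/protocols"
)

func main() {
	dir := "/tmp/acct-demo" // placeholder datadir
	// persist the accounting counters every 10s into a LevelDB under dir
	m := protocols.SetupAccountingMetrics(10*time.Second, filepath.Join(dir, "accounting.db"))
	if m == nil {
		panic("accounting metrics DB could not be opened")
	}
	defer m.Close()

	api := protocols.NewAccountingApi(m)
	balance, err := api.Balance() // units credited minus units debited
	if err != nil {
		panic(err)
	}
	fmt.Println("balance:", balance)
}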
diff --git a/p2p/protocols/accounting_simulation_test.go b/p2p/protocols/accounting_simulation_test.go
index 65b737abe..e90a1d81d 100644
--- a/p2p/protocols/accounting_simulation_test.go
+++ b/p2p/protocols/accounting_simulation_test.go
@@ -20,7 +20,10 @@ import (
"context"
"flag"
"fmt"
+ "io/ioutil"
"math/rand"
+ "os"
+ "path/filepath"
"reflect"
"sync"
"testing"
@@ -66,6 +69,13 @@ func init() {
func TestAccountingSimulation(t *testing.T) {
//setup the balances objects for every node
bal := newBalances(*nodes)
+ //setup the metrics system or tests will fail trying to write metrics
+ dir, err := ioutil.TempDir("", "account-sim")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+ SetupAccountingMetrics(1*time.Second, filepath.Join(dir, "metrics.db"))
//define the node.Service for this test
services := adapters.Services{
"accounting": func(ctx *adapters.ServiceContext) (node.Service, error) {
diff --git a/p2p/protocols/protocol.go b/p2p/protocols/protocol.go
index 7dddd852f..b16720dd3 100644
--- a/p2p/protocols/protocol.go
+++ b/p2p/protocols/protocol.go
@@ -381,7 +381,7 @@ func (p *Peer) handleIncoming(handle func(ctx context.Context, msg interface{})
// * arguments
// * context
// * the local handshake to be sent to the remote peer
-// * funcion to be called on the remote handshake (can be nil)
+// * function to be called on the remote handshake (can be nil)
// * expects a remote handshake back of the same type
// * the dialing peer needs to send the handshake first and then waits for remote
// * the listening peer waits for the remote handshake and then sends it
diff --git a/p2p/protocols/reporter.go b/p2p/protocols/reporter.go
new file mode 100644
index 000000000..215d4fe31
--- /dev/null
+++ b/p2p/protocols/reporter.go
@@ -0,0 +1,147 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package protocols
+
+import (
+ "encoding/binary"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+//AccountingMetrics abstracts away the metrics DB and
+//the reporter to persist metrics
+type AccountingMetrics struct {
+ reporter *reporter
+}
+
+//Close will be called when the node is being shutdown
+//for a graceful cleanup
+func (am *AccountingMetrics) Close() {
+ close(am.reporter.quit)
+ am.reporter.db.Close()
+}
+
+//reporter is an internal structure used to write p2p accounting related
+//metrics to a LevelDB. It will periodically write the accrued metrics to the DB.
+type reporter struct {
+ reg metrics.Registry //the registry for these metrics (independent of other metrics)
+ interval time.Duration //duration at which the reporter will persist metrics
+ db *leveldb.DB //the actual DB
+ quit chan struct{} //quit the reporter loop
+}
+
+//NewAccountingMetrics creates a new LevelDB instance used to persist metrics defined
+//inside p2p/protocols/accounting.go
+func NewAccountingMetrics(r metrics.Registry, d time.Duration, path string) *AccountingMetrics {
+ var val = make([]byte, 8)
+ var err error
+
+ //Create the LevelDB
+ db, err := leveldb.OpenFile(path, nil)
+ if err != nil {
+ log.Error(err.Error())
+ return nil
+ }
+
+ //Check for each defined metric whether there is a value in the DB.
+ //If there is, assign it to the metric. This means that the node
+ //has been running before and that metrics have been persisted.
+ metricsMap := map[string]metrics.Counter{
+ "account.balance.credit": mBalanceCredit,
+ "account.balance.debit": mBalanceDebit,
+ "account.bytes.credit": mBytesCredit,
+ "account.bytes.debit": mBytesDebit,
+ "account.msg.credit": mMsgCredit,
+ "account.msg.debit": mMsgDebit,
+ "account.peerdrops": mPeerDrops,
+ "account.selfdrops": mSelfDrops,
+ }
+ //iterate the map and get the values
+ for key, metric := range metricsMap {
+ val, err = db.Get([]byte(key), nil)
+ //until the first time a value has been written,
+ //this will return an error.
+ //it could be beneficial to log later errors,
+ //but that would require different logic
+ if err == nil {
+ metric.Inc(int64(binary.BigEndian.Uint64(val)))
+ }
+ }
+
+ //create the reporter
+ rep := &reporter{
+ reg: r,
+ interval: d,
+ db: db,
+ quit: make(chan struct{}),
+ }
+
+ //run the go routine
+ go rep.run()
+
+ m := &AccountingMetrics{
+ reporter: rep,
+ }
+
+ return m
+}
+
+//run is the goroutine which periodically writes the metrics to the configured LevelDB
+func (r *reporter) run() {
+ intervalTicker := time.NewTicker(r.interval)
+
+ for {
+ select {
+ case <-intervalTicker.C:
+ //at each tick send the metrics
+ if err := r.save(); err != nil {
+ log.Error("unable to send metrics to LevelDB", "err", err)
+ //If there is an error in writing, exit the routine; we assume here that the error is
+ //severe and don't attempt to write again.
+ //Also, this should prevent the goroutine from leaking when the node is stopped
+ return
+ }
+ case <-r.quit:
+ //graceful shutdown
+ return
+ }
+ }
+}
+
+//save persists the current metrics to the DB
+func (r *reporter) save() error {
+ //create a LevelDB Batch
+ batch := leveldb.Batch{}
+ //for each metric in the registry (which is independent)...
+ r.reg.Each(func(name string, i interface{}) {
+ metric, ok := i.(metrics.Counter)
+ if ok {
+ //assuming every metric here is a Counter (separate registry)
+ //...create a snapshot...
+ ms := metric.Snapshot()
+ byteVal := make([]byte, 8)
+ binary.BigEndian.PutUint64(byteVal, uint64(ms.Count()))
+ //...and save the value to the DB
+ batch.Put([]byte(name), byteVal)
+ }
+ })
+ return r.db.Write(&batch, nil)
+}
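
Because save stores each counter as a big-endian uint64 keyed by its registry name, the DB can be inspected offline. A throwaway sketch (the path is a placeholder, and the node must not hold the DB open at the same time):

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("accounting.db", nil) // placeholder path
	if err != nil {
		panic(err)
	}
	defer db.Close()

	iter := db.NewIterator(nil, nil)
	defer iter.Release()
	for iter.Next() {
		// each value is a counter snapshot written by reporter.save
		fmt.Printf("%s = %d\n", iter.Key(), binary.BigEndian.Uint64(iter.Value()))
	}
}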
diff --git a/p2p/protocols/reporter_test.go b/p2p/protocols/reporter_test.go
new file mode 100644
index 000000000..b9f06e674
--- /dev/null
+++ b/p2p/protocols/reporter_test.go
@@ -0,0 +1,77 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package protocols
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+)
+
+//TestReporter tests that the metrics being collected for p2p accounting
+//are persisted and available after a restart of the node.
+//It simulates restarting by just recreating the DB as if the node had restarted.
+func TestReporter(t *testing.T) {
+ //create a test directory
+ dir, err := ioutil.TempDir("", "reporter-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ //setup the metrics
+ log.Debug("Setting up metrics first time")
+ reportInterval := 5 * time.Millisecond
+ metrics := SetupAccountingMetrics(reportInterval, filepath.Join(dir, "test.db"))
+ log.Debug("Done.")
+
+ //do some metrics
+ mBalanceCredit.Inc(12)
+ mBytesCredit.Inc(34)
+ mMsgDebit.Inc(9)
+
+ //give the reporter time to write the metrics to DB
+ time.Sleep(20 * time.Millisecond)
+
+ //set the metrics to nil - this effectively simulates the node having shut down...
+ mBalanceCredit = nil
+ mBytesCredit = nil
+ mMsgDebit = nil
+ //close the DB also, or we can't create a new one
+ metrics.Close()
+
+ //setup the metrics again
+ log.Debug("Setting up metrics second time")
+ metrics = SetupAccountingMetrics(reportInterval, filepath.Join(dir, "test.db"))
+ defer metrics.Close()
+ log.Debug("Done.")
+
+ //now check the metrics, they should have the same value as before "shutdown"
+ if mBalanceCredit.Count() != 12 {
+ t.Fatalf("Expected counter to be %d, but is %d", 12, mBalanceCredit.Count())
+ }
+ if mBytesCredit.Count() != 34 {
+ t.Fatalf("Expected counter to be %d, but is %d", 23, mBytesCredit.Count())
+ }
+ if mMsgDebit.Count() != 9 {
+ t.Fatalf("Expected counter to be %d, but is %d", 9, mMsgDebit.Count())
+ }
+}
diff --git a/p2p/rlpx.go b/p2p/rlpx.go
index 22a27dd96..67cc1d9bf 100644
--- a/p2p/rlpx.go
+++ b/p2p/rlpx.go
@@ -39,9 +39,9 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
"github.com/golang/snappy"
+ "golang.org/x/crypto/sha3"
)
const (
@@ -253,10 +253,10 @@ func (h *encHandshake) secrets(auth, authResp []byte) (secrets, error) {
}
// setup sha3 instances for the MACs
- mac1 := sha3.NewKeccak256()
+ mac1 := sha3.NewLegacyKeccak256()
mac1.Write(xor(s.MAC, h.respNonce))
mac1.Write(auth)
- mac2 := sha3.NewKeccak256()
+ mac2 := sha3.NewLegacyKeccak256()
mac2.Write(xor(s.MAC, h.initNonce))
mac2.Write(authResp)
if h.initiator {
diff --git a/p2p/rlpx_test.go b/p2p/rlpx_test.go
index 64172217b..5d8981802 100644
--- a/p2p/rlpx_test.go
+++ b/p2p/rlpx_test.go
@@ -34,9 +34,9 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/p2p/simulations/pipes"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
func TestSharedSecret(t *testing.T) {
@@ -334,8 +334,8 @@ func TestRLPXFrameRW(t *testing.T) {
s1 := secrets{
AES: aesSecret,
MAC: macSecret,
- EgressMAC: sha3.NewKeccak256(),
- IngressMAC: sha3.NewKeccak256(),
+ EgressMAC: sha3.NewLegacyKeccak256(),
+ IngressMAC: sha3.NewLegacyKeccak256(),
}
s1.EgressMAC.Write(egressMACinit)
s1.IngressMAC.Write(ingressMACinit)
@@ -344,8 +344,8 @@ func TestRLPXFrameRW(t *testing.T) {
s2 := secrets{
AES: aesSecret,
MAC: macSecret,
- EgressMAC: sha3.NewKeccak256(),
- IngressMAC: sha3.NewKeccak256(),
+ EgressMAC: sha3.NewLegacyKeccak256(),
+ IngressMAC: sha3.NewLegacyKeccak256(),
}
s2.EgressMAC.Write(ingressMACinit)
s2.IngressMAC.Write(egressMACinit)
diff --git a/p2p/server.go b/p2p/server.go
index 667860863..566f01ffc 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -22,7 +22,6 @@ import (
"crypto/ecdsa"
"encoding/hex"
"errors"
- "fmt"
"net"
"sort"
"sync"
@@ -391,7 +390,7 @@ type sharedUDPConn struct {
func (s *sharedUDPConn) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) {
packet, ok := <-s.unhandled
if !ok {
- return 0, nil, fmt.Errorf("Connection was closed")
+ return 0, nil, errors.New("Connection was closed")
}
l := len(packet.Data)
if l > len(b) {
@@ -425,7 +424,7 @@ func (srv *Server) Start() (err error) {
// static fields
if srv.PrivateKey == nil {
- return fmt.Errorf("Server.PrivateKey must be set to a non-nil key")
+ return errors.New("Server.PrivateKey must be set to a non-nil key")
}
if srv.newTransport == nil {
srv.newTransport = newRLPX
@@ -903,7 +902,7 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro
if dialDest != nil {
dialPubkey = new(ecdsa.PublicKey)
if err := dialDest.Load((*enode.Secp256k1)(dialPubkey)); err != nil {
- return fmt.Errorf("dial destination doesn't have a secp256k1 public key")
+ return errors.New("dial destination doesn't have a secp256k1 public key")
}
}
// Run the encryption handshake.
@@ -937,7 +936,7 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro
return err
}
if id := c.node.ID(); !bytes.Equal(crypto.Keccak256(phs.ID), id[:]) {
- clog.Trace("Wrong devp2p handshake identity", "phsid", fmt.Sprintf("%x", phs.ID))
+ clog.Trace("Wrong devp2p handshake identity", "phsid", hex.EncodeToString(phs.ID))
return DiscUnexpectedIdentity
}
c.caps, c.name = phs.Caps, phs.Name
diff --git a/p2p/server_test.go b/p2p/server_test.go
index 7e11577d6..f665c1424 100644
--- a/p2p/server_test.go
+++ b/p2p/server_test.go
@@ -26,10 +26,10 @@ import (
"time"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
+ "golang.org/x/crypto/sha3"
)
// func init() {
@@ -48,8 +48,8 @@ func newTestTransport(rpub *ecdsa.PublicKey, fd net.Conn) transport {
wrapped.rw = newRLPXFrameRW(fd, secrets{
MAC: zero16,
AES: zero16,
- IngressMAC: sha3.NewKeccak256(),
- EgressMAC: sha3.NewKeccak256(),
+ IngressMAC: sha3.NewLegacyKeccak256(),
+ EgressMAC: sha3.NewLegacyKeccak256(),
})
return &testTransport{rpub: rpub, rlpx: wrapped}
}
diff --git a/p2p/simulations/adapters/exec.go b/p2p/simulations/adapters/exec.go
index abb196717..9b588db1b 100644
--- a/p2p/simulations/adapters/exec.go
+++ b/p2p/simulations/adapters/exec.go
@@ -46,7 +46,7 @@ import (
func init() {
// Register a reexec function to start a simulation node when the current binary is
- // executed as "p2p-node" (rather than whataver the main() function would normally do).
+ // executed as "p2p-node" (rather than whatever the main() function would normally do).
reexec.Register("p2p-node", execP2PNode)
}
diff --git a/p2p/simulations/adapters/inproc.go b/p2p/simulations/adapters/inproc.go
index 52a662be6..eada9579e 100644
--- a/p2p/simulations/adapters/inproc.go
+++ b/p2p/simulations/adapters/inproc.go
@@ -130,7 +130,7 @@ func (s *SimAdapter) Dial(dest *enode.Node) (conn net.Conn, err error) {
return nil, err
}
// this is simulated 'listening'
- // asynchronously call the dialed destintion node's p2p server
+ // asynchronously call the dialed destination node's p2p server
// to set up connection on the 'listening' side
go srv.SetupConn(pipe1, 0, nil)
return pipe2, nil
@@ -351,17 +351,3 @@ func (sn *SimNode) NodeInfo() *p2p.NodeInfo {
}
return server.NodeInfo()
}
-
-func setSocketBuffer(conn net.Conn, socketReadBuffer int, socketWriteBuffer int) error {
- if v, ok := conn.(*net.UnixConn); ok {
- err := v.SetReadBuffer(socketReadBuffer)
- if err != nil {
- return err
- }
- err = v.SetWriteBuffer(socketWriteBuffer)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/p2p/simulations/connect.go b/p2p/simulations/connect.go
new file mode 100644
index 000000000..bb7e7999a
--- /dev/null
+++ b/p2p/simulations/connect.go
@@ -0,0 +1,132 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulations
+
+import (
+ "errors"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/p2p/enode"
+)
+
+var (
+ ErrNodeNotFound = errors.New("node not found")
+)
+
+// ConnectToLastNode connects the node with the provided NodeID
+// to the last node that is up, avoiding connection to self.
+// It is useful when constructing a chain network topology
+// when Network adds and removes nodes dynamically.
+func (net *Network) ConnectToLastNode(id enode.ID) (err error) {
+ ids := net.getUpNodeIDs()
+ l := len(ids)
+ if l < 2 {
+ return nil
+ }
+ last := ids[l-1]
+ if last == id {
+ last = ids[l-2]
+ }
+ return net.connect(last, id)
+}
+
+// ConnectToRandomNode connects the node with the provided NodeID
+// to a random node that is up.
+func (net *Network) ConnectToRandomNode(id enode.ID) (err error) {
+ selected := net.GetRandomUpNode(id)
+ if selected == nil {
+ return ErrNodeNotFound
+ }
+ return net.connect(selected.ID(), id)
+}
+
+// ConnectNodesFull connects all nodes one to another.
+// It provides complete connectivity in the network,
+// which should rarely be needed.
+func (net *Network) ConnectNodesFull(ids []enode.ID) (err error) {
+ if ids == nil {
+ ids = net.getUpNodeIDs()
+ }
+ for i, lid := range ids {
+ for _, rid := range ids[i+1:] {
+ if err = net.connect(lid, rid); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// ConnectNodesChain connects all nodes in a chain topology.
+// If ids argument is nil, all nodes that are up will be connected.
+func (net *Network) ConnectNodesChain(ids []enode.ID) (err error) {
+ if ids == nil {
+ ids = net.getUpNodeIDs()
+ }
+ l := len(ids)
+ for i := 0; i < l-1; i++ {
+ if err := net.connect(ids[i], ids[i+1]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ConnectNodesRing connects all nodes in a ring topology.
+// If ids argument is nil, all nodes that are up will be connected.
+func (net *Network) ConnectNodesRing(ids []enode.ID) (err error) {
+ if ids == nil {
+ ids = net.getUpNodeIDs()
+ }
+ l := len(ids)
+ if l < 2 {
+ return nil
+ }
+ if err := net.ConnectNodesChain(ids); err != nil {
+ return err
+ }
+ return net.connect(ids[l-1], ids[0])
+}
+
+// ConnectNodesStar connects all nodes into a star topology.
+// If ids argument is nil, all nodes that are up will be connected.
+func (net *Network) ConnectNodesStar(ids []enode.ID, center enode.ID) (err error) {
+ if ids == nil {
+ ids = net.getUpNodeIDs()
+ }
+ for _, id := range ids {
+ if center == id {
+ continue
+ }
+ if err := net.connect(center, id); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// connect connects two nodes but ignores already connected error.
+func (net *Network) connect(oneID, otherID enode.ID) error {
+ return ignoreAlreadyConnectedErr(net.Connect(oneID, otherID))
+}
+
+func ignoreAlreadyConnectedErr(err error) error {
+ if err == nil || strings.Contains(err.Error(), "already connected") {
+ return nil
+ }
+ return err
+}
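
Usage sketch for the new topology helpers from inside a package simulations test; net and ids stand for a started Network and its node IDs, as produced by a helper like newTestNetwork in the test file that follows:

func connectAndCheckRing(t *testing.T, net *Network, ids []enode.ID) {
	t.Helper()
	// connect every started node to its successor and close the loop
	if err := net.ConnectNodesRing(ids); err != nil {
		t.Fatal(err)
	}
	// VerifyRing is the verification helper added in p2p/simulations/test.go below
	VerifyRing(t, net, ids)
}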
diff --git a/p2p/simulations/connect_test.go b/p2p/simulations/connect_test.go
new file mode 100644
index 000000000..32d18347d
--- /dev/null
+++ b/p2p/simulations/connect_test.go
@@ -0,0 +1,172 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulations
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+)
+
+func newTestNetwork(t *testing.T, nodeCount int) (*Network, []enode.ID) {
+ t.Helper()
+ adapter := adapters.NewSimAdapter(adapters.Services{
+ "noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ return NewNoopService(nil), nil
+ },
+ })
+
+ // create network
+ network := NewNetwork(adapter, &NetworkConfig{
+ DefaultService: "noopwoop",
+ })
+
+ // create and start nodes
+ ids := make([]enode.ID, nodeCount)
+ for i := range ids {
+ conf := adapters.RandomNodeConfig()
+ node, err := network.NewNodeWithConfig(conf)
+ if err != nil {
+ t.Fatalf("error creating node: %s", err)
+ }
+ if err := network.Start(node.ID()); err != nil {
+ t.Fatalf("error starting node: %s", err)
+ }
+ ids[i] = node.ID()
+ }
+
+ if len(network.Conns) > 0 {
+ t.Fatal("no connections should exist after just adding nodes")
+ }
+
+ return network, ids
+}
+
+func TestConnectToLastNode(t *testing.T) {
+ net, ids := newTestNetwork(t, 10)
+ defer net.Shutdown()
+
+ first := ids[0]
+ if err := net.ConnectToLastNode(first); err != nil {
+ t.Fatal(err)
+ }
+
+ last := ids[len(ids)-1]
+ for i, id := range ids {
+ if id == first || id == last {
+ continue
+ }
+
+ if net.GetConn(first, id) != nil {
+ t.Errorf("connection must not exist with node(ind: %v, id: %v)", i, id)
+ }
+ }
+
+ if net.GetConn(first, last) == nil {
+ t.Error("first and last node must be connected")
+ }
+}
+
+func TestConnectToRandomNode(t *testing.T) {
+ net, ids := newTestNetwork(t, 10)
+ defer net.Shutdown()
+
+ err := net.ConnectToRandomNode(ids[0])
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var cc int
+ for i, a := range ids {
+ for _, b := range ids[i:] {
+ if net.GetConn(a, b) != nil {
+ cc++
+ }
+ }
+ }
+
+ if cc != 1 {
+ t.Errorf("expected one connection, got %v", cc)
+ }
+}
+
+func TestConnectNodesFull(t *testing.T) {
+ tests := []struct {
+ name string
+ nodeCount int
+ }{
+ {name: "no node", nodeCount: 0},
+ {name: "single node", nodeCount: 1},
+ {name: "2 nodes", nodeCount: 2},
+ {name: "3 nodes", nodeCount: 3},
+ {name: "even number of nodes", nodeCount: 12},
+ {name: "odd number of nodes", nodeCount: 13},
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ net, ids := newTestNetwork(t, test.nodeCount)
+ defer net.Shutdown()
+
+ err := net.ConnectNodesFull(ids)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ VerifyFull(t, net, ids)
+ })
+ }
+}
+
+func TestConnectNodesChain(t *testing.T) {
+ net, ids := newTestNetwork(t, 10)
+ defer net.Shutdown()
+
+ err := net.ConnectNodesChain(ids)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ VerifyChain(t, net, ids)
+}
+
+func TestConnectNodesRing(t *testing.T) {
+ net, ids := newTestNetwork(t, 10)
+ defer net.Shutdown()
+
+ err := net.ConnectNodesRing(ids)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ VerifyRing(t, net, ids)
+}
+
+func TestConnectNodesStar(t *testing.T) {
+ net, ids := newTestNetwork(t, 10)
+ defer net.Shutdown()
+
+ pivotIndex := 2
+
+ err := net.ConnectNodesStar(ids, ids[pivotIndex])
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ VerifyStar(t, net, ids, pivotIndex)
+}
diff --git a/p2p/simulations/http_test.go b/p2p/simulations/http_test.go
index d9513caaa..c0a5acb3d 100644
--- a/p2p/simulations/http_test.go
+++ b/p2p/simulations/http_test.go
@@ -35,7 +35,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/rpc"
- colorable "github.com/mattn/go-colorable"
+ "github.com/mattn/go-colorable"
)
var (
@@ -294,6 +294,7 @@ var testServices = adapters.Services{
}
func testHTTPServer(t *testing.T) (*Network, *httptest.Server) {
+ t.Helper()
adapter := adapters.NewSimAdapter(testServices)
network := NewNetwork(adapter, &NetworkConfig{
DefaultService: "test",
diff --git a/p2p/simulations/mocker_test.go b/p2p/simulations/mocker_test.go
index 7c7016a5e..192be1732 100644
--- a/p2p/simulations/mocker_test.go
+++ b/p2p/simulations/mocker_test.go
@@ -15,7 +15,7 @@
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package simulations simulates p2p networks.
-// A mokcer simulates starting and stopping real nodes in a network.
+// A mocker simulates starting and stopping real nodes in a network.
package simulations
import (
@@ -135,13 +135,13 @@ func TestMocker(t *testing.T) {
wg.Wait()
//check there are nodeCount number of nodes in the network
- nodes_info, err := client.GetNodes()
+ nodesInfo, err := client.GetNodes()
if err != nil {
t.Fatalf("Could not get nodes list: %s", err)
}
- if len(nodes_info) != nodeCount {
- t.Fatalf("Expected %d number of nodes, got: %d", nodeCount, len(nodes_info))
+ if len(nodesInfo) != nodeCount {
+ t.Fatalf("Expected %d number of nodes, got: %d", nodeCount, len(nodesInfo))
}
//stop the mocker
@@ -160,12 +160,12 @@ func TestMocker(t *testing.T) {
}
//now the number of nodes in the network should be zero
- nodes_info, err = client.GetNodes()
+ nodesInfo, err = client.GetNodes()
if err != nil {
t.Fatalf("Could not get nodes list: %s", err)
}
- if len(nodes_info) != 0 {
- t.Fatalf("Expected empty list of nodes, got: %d", len(nodes_info))
+ if len(nodesInfo) != 0 {
+ t.Fatalf("Expected empty list of nodes, got: %d", len(nodesInfo))
}
}
diff --git a/p2p/simulations/network.go b/p2p/simulations/network.go
index 92ccfde81..86f7dc9be 100644
--- a/p2p/simulations/network.go
+++ b/p2p/simulations/network.go
@@ -20,7 +20,9 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
+ "math/rand"
"sync"
"time"
@@ -369,23 +371,32 @@ func (net *Network) DidReceive(sender, receiver enode.ID, proto string, code uin
// GetNode gets the node with the given ID, returning nil if the node does not
// exist
func (net *Network) GetNode(id enode.ID) *Node {
- net.lock.Lock()
- defer net.lock.Unlock()
+ net.lock.RLock()
+ defer net.lock.RUnlock()
return net.getNode(id)
}
// GetNode gets the node with the given name, returning nil if the node does
// not exist
func (net *Network) GetNodeByName(name string) *Node {
- net.lock.Lock()
- defer net.lock.Unlock()
+ net.lock.RLock()
+ defer net.lock.RUnlock()
return net.getNodeByName(name)
}
+func (net *Network) getNodeByName(name string) *Node {
+ for _, node := range net.Nodes {
+ if node.Config.Name == name {
+ return node
+ }
+ }
+ return nil
+}
+
// GetNodes returns the existing nodes
func (net *Network) GetNodes() (nodes []*Node) {
- net.lock.Lock()
- defer net.lock.Unlock()
+ net.lock.RLock()
+ defer net.lock.RUnlock()
nodes = append(nodes, net.Nodes...)
return nodes
@@ -399,20 +410,67 @@ func (net *Network) getNode(id enode.ID) *Node {
return net.Nodes[i]
}
-func (net *Network) getNodeByName(name string) *Node {
+// GetRandomUpNode returns a random node on the network that is running.
+func (net *Network) GetRandomUpNode(excludeIDs ...enode.ID) *Node {
+ net.lock.RLock()
+ defer net.lock.RUnlock()
+ return net.getRandomNode(net.getUpNodeIDs(), excludeIDs)
+}
+
+func (net *Network) getUpNodeIDs() (ids []enode.ID) {
for _, node := range net.Nodes {
- if node.Config.Name == name {
- return node
+ if node.Up {
+ ids = append(ids, node.ID())
}
}
- return nil
+ return ids
+}
+
+// GetRandomDownNode returns a random node on the network that is stopped.
+func (net *Network) GetRandomDownNode(excludeIDs ...enode.ID) *Node {
+ net.lock.RLock()
+ defer net.lock.RUnlock()
+ return net.getRandomNode(net.getDownNodeIDs(), excludeIDs)
+}
+
+func (net *Network) getDownNodeIDs() (ids []enode.ID) {
+ for _, node := range net.GetNodes() {
+ if !node.Up {
+ ids = append(ids, node.ID())
+ }
+ }
+ return ids
+}
+
+func (net *Network) getRandomNode(ids []enode.ID, excludeIDs []enode.ID) *Node {
+ filtered := filterIDs(ids, excludeIDs)
+
+ l := len(filtered)
+ if l == 0 {
+ return nil
+ }
+ return net.GetNode(filtered[rand.Intn(l)])
+}
+
+func filterIDs(ids []enode.ID, excludeIDs []enode.ID) []enode.ID {
+ exclude := make(map[enode.ID]bool)
+ for _, id := range excludeIDs {
+ exclude[id] = true
+ }
+ var filtered []enode.ID
+ for _, id := range ids {
+ if _, found := exclude[id]; !found {
+ filtered = append(filtered, id)
+ }
+ }
+ return filtered
}
// GetConn returns the connection which exists between "one" and "other"
// regardless of which node initiated the connection
func (net *Network) GetConn(oneID, otherID enode.ID) *Conn {
- net.lock.Lock()
- defer net.lock.Unlock()
+ net.lock.RLock()
+ defer net.lock.RUnlock()
return net.getConn(oneID, otherID)
}
@@ -458,7 +516,7 @@ func (net *Network) getConn(oneID, otherID enode.ID) *Conn {
return net.Conns[i]
}
-// InitConn(one, other) retrieves the connectiton model for the connection between
+// InitConn(one, other) retrieves the connection model for the connection between
// peers one and other, or creates a new one if it does not exist
// the order of nodes does not matter, i.e., Conn(i,j) == Conn(j, i)
// it checks if the connection is already up, and if the nodes are running
@@ -504,8 +562,8 @@ func (net *Network) Shutdown() {
close(net.quitc)
}
-//Reset resets all network properties:
-//emtpies the nodes and the connection list
+// Reset resets all network properties:
+// empties the nodes and the connection list
func (net *Network) Reset() {
net.lock.Lock()
defer net.lock.Unlock()
@@ -705,8 +763,11 @@ func (net *Network) snapshot(addServices []string, removeServices []string) (*Sn
return snap, nil
}
+var snapshotLoadTimeout = 120 * time.Second
+
// Load loads a network snapshot
func (net *Network) Load(snap *Snapshot) error {
+ // Start nodes.
for _, n := range snap.Nodes {
if _, err := net.NewNodeWithConfig(n.Node.Config); err != nil {
return err
@@ -718,6 +779,69 @@ func (net *Network) Load(snap *Snapshot) error {
return err
}
}
+
+ // Prepare connection events counter.
+ allConnected := make(chan struct{}) // closed when all connections are established
+ done := make(chan struct{}) // ensures that the event loop goroutine is terminated
+ defer close(done)
+
+ // Subscribe to event channel.
+ // It needs to be done outside of the event loop goroutine (created below)
+ // to ensure that the event channel is blocking before connect calls are made.
+ events := make(chan *Event)
+ sub := net.Events().Subscribe(events)
+ defer sub.Unsubscribe()
+
+ go func() {
+ // Expected number of connections.
+ total := len(snap.Conns)
+ // Set of all established connections from the snapshot, not other connections.
+ // Key array element 0 is the connection's One field value, and element 1 its Other field value.
+ connections := make(map[[2]enode.ID]struct{}, total)
+
+ for {
+ select {
+ case e := <-events:
+ // Ignore control events as they do not represent
+ // connect or disconnect (Up) state change.
+ if e.Control {
+ continue
+ }
+ // Detect only connection events.
+ if e.Type != EventTypeConn {
+ continue
+ }
+ connection := [2]enode.ID{e.Conn.One, e.Conn.Other}
+ // Nodes are still not connected or have been disconnected.
+ if !e.Conn.Up {
+ // Delete the connection from the set of established connections.
+ // This will prevent false positives in case disconnections happen.
+ delete(connections, connection)
+ log.Warn("load snapshot: unexpected disconnection", "one", e.Conn.One, "other", e.Conn.Other)
+ continue
+ }
+ // Check that the connection is from the snapshot.
+ for _, conn := range snap.Conns {
+ if conn.One == e.Conn.One && conn.Other == e.Conn.Other {
+ // Add the connection to the set of established connections.
+ connections[connection] = struct{}{}
+ if len(connections) == total {
+ // Signal that all nodes are connected.
+ close(allConnected)
+ return
+ }
+
+ break
+ }
+ }
+ case <-done:
+ // Load function returned, terminate this goroutine.
+ return
+ }
+ }
+ }()
+
+ // Start connecting.
for _, conn := range snap.Conns {
if !net.GetNode(conn.One).Up || !net.GetNode(conn.Other).Up {
@@ -729,6 +853,14 @@ func (net *Network) Load(snap *Snapshot) error {
return err
}
}
+
+ select {
+ // Wait until all connections from the snapshot are established.
+ case <-allConnected:
+ // Make sure that we do not wait forever.
+ case <-time.After(snapshotLoadTimeout):
+ return errors.New("snapshot connections not established")
+ }
return nil
}
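
The Load changes follow a subscribe-first pattern: the event channel is subscribed before any Connect call so no connection event can be missed, and a timeout bounds the wait. A condensed sketch of the same pattern inside package simulations (wanted is a hypothetical list of connections to establish; per-connection disconnect bookkeeping is omitted):

func waitForConns(net *Network, wanted []*Conn) error {
	events := make(chan *Event)
	sub := net.Events().Subscribe(events) // subscribe before acting
	defer sub.Unsubscribe()

	go func() {
		for _, c := range wanted {
			net.Connect(c.One, c.Other) // error handling omitted in this sketch
		}
	}()

	remaining := len(wanted)
	timeout := time.After(snapshotLoadTimeout)
	for remaining > 0 {
		select {
		case e := <-events:
			if e.Type == EventTypeConn && !e.Control && e.Conn.Up {
				remaining--
			}
		case <-timeout:
			return errors.New("snapshot connections not established")
		}
	}
	return nil
}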
diff --git a/p2p/simulations/network_test.go b/p2p/simulations/network_test.go
index f34935265..b7852addb 100644
--- a/p2p/simulations/network_test.go
+++ b/p2p/simulations/network_test.go
@@ -18,14 +18,266 @@ package simulations
import (
"context"
+ "encoding/json"
"fmt"
+ "strconv"
+ "strings"
"testing"
"time"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)
+// Tests that a created snapshot with a minimal service only contains the expected connections
+// and that a network when loaded with this snapshot only contains those same connections
+func TestSnapshot(t *testing.T) {
+
+ // PART I
+ // create snapshot from ring network
+
+ // this is a minimal service whose protocol quits after exactly one message OR a close of the connection
+ adapter := adapters.NewSimAdapter(adapters.Services{
+ "noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ return NewNoopService(nil), nil
+ },
+ })
+
+ // create network
+ network := NewNetwork(adapter, &NetworkConfig{
+ DefaultService: "noopwoop",
+ })
+ // \todo consider making this flag a member of Network, updated thread-safely on shutdown
+ runningOne := true
+ defer func() {
+ if runningOne {
+ network.Shutdown()
+ }
+ }()
+
+ // create and start nodes
+ nodeCount := 20
+ ids := make([]enode.ID, nodeCount)
+ for i := 0; i < nodeCount; i++ {
+ conf := adapters.RandomNodeConfig()
+ node, err := network.NewNodeWithConfig(conf)
+ if err != nil {
+ t.Fatalf("error creating node: %s", err)
+ }
+ if err := network.Start(node.ID()); err != nil {
+ t.Fatalf("error starting node: %s", err)
+ }
+ ids[i] = node.ID()
+ }
+
+ // subscribe to peer events
+ evC := make(chan *Event)
+ sub := network.Events().Subscribe(evC)
+ defer sub.Unsubscribe()
+
+ // connect nodes in a ring
+ // spawn a separate goroutine to avoid deadlocking the event listeners
+ go func() {
+ for i, id := range ids {
+ peerID := ids[(i+1)%len(ids)]
+ if err := network.Connect(id, peerID); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }()
+
+ // collect connection events up to expected number
+ ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ checkIds := make(map[enode.ID][]enode.ID)
+ connEventCount := nodeCount
+OUTER:
+ for {
+ select {
+ case <-ctx.Done():
+ t.Fatal(ctx.Err())
+ case ev := <-evC:
+ if ev.Type == EventTypeConn && !ev.Control {
+
+ // fail on any disconnect
+ if !ev.Conn.Up {
+ t.Fatalf("unexpected disconnect: %v -> %v", ev.Conn.One, ev.Conn.Other)
+ }
+ checkIds[ev.Conn.One] = append(checkIds[ev.Conn.One], ev.Conn.Other)
+ checkIds[ev.Conn.Other] = append(checkIds[ev.Conn.Other], ev.Conn.One)
+ connEventCount--
+ log.Debug("ev", "count", connEventCount)
+ if connEventCount == 0 {
+ break OUTER
+ }
+ }
+ }
+ }
+
+ // create snapshot of current network
+ snap, err := network.Snapshot()
+ if err != nil {
+ t.Fatal(err)
+ }
+ j, err := json.Marshal(snap)
+ if err != nil {
+ t.Fatal(err)
+ }
+ log.Debug("snapshot taken", "nodes", len(snap.Nodes), "conns", len(snap.Conns), "json", string(j))
+
+ // verify that the snap element numbers check out
+ if len(checkIds) != len(snap.Conns) || len(checkIds) != len(snap.Nodes) {
+ t.Fatalf("snapshot wrong node,conn counts %d,%d != %d", len(snap.Nodes), len(snap.Conns), len(checkIds))
+ }
+
+ // shut down sim network
+ runningOne = false
+ sub.Unsubscribe()
+ network.Shutdown()
+
+ // check that we have all the expected connections in the snapshot
+ for nodid, nodConns := range checkIds {
+ for _, nodConn := range nodConns {
+ var match bool
+ for _, snapConn := range snap.Conns {
+ if snapConn.One == nodid && snapConn.Other == nodConn {
+ match = true
+ break
+ } else if snapConn.Other == nodid && snapConn.One == nodConn {
+ match = true
+ break
+ }
+ }
+ if !match {
+ t.Fatalf("snapshot missing conn %v -> %v", nodid, nodConn)
+ }
+ }
+ }
+ log.Info("snapshot checked")
+
+ // PART II
+ // load snapshot and verify that exactly same connections are formed
+
+ adapter = adapters.NewSimAdapter(adapters.Services{
+ "noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ return NewNoopService(nil), nil
+ },
+ })
+ network = NewNetwork(adapter, &NetworkConfig{
+ DefaultService: "noopwoop",
+ })
+ defer func() {
+ network.Shutdown()
+ }()
+
+ // subscribe to peer events
+ // every node up and conn up event will generate one additional control event
+ // therefore multiply the count by two
+ evC = make(chan *Event, (len(snap.Conns)*2)+(len(snap.Nodes)*2))
+ sub = network.Events().Subscribe(evC)
+ defer sub.Unsubscribe()
+
+ // load the snapshot
+ // Load blocks until all connections from the snapshot are established
+ err = network.Load(snap)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // collect connection events up to expected number
+ ctx, cancel = context.WithTimeout(context.TODO(), time.Second*3)
+ defer cancel()
+
+ connEventCount = nodeCount
+
+OUTER_TWO:
+ for {
+ select {
+ case <-ctx.Done():
+ t.Fatal(ctx.Err())
+ case ev := <-evC:
+ if ev.Type == EventTypeConn && !ev.Control {
+
+ // fail on any disconnect
+ if !ev.Conn.Up {
+ t.Fatalf("unexpected disconnect: %v -> %v", ev.Conn.One, ev.Conn.Other)
+ }
+ log.Debug("conn", "on", ev.Conn.One, "other", ev.Conn.Other)
+ checkIds[ev.Conn.One] = append(checkIds[ev.Conn.One], ev.Conn.Other)
+ checkIds[ev.Conn.Other] = append(checkIds[ev.Conn.Other], ev.Conn.One)
+ connEventCount--
+ log.Debug("ev", "count", connEventCount)
+ if connEventCount == 0 {
+ break OUTER_TWO
+ }
+ }
+ }
+ }
+
+ // check that we have all expected connections in the network
+ for _, snapConn := range snap.Conns {
+ var match bool
+ for nodid, nodConns := range checkIds {
+ for _, nodConn := range nodConns {
+ if snapConn.One == nodid && snapConn.Other == nodConn {
+ match = true
+ break
+ } else if snapConn.Other == nodid && snapConn.One == nodConn {
+ match = true
+ break
+ }
+ }
+ }
+ if !match {
+ t.Fatalf("network missing conn %v -> %v", snapConn.One, snapConn.Other)
+ }
+ }
+
+ // verify that, within a reasonable period of time, the network did not generate any connection events beyond the ones already collected
+ ctx, cancel = context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ select {
+ case <-ctx.Done():
+ case ev := <-evC:
+ if ev.Type == EventTypeConn {
+ t.Fatalf("Superfluous conn found %v -> %v", ev.Conn.One, ev.Conn.Other)
+ }
+ }
+
+ // This test validates that all connections from the snapshot
+ // are created in the network.
+ t.Run("conns after load", func(t *testing.T) {
+ // Create new network.
+ n := NewNetwork(
+ adapters.NewSimAdapter(adapters.Services{
+ "noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ return NewNoopService(nil), nil
+ },
+ }),
+ &NetworkConfig{
+ DefaultService: "noopwoop",
+ },
+ )
+ defer n.Shutdown()
+
+ // Load the same snapshot.
+ err := n.Load(snap)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Check every connection from the snapshot
+ // if it is in the network, too.
+ for _, c := range snap.Conns {
+ if n.GetConn(c.One, c.Other) == nil {
+ t.Errorf("missing connection: %s -> %s", c.One, c.Other)
+ }
+ }
+ })
+}
+
// TestNetworkSimulation creates a multi-node simulation network with each node
// connected in a ring topology, checks that all nodes successfully handshake
// with each other and that a snapshot fully represents the desired topology
@@ -158,3 +410,78 @@ func triggerChecks(ctx context.Context, ids []enode.ID, trigger chan enode.ID, i
}
}
}
+
+// \todo: refactor to implement snapshots
+// and connect configuration methods once these are moved from
+// swarm/network/simulations/connect.go
+func BenchmarkMinimalService(b *testing.B) {
+ b.Run("ring/32", benchmarkMinimalServiceTmp)
+}
+
+func benchmarkMinimalServiceTmp(b *testing.B) {
+
+ // stop timer to discard setup time pollution
+ args := strings.Split(b.Name(), "/")
+ nodeCount, err := strconv.ParseInt(args[2], 10, 16)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ for i := 0; i < b.N; i++ {
+ // this is a minimal service whose protocol closes a channel when it runs,
+ // making it possible to benchmark how long it takes for the service to start and the protocol to actually run
+ protoCMap := make(map[enode.ID]map[enode.ID]chan struct{})
+ adapter := adapters.NewSimAdapter(adapters.Services{
+ "noopwoop": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ protoCMap[ctx.Config.ID] = make(map[enode.ID]chan struct{})
+ svc := NewNoopService(protoCMap[ctx.Config.ID])
+ return svc, nil
+ },
+ })
+
+ // create network
+ network := NewNetwork(adapter, &NetworkConfig{
+ DefaultService: "noopwoop",
+ })
+ defer network.Shutdown()
+
+ // create and start nodes
+ ids := make([]enode.ID, nodeCount)
+ for i := 0; i < int(nodeCount); i++ {
+ conf := adapters.RandomNodeConfig()
+ node, err := network.NewNodeWithConfig(conf)
+ if err != nil {
+ b.Fatalf("error creating node: %s", err)
+ }
+ if err := network.Start(node.ID()); err != nil {
+ b.Fatalf("error starting node: %s", err)
+ }
+ ids[i] = node.ID()
+ }
+
+ // ready, set, go
+ b.ResetTimer()
+
+ // connect nodes in a ring
+ for i, id := range ids {
+ peerID := ids[(i+1)%len(ids)]
+ if err := network.Connect(id, peerID); err != nil {
+ b.Fatal(err)
+ }
+ }
+
+ // wait for all protocols to signal to close down
+ ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ for nodid, peers := range protoCMap {
+ for peerid, peerC := range peers {
+ log.Debug("getting ", "node", nodid, "peer", peerid)
+ select {
+ case <-ctx.Done():
+ b.Fatal(ctx.Err())
+ case <-peerC:
+ }
+ }
+ }
+ }
+}
diff --git a/p2p/simulations/test.go b/p2p/simulations/test.go
new file mode 100644
index 000000000..beeb414e4
--- /dev/null
+++ b/p2p/simulations/test.go
@@ -0,0 +1,134 @@
+package simulations
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/enr"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
+// NoopService is the service that does not do anything
+// but implements node.Service interface.
+type NoopService struct {
+ c map[enode.ID]chan struct{}
+}
+
+func NewNoopService(ackC map[enode.ID]chan struct{}) *NoopService {
+ return &NoopService{
+ c: ackC,
+ }
+}
+
+func (t *NoopService) Protocols() []p2p.Protocol {
+ return []p2p.Protocol{
+ {
+ Name: "noop",
+ Version: 666,
+ Length: 0,
+ Run: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
+ if t.c != nil {
+ t.c[peer.ID()] = make(chan struct{})
+ close(t.c[peer.ID()])
+ }
+ rw.ReadMsg()
+ return nil
+ },
+ NodeInfo: func() interface{} {
+ return struct{}{}
+ },
+ PeerInfo: func(id enode.ID) interface{} {
+ return struct{}{}
+ },
+ Attributes: []enr.Entry{},
+ },
+ }
+}
+
+func (t *NoopService) APIs() []rpc.API {
+ return []rpc.API{}
+}
+
+func (t *NoopService) Start(server *p2p.Server) error {
+ return nil
+}
+
+func (t *NoopService) Stop() error {
+ return nil
+}
+
+func VerifyRing(t *testing.T, net *Network, ids []enode.ID) {
+ t.Helper()
+ n := len(ids)
+ for i := 0; i < n; i++ {
+ for j := i + 1; j < n; j++ {
+ c := net.GetConn(ids[i], ids[j])
+ if i == j-1 || (i == 0 && j == n-1) {
+ if c == nil {
+ t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
+ }
+ } else {
+ if c != nil {
+ t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
+ }
+ }
+ }
+ }
+}
+
+func VerifyChain(t *testing.T, net *Network, ids []enode.ID) {
+ t.Helper()
+ n := len(ids)
+ for i := 0; i < n; i++ {
+ for j := i + 1; j < n; j++ {
+ c := net.GetConn(ids[i], ids[j])
+ if i == j-1 {
+ if c == nil {
+ t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
+ }
+ } else {
+ if c != nil {
+ t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
+ }
+ }
+ }
+ }
+}
+
+func VerifyFull(t *testing.T, net *Network, ids []enode.ID) {
+ t.Helper()
+ n := len(ids)
+ var connections int
+ for i, lid := range ids {
+ for _, rid := range ids[i+1:] {
+ if net.GetConn(lid, rid) != nil {
+ connections++
+ }
+ }
+ }
+
+ want := n * (n - 1) / 2
+ if connections != want {
+ t.Errorf("wrong number of connections, got: %v, want: %v", connections, want)
+ }
+}
+
+func VerifyStar(t *testing.T, net *Network, ids []enode.ID, centerIndex int) {
+ t.Helper()
+ n := len(ids)
+ for i := 0; i < n; i++ {
+ for j := i + 1; j < n; j++ {
+ c := net.GetConn(ids[i], ids[j])
+ if i == centerIndex || j == centerIndex {
+ if c == nil {
+ t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
+ }
+ } else {
+ if c != nil {
+ t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
+ }
+ }
+ }
+ }
+}
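
The Verify* helpers above assert an exact topology rather than a lower bound: VerifyRing, for instance, requires node i to be connected to node i+1 (wrapping around at the end) and no other pair to be connected. A minimal usage sketch, assuming an illustrative "noop" service name and a four-node network (neither is part of this patch):

    package simulations_test

    import (
    	"testing"

    	"github.com/ethereum/go-ethereum/node"
    	"github.com/ethereum/go-ethereum/p2p/enode"
    	"github.com/ethereum/go-ethereum/p2p/simulations"
    	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    )

    func TestRingTopology(t *testing.T) {
    	// Register the NoopService under an illustrative name and make it the default.
    	adapter := adapters.NewSimAdapter(adapters.Services{
    		"noop": func(ctx *adapters.ServiceContext) (node.Service, error) {
    			return simulations.NewNoopService(nil), nil
    		},
    	})
    	network := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
    		DefaultService: "noop",
    	})
    	defer network.Shutdown()

    	// Create and start four nodes.
    	ids := make([]enode.ID, 4)
    	for i := range ids {
    		n, err := network.NewNodeWithConfig(adapters.RandomNodeConfig())
    		if err != nil {
    			t.Fatal(err)
    		}
    		if err := network.Start(n.ID()); err != nil {
    			t.Fatal(err)
    		}
    		ids[i] = n.ID()
    	}

    	// Connect each node to its successor, wrapping around at the end.
    	for i, id := range ids {
    		if err := network.Connect(id, ids[(i+1)%len(ids)]); err != nil {
    			t.Fatal(err)
    		}
    	}

    	// Assert that exactly the ring edges exist.
    	simulations.VerifyRing(t, network, ids)
    }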
diff --git a/params/config.go b/params/config.go
index 007e4a66d..fefc16106 100644
--- a/params/config.go
+++ b/params/config.go
@@ -49,10 +49,10 @@ var (
// MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network.
MainnetTrustedCheckpoint = &TrustedCheckpoint{
Name: "mainnet",
- SectionIndex: 203,
- SectionHead: common.HexToHash("0xc9e05fc67c6a9815adc8072eb18805b53da53a9a6a273e05541e1b7542cf937a"),
- CHTRoot: common.HexToHash("0xb85f42447d59f7c3e6679b9a37ed983593fd52efd6251b883592662e95769d5b"),
- BloomRoot: common.HexToHash("0xf93d50cb4c49b403c6fd33cd60896d3b36184275be0a51bae4df5e8844ac624c"),
+ SectionIndex: 208,
+ SectionHead: common.HexToHash("0x5e9f7696c397d9df8f3b1abda857753575c6f5cff894e1a3d9e1a2af1bd9d6ac"),
+ CHTRoot: common.HexToHash("0x954a63134f6897f015f026387c59c98c4dae7b336610ff5a143455aac9153e9d"),
+ BloomRoot: common.HexToHash("0x8006c5e44b14d90d7cc9cd5fa1cb48cf53697ee3bbbf4b76fdfa70b0242500a9"),
}
// TestnetChainConfig contains the chain parameters to run a node on the Ropsten test network.
@@ -73,10 +73,10 @@ var (
// TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network.
TestnetTrustedCheckpoint = &TrustedCheckpoint{
Name: "testnet",
- SectionIndex: 134,
- SectionHead: common.HexToHash("0x17053ecbe045bebefaa01e7716cc85a4e22647e181416cc1098ccbb73a088931"),
- CHTRoot: common.HexToHash("0x4d2b86422e46ed76f0e3f50f06632c409f809c8375e53c8bc0f782bcb93dd49a"),
- BloomRoot: common.HexToHash("0xccba62232ee56c2967afc58f136a47ba7dc545ae586e6be666430d94516306c7"),
+ SectionIndex: 139,
+ SectionHead: common.HexToHash("0x9fad89a5e3b993c8339b9cf2cbbeb72cd08774ea6b71b105b3dd880420c618f4"),
+ CHTRoot: common.HexToHash("0xc815833881989c5d2035147e1a79a33d22cbc5313e104ff01e6ab405bd28b317"),
+ BloomRoot: common.HexToHash("0xd94ee9f3c480858f53ec5d059aebdbb2e8d904702f100875ee59ec5f366e841d"),
}
// RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network.
@@ -90,7 +90,7 @@ var (
EIP155Block: big.NewInt(3),
EIP158Block: big.NewInt(3),
ByzantiumBlock: big.NewInt(1035301),
- ConstantinopleBlock: nil,
+ ConstantinopleBlock: big.NewInt(3660663),
Clique: &CliqueConfig{
Period: 15,
Epoch: 30000,
@@ -100,10 +100,10 @@ var (
// RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network.
RinkebyTrustedCheckpoint = &TrustedCheckpoint{
Name: "rinkeby",
- SectionIndex: 100,
- SectionHead: common.HexToHash("0xf18f9b43e16f37b12e68818536ffe455ff18d676274ffdd856a8520ed61bb514"),
- CHTRoot: common.HexToHash("0x473f5d603b1fedad75d97fd58692130b9ac9ade1aca01eb9363d79bd1c43c791"),
- BloomRoot: common.HexToHash("0xa39ced3ddbb87e909c7531df2afb6414bea9c9a60ab94da9c6b467535f05326e"),
+ SectionIndex: 105,
+ SectionHead: common.HexToHash("0xec8147d43f936258aaf1b9b9ec91b0a853abf7109f436a23649be809ea43d507"),
+ CHTRoot: common.HexToHash("0xd92703b444846a3db928e87e450770e5d5cbe193131dc8f7c4cf18b4de925a75"),
+ BloomRoot: common.HexToHash("0xff45a6f807138a2cde0cea0c209d9ce5ad8e43ccaae5a7c41af801bb72a1ef96"),
}
// AllEthashProtocolChanges contains every protocol change (EIPs) introduced
@@ -111,16 +111,16 @@ var (
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
- AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
+ AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
// and accepted by the Ethereum core developers into the Clique consensus.
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
- AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
+ AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
- TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
+ TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
TestRules = TestChainConfig.Rules(new(big.Int))
)
diff --git a/params/version.go b/params/version.go
index b9dcc2a84..f8848d47c 100644
--- a/params/version.go
+++ b/params/version.go
@@ -21,10 +21,10 @@ import (
)
const (
- VersionMajor = 1 // Major version component of the current release
- VersionMinor = 8 // Minor version component of the current release
- VersionPatch = 19 // Patch version component of the current release
- VersionMeta = "unstable" // Version metadata to append to the version string
+ VersionMajor = 1 // Major version component of the current release
+ VersionMinor = 8 // Minor version component of the current release
+ VersionPatch = 21 // Patch version component of the current release
+ VersionMeta = "stable" // Version metadata to append to the version string
)
// Version holds the textual version string.
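
For orientation, a standalone sketch of how these constants combine into the advertised version string; the assembly mirrors what params computes from them, reconstructed here rather than quoted from this patch:

    package main

    import "fmt"

    const (
    	VersionMajor = 1        // Major version component of the current release
    	VersionMinor = 8        // Minor version component of the current release
    	VersionPatch = 21       // Patch version component of the current release
    	VersionMeta  = "stable" // Version metadata to append to the version string
    )

    func main() {
    	v := fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch)
    	if VersionMeta != "" {
    		v += "-" + VersionMeta
    	}
    	fmt.Println(v) // 1.8.21-stable
    }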
diff --git a/rpc/http.go b/rpc/http.go
index af79858e2..674166fb3 100644
--- a/rpc/http.go
+++ b/rpc/http.go
@@ -36,11 +36,15 @@ import (
)
const (
- contentType = "application/json"
maxRequestContentLength = 1024 * 512
)
-var nullAddr, _ = net.ResolveTCPAddr("tcp", "127.0.0.1:0")
+var (
+ // https://www.jsonrpc.org/historical/json-rpc-over-http.html#id13
+ acceptedContentTypes = []string{"application/json", "application/json-rpc", "application/jsonrequest"}
+ contentType = acceptedContentTypes[0]
+ nullAddr, _ = net.ResolveTCPAddr("tcp", "127.0.0.1:0")
+)
type httpConn struct {
client *http.Client
@@ -263,12 +267,21 @@ func validateRequest(r *http.Request) (int, error) {
err := fmt.Errorf("content length too large (%d>%d)", r.ContentLength, maxRequestContentLength)
return http.StatusRequestEntityTooLarge, err
}
- mt, _, err := mime.ParseMediaType(r.Header.Get("content-type"))
- if r.Method != http.MethodOptions && (err != nil || mt != contentType) {
- err := fmt.Errorf("invalid content type, only %s is supported", contentType)
- return http.StatusUnsupportedMediaType, err
+ // Allow OPTIONS (regardless of content-type)
+ if r.Method == http.MethodOptions {
+ return 0, nil
}
- return 0, nil
+ // Check content-type
+ if mt, _, err := mime.ParseMediaType(r.Header.Get("content-type")); err == nil {
+ for _, accepted := range acceptedContentTypes {
+ if accepted == mt {
+ return 0, nil
+ }
+ }
+ }
+ // Invalid content-type
+ err := fmt.Errorf("invalid content type, only %s is supported", contentType)
+ return http.StatusUnsupportedMediaType, err
}
func newCorsHandler(srv *Server, allowedOrigins []string) http.Handler {
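
validateRequest now accepts all three media types named by the JSON-RPC-over-HTTP document, while everything else still yields 415 Unsupported Media Type. A package-internal test sketch (the test name is hypothetical, not part of this patch):

    package rpc

    import (
    	"net/http"
    	"net/http/httptest"
    	"strings"
    	"testing"
    )

    func TestRPCValidHTTPContentTypes(t *testing.T) {
    	for _, ct := range []string{
    		"application/json",
    		"application/json-rpc",
    		"application/jsonrequest",
    	} {
    		// mime.ParseMediaType strips parameters, so charset suffixes still match.
    		r := httptest.NewRequest(http.MethodPost, "/", strings.NewReader("{}"))
    		r.Header.Set("Content-Type", ct+"; charset=utf-8")
    		if code, err := validateRequest(r); err != nil {
    			t.Errorf("content type %q rejected: %d %v", ct, code, err)
    		}
    	}

    	// Anything outside the accepted list is still rejected.
    	r := httptest.NewRequest(http.MethodPost, "/", strings.NewReader("{}"))
    	r.Header.Set("Content-Type", "text/plain")
    	if code, _ := validateRequest(r); code != http.StatusUnsupportedMediaType {
    		t.Errorf("expected status %d, got %d", http.StatusUnsupportedMediaType, code)
    	}
    }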
diff --git a/rpc/ipc.go b/rpc/ipc.go
index b05e503d7..4cce1cf74 100644
--- a/rpc/ipc.go
+++ b/rpc/ipc.go
@@ -29,12 +29,12 @@ func (srv *Server) ServeListener(l net.Listener) error {
for {
conn, err := l.Accept()
if netutil.IsTemporaryError(err) {
- log.Warn("RPC accept error", "err", err)
+ log.Warn("IPC accept error", "err", err)
continue
} else if err != nil {
return err
}
- log.Trace("Accepted connection", "addr", conn.RemoteAddr())
+ log.Trace("IPC accepted connection")
go srv.ServeCodec(NewJSONCodec(conn), OptionMethodInvocation|OptionSubscriptions)
}
}
diff --git a/rpc/ipc_unix.go b/rpc/ipc_unix.go
index 0851ea61e..707b47fd7 100644
--- a/rpc/ipc_unix.go
+++ b/rpc/ipc_unix.go
@@ -20,13 +20,31 @@ package rpc
import (
"context"
+ "fmt"
"net"
"os"
"path/filepath"
+
+ "github.com/ethereum/go-ethereum/log"
)
+/*
+#include <sys/un.h>
+
+int max_socket_path_size() {
+struct sockaddr_un s;
+return sizeof(s.sun_path);
+}
+*/
+import "C"
+
// ipcListen will create a Unix socket on the given endpoint.
func ipcListen(endpoint string) (net.Listener, error) {
+ if len(endpoint) > int(C.max_socket_path_size()) {
+ log.Warn(fmt.Sprintf("The ipc endpoint is longer than %d characters. ", C.max_socket_path_size()),
+ "endpoint", endpoint)
+ }
+
// Ensure the IPC path exists and remove any previous leftover
if err := os.MkdirAll(filepath.Dir(endpoint), 0751); err != nil {
return nil, err
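
The new warning exists because Unix domain socket paths are capped at sizeof(sockaddr_un.sun_path), which the cgo helper reads out at compile time (commonly 104 bytes on macOS and 108 on Linux); binding a longer endpoint simply fails. A cgo-free sketch demonstrating the kernel limit, with an illustrative /tmp path:

    package main

    import (
    	"fmt"
    	"net"
    	"strings"
    )

    func main() {
    	// sun_path is roughly 104 (macOS) / 108 (Linux) bytes, so a 200-character
    	// socket path cannot be bound and Listen returns an error.
    	endpoint := "/tmp/" + strings.Repeat("x", 200) + ".ipc"
    	if _, err := net.Listen("unix", endpoint); err != nil {
    		fmt.Println("listen failed as expected:", err)
    	}
    }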
diff --git a/signer/core/api.go b/signer/core/api.go
index 2b96cdb5f..e9a335785 100644
--- a/signer/core/api.go
+++ b/signer/core/api.go
@@ -82,7 +82,7 @@ type SignerUI interface {
// OnSignerStartup is invoked when the signer boots, and tells the UI info about external API location and version
// information
OnSignerStartup(info StartupInfo)
- // OnInputRequried is invoked when clef requires user input, for example master password or
+ // OnInputRequired is invoked when clef requires user input, for example master password or
// pin-code for unlocking hardware wallets
OnInputRequired(info UserInputRequest) (UserInputResponse, error)
}
diff --git a/swarm/OWNERS b/swarm/OWNERS
index d4204e08c..4b9ca96eb 100644
--- a/swarm/OWNERS
+++ b/swarm/OWNERS
@@ -7,7 +7,6 @@ swarm
├── fuse ────────────────── @jmozah, @holisticode
├── grafana_dashboards ──── @nonsense
├── metrics ─────────────── @nonsense, @holisticode
-├── multihash ───────────── @nolash
├── network ─────────────── ethersphere
│ ├── bitvector ───────── @zelig, @janos, @gbalint
│ ├── priorityqueue ───── @zelig, @janos, @gbalint
diff --git a/swarm/api/act.go b/swarm/api/act.go
index e54369f9a..9566720b0 100644
--- a/swarm/api/act.go
+++ b/swarm/api/act.go
@@ -15,11 +15,11 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/sctx"
"github.com/ethereum/go-ethereum/swarm/storage"
"golang.org/x/crypto/scrypt"
+ "golang.org/x/crypto/sha3"
cli "gopkg.in/urfave/cli.v1"
)
@@ -336,7 +336,7 @@ func (a *API) doDecrypt(ctx context.Context, credentials string, pk *ecdsa.Priva
}
func (a *API) getACTDecryptionKey(ctx context.Context, actManifestAddress storage.Address, sessionKey []byte) (found bool, ciphertext, decryptionKey []byte, err error) {
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
hasher.Write(append(sessionKey, 0))
lookupKey := hasher.Sum(nil)
hasher.Reset()
@@ -462,7 +462,7 @@ func DoACT(ctx *cli.Context, privateKey *ecdsa.PrivateKey, salt []byte, grantees
return nil, nil, nil, err
}
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
hasher.Write(append(sessionKey, 0))
lookupKey := hasher.Sum(nil)
@@ -484,7 +484,7 @@ func DoACT(ctx *cli.Context, privateKey *ecdsa.PrivateKey, salt []byte, grantees
if err != nil {
return nil, nil, nil, err
}
- hasher := sha3.NewKeccak256()
+ hasher := sha3.NewLegacyKeccak256()
hasher.Write(append(sessionKey, 0))
lookupKey := hasher.Sum(nil)
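
This hunk (and the matching ones in swarm/api/encrypt.go and swarm/bmt below) swaps the vendored crypto/sha3 for golang.org/x/crypto/sha3, where Ethereum's pre-standardization Keccak-256 is exposed as NewLegacyKeccak256; sha3.New256 would be NIST SHA3-256, which pads differently and produces different digests. A quick sanity-check sketch:

    package main

    import (
    	"fmt"

    	"golang.org/x/crypto/sha3"
    )

    func main() {
    	// Legacy Keccak-256 (0x01 domain padding), as used throughout Ethereum.
    	// The empty-input digest is the familiar
    	// c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470.
    	legacy := sha3.NewLegacyKeccak256()
    	fmt.Printf("keccak256(\"\") = %x\n", legacy.Sum(nil))

    	// NIST SHA3-256 (0x06 domain padding) differs on the same input.
    	standard := sha3.New256()
    	fmt.Printf("sha3-256(\"\")  = %x\n", standard.Sum(nil))
    }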
diff --git a/swarm/api/api.go b/swarm/api/api.go
index 7bb631967..c6ca1b577 100644
--- a/swarm/api/api.go
+++ b/swarm/api/api.go
@@ -42,7 +42,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/log"
- "github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
@@ -51,10 +50,6 @@ import (
opentracing "github.com/opentracing/opentracing-go"
)
-var (
- ErrNotFound = errors.New("not found")
-)
-
var (
apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil)
apiResolveFail = metrics.NewRegisteredCounter("api.resolve.fail", nil)
@@ -137,13 +132,6 @@ func MultiResolverOptionWithResolver(r ResolveValidator, tld string) MultiResolv
}
}
-// MultiResolverOptionWithNameHash is unused at the time of this writing
-func MultiResolverOptionWithNameHash(nameHash func(string) common.Hash) MultiResolverOption {
- return func(m *MultiResolver) {
- m.nameHash = nameHash
- }
-}
-
// NewMultiResolver creates a new instance of MultiResolver.
func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) {
m = &MultiResolver{
@@ -174,40 +162,6 @@ func (m *MultiResolver) Resolve(addr string) (h common.Hash, err error) {
return
}
-// ValidateOwner checks the ENS to validate that the owner of the given domain is the given eth address
-func (m *MultiResolver) ValidateOwner(name string, address common.Address) (bool, error) {
- rs, err := m.getResolveValidator(name)
- if err != nil {
- return false, err
- }
- var addr common.Address
- for _, r := range rs {
- addr, err = r.Owner(m.nameHash(name))
- // we hide the error if it is not for the last resolver we check
- if err == nil {
- return addr == address, nil
- }
- }
- return false, err
-}
-
-// HeaderByNumber uses the validator of the given domainname and retrieves the header for the given block number
-func (m *MultiResolver) HeaderByNumber(ctx context.Context, name string, blockNr *big.Int) (*types.Header, error) {
- rs, err := m.getResolveValidator(name)
- if err != nil {
- return nil, err
- }
- for _, r := range rs {
- var header *types.Header
- header, err = r.HeaderByNumber(ctx, blockNr)
- // we hide the error if it is not for the last resolver we check
- if err == nil {
- return header, nil
- }
- }
- return nil, err
-}
-
// getResolveValidator uses the hostname to retrieve the resolver associated with the top level domain
func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, error) {
rs := m.resolvers[""]
@@ -225,11 +179,6 @@ func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, er
return rs, nil
}
-// SetNameHash sets the hasher function that hashes the domain into a name hash that ENS uses
-func (m *MultiResolver) SetNameHash(nameHash func(string) common.Hash) {
- m.nameHash = nameHash
-}
-
/*
API implements webserver/file system related content storage and retrieval
on top of the FileStore
@@ -266,9 +215,6 @@ func (a *API) Store(ctx context.Context, data io.Reader, size int64, toEncrypt b
return a.fileStore.Store(ctx, data, size, toEncrypt)
}
-// ErrResolve is returned when an URI cannot be resolved from ENS.
-type ErrResolve error
-
// Resolve a name into a content-addressed hash
// where address could be an ENS name, or a content addressed hash
func (a *API) Resolve(ctx context.Context, address string) (storage.Address, error) {
@@ -417,7 +363,7 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage
return reader, mimeType, status, nil, err
}
// get the data of the update
- _, rsrcData, err := a.feed.GetContent(entry.Feed)
+ _, contentAddr, err := a.feed.GetContent(entry.Feed)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
@@ -425,23 +371,23 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage
return reader, mimeType, status, nil, err
}
- // extract multihash
- decodedMultihash, err := multihash.FromMultihash(rsrcData)
- if err != nil {
+ // extract content hash
+ if len(contentAddr) != storage.AddressLength {
apiGetInvalid.Inc(1)
status = http.StatusUnprocessableEntity
- log.Warn("invalid multihash in feed update", "err", err)
- return reader, mimeType, status, nil, err
+ errorMessage := fmt.Sprintf("invalid swarm hash in feed update. Expected %d bytes. Got %d", storage.AddressLength, len(contentAddr))
+ log.Warn(errorMessage)
+ return reader, mimeType, status, nil, errors.New(errorMessage)
}
- manifestAddr = storage.Address(decodedMultihash)
- log.Trace("feed update contains multihash", "key", manifestAddr)
+ manifestAddr = storage.Address(contentAddr)
+ log.Trace("feed update contains swarm hash", "key", manifestAddr)
- // get the manifest the multihash digest points to
+ // get the manifest the swarm hash points to
trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, NOOPDecrypt)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
- log.Warn(fmt.Sprintf("loadManifestTrie (feed update multihash) error: %v", err))
+ log.Warn(fmt.Sprintf("loadManifestTrie (feed update) error: %v", err))
return reader, mimeType, status, nil, err
}
@@ -451,8 +397,8 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage
if entry == nil {
status = http.StatusNotFound
apiGetNotFound.Inc(1)
- err = fmt.Errorf("manifest (feed update multihash) entry for '%s' not found", path)
- log.Trace("manifest (feed update multihash) entry not found", "key", manifestAddr, "path", path)
+ err = fmt.Errorf("manifest (feed update) entry for '%s' not found", path)
+ log.Trace("manifest (feed update) entry not found", "key", manifestAddr, "path", path)
return reader, mimeType, status, nil, err
}
}
@@ -472,7 +418,7 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage
// no entry found
status = http.StatusNotFound
apiGetNotFound.Inc(1)
- err = fmt.Errorf("manifest entry for '%s' not found", path)
+ err = fmt.Errorf("Not found: could not find resource '%s'", path)
log.Trace("manifest entry not found", "key", contentAddr, "path", path)
}
return
@@ -981,11 +927,6 @@ func (a *API) FeedsUpdate(ctx context.Context, request *feed.Request) (storage.A
return a.feed.Update(ctx, request)
}
-// FeedsHashSize returned the size of the digest produced by Swarm feeds' hashing function
-func (a *API) FeedsHashSize() int {
- return a.feed.HashSize
-}
-
// ErrCannotLoadFeedManifest is returned when looking up a feeds manifest fails
var ErrCannotLoadFeedManifest = errors.New("Cannot load feed manifest")
diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go
index d9837ca73..5e293cca7 100644
--- a/swarm/api/client/client.go
+++ b/swarm/api/client/client.go
@@ -19,6 +19,7 @@ package client
import (
"archive/tar"
"bytes"
+ "context"
"encoding/json"
"errors"
"fmt"
@@ -26,6 +27,7 @@ import (
"io/ioutil"
"mime/multipart"
"net/http"
+ "net/http/httptrace"
"net/textproto"
"net/url"
"os"
@@ -33,14 +35,14 @@ import (
"regexp"
"strconv"
"strings"
+ "time"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api"
+ "github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
-)
-
-var (
- DefaultGateway = "http://localhost:8500"
- DefaultClient = NewClient(DefaultGateway)
+ "github.com/pborman/uuid"
)
var (
@@ -474,6 +476,11 @@ type UploadFn func(file *File) error
// TarUpload uses the given Uploader to upload files to swarm as a tar stream,
// returning the resulting manifest hash
func (c *Client) TarUpload(hash string, uploader Uploader, defaultPath string, toEncrypt bool) (string, error) {
+ ctx, sp := spancontext.StartSpan(context.Background(), "api.client.tarupload")
+ defer sp.Finish()
+
+ var tn time.Time
+
reqR, reqW := io.Pipe()
defer reqR.Close()
addr := hash
@@ -489,6 +496,12 @@ func (c *Client) TarUpload(hash string, uploader Uploader, defaultPath string, t
if err != nil {
return "", err
}
+
+ trace := GetClientTrace("swarm api client - upload tar", "api.client.uploadtar", uuid.New()[:8], &tn)
+
+ req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
+ transport := http.DefaultTransport
+
req.Header.Set("Content-Type", "application/x-tar")
if defaultPath != "" {
q := req.URL.Query()
@@ -529,8 +542,8 @@ func (c *Client) TarUpload(hash string, uploader Uploader, defaultPath string, t
}
reqW.CloseWithError(err)
}()
-
- res, err := http.DefaultClient.Do(req)
+ tn = time.Now()
+ res, err := transport.RoundTrip(req)
if err != nil {
return "", err
}
@@ -728,3 +741,57 @@ func (c *Client) GetFeedRequest(query *feed.Query, manifestAddressOrDomain strin
}
return &metadata, nil
}
+
+func GetClientTrace(traceMsg, metricPrefix, ruid string, tn *time.Time) *httptrace.ClientTrace {
+ trace := &httptrace.ClientTrace{
+ GetConn: func(_ string) {
+ log.Trace(traceMsg+" - http get", "event", "GetConn", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".getconn", nil).Update(time.Since(*tn))
+ },
+ GotConn: func(_ httptrace.GotConnInfo) {
+ log.Trace(traceMsg+" - http get", "event", "GotConn", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".gotconn", nil).Update(time.Since(*tn))
+ },
+ PutIdleConn: func(err error) {
+ log.Trace(traceMsg+" - http get", "event", "PutIdleConn", "ruid", ruid, "err", err)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".putidle", nil).Update(time.Since(*tn))
+ },
+ GotFirstResponseByte: func() {
+ log.Trace(traceMsg+" - http get", "event", "GotFirstResponseByte", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".firstbyte", nil).Update(time.Since(*tn))
+ },
+ Got100Continue: func() {
+ log.Trace(traceMsg, "event", "Got100Continue", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".got100continue", nil).Update(time.Since(*tn))
+ },
+ DNSStart: func(_ httptrace.DNSStartInfo) {
+ log.Trace(traceMsg, "event", "DNSStart", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".dnsstart", nil).Update(time.Since(*tn))
+ },
+ DNSDone: func(_ httptrace.DNSDoneInfo) {
+ log.Trace(traceMsg, "event", "DNSDone", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".dnsdone", nil).Update(time.Since(*tn))
+ },
+ ConnectStart: func(network, addr string) {
+ log.Trace(traceMsg, "event", "ConnectStart", "ruid", ruid, "network", network, "addr", addr)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".connectstart", nil).Update(time.Since(*tn))
+ },
+ ConnectDone: func(network, addr string, err error) {
+ log.Trace(traceMsg, "event", "ConnectDone", "ruid", ruid, "network", network, "addr", addr, "err", err)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".connectdone", nil).Update(time.Since(*tn))
+ },
+ WroteHeaders: func() {
+ log.Trace(traceMsg, "event", "WroteHeaders(request)", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".wroteheaders", nil).Update(time.Since(*tn))
+ },
+ Wait100Continue: func() {
+ log.Trace(traceMsg, "event", "Wait100Continue", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".wait100continue", nil).Update(time.Since(*tn))
+ },
+ WroteRequest: func(_ httptrace.WroteRequestInfo) {
+ log.Trace(traceMsg, "event", "WroteRequest", "ruid", ruid)
+ metrics.GetOrRegisterResettingTimer(metricPrefix+".wroterequest", nil).Update(time.Since(*tn))
+ },
+ }
+ return trace
+}
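
GetClientTrace returns an httptrace.ClientTrace whose hooks log each connection phase and update resetting timers named metricPrefix plus a per-phase suffix (".getconn", ".dnsstart", and so on). A hedged usage sketch for an arbitrary outbound request; the gateway URL and metric prefix are illustrative:

    package main

    import (
    	"net/http"
    	"net/http/httptrace"
    	"time"

    	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
    	"github.com/pborman/uuid"
    )

    func main() {
    	// The hooks read tn through the pointer, so set it immediately before
    	// the round trip to measure elapsed time from the start of the request.
    	var tn time.Time
    	trace := swarm.GetClientTrace("example get", "api.client.exampleget", uuid.New()[:8], &tn)

    	req, err := http.NewRequest(http.MethodGet, "http://localhost:8500/", nil)
    	if err != nil {
    		panic(err)
    	}
    	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

    	tn = time.Now()
    	res, err := http.DefaultTransport.RoundTrip(req)
    	if err != nil {
    		panic(err)
    	}
    	res.Body.Close()
    }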
diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go
index 76b349397..39f6e4797 100644
--- a/swarm/api/client/client_test.go
+++ b/swarm/api/client/client_test.go
@@ -25,13 +25,13 @@ import (
"sort"
"testing"
+ "github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/swarm/api"
swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
- "github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
)
@@ -368,58 +368,99 @@ func newTestSigner() (*feed.GenericSigner, error) {
return feed.NewGenericSigner(privKey), nil
}
-// test the transparent resolving of multihash feed updates with bzz:// scheme
+// Test the transparent resolving of feed updates with bzz:// scheme
//
-// first upload data, and store the multihash to the resulting manifest in a feed update
-// retrieving the update with the multihash should return the manifest pointing directly to the data
+// First upload data to bzz:, and store the Swarm hash of the resulting manifest in a feed update.
+// This effectively uses a feed to store a pointer to content rather than the content itself.
+// Retrieving the update with the Swarm hash should return the manifest pointing directly to the data
// and raw retrieve of that hash should return the data
-func TestClientCreateFeedMultihash(t *testing.T) {
+func TestClientBzzWithFeed(t *testing.T) {
signer, _ := newTestSigner()
+ // Initialize a Swarm test server
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
- client := NewClient(srv.URL)
+ swarmClient := NewClient(srv.URL)
defer srv.Close()
- // add the data our multihash aliased manifest will point to
- databytes := []byte("bar")
+ // put together some data for our test:
+ dataBytes := []byte(`
+ //
+ // Create some data our manifest will point to. Data that could be very big and wouldn't fit in a feed update.
+ // So what we are going to do is upload it to Swarm bzz:// and obtain a **manifest hash** pointing to it:
+ //
+ // MANIFEST HASH --> DATA
+ //
+ // Then, we store that **manifest hash** into a Swarm Feed update. Once we have done this,
+ // we can use the **feed manifest hash** in bzz:// instead, this way: bzz://feed-manifest-hash.
+ //
+ // FEED MANIFEST HASH --> MANIFEST HASH --> DATA
+ //
+ // Given that we can update the feed at any time with a new **manifest hash** but the **feed manifest hash**
+ // stays constant, we have effectively created a fixed address to changing content. (Applause)
+ //
+ // FEED MANIFEST HASH (the same) --> MANIFEST HASH(2) --> DATA(2)
+ //
+ `)
- swarmHash, err := client.UploadRaw(bytes.NewReader(databytes), int64(len(databytes)), false)
- if err != nil {
- t.Fatalf("Error uploading raw test data: %s", err)
+ // Create a virtual File out of memory containing the above data
+ f := &File{
+ ReadCloser: ioutil.NopCloser(bytes.NewReader(dataBytes)),
+ ManifestEntry: api.ManifestEntry{
+ ContentType: "text/plain",
+ Mode: 0660,
+ Size: int64(len(dataBytes)),
+ },
}
- s := common.FromHex(swarmHash)
- mh := multihash.ToMultihash(s)
+ // upload data to bzz:// and retrieve the content-addressed manifest hash, hex-encoded.
+ manifestAddressHex, err := swarmClient.Upload(f, "", false)
+ if err != nil {
+ t.Fatalf("Error creating manifest: %s", err)
+ }
- // our feed topic
- topic, _ := feed.NewTopic("foo.eth", nil)
+ // convert the hex-encoded manifest hash to a 32-byte slice
+ manifestAddress := common.FromHex(manifestAddressHex)
- createRequest := feed.NewFirstRequest(topic)
+ if len(manifestAddress) != storage.AddressLength {
+ t.Fatalf("Something went wrong. Got a hash of an unexpected length. Expected %d bytes. Got %d", storage.AddressLength, len(manifestAddress))
+ }
- createRequest.SetData(mh)
- if err := createRequest.Sign(signer); err != nil {
+ // Now create a **feed manifest**. For that, we need a topic:
+ topic, _ := feed.NewTopic("interesting topic indeed", nil)
+
+ // Build a feed request to update data
+ request := feed.NewFirstRequest(topic)
+
+ // Put the 32-byte address of the manifest into the feed update
+ request.SetData(manifestAddress)
+
+ // Sign the update
+ if err := request.Sign(signer); err != nil {
t.Fatalf("Error signing update: %s", err)
}
- feedManifestHash, err := client.CreateFeedWithManifest(createRequest)
-
+ // Publish the update and at the same time request a **feed manifest** to be created
+ feedManifestAddressHex, err := swarmClient.CreateFeedWithManifest(request)
if err != nil {
t.Fatalf("Error creating feed manifest: %s", err)
}
- correctManifestAddrHex := "bb056a5264c295c2b0f613c8409b9c87ce9d71576ace02458160df4cc894210b"
- if feedManifestHash != correctManifestAddrHex {
- t.Fatalf("Response feed manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, feedManifestHash)
+ // Check we have received the exact **feed manifest** to be expected
+ // given the topic and user signing the updates:
+ correctFeedManifestAddrHex := "747c402e5b9dc715a25a4393147512167bab018a007fad7cdcd9adc7fce1ced2"
+ if feedManifestAddressHex != correctFeedManifestAddrHex {
+ t.Fatalf("Response feed manifest mismatch, expected '%s', got '%s'", correctFeedManifestAddrHex, feedManifestAddressHex)
}
// Check we get a not found error when trying to get feed updates with a made-up manifest
- _, err = client.QueryFeed(nil, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
+ _, err = swarmClient.QueryFeed(nil, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
if err != ErrNoFeedUpdatesFound {
t.Fatalf("Expected to receive ErrNoFeedUpdatesFound error. Got: %s", err)
}
- reader, err := client.QueryFeed(nil, correctManifestAddrHex)
+ // If we query the feed directly we should get **manifest hash** back:
+ reader, err := swarmClient.QueryFeed(nil, correctFeedManifestAddrHex)
if err != nil {
t.Fatalf("Error retrieving feed updates: %s", err)
}
@@ -428,10 +469,27 @@ func TestClientCreateFeedMultihash(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if !bytes.Equal(mh, gotData) {
- t.Fatalf("Expected: %v, got %v", mh, gotData)
+
+ //Check that indeed the **manifest hash** is retrieved
+ if !bytes.Equal(manifestAddress, gotData) {
+ t.Fatalf("Expected: %v, got %v", manifestAddress, gotData)
}
+ // Now the final test we were looking for: Use bzz:// and that should resolve all manifests
+ // and return the original data directly:
+ f, err = swarmClient.Download(feedManifestAddressHex, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ gotData, err = ioutil.ReadAll(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Check that we get back the original data:
+ if !bytes.Equal(dataBytes, gotData) {
+ t.Fatalf("Expected: %v, got %v", manifestAddress, gotData)
+ }
}
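
The "(Applause)" point generalizes beyond this test: once the feed manifest exists, repointing the feed at new content is just another signed update, and the feed manifest address never changes. A hedged helper sketch; repointFeed is hypothetical and assumes the client's GetFeedRequest/UpdateFeed pair, which fetch the current feed metadata and publish the next update:

    package feedexample

    import (
    	swarmclient "github.com/ethereum/go-ethereum/swarm/api/client"
    	"github.com/ethereum/go-ethereum/swarm/storage/feed"
    )

    // repointFeed makes the feed behind feedManifestHex resolve to a new
    // manifest address. The feed manifest address itself stays constant.
    func repointFeed(c *swarmclient.Client, signer feed.Signer, feedManifestHex string, newManifestAddress []byte) error {
    	request, err := c.GetFeedRequest(nil, feedManifestHex)
    	if err != nil {
    		return err
    	}
    	request.SetData(newManifestAddress)
    	if err := request.Sign(signer); err != nil {
    		return err
    	}
    	return c.UpdateFeed(request)
    }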
// TestClientCreateUpdateFeed will check that feeds can be created and updated via the HTTP client.
diff --git a/swarm/api/encrypt.go b/swarm/api/encrypt.go
index ffe6c16d2..0d516b3d5 100644
--- a/swarm/api/encrypt.go
+++ b/swarm/api/encrypt.go
@@ -20,8 +20,8 @@ import (
"encoding/binary"
"errors"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/swarm/storage/encryption"
+ "golang.org/x/crypto/sha3"
)
type RefEncryption struct {
@@ -39,12 +39,12 @@ func NewRefEncryption(refSize int) *RefEncryption {
}
func (re *RefEncryption) Encrypt(ref []byte, key []byte) ([]byte, error) {
- spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewKeccak256)
+ spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewLegacyKeccak256)
encryptedSpan, err := spanEncryption.Encrypt(re.span)
if err != nil {
return nil, err
}
- dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewKeccak256)
+ dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewLegacyKeccak256)
encryptedData, err := dataEncryption.Encrypt(ref)
if err != nil {
return nil, err
@@ -57,7 +57,7 @@ func (re *RefEncryption) Encrypt(ref []byte, key []byte) ([]byte, error) {
}
func (re *RefEncryption) Decrypt(ref []byte, key []byte) ([]byte, error) {
- spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewKeccak256)
+ spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewLegacyKeccak256)
decryptedSpan, err := spanEncryption.Decrypt(ref[:8])
if err != nil {
return nil, err
@@ -68,7 +68,7 @@ func (re *RefEncryption) Decrypt(ref []byte, key []byte) ([]byte, error) {
return nil, errors.New("invalid span in encrypted reference")
}
- dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewKeccak256)
+ dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewLegacyKeccak256)
decryptedRef, err := dataEncryption.Decrypt(ref[8:])
if err != nil {
return nil, err
diff --git a/swarm/api/http/middleware.go b/swarm/api/http/middleware.go
index f5f70138b..320da3046 100644
--- a/swarm/api/http/middleware.go
+++ b/swarm/api/http/middleware.go
@@ -5,6 +5,7 @@ import (
"net/http"
"runtime/debug"
"strings"
+ "time"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api"
@@ -73,9 +74,15 @@ func ParseURI(h http.Handler) http.Handler {
func InitLoggingResponseWriter(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ tn := time.Now()
+
writer := newLoggingResponseWriter(w)
h.ServeHTTP(writer, r)
- log.Info("request served", "ruid", GetRUID(r.Context()), "code", writer.statusCode)
+
+ ts := time.Since(tn)
+ log.Info("request served", "ruid", GetRUID(r.Context()), "code", writer.statusCode, "time", ts)
+ metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.time", r.Method), nil).Update(ts)
+ metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.%d.time", r.Method, writer.statusCode), nil).Update(ts)
})
}
@@ -88,6 +95,7 @@ func InstrumentOpenTracing(h http.Handler) http.Handler {
}
spanName := fmt.Sprintf("http.%s.%s", r.Method, uri.Scheme)
ctx, sp := spancontext.StartSpan(r.Context(), spanName)
+
defer sp.Finish()
h.ServeHTTP(w, r.WithContext(ctx))
})
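
Each served request now feeds two resetting timers, one keyed by method and one by method plus status code. A small illustration of the names that come out (metricNames is a hypothetical helper, not part of the patch):

    package main

    import (
    	"fmt"
    	"net/http"
    )

    // metricNames mirrors the fmt.Sprintf patterns used by the middleware above.
    func metricNames(method string, statusCode int) []string {
    	return []string{
    		fmt.Sprintf("http.request.%s.time", method),
    		fmt.Sprintf("http.request.%s.%d.time", method, statusCode),
    	}
    }

    func main() {
    	// Prints [http.request.GET.time http.request.GET.200.time]
    	fmt.Println(metricNames(http.MethodGet, http.StatusOK))
    }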
diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go
index 1ef3deece..e82762ce0 100644
--- a/swarm/api/http/server_test.go
+++ b/swarm/api/http/server_test.go
@@ -45,7 +45,6 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
- "github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
"github.com/ethereum/go-ethereum/swarm/testutil"
@@ -69,60 +68,91 @@ func newTestSigner() (*feed.GenericSigner, error) {
return feed.NewGenericSigner(privKey), nil
}
-// test the transparent resolving of multihash-containing feed updates with bzz:// scheme
+// Test the transparent resolving of feed updates with bzz:// scheme
//
-// first upload data, and store the multihash to the resulting manifest in a feed update
-// retrieving the update with the multihash should return the manifest pointing directly to the data
+// First upload data to bzz:, and store the Swarm hash of the resulting manifest in a feed update.
+// This effectively uses a feed to store a pointer to content rather than the content itself.
+// Retrieving the update with the Swarm hash should return the manifest pointing directly to the data
// and raw retrieve of that hash should return the data
-func TestBzzFeedMultihash(t *testing.T) {
+func TestBzzWithFeed(t *testing.T) {
signer, _ := newTestSigner()
+ // Initialize Swarm test server
srv := NewTestSwarmServer(t, serverFunc, nil)
defer srv.Close()
- // add the data our multihash aliased manifest will point to
- databytes := "bar"
- testBzzUrl := fmt.Sprintf("%s/bzz:/", srv.URL)
- resp, err := http.Post(testBzzUrl, "text/plain", bytes.NewReader([]byte(databytes)))
+ // put together some data for our test:
+ dataBytes := []byte(`
+ //
+ // Create some data our manifest will point to. Data that could be very big and wouldn't fit in a feed update.
+ // So what we are going to do is upload it to Swarm bzz:// and obtain a **manifest hash** pointing to it:
+ //
+ // MANIFEST HASH --> DATA
+ //
+ // Then, we store that **manifest hash** into a Swarm Feed update. Once we have done this,
+ // we can use the **feed manifest hash** in bzz:// instead, this way: bzz://feed-manifest-hash.
+ //
+ // FEED MANIFEST HASH --> MANIFEST HASH --> DATA
+ //
+ // Given that we can update the feed at any time with a new **manifest hash** but the **feed manifest hash**
+ // stays constant, we have effectively created a fixed address to changing content. (Applause)
+ //
+ // FEED MANIFEST HASH (the same) --> MANIFEST HASH(2) --> DATA(2) ...
+ //
+ `)
+
+ // POST data to bzz and get back a content-addressed **manifest hash** pointing to it.
+ resp, err := http.Post(fmt.Sprintf("%s/bzz:/", srv.URL), "text/plain", bytes.NewReader([]byte(dataBytes)))
if err != nil {
t.Fatal(err)
}
+
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp.Status)
}
- b, err := ioutil.ReadAll(resp.Body)
+ manifestAddressHex, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
- s := common.FromHex(string(b))
- mh := multihash.ToMultihash(s)
- log.Info("added data", "manifest", string(b), "data", common.ToHex(mh))
+ manifestAddress := common.FromHex(string(manifestAddressHex))
- topic, _ := feed.NewTopic("foo.eth", nil)
+ log.Info("added data", "manifest", string(manifestAddressHex))
+
+ // At this point we have uploaded the data and have a manifest pointing to it
+ // Now store that manifest address in a feed update.
+ // We also want a feed manifest, so we can use it to refer to the feed.
+
+ // First, create a topic for our feed:
+ topic, _ := feed.NewTopic("interesting topic indeed", nil)
+
+ // Create a feed update request:
updateRequest := feed.NewFirstRequest(topic)
- updateRequest.SetData(mh)
+ // Store the **manifest address** as data into the feed update.
+ updateRequest.SetData(manifestAddress)
+ // Sign the update
if err := updateRequest.Sign(signer); err != nil {
t.Fatal(err)
}
- log.Info("added data", "manifest", string(b), "data", common.ToHex(mh))
+ log.Info("added data", "data", common.ToHex(manifestAddress))
- testUrl, err := url.Parse(fmt.Sprintf("%s/bzz-feed:/", srv.URL))
+ // Build the feed update http request:
+ feedUpdateURL, err := url.Parse(fmt.Sprintf("%s/bzz-feed:/", srv.URL))
if err != nil {
t.Fatal(err)
}
- query := testUrl.Query()
+ query := feedUpdateURL.Query()
body := updateRequest.AppendValues(query) // this adds all query parameters and returns the data to be posted
- query.Set("manifest", "1") // indicate we want a manifest back
- testUrl.RawQuery = query.Encode()
+ query.Set("manifest", "1") // indicate we want a feed manifest back
+ feedUpdateURL.RawQuery = query.Encode()
- // create the multihash update
- resp, err = http.Post(testUrl.String(), "application/octet-stream", bytes.NewReader(body))
+ // submit the feed update request to Swarm
+ resp, err = http.Post(feedUpdateURL.String(), "application/octet-stream", bytes.NewReader(body))
if err != nil {
t.Fatal(err)
}
@@ -130,24 +160,25 @@ func TestBzzFeedMultihash(t *testing.T) {
if resp.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp.Status)
}
- b, err = ioutil.ReadAll(resp.Body)
+
+ feedManifestAddressHex, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
- rsrcResp := &storage.Address{}
- err = json.Unmarshal(b, rsrcResp)
+ feedManifestAddress := &storage.Address{}
+ err = json.Unmarshal(feedManifestAddressHex, feedManifestAddress)
if err != nil {
- t.Fatalf("data %s could not be unmarshaled: %v", b, err)
+ t.Fatalf("data %s could not be unmarshaled: %v", feedManifestAddressHex, err)
}
- correctManifestAddrHex := "bb056a5264c295c2b0f613c8409b9c87ce9d71576ace02458160df4cc894210b"
- if rsrcResp.Hex() != correctManifestAddrHex {
- t.Fatalf("Response feed manifest address mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex())
+ correctManifestAddrHex := "747c402e5b9dc715a25a4393147512167bab018a007fad7cdcd9adc7fce1ced2"
+ if feedManifestAddress.Hex() != correctManifestAddrHex {
+ t.Fatalf("Response feed manifest address mismatch, expected '%s', got '%s'", correctManifestAddrHex, feedManifestAddress.Hex())
}
// get bzz manifest transparent feed update resolve
- testBzzUrl = fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp)
- resp, err = http.Get(testBzzUrl)
+ getBzzURL := fmt.Sprintf("%s/bzz:/%s", srv.URL, feedManifestAddress)
+ resp, err = http.Get(getBzzURL)
if err != nil {
t.Fatal(err)
}
@@ -155,12 +186,12 @@ func TestBzzFeedMultihash(t *testing.T) {
if resp.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp.Status)
}
- b, err = ioutil.ReadAll(resp.Body)
+ retrievedData, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
- if !bytes.Equal(b, []byte(databytes)) {
- t.Fatalf("retrieved data mismatch, expected %x, got %x", databytes, b)
+ if !bytes.Equal(retrievedData, []byte(dataBytes)) {
+ t.Fatalf("retrieved data mismatch, expected %x, got %x", dataBytes, retrievedData)
}
}
@@ -245,7 +276,8 @@ func TestBzzFeed(t *testing.T) {
t.Fatalf("Expected manifest Feed '%s', got '%s'", correctFeedHex, manifest.Entries[0].Feed.Hex())
}
- // get bzz manifest transparent feed update resolve
+ // exercise the error path: have bzz:// resolve a feed update that does not
+ // contain a swarm hash:
testBzzUrl := fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp)
resp, err = http.Get(testBzzUrl)
if err != nil {
@@ -253,7 +285,7 @@ func TestBzzFeed(t *testing.T) {
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
- t.Fatal("Expected error status since feed update does not contain multihash. Received 200 OK")
+ t.Fatal("Expected error status since feed update does not contain a Swarm hash. Received 200 OK")
}
_, err = ioutil.ReadAll(resp.Body)
if err != nil {
diff --git a/swarm/api/storage.go b/swarm/api/storage.go
index 8a48fe5bc..254375b77 100644
--- a/swarm/api/storage.go
+++ b/swarm/api/storage.go
@@ -83,23 +83,3 @@ func (s *Storage) Get(ctx context.Context, bzzpath string) (*Response, error) {
}
return &Response{mimeType, status, expsize, string(body[:size])}, err
}
-
-// Modify(rootHash, basePath, contentHash, contentType) takes th e manifest trie rooted in rootHash,
-// and merge on to it. creating an entry w conentType (mime)
-//
-// DEPRECATED: Use the HTTP API instead
-func (s *Storage) Modify(ctx context.Context, rootHash, path, contentHash, contentType string) (newRootHash string, err error) {
- uri, err := Parse("bzz:/" + rootHash)
- if err != nil {
- return "", err
- }
- addr, err := s.api.Resolve(ctx, uri.Addr)
- if err != nil {
- return "", err
- }
- addr, err = s.api.Modify(ctx, addr, path, contentHash, contentType)
- if err != nil {
- return "", err
- }
- return addr.Hex(), nil
-}
diff --git a/swarm/api/testapi.go b/swarm/api/testapi.go
index 4c7d0982b..6fec55f55 100644
--- a/swarm/api/testapi.go
+++ b/swarm/api/testapi.go
@@ -29,18 +29,6 @@ func NewControl(api *API, hive *network.Hive) *Control {
return &Control{api, hive}
}
-//func (self *Control) BlockNetworkRead(on bool) {
-// self.hive.BlockNetworkRead(on)
-//}
-//
-//func (self *Control) SyncEnabled(on bool) {
-// self.hive.SyncEnabled(on)
-//}
-//
-//func (self *Control) SwapEnabled(on bool) {
-// self.hive.SwapEnabled(on)
-//}
-//
func (c *Control) Hive() string {
return c.hive.String()
}
diff --git a/swarm/api/uri_test.go b/swarm/api/uri_test.go
index ea649e273..a03874c43 100644
--- a/swarm/api/uri_test.go
+++ b/swarm/api/uri_test.go
@@ -26,17 +26,15 @@ import (
func TestParseURI(t *testing.T) {
type test struct {
- uri string
- expectURI *URI
- expectErr bool
- expectRaw bool
- expectImmutable bool
- expectList bool
- expectHash bool
- expectDeprecatedRaw bool
- expectDeprecatedImmutable bool
- expectValidKey bool
- expectAddr storage.Address
+ uri string
+ expectURI *URI
+ expectErr bool
+ expectRaw bool
+ expectImmutable bool
+ expectList bool
+ expectHash bool
+ expectValidKey bool
+ expectAddr storage.Address
}
tests := []test{
{
diff --git a/swarm/bmt/bmt.go b/swarm/bmt/bmt.go
index a85d4369e..18eab5a2b 100644
--- a/swarm/bmt/bmt.go
+++ b/swarm/bmt/bmt.go
@@ -61,7 +61,7 @@ const (
)
// BaseHasherFunc is a hash.Hash constructor function used for the base hash of the BMT.
-// implemented by Keccak256 SHA3 sha3.NewKeccak256
+// implemented by Keccak256 SHA3 sha3.NewLegacyKeccak256
type BaseHasherFunc func() hash.Hash
// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
diff --git a/swarm/bmt/bmt_test.go b/swarm/bmt/bmt_test.go
index 683ba4f5b..ab712d08c 100644
--- a/swarm/bmt/bmt_test.go
+++ b/swarm/bmt/bmt_test.go
@@ -26,8 +26,8 @@ import (
"testing"
"time"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/swarm/testutil"
+ "golang.org/x/crypto/sha3"
)
// the actual data length generated (could be longer than max datalength of the BMT)
@@ -44,7 +44,7 @@ var counts = []int{1, 2, 3, 4, 5, 8, 9, 15, 16, 17, 32, 37, 42, 53, 63, 64, 65,
// calculates the Keccak256 SHA3 hash of the data
func sha3hash(data ...[]byte) []byte {
- h := sha3.NewKeccak256()
+ h := sha3.NewLegacyKeccak256()
return doSum(h, nil, data...)
}
@@ -121,7 +121,7 @@ func TestRefHasher(t *testing.T) {
t.Run(fmt.Sprintf("%d_segments_%d_bytes", segmentCount, length), func(t *testing.T) {
data := testutil.RandomBytes(i, length)
expected := x.expected(data)
- actual := NewRefHasher(sha3.NewKeccak256, segmentCount).Hash(data)
+ actual := NewRefHasher(sha3.NewLegacyKeccak256, segmentCount).Hash(data)
if !bytes.Equal(actual, expected) {
t.Fatalf("expected %x, got %x", expected, actual)
}
@@ -133,7 +133,7 @@ func TestRefHasher(t *testing.T) {
// tests if hasher responds with correct hash comparing the reference implementation return value
func TestHasherEmptyData(t *testing.T) {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
var data []byte
for _, count := range counts {
t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) {
@@ -153,7 +153,7 @@ func TestHasherEmptyData(t *testing.T) {
// tests sequential write with entire max size written in one go
func TestSyncHasherCorrectness(t *testing.T) {
data := testutil.RandomBytes(1, BufferSize)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
size := hasher().Size()
var err error
@@ -179,7 +179,7 @@ func TestSyncHasherCorrectness(t *testing.T) {
// tests order-neutral concurrent writes with entire max size written in one go
func TestAsyncCorrectness(t *testing.T) {
data := testutil.RandomBytes(1, BufferSize)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
size := hasher().Size()
whs := []whenHash{first, last, random}
@@ -226,7 +226,7 @@ func TestHasherReuse(t *testing.T) {
// tests if bmt reuse is not corrupting result
func testHasherReuse(poolsize int, t *testing.T) {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
pool := NewTreePool(hasher, segmentCount, poolsize)
defer pool.Drain(0)
bmt := New(pool)
@@ -243,7 +243,7 @@ func testHasherReuse(poolsize int, t *testing.T) {
// Tests if pool can be cleanly reused even in concurrent use by several hasher
func TestBMTConcurrentUse(t *testing.T) {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
pool := NewTreePool(hasher, segmentCount, PoolSize)
defer pool.Drain(0)
cycles := 100
@@ -277,7 +277,7 @@ LOOP:
// Tests BMT Hasher io.Writer interface is working correctly
// even multiple short random write buffers
func TestBMTWriterBuffers(t *testing.T) {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
for _, count := range counts {
t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) {
@@ -410,7 +410,7 @@ func BenchmarkPool(t *testing.B) {
// benchmarks simple sha3 hash on chunks
func benchmarkSHA3(t *testing.B, n int) {
data := testutil.RandomBytes(1, n)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
h := hasher()
t.ReportAllocs()
@@ -426,7 +426,7 @@ func benchmarkSHA3(t *testing.B, n int) {
// the premise is that this is the minimum computation needed for a BMT
// therefore this serves as a theoretical optimum for concurrent implementations
func benchmarkBMTBaseline(t *testing.B, n int) {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
hashSize := hasher().Size()
data := testutil.RandomBytes(1, hashSize)
@@ -453,7 +453,7 @@ func benchmarkBMTBaseline(t *testing.B, n int) {
// benchmarks BMT Hasher
func benchmarkBMT(t *testing.B, n int) {
data := testutil.RandomBytes(1, n)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
pool := NewTreePool(hasher, segmentCount, PoolSize)
bmt := New(pool)
@@ -467,7 +467,7 @@ func benchmarkBMT(t *testing.B, n int) {
// benchmarks BMT hasher with asynchronous concurrent segment/section writes
func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) {
data := testutil.RandomBytes(1, n)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
pool := NewTreePool(hasher, segmentCount, PoolSize)
bmt := New(pool).NewAsyncWriter(double)
idxs, segments := splitAndShuffle(bmt.SectionSize(), data)
@@ -485,7 +485,7 @@ func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) {
// benchmarks 100 concurrent bmt hashes with pool capacity
func benchmarkPool(t *testing.B, poolsize, n int) {
data := testutil.RandomBytes(1, n)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
pool := NewTreePool(hasher, segmentCount, poolsize)
cycles := 100
@@ -508,7 +508,7 @@ func benchmarkPool(t *testing.B, poolsize, n int) {
// benchmarks the reference hasher
func benchmarkRefHasher(t *testing.B, n int) {
data := testutil.RandomBytes(1, n)
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
rbmt := NewRefHasher(hasher, 128)
t.ReportAllocs()
diff --git a/swarm/docker/Dockerfile b/swarm/docker/Dockerfile
new file mode 100644
index 000000000..1ee4e9734
--- /dev/null
+++ b/swarm/docker/Dockerfile
@@ -0,0 +1,23 @@
+FROM golang:1.11-alpine as builder
+
+ARG VERSION
+
+RUN apk add --update git gcc g++ linux-headers
+RUN mkdir -p $GOPATH/src/github.com/ethereum && \
+ cd $GOPATH/src/github.com/ethereum && \
+ git clone https://github.com/ethersphere/go-ethereum && \
+ cd $GOPATH/src/github.com/ethereum/go-ethereum && \
+ git checkout ${VERSION} && \
+ go install -ldflags "-X main.gitCommit=${VERSION}" ./cmd/swarm && \
+ go install -ldflags "-X main.gitCommit=${VERSION}" ./cmd/swarm/swarm-smoke && \
+ go install -ldflags "-X main.gitCommit=${VERSION}" ./cmd/geth && \
+ cp $GOPATH/bin/swarm /swarm && cp $GOPATH/bin/geth /geth && cp $GOPATH/bin/swarm-smoke /swarm-smoke
+
+
+# Release image with the required binaries and scripts
+FROM alpine:3.8
+WORKDIR /
+COPY --from=builder /swarm /geth /swarm-smoke /
+ADD run.sh /run.sh
+ADD run-smoke.sh /run-smoke.sh
+ENTRYPOINT ["/run.sh"]
diff --git a/swarm/docker/run-smoke.sh b/swarm/docker/run-smoke.sh
new file mode 100755
index 000000000..ba57a7ecd
--- /dev/null
+++ b/swarm/docker/run-smoke.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+set -o errexit
+set -o pipefail
+set -o nounset
+
+/swarm-smoke "$@" 2>&1 || true
diff --git a/swarm/docker/run.sh b/swarm/docker/run.sh
new file mode 100755
index 000000000..3e613b56d
--- /dev/null
+++ b/swarm/docker/run.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+set -o errexit
+set -o pipefail
+set -o nounset
+
+PASSWORD=${PASSWORD:-}
+DATADIR=${DATADIR:-/root/.ethereum/}
+
+if [ "$PASSWORD" == "" ]; then echo "Password must be set, in order to use swarm non-interactively." && exit 1; fi
+
+echo $PASSWORD > /password
+
+KEYFILE=`find $DATADIR | grep UTC | head -n 1` || true
+if [ ! -f "$KEYFILE" ]; then echo "No keyfile found. Generating..." && /geth --datadir $DATADIR --password /password account new; fi
+KEYFILE=`find $DATADIR | grep UTC | head -n 1` || true
+if [ ! -f "$KEYFILE" ]; then echo "Could not find nor generate a BZZ keyfile." && exit 1; else echo "Found keyfile $KEYFILE"; fi
+
+VERSION=`/swarm version`
+echo "Running Swarm:"
+echo $VERSION
+
+export BZZACCOUNT="`echo -n $KEYFILE | tail -c 40`" || true
+if [ "$BZZACCOUNT" == "" ]; then echo "Could not parse BZZACCOUNT from keyfile." && exit 1; fi
+
+exec /swarm --bzzaccount="$BZZACCOUNT" --password /password --datadir "$DATADIR" "$@" 2>&1
diff --git a/swarm/grafana_dashboards/ldbstore.json b/swarm/grafana_dashboards/ldbstore.json
deleted file mode 100644
index 2d64380ba..000000000
--- a/swarm/grafana_dashboards/ldbstore.json
+++ /dev/null
@@ -1,2278 +0,0 @@
-{
- "annotations": {
- "list": [
- {
- "$$hashKey": "object:325",
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "type": "dashboard"
- }
- ]
- },
- "editable": true,
- "gnetId": null,
- "graphTooltip": 1,
- "id": 5,
- "iteration": 1527598894689,
- "links": [],
- "panels": [
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 0
- },
- "id": 40,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 1
- },
- "id": 42,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.get.cachehit.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore get cachehit",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 1
- },
- "id": 43,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.get.cachemiss.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore get cachemiss",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 7
- },
- "id": 44,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.getorcreaterequest.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Total LocalStore.GetOrCreateRequest",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 7
- },
- "id": 47,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.getorcreaterequest.errfetching.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore GetOrCreateRequest ErrFetching",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 13
- },
- "id": 45,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.getorcreaterequest.hit.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore.GetOrCreateRequest hit",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 13
- },
- "id": 49,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.getorcreaterequest.miss.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore GetOrCreateRequest miss",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 19
- },
- "id": 48,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.get.error.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore get error",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 19
- },
- "id": 46,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.get.errfetching.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore get ErrFetching",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "title": "LocalStore",
- "type": "row"
- },
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 1
- },
- "id": 27,
- "panels": [],
- "title": "LDBStore",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 2
- },
- "id": 29,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.get.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore get",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 2
- },
- "id": 30,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.put.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore put",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 8
- },
- "id": 31,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.synciterator.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore SyncIterator",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 8
- },
- "id": 32,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.synciterator.seek.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore SyncIterator Seek/Next",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 14
- },
- "id": 50,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.collectgarbage.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore Collect Garbage",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 14
- },
- "id": 51,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.collectgarbage.delete.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore Collect Garbage - Actual Deletes",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 20
- },
- "id": 34,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 39
- },
- "id": 36,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbdatabase.get.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBDatabase get",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 39
- },
- "id": 37,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbdatabase.write.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBDatabase write",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 45
- },
- "id": 38,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbdatabase.newiterator.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBDatabase NewIterator",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "title": "LDBDatabase",
- "type": "row"
- }
- ],
- "refresh": "10s",
- "schemaVersion": 16,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": [
- {
- "auto": false,
- "auto_count": 30,
- "auto_min": "10s",
- "current": {
- "text": "10s",
- "value": "10s"
- },
- "hide": 0,
- "label": "resolution",
- "name": "myinterval",
- "options": [
- {
- "selected": false,
- "text": "5s",
- "value": "5s"
- },
- {
- "selected": true,
- "text": "10s",
- "value": "10s"
- },
- {
- "selected": false,
- "text": "30s",
- "value": "30s"
- },
- {
- "selected": false,
- "text": "100s",
- "value": "100s"
- }
- ],
- "query": "5s,10s,30s,100s",
- "refresh": 2,
- "type": "interval"
- },
- {
- "allValue": null,
- "current": {
- "text": "swarm_30399 + swarm_30400 + swarm_30401",
- "value": [
- "swarm_30399",
- "swarm_30400",
- "swarm_30401"
- ]
- },
- "datasource": "metrics",
- "hide": 0,
- "includeAll": true,
- "label": null,
- "multi": true,
- "name": "host",
- "options": [],
- "query": "SHOW TAG VALUES WITH KEY = \"host\"",
- "refresh": 1,
- "regex": "",
- "sort": 1,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "swarm.http.request.GET.time.span",
- "type": "query",
- "useTags": false
- }
- ]
- },
- "time": {
- "from": "now-15m",
- "to": "now"
- },
- "timepicker": {
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ],
- "time_options": [
- "5m",
- "15m",
- "1h",
- "6h",
- "12h",
- "24h",
- "2d",
- "7d",
- "30d"
- ]
- },
- "timezone": "",
- "title": "LDBStore and LDBDatabase",
- "uid": "zS6beG7iz",
- "version": 28
-}
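(For context on the config deleted above: each panel maps one swarm metrics measurement, e.g. `swarm.ldbstore.get.count`, to an InfluxDB query summed per `host` tag over `$myinterval`. A minimal sketch of where such a measurement originates, assuming go-ethereum's `metrics` package and an InfluxDB reporter configured with a `swarm.` namespace that appends `.count` to counters — the reporter wiring and the `recordGet` helper are illustrative assumptions, not the exact swarm source:)

    package ldbstore

    import "github.com/ethereum/go-ethereum/metrics"

    // Registered once against the default registry; an InfluxDB reporter
    // with namespace "swarm." would export this counter as the
    // "swarm.ldbstore.get.count" measurement that the "LDBStore get"
    // panel above sums per host.
    var getCounter = metrics.GetOrRegisterCounter("ldbstore.get", nil)

    // recordGet is a hypothetical helper: call it on every store lookup
    // so the dashboard's per-interval sums reflect get traffic.
    func recordGet() {
        getCounter.Inc(1)
    }
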
diff --git a/swarm/grafana_dashboards/swarm.json b/swarm/grafana_dashboards/swarm.json
deleted file mode 100644
index 3ee244d15..000000000
--- a/swarm/grafana_dashboards/swarm.json
+++ /dev/null
@@ -1,3198 +0,0 @@
-{
- "annotations": {
- "list": [
- {
- "$$hashKey": "object:147",
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "type": "dashboard"
- }
- ]
- },
- "editable": true,
- "gnetId": null,
- "graphTooltip": 1,
- "id": 2,
- "iteration": 1527598859072,
- "links": [],
- "panels": [
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 0
- },
- "id": 34,
- "panels": [],
- "title": "P2P",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 1
- },
- "id": 36,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.send.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": []
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "P2P Send() - messages sent",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 1
- },
- "id": 37,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "p95($tag_host)",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.send_t.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p95"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": []
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "P2P Send() timer - 95%ile",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 10
- },
- "id": 38,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "1 $tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.sendpriority.1.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": []
- },
- {
- "alias": "2 $tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.sendpriority.2.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "B",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": []
- },
- {
- "alias": "3 $tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.sendpriority.3.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "C",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": []
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "P2P SendPriority() - messages sent",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 10
- },
- "id": 39,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "1 $tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.sendpriority_t.1.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p95"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": []
- },
- {
- "alias": "2 $tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.sendpriority_t.2.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "B",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p95"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": []
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "P2P SendPriority() timer - 95%ile",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 19
- },
- "id": 40,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.registry.peers.gauge",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "last"
- }
- ]
- ],
- "tags": []
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Registry Peers",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 28
- },
- "id": 32,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 2
- },
- "id": 14,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.stack.uptime.gauge",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Uptime",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "title": "Uptime",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 29
- },
- "id": 28,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 7
- },
- "id": 2,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "GET",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "null"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.http.request.GET.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- },
- {
- "alias": "POST",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "null"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.http.request.POST.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "B",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Total HTTP Requests",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 7
- },
- "id": 26,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.http.request.GET.time.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p95"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "HTTP GET requests 95% timer",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 13
- },
- "id": 15,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.http.request.GET.time.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p50"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "HTTP GET requests 50% timer",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 13
- },
- "id": 8,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "POST",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.http.request.POST.time.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p95"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "HTTP POST requests 95% timer",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "title": "HTTP",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 30
- },
- "id": 30,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 12,
- "x": 0,
- "y": 8
- },
- "id": 16,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.lazychunkreader.read.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LazyChunkReader read() calls",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 12,
- "x": 12,
- "y": 8
- },
- "id": 18,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.lazychunkreader.read.err.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LazyChunkReader read errors",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 12,
- "x": 0,
- "y": 13
- },
- "id": 17,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.lazychunkreader.read.bytes.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LazyChunkReader bytes read",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "decbytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "title": "LazyChunkReader",
- "type": "row"
- },
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 31
- },
- "id": 25,
- "panels": [],
- "title": "All measurements",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 32
- },
- "id": 3,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.api.get.count.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "API Get (BZZ)",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 32
- },
- "id": 13,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.network.stream.request_from_peers.count.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Request from peers",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 38
- },
- "id": 11,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.network.stream.received_chunks.count.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Received chunks",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 38
- },
- "id": 12,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.storage.cache.requests.size.gauge",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "max"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Requests cache entries",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 44
- },
- "id": 9,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.network.stream.handle_retrieve_request_msg.count.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Handle retrieve request msg",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 44
- },
- "id": 20,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.syncer.setnextbatch.iterator.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "syncer setnextbatch iterator calls",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 50
- },
- "id": 21,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.handlewantedhashesmsg.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "peer HandleWantedHashesMsg",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 50
- },
- "id": 22,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.handlesubscribemsg.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "peer HandleSubscribeMsg",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 56
- },
- "id": 23,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.handlewantedhashesmsg.actualget.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "peer HandleWantedHashesMsg actual get",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 56
- },
- "id": 19,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.handleofferedhashes.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "peer OfferedHashesMsg",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "refresh": "30s",
- "schemaVersion": 16,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": [
- {
- "auto": false,
- "auto_count": 30,
- "auto_min": "10s",
- "current": {
- "text": "10s",
- "value": "10s"
- },
- "hide": 0,
- "label": "resolution",
- "name": "myinterval",
- "options": [
- {
- "selected": false,
- "text": "5s",
- "value": "5s"
- },
- {
- "selected": true,
- "text": "10s",
- "value": "10s"
- },
- {
- "selected": false,
- "text": "30s",
- "value": "30s"
- },
- {
- "selected": false,
- "text": "100s",
- "value": "100s"
- }
- ],
- "query": "5s,10s,30s,100s",
- "refresh": 2,
- "type": "interval"
- },
- {
- "allValue": null,
- "current": {
- "text": "swarm_30399 + swarm_30400 + swarm_30401 + swarm_30402",
- "value": [
- "swarm_30399",
- "swarm_30400",
- "swarm_30401",
- "swarm_30402"
- ]
- },
- "datasource": "metrics",
- "hide": 0,
- "includeAll": true,
- "label": null,
- "multi": true,
- "name": "host",
- "options": [],
- "query": "SHOW TAG VALUES WITH KEY = \"host\"",
- "refresh": 1,
- "regex": "",
- "sort": 1,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "swarm.http.request.GET.time.span",
- "type": "query",
- "useTags": false
- }
- ]
- },
- "time": {
- "from": "now-15m",
- "to": "now"
- },
- "timepicker": {
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ],
- "time_options": [
- "5m",
- "15m",
- "1h",
- "6h",
- "12h",
- "24h",
- "2d",
- "7d",
- "30d"
- ]
- },
- "timezone": "",
- "title": "Swarm",
- "uid": "vmEtxxgmz",
- "version": 138
-}
diff --git a/swarm/metrics/flags.go b/swarm/metrics/flags.go
index 79490fd36..7c12120a6 100644
--- a/swarm/metrics/flags.go
+++ b/swarm/metrics/flags.go
@@ -27,26 +27,26 @@ import (
)
var (
- metricsEnableInfluxDBExportFlag = cli.BoolFlag{
+ MetricsEnableInfluxDBExportFlag = cli.BoolFlag{
Name: "metrics.influxdb.export",
Usage: "Enable metrics export/push to an external InfluxDB database",
}
- metricsInfluxDBEndpointFlag = cli.StringFlag{
+ MetricsInfluxDBEndpointFlag = cli.StringFlag{
Name: "metrics.influxdb.endpoint",
Usage: "Metrics InfluxDB endpoint",
Value: "http://127.0.0.1:8086",
}
- metricsInfluxDBDatabaseFlag = cli.StringFlag{
+ MetricsInfluxDBDatabaseFlag = cli.StringFlag{
Name: "metrics.influxdb.database",
Usage: "Metrics InfluxDB database",
Value: "metrics",
}
- metricsInfluxDBUsernameFlag = cli.StringFlag{
+ MetricsInfluxDBUsernameFlag = cli.StringFlag{
Name: "metrics.influxdb.username",
Usage: "Metrics InfluxDB username",
Value: "",
}
- metricsInfluxDBPasswordFlag = cli.StringFlag{
+ MetricsInfluxDBPasswordFlag = cli.StringFlag{
Name: "metrics.influxdb.password",
Usage: "Metrics InfluxDB password",
Value: "",
@@ -55,7 +55,7 @@ var (
// It is used so that we can group all nodes and average a measurement across all of them, but also so
// that we can select a specific node and inspect its measurements.
// https://docs.influxdata.com/influxdb/v1.4/concepts/key_concepts/#tag-key
- metricsInfluxDBHostTagFlag = cli.StringFlag{
+ MetricsInfluxDBHostTagFlag = cli.StringFlag{
Name: "metrics.influxdb.host.tag",
Usage: "Metrics InfluxDB `host` tag attached to all measurements",
Value: "localhost",
@@ -65,20 +65,24 @@ var (
// Flags holds all command-line flags required for metrics collection.
var Flags = []cli.Flag{
utils.MetricsEnabledFlag,
- metricsEnableInfluxDBExportFlag,
- metricsInfluxDBEndpointFlag, metricsInfluxDBDatabaseFlag, metricsInfluxDBUsernameFlag, metricsInfluxDBPasswordFlag, metricsInfluxDBHostTagFlag,
+ MetricsEnableInfluxDBExportFlag,
+ MetricsInfluxDBEndpointFlag,
+ MetricsInfluxDBDatabaseFlag,
+ MetricsInfluxDBUsernameFlag,
+ MetricsInfluxDBPasswordFlag,
+ MetricsInfluxDBHostTagFlag,
}
func Setup(ctx *cli.Context) {
if gethmetrics.Enabled {
log.Info("Enabling swarm metrics collection")
var (
- enableExport = ctx.GlobalBool(metricsEnableInfluxDBExportFlag.Name)
- endpoint = ctx.GlobalString(metricsInfluxDBEndpointFlag.Name)
- database = ctx.GlobalString(metricsInfluxDBDatabaseFlag.Name)
- username = ctx.GlobalString(metricsInfluxDBUsernameFlag.Name)
- password = ctx.GlobalString(metricsInfluxDBPasswordFlag.Name)
- hosttag = ctx.GlobalString(metricsInfluxDBHostTagFlag.Name)
+ enableExport = ctx.GlobalBool(MetricsEnableInfluxDBExportFlag.Name)
+ endpoint = ctx.GlobalString(MetricsInfluxDBEndpointFlag.Name)
+ database = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name)
+ username = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name)
+ password = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name)
+ hosttag = ctx.GlobalString(MetricsInfluxDBHostTagFlag.Name)
)
// Start system runtime metrics collection
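Exporting these flags is what lets code outside swarm/metrics reference individual flags rather than just the aggregate Flags slice. A minimal sketch of a consumer, assuming illustrative app wiring (none of the wiring below is part of this change):

```go
package main

import (
	"os"

	swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
	cli "gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	// register the whole metrics flag group on the command
	app.Flags = append(app.Flags, swarmmetrics.Flags...)
	app.Before = func(ctx *cli.Context) error {
		// Setup reads the (now exported) flags and starts export if enabled
		swarmmetrics.Setup(ctx)
		return nil
	}
	// individual flags can now be referenced directly, e.g. in help text
	_ = swarmmetrics.MetricsInfluxDBEndpointFlag.Name
	if err := app.Run(os.Args); err != nil {
		os.Exit(1)
	}
}
```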
diff --git a/swarm/multihash/multihash.go b/swarm/multihash/multihash.go
deleted file mode 100644
index 3306e3a6d..000000000
--- a/swarm/multihash/multihash.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package multihash
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
-)
-
-const (
- defaultMultihashLength = 32
- defaultMultihashTypeCode = 0x1b
-)
-
-var (
- multihashTypeCode uint8
- MultihashLength = defaultMultihashLength
-)
-
-func init() {
- multihashTypeCode = defaultMultihashTypeCode
- MultihashLength = defaultMultihashLength
-}
-
-// check if valid swarm multihash
-func isSwarmMultihashType(code uint8) bool {
- return code == multihashTypeCode
-}
-
-// GetMultihashLength returns the digest length of the provided multihash
-// It will fail if the multihash is not a valid swarm mulithash
-func GetMultihashLength(data []byte) (int, int, error) {
- cursor := 0
- typ, c := binary.Uvarint(data)
- if c <= 0 {
- return 0, 0, errors.New("unreadable hashtype field")
- }
- if !isSwarmMultihashType(uint8(typ)) {
- return 0, 0, fmt.Errorf("hash code %x is not a swarm hashtype", typ)
- }
- cursor += c
- hashlength, c := binary.Uvarint(data[cursor:])
- if c <= 0 {
- return 0, 0, errors.New("unreadable length field")
- }
- cursor += c
-
- // we cheekily assume hashlength < maxint
- inthashlength := int(hashlength)
- if len(data[c:]) < inthashlength {
- return 0, 0, errors.New("length mismatch")
- }
- return inthashlength, cursor, nil
-}
-
-// FromMulithash returns the digest portion of the multihash
-// It will fail if the multihash is not a valid swarm multihash
-func FromMultihash(data []byte) ([]byte, error) {
- hashLength, _, err := GetMultihashLength(data)
- if err != nil {
- return nil, err
- }
- return data[len(data)-hashLength:], nil
-}
-
-// ToMulithash wraps the provided digest data with a swarm mulithash header
-func ToMultihash(hashData []byte) []byte {
- buf := bytes.NewBuffer(nil)
- b := make([]byte, 8)
- c := binary.PutUvarint(b, uint64(multihashTypeCode))
- buf.Write(b[:c])
- c = binary.PutUvarint(b, uint64(len(hashData)))
- buf.Write(b[:c])
- buf.Write(hashData)
- return buf.Bytes()
-}
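For reference, the deleted helpers produced the layout `<type-code varint><length varint><digest>` with type code 0x1b, so a 32-byte digest carries the two header bytes 1b 20. A standalone sketch of that layout (names are illustrative, not from the removed package):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// wrap prefixes a digest with the swarm multihash header: a varint type
// code (0x1b) followed by a varint digest length.
func wrap(digest []byte) []byte {
	tmp := make([]byte, binary.MaxVarintLen64)
	var out []byte
	n := binary.PutUvarint(tmp, 0x1b)
	out = append(out, tmp[:n]...)
	n = binary.PutUvarint(tmp, uint64(len(digest)))
	out = append(out, tmp[:n]...)
	return append(out, digest...)
}

func main() {
	// for a 32-byte digest both varints fit in one byte: header is 1b 20
	fmt.Printf("% x\n", wrap(make([]byte, 32))[:2])
}
```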
diff --git a/swarm/multihash/multihash_test.go b/swarm/multihash/multihash_test.go
deleted file mode 100644
index 85df741dd..000000000
--- a/swarm/multihash/multihash_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package multihash
-
-import (
- "bytes"
- "math/rand"
- "testing"
-)
-
-// parse multihash, and check that invalid multihashes fail
-func TestCheckMultihash(t *testing.T) {
- hashbytes := make([]byte, 32)
- c, err := rand.Read(hashbytes)
- if err != nil {
- t.Fatal(err)
- } else if c < 32 {
- t.Fatal("short read")
- }
-
- expected := ToMultihash(hashbytes)
-
- l, hl, _ := GetMultihashLength(expected)
- if l != 32 {
- t.Fatalf("expected length %d, got %d", 32, l)
- } else if hl != 2 {
- t.Fatalf("expected header length %d, got %d", 2, hl)
- }
- if _, _, err := GetMultihashLength(expected[1:]); err == nil {
- t.Fatal("expected failure on corrupt header")
- }
- if _, _, err := GetMultihashLength(expected[:len(expected)-2]); err == nil {
- t.Fatal("expected failure on short content")
- }
- dh, _ := FromMultihash(expected)
- if !bytes.Equal(dh, hashbytes) {
- t.Fatalf("expected content hash %x, got %x", hashbytes, dh)
- }
-}
diff --git a/swarm/network/bitvector/bitvector.go b/swarm/network/bitvector/bitvector.go
index edc7c50cb..958328502 100644
--- a/swarm/network/bitvector/bitvector.go
+++ b/swarm/network/bitvector/bitvector.go
@@ -60,7 +60,3 @@ func (bv *BitVector) Set(i int, v bool) {
func (bv *BitVector) Bytes() []byte {
return bv.b
}
-
-func (bv *BitVector) Length() int {
- return bv.len
-}
diff --git a/swarm/network/discovery.go b/swarm/network/discovery.go
index 21703e70f..4c503047a 100644
--- a/swarm/network/discovery.go
+++ b/swarm/network/discovery.go
@@ -65,7 +65,7 @@ func (d *Peer) HandleMsg(ctx context.Context, msg interface{}) error {
// NotifyDepth sends a message to all connections if depth of saturation is changed
func NotifyDepth(depth uint8, kad *Kademlia) {
- f := func(val *Peer, po int, _ bool) bool {
+ f := func(val *Peer, po int) bool {
val.NotifyDepth(depth)
return true
}
@@ -74,7 +74,7 @@ func NotifyDepth(depth uint8, kad *Kademlia) {
// NotifyPeer informs all peers about a newly added node
func NotifyPeer(p *BzzAddr, k *Kademlia) {
- f := func(val *Peer, po int, _ bool) bool {
+ f := func(val *Peer, po int) bool {
val.NotifyPeer(p, uint8(po))
return true
}
@@ -160,8 +160,8 @@ func (d *Peer) handleSubPeersMsg(msg *subPeersMsg) error {
if !d.sentPeers {
d.setDepth(msg.Depth)
var peers []*BzzAddr
- d.kad.EachConn(d.Over(), 255, func(p *Peer, po int, isproxbin bool) bool {
- if pob, _ := pof(d, d.kad.BaseAddr(), 0); pob > po {
+ d.kad.EachConn(d.Over(), 255, func(p *Peer, po int) bool {
+ if pob, _ := Pof(d, d.kad.BaseAddr(), 0); pob > po {
return false
}
if !d.seen(p.BzzAddr) {
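The iterator callbacks drop their third isproxbin argument throughout this change; a caller that still needs the nearest-neighbour distinction can recover it from the exported NeighbourhoodDepth introduced in the kademlia.go hunks below. A sketch of a migrated caller (the helper name is illustrative):

```go
package example

import "github.com/ethereum/go-ethereum/swarm/network"

// forEachNN applies f to connected peers within the neighbourhood depth,
// recovering the information the removed isproxbin flag used to carry.
func forEachNN(kad *network.Kademlia, f func(*network.Peer) bool) {
	depth := kad.NeighbourhoodDepth()
	kad.EachConn(nil, 255, func(p *network.Peer, po int) bool {
		if po >= depth { // previously signalled by the third callback argument
			return f(p)
		}
		return true
	})
}
```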
diff --git a/swarm/network/hive.go b/swarm/network/hive.go
index 1aa1ae42a..a0b6b988a 100644
--- a/swarm/network/hive.go
+++ b/swarm/network/hive.go
@@ -114,7 +114,7 @@ func (h *Hive) Stop() error {
}
}
log.Info(fmt.Sprintf("%08x hive stopped, dropping peers", h.BaseAddr()[:4]))
- h.EachConn(nil, 255, func(p *Peer, _ int, _ bool) bool {
+ h.EachConn(nil, 255, func(p *Peer, _ int) bool {
log.Info(fmt.Sprintf("%08x dropping peer %08x", h.BaseAddr()[:4], p.Address()[:4]))
p.Drop(nil)
return true
@@ -165,8 +165,8 @@ func (h *Hive) Run(p *BzzPeer) error {
// otherwise just send depth to new peer
dp.NotifyDepth(depth)
}
+ NotifyPeer(p.BzzAddr, h.Kademlia)
}
- NotifyPeer(p.BzzAddr, h.Kademlia)
defer h.Off(dp)
return dp.Run(dp.HandleMsg)
}
@@ -228,7 +228,7 @@ func (h *Hive) loadPeers() error {
// savePeers, savePeer implement persistence callback/
func (h *Hive) savePeers() error {
var peers []*BzzAddr
- h.Kademlia.EachAddr(nil, 256, func(pa *BzzAddr, i int, _ bool) bool {
+ h.Kademlia.EachAddr(nil, 256, func(pa *BzzAddr, i int) bool {
if pa == nil {
log.Warn(fmt.Sprintf("empty addr: %v", i))
return true
diff --git a/swarm/network/hive_test.go b/swarm/network/hive_test.go
index 56adc5a8e..a29e73083 100644
--- a/swarm/network/hive_test.go
+++ b/swarm/network/hive_test.go
@@ -103,7 +103,7 @@ func TestHiveStatePersistance(t *testing.T) {
pp.Start(s1.Server)
i := 0
- pp.Kademlia.EachAddr(nil, 256, func(addr *BzzAddr, po int, nn bool) bool {
+ pp.Kademlia.EachAddr(nil, 256, func(addr *BzzAddr, po int) bool {
delete(peers, addr.String())
i++
return true
diff --git a/swarm/network/kademlia.go b/swarm/network/kademlia.go
index cd94741be..7d52f26f7 100644
--- a/swarm/network/kademlia.go
+++ b/swarm/network/kademlia.go
@@ -49,32 +49,32 @@ a guaranteed constant maximum limit on the number of hops needed to reach one
node from the other.
*/
-var pof = pot.DefaultPof(256)
+var Pof = pot.DefaultPof(256)
// KadParams holds the config params for Kademlia
type KadParams struct {
// adjustable parameters
- MaxProxDisplay int // number of rows the table shows
- MinProxBinSize int // nearest neighbour core minimum cardinality
- MinBinSize int // minimum number of peers in a row
- MaxBinSize int // maximum number of peers in a row before pruning
- RetryInterval int64 // initial interval before a peer is first redialed
- RetryExponent int // exponent to multiply retry intervals with
- MaxRetries int // maximum number of redial attempts
+ MaxProxDisplay int // number of rows the table shows
+ NeighbourhoodSize int // nearest neighbour core minimum cardinality
+ MinBinSize int // minimum number of peers in a row
+ MaxBinSize int // maximum number of peers in a row before pruning
+ RetryInterval int64 // initial interval before a peer is first redialed
+ RetryExponent int // exponent to multiply retry intervals with
+ MaxRetries int // maximum number of redial attempts
// function to sanction or prevent suggesting a peer
- Reachable func(*BzzAddr) bool
+ Reachable func(*BzzAddr) bool `json:"-"`
}
// NewKadParams returns a params struct with default values
func NewKadParams() *KadParams {
return &KadParams{
- MaxProxDisplay: 16,
- MinProxBinSize: 2,
- MinBinSize: 2,
- MaxBinSize: 4,
- RetryInterval: 4200000000, // 4.2 sec
- MaxRetries: 42,
- RetryExponent: 2,
+ MaxProxDisplay: 16,
+ NeighbourhoodSize: 2,
+ MinBinSize: 2,
+ MaxBinSize: 4,
+ RetryInterval: 4200000000, // 4.2 sec
+ MaxRetries: 42,
+ RetryExponent: 2,
}
}
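With MinProxBinSize renamed to NeighbourhoodSize (and Reachable now excluded from JSON encoding), constructing a kademlia with a non-default neighbourhood size looks as follows; a minimal sketch, with an illustrative helper name and size:

```go
package example

import "github.com/ethereum/go-ethereum/swarm/network"

// newWideKademlia builds a kademlia with a non-default neighbourhood size;
// the size of 4 here is purely illustrative.
func newWideKademlia() *network.Kademlia {
	params := network.NewKadParams()
	params.NeighbourhoodSize = 4 // formerly MinProxBinSize
	return network.NewKademlia(network.RandomAddr().OAddr, params)
}
```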
@@ -145,7 +145,7 @@ func (k *Kademlia) Register(peers ...*BzzAddr) error {
return fmt.Errorf("add peers: %x is self", k.base)
}
var found bool
- k.addrs, _, found, _ = pot.Swap(k.addrs, p, pof, func(v pot.Val) pot.Val {
+ k.addrs, _, found, _ = pot.Swap(k.addrs, p, Pof, func(v pot.Val) pot.Val {
// if not found
if v == nil {
// insert new offline peer into conns
@@ -175,11 +175,11 @@ func (k *Kademlia) SuggestPeer() (a *BzzAddr, o int, want bool) {
k.lock.Lock()
defer k.lock.Unlock()
minsize := k.MinBinSize
- depth := k.neighbourhoodDepth()
+ depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
// if there is a callable neighbour within the current proxBin, connect
// this makes sure nearest neighbour set is fully connected
var ppo int
- k.addrs.EachNeighbour(k.base, pof, func(val pot.Val, po int) bool {
+ k.addrs.EachNeighbour(k.base, Pof, func(val pot.Val, po int) bool {
if po < depth {
return false
}
@@ -198,7 +198,7 @@ func (k *Kademlia) SuggestPeer() (a *BzzAddr, o int, want bool) {
var bpo []int
prev := -1
- k.conns.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
+ k.conns.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {
prev++
for ; prev < po; prev++ {
bpo = append(bpo, prev)
@@ -219,12 +219,12 @@ func (k *Kademlia) SuggestPeer() (a *BzzAddr, o int, want bool) {
// try to select a candidate peer
// find the first callable peer
nxt := bpo[0]
- k.addrs.EachBin(k.base, pof, nxt, func(po, _ int, f func(func(pot.Val, int) bool) bool) bool {
+ k.addrs.EachBin(k.base, Pof, nxt, func(po, _ int, f func(func(pot.Val) bool) bool) bool {
// for each bin (up until depth) we find callable candidate peers
if po >= depth {
return false
}
- return f(func(val pot.Val, _ int) bool {
+ return f(func(val pot.Val) bool {
e := val.(*entry)
c := k.callable(e)
if c {
@@ -251,7 +251,7 @@ func (k *Kademlia) On(p *Peer) (uint8, bool) {
k.lock.Lock()
defer k.lock.Unlock()
var ins bool
- k.conns, _, _, _ = pot.Swap(k.conns, p, pof, func(v pot.Val) pot.Val {
+ k.conns, _, _, _ = pot.Swap(k.conns, p, Pof, func(v pot.Val) pot.Val {
// if not found live
if v == nil {
ins = true
@@ -265,7 +265,7 @@ func (k *Kademlia) On(p *Peer) (uint8, bool) {
a := newEntry(p.BzzAddr)
a.conn = p
// insert new online peer into addrs
- k.addrs, _, _, _ = pot.Swap(k.addrs, p, pof, func(v pot.Val) pot.Val {
+ k.addrs, _, _, _ = pot.Swap(k.addrs, p, Pof, func(v pot.Val) pot.Val {
return a
})
// send new address count value only if the peer is inserted
@@ -275,7 +275,7 @@ func (k *Kademlia) On(p *Peer) (uint8, bool) {
}
log.Trace(k.string())
// calculate if depth of saturation changed
- depth := uint8(k.saturation(k.MinBinSize))
+ depth := uint8(k.saturation())
var changed bool
if depth != k.depth {
changed = true
@@ -289,6 +289,7 @@ func (k *Kademlia) On(p *Peer) (uint8, bool) {
// neighbourhood depth on each change.
// Not receiving from the returned channel will block On function
// when the neighbourhood depth is changed.
+// TODO: Why is this exported, and if it should be, why can't we have more than one subscriber?
func (k *Kademlia) NeighbourhoodDepthC() <-chan int {
k.lock.Lock()
defer k.lock.Unlock()
@@ -305,7 +306,7 @@ func (k *Kademlia) sendNeighbourhoodDepthChange() {
// It provides signaling of neighbourhood depth change.
// This part of the code is sending new neighbourhood depth to nDepthC if that condition is met.
if k.nDepthC != nil {
- nDepth := k.neighbourhoodDepth()
+ nDepth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
if nDepth != k.nDepth {
k.nDepth = nDepth
k.nDepthC <- nDepth
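As the comment above notes, On blocks when the returned channel is not drained, so the single subscriber should consume it from its own goroutine. A minimal consumer sketch (helper name illustrative):

```go
package example

import (
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/network"
)

// watchDepth drains the depth-change channel so that On never blocks
// on an unread notification.
func watchDepth(kad *network.Kademlia) {
	c := kad.NeighbourhoodDepthC()
	go func() {
		for depth := range c {
			log.Debug("kademlia depth changed", "depth", depth)
		}
	}()
}
```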
@@ -330,7 +331,7 @@ func (k *Kademlia) Off(p *Peer) {
defer k.lock.Unlock()
var del bool
if !p.BzzPeer.LightNode {
- k.addrs, _, _, _ = pot.Swap(k.addrs, p, pof, func(v pot.Val) pot.Val {
+ k.addrs, _, _, _ = pot.Swap(k.addrs, p, Pof, func(v pot.Val) pot.Val {
// v cannot be nil, must check otherwise we overwrite entry
if v == nil {
panic(fmt.Sprintf("connected peer not found %v", p))
@@ -343,7 +344,7 @@ func (k *Kademlia) Off(p *Peer) {
}
if del {
- k.conns, _, _, _ = pot.Swap(k.conns, p, pof, func(_ pot.Val) pot.Val {
+ k.conns, _, _, _ = pot.Swap(k.conns, p, Pof, func(_ pot.Val) pot.Val {
// v cannot be nil, but no need to check
return nil
})
@@ -355,92 +356,103 @@ func (k *Kademlia) Off(p *Peer) {
}
}
-func (k *Kademlia) EachBin(base []byte, pof pot.Pof, o int, eachBinFunc func(conn *Peer, po int) bool) {
- k.lock.RLock()
- defer k.lock.RUnlock()
-
- var startPo int
- var endPo int
- kadDepth := k.neighbourhoodDepth()
-
- k.conns.EachBin(base, pof, o, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
- if startPo > 0 && endPo != k.MaxProxDisplay {
- startPo = endPo + 1
- }
- if po < kadDepth {
- endPo = po
- } else {
- endPo = k.MaxProxDisplay
- }
-
- for bin := startPo; bin <= endPo; bin++ {
- f(func(val pot.Val, _ int) bool {
- return eachBinFunc(val.(*Peer), bin)
- })
- }
- return true
- })
-}
-
// EachConn is an iterator with args (base, po, f) applies f to each live peer
// that has proximity order po or less as measured from the base
// if base is nil, kademlia base address is used
-func (k *Kademlia) EachConn(base []byte, o int, f func(*Peer, int, bool) bool) {
+func (k *Kademlia) EachConn(base []byte, o int, f func(*Peer, int) bool) {
k.lock.RLock()
defer k.lock.RUnlock()
k.eachConn(base, o, f)
}
-func (k *Kademlia) eachConn(base []byte, o int, f func(*Peer, int, bool) bool) {
+func (k *Kademlia) eachConn(base []byte, o int, f func(*Peer, int) bool) {
if len(base) == 0 {
base = k.base
}
- depth := k.neighbourhoodDepth()
- k.conns.EachNeighbour(base, pof, func(val pot.Val, po int) bool {
+ k.conns.EachNeighbour(base, Pof, func(val pot.Val, po int) bool {
if po > o {
return true
}
- return f(val.(*Peer), po, po >= depth)
+ return f(val.(*Peer), po)
})
}
// EachAddr called with (base, po, f) is an iterator applying f to each known peer
-// that has proximity order po or less as measured from the base
+// that has proximity order o or less as measured from the base
// if base is nil, kademlia base address is used
-func (k *Kademlia) EachAddr(base []byte, o int, f func(*BzzAddr, int, bool) bool) {
+func (k *Kademlia) EachAddr(base []byte, o int, f func(*BzzAddr, int) bool) {
k.lock.RLock()
defer k.lock.RUnlock()
k.eachAddr(base, o, f)
}
-func (k *Kademlia) eachAddr(base []byte, o int, f func(*BzzAddr, int, bool) bool) {
+func (k *Kademlia) eachAddr(base []byte, o int, f func(*BzzAddr, int) bool) {
if len(base) == 0 {
base = k.base
}
- depth := k.neighbourhoodDepth()
- k.addrs.EachNeighbour(base, pof, func(val pot.Val, po int) bool {
+ k.addrs.EachNeighbour(base, Pof, func(val pot.Val, po int) bool {
if po > o {
return true
}
- return f(val.(*entry).BzzAddr, po, po >= depth)
+ return f(val.(*entry).BzzAddr, po)
})
}
-// neighbourhoodDepth returns the proximity order that defines the distance of
-// the nearest neighbour set with cardinality >= MinProxBinSize
-// if there is altogether less than MinProxBinSize peers it returns 0
+func (k *Kademlia) NeighbourhoodDepth() (depth int) {
+ k.lock.RLock()
+ defer k.lock.RUnlock()
+ return depthForPot(k.conns, k.NeighbourhoodSize, k.base)
+}
+
+// depthForPot returns the proximity order that defines the distance of
+// the nearest neighbour set with cardinality >= NeighbourhoodSize
+// if there is altogether less than NeighbourhoodSize peers it returns 0
// caller must hold the lock
-func (k *Kademlia) neighbourhoodDepth() (depth int) {
- if k.conns.Size() < k.MinProxBinSize {
+func depthForPot(p *pot.Pot, neighbourhoodSize int, pivotAddr []byte) (depth int) {
+ if p.Size() <= neighbourhoodSize {
return 0
}
+
+ // total number of peers in iteration
var size int
+
+ // determining the depth is a two-step process
+ // first we find the proximity bin of the shallowest of the NeighbourhoodSize nearest peers
+ // the numeric value of depth cannot be higher than this
+ var maxDepth int
+
f := func(v pot.Val, i int) bool {
+ // po == 256 means that addr is the pivot address (self)
+ if i == 256 {
+ return true
+ }
size++
- depth = i
- return size < k.MinProxBinSize
+
+ // this means we have all nn-peers.
+ // depth is by default set to the bin of the farthest nn-peer
+ if size == neighbourhoodSize {
+ maxDepth = i
+ return false
+ }
+
+ return true
}
- k.conns.EachNeighbour(k.base, pof, f)
+ p.EachNeighbour(pivotAddr, Pof, f)
+
+ // the second step is to test for empty bins in order from shallowest to deepest
+ // if an empty bin is found, this will be the actual depth
+ // we stop iterating if we hit the maxDepth determined in the first step
+ p.EachBin(pivotAddr, Pof, 0, func(po int, _ int, f func(func(pot.Val) bool) bool) bool {
+ if po == depth {
+ if maxDepth == depth {
+ return false
+ }
+ depth++
+ return true
+ }
+ return false
+ })
+
return depth
}
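A worked example of the two-step calculation as a test sketch, assuming the newTestDiscoveryPeer helper that the new tests in kademlia_test.go below rely on: with NeighbourhoodSize 2 and connected peers in bins 0, 1, 3 and 3, step one places the second-nearest peer in bin 3 (maxDepth = 3) and step two stops at the first gap, bin 2, so the depth is 2.

```go
package network

import (
	"testing"

	"github.com/ethereum/go-ethereum/swarm/pot"
)

// TestDepthForPotGap checks that the first empty bin below the farthest
// nearest neighbour determines the depth.
func TestDepthForPotGap(t *testing.T) {
	base := RandomAddr().OAddr
	kad := NewKademlia(base, NewKadParams()) // NeighbourhoodSize = 2
	pivot := pot.NewAddressFromBytes(base)
	// connect peers in bins 0, 1 and 3 (twice), leaving bin 2 empty
	for _, po := range []int{0, 1, 3, 3} {
		kad.On(newTestDiscoveryPeer(pot.RandomAddressAt(pivot, po), kad))
	}
	if depth := kad.NeighbourhoodDepth(); depth != 2 {
		t.Fatalf("expected depth 2, got %d", depth)
	}
}
```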
@@ -495,21 +507,21 @@ func (k *Kademlia) string() string {
rows = append(rows, "=========================================================================")
rows = append(rows, fmt.Sprintf("%v KΛÐΞMLIΛ hive: queen's address: %x", time.Now().UTC().Format(time.UnixDate), k.BaseAddr()[:3]))
- rows = append(rows, fmt.Sprintf("population: %d (%d), MinProxBinSize: %d, MinBinSize: %d, MaxBinSize: %d", k.conns.Size(), k.addrs.Size(), k.MinProxBinSize, k.MinBinSize, k.MaxBinSize))
+ rows = append(rows, fmt.Sprintf("population: %d (%d), NeighbourhoodSize: %d, MinBinSize: %d, MaxBinSize: %d", k.conns.Size(), k.addrs.Size(), k.NeighbourhoodSize, k.MinBinSize, k.MaxBinSize))
liverows := make([]string, k.MaxProxDisplay)
peersrows := make([]string, k.MaxProxDisplay)
- depth := k.neighbourhoodDepth()
+ depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
rest := k.conns.Size()
- k.conns.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
+ k.conns.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {
var rowlen int
if po >= k.MaxProxDisplay {
po = k.MaxProxDisplay - 1
}
row := []string{fmt.Sprintf("%2d", size)}
rest -= size
- f(func(val pot.Val, vpo int) bool {
+ f(func(val pot.Val) bool {
e := val.(*Peer)
row = append(row, fmt.Sprintf("%x", e.Address()[:2]))
rowlen++
@@ -521,7 +533,7 @@ func (k *Kademlia) string() string {
return true
})
- k.addrs.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
+ k.addrs.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {
var rowlen int
if po >= k.MaxProxDisplay {
po = k.MaxProxDisplay - 1
@@ -531,7 +543,7 @@ func (k *Kademlia) string() string {
}
row := []string{fmt.Sprintf("%2d", size)}
// we are displaying live peers too
- f(func(val pot.Val, vpo int) bool {
+ f(func(val pot.Val) bool {
e := val.(*entry)
row = append(row, Label(e))
rowlen++
@@ -559,155 +571,148 @@ func (k *Kademlia) string() string {
return "\n" + strings.Join(rows, "\n")
}
-// PeerPot keeps info about expected nearest neighbours and empty bins
+// PeerPot keeps info about expected nearest neighbours
// used for testing only
+// TODO move to separate testing tools file
type PeerPot struct {
- NNSet [][]byte
- EmptyBins []int
+ NNSet [][]byte
}
// NewPeerPotMap creates a map of pot record of *BzzAddr with keys
// as hexadecimal representations of the address.
+// the caller's NeighbourhoodSize is passed in and used for the depth calculation
// used for testing only
-func NewPeerPotMap(kadMinProxSize int, addrs [][]byte) map[string]*PeerPot {
+// TODO move to separate testing tools file
+func NewPeerPotMap(neighbourhoodSize int, addrs [][]byte) map[string]*PeerPot {
+
// create a table of all nodes for health check
np := pot.NewPot(nil, 0)
for _, addr := range addrs {
- np, _, _ = pot.Add(np, addr, pof)
+ np, _, _ = pot.Add(np, addr, Pof)
}
ppmap := make(map[string]*PeerPot)
+ // generate an all-knowing source of truth for connections
+ // for every kademlia passed
for i, a := range addrs {
- pl := 256
- prev := 256
- var emptyBins []int
+
+ // actual kademlia depth
+ depth := depthForPot(np, neighbourhoodSize, a)
+
+ // all nn-peers
var nns [][]byte
- np.EachNeighbour(addrs[i], pof, func(val pot.Val, po int) bool {
- a := val.([]byte)
+
+ // iterate through the neighbours, going from the deepest to the shallowest
+ np.EachNeighbour(a, Pof, func(val pot.Val, po int) bool {
+ addr := val.([]byte)
+ // po == 256 means that addr is the pivot address (self)
+ // we do not include self in the map
if po == 256 {
return true
}
- if pl == 256 || pl == po {
- nns = append(nns, a)
+ // append any neighbors found
+ // a neighbor is any peer in or deeper than the depth
+ if po >= depth {
+ nns = append(nns, addr)
+ return true
}
- if pl == 256 && len(nns) >= kadMinProxSize {
- pl = po
- prev = po
- }
- if prev < pl {
- for j := prev; j > po; j-- {
- emptyBins = append(emptyBins, j)
- }
- }
- prev = po - 1
- return true
+ return false
})
- for j := prev; j >= 0; j-- {
- emptyBins = append(emptyBins, j)
+
+ log.Trace(fmt.Sprintf("%x PeerPotMap NNS: %s", addrs[i][:4], LogAddrs(nns)))
+ ppmap[common.Bytes2Hex(a)] = &PeerPot{
+ NNSet: nns,
}
- log.Trace(fmt.Sprintf("%x NNS: %s", addrs[i][:4], LogAddrs(nns)))
- ppmap[common.Bytes2Hex(a)] = &PeerPot{nns, emptyBins}
}
return ppmap
}
-// saturation returns the lowest proximity order that the bin for that order
-// has less than n peers
-// It is used in Healthy function for testing only
-func (k *Kademlia) saturation(n int) int {
+// saturation iterates through all peers and
+// returns the smallest po value in which the node has fewer than MinBinSize peers
+// if the iterator reaches depth, then the value for depth is returned
+// TODO move to separate testing tools file
+// TODO this function will stop at the first bin with fewer than MinBinSize peers, even if there are empty bins between that bin and the depth. This may not be correct behavior.
+func (k *Kademlia) saturation() int {
prev := -1
- k.addrs.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
+ k.addrs.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {
prev++
- return prev == po && size >= n
+ return prev == po && size >= k.MinBinSize
})
- depth := k.neighbourhoodDepth()
+ // TODO evaluate whether this check cannot just as well be done within the eachbin
+ depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
if depth < prev {
return depth
}
return prev
}
-// full returns true if all required bins have connected peers.
+// knowNeighbours tests if all neighbours in the peerpot
+// are found among the peers known to the kademlia
// It is used in Healthy function for testing only
-func (k *Kademlia) full(emptyBins []int) (full bool) {
- prev := 0
- e := len(emptyBins)
- ok := true
- depth := k.neighbourhoodDepth()
- k.conns.EachBin(k.base, pof, 0, func(po, _ int, _ func(func(val pot.Val, i int) bool) bool) bool {
- if prev == depth+1 {
- return true
- }
- for i := prev; i < po; i++ {
- e--
- if e < 0 {
- ok = false
- return false
- }
- if emptyBins[e] != i {
- log.Trace(fmt.Sprintf("%08x po: %d, i: %d, e: %d, emptybins: %v", k.BaseAddr()[:4], po, i, e, logEmptyBins(emptyBins)))
- if emptyBins[e] < i {
- panic("incorrect peerpot")
- }
- ok = false
- return false
- }
- }
- prev = po + 1
- return true
- })
- if !ok {
- return false
- }
- return e == 0
-}
-
-// knowNearestNeighbours tests if all known nearest neighbours given as arguments
-// are found in the addressbook
-// It is used in Healthy function for testing only
-func (k *Kademlia) knowNearestNeighbours(peers [][]byte) bool {
+// TODO move to separate testing tools file
+func (k *Kademlia) knowNeighbours(addrs [][]byte) (got bool, n int, missing [][]byte) {
pm := make(map[string]bool)
-
- k.eachAddr(nil, 255, func(p *BzzAddr, po int, nn bool) bool {
- if !nn {
+ depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
+ // create a map with all peers at depth and deeper known in the kademlia
+ k.eachAddr(nil, 255, func(p *BzzAddr, po int) bool {
+ // in order deepest to shallowest compared to the kademlia base address
+ // all bins (except self) are included (0 <= bin <= 255)
+ if po < depth {
return false
}
- pk := fmt.Sprintf("%x", p.Address())
+ pk := common.Bytes2Hex(p.Address())
pm[pk] = true
return true
})
- for _, p := range peers {
- pk := fmt.Sprintf("%x", p)
- if !pm[pk] {
- log.Trace(fmt.Sprintf("%08x: known nearest neighbour %s not found", k.BaseAddr()[:4], pk[:8]))
- return false
- }
- }
- return true
-}
-// gotNearestNeighbours tests if all known nearest neighbours given as arguments
-// are connected peers
-// It is used in Healthy function for testing only
-func (k *Kademlia) gotNearestNeighbours(peers [][]byte) (got bool, n int, missing [][]byte) {
- pm := make(map[string]bool)
-
- k.eachConn(nil, 255, func(p *Peer, po int, nn bool) bool {
- if !nn {
- return false
- }
- pk := fmt.Sprintf("%x", p.Address())
- pm[pk] = true
- return true
- })
+ // iterate through nearest neighbors in the peerpot map
+ // if we can't find the neighbor in the map we created above
+ // then we don't know all our neighbors
+ // (which sadly is all too common in modern society)
var gots int
var culprits [][]byte
- for _, p := range peers {
- pk := fmt.Sprintf("%x", p)
+ for _, p := range addrs {
+ pk := common.Bytes2Hex(p)
if pm[pk] {
gots++
} else {
- log.Trace(fmt.Sprintf("%08x: ExpNN: %s not found", k.BaseAddr()[:4], pk[:8]))
+ log.Trace(fmt.Sprintf("%08x: known nearest neighbour %s not found", k.base, pk))
+ culprits = append(culprits, p)
+ }
+ }
+ return gots == len(addrs), gots, culprits
+}
+
+// connectedNeighbours tests if all neighbours in the peerpot
+// are currently connected in the kademlia
+// It is used in Healthy function for testing only
+func (k *Kademlia) connectedNeighbours(peers [][]byte) (got bool, n int, missing [][]byte) {
+ pm := make(map[string]bool)
+
+ // create a map with all peers at depth and deeper that are connected in the kademlia
+ // in order deepest to shallowest compared to the kademlia base address
+ // all bins (except self) are included (0 <= bin <= 255)
+ depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
+ k.eachConn(nil, 255, func(p *Peer, po int) bool {
+ if po < depth {
+ return false
+ }
+ pk := common.Bytes2Hex(p.Address())
+ pm[pk] = true
+ return true
+ })
+
+ // iterate through nearest neighbors in the peerpot map
+ // if we can't find the neighbor in the map we created above
+ // then we are not connected to all our neighbours
+ var gots int
+ var culprits [][]byte
+ for _, p := range peers {
+ pk := common.Bytes2Hex(p)
+ if pm[pk] {
+ gots++
+ } else {
+ log.Trace(fmt.Sprintf("%08x: ExpNN: %s not found", k.base, pk))
culprits = append(culprits, p)
}
}
@@ -717,31 +722,40 @@ func (k *Kademlia) gotNearestNeighbours(peers [][]byte) (got bool, n int, missin
// Health state of the Kademlia
// used for testing only
type Health struct {
- KnowNN bool // whether node knows all its nearest neighbours
- GotNN bool // whether node is connected to all its nearest neighbours
- CountNN int // amount of nearest neighbors connected to
- CulpritsNN [][]byte // which known NNs are missing
- Full bool // whether node has a peer in each kademlia bin (where there is such a peer)
- Hive string
+ KnowNN bool // whether node knows all its neighbours
+ CountKnowNN int // amount of neighbors known
+ MissingKnowNN [][]byte // which neighbours we should have known but we don't
+ ConnectNN bool // whether node is connected to all its neighbours
+ CountConnectNN int // amount of neighbours connected to
+ MissingConnectNN [][]byte // which neighbours we should have been connected to but we're not
+ Saturated bool // whether we are connected to all the peers we would have liked to
+ Hive string
}
// Healthy reports the health state of the kademlia connectivity
-// returns a Health struct
+//
+// The PeerPot argument provides an all-knowing view of the network
+// The resulting Health object is a result of comparisons between
+// what is the actual composition of the kademlia in question (the receiver), and
+// what it SHOULD have been when we take all we know about the network into consideration.
+//
// used for testing only
func (k *Kademlia) Healthy(pp *PeerPot) *Health {
k.lock.RLock()
defer k.lock.RUnlock()
- gotnn, countnn, culpritsnn := k.gotNearestNeighbours(pp.NNSet)
- knownn := k.knowNearestNeighbours(pp.NNSet)
- full := k.full(pp.EmptyBins)
- log.Trace(fmt.Sprintf("%08x: healthy: knowNNs: %v, gotNNs: %v, full: %v\n", k.BaseAddr()[:4], knownn, gotnn, full))
- return &Health{knownn, gotnn, countnn, culpritsnn, full, k.string()}
-}
-
-func logEmptyBins(ebs []int) string {
- var ebss []string
- for _, eb := range ebs {
- ebss = append(ebss, fmt.Sprintf("%d", eb))
+ gotnn, countgotnn, culpritsgotnn := k.connectedNeighbours(pp.NNSet)
+ knownn, countknownn, culpritsknownn := k.knowNeighbours(pp.NNSet)
+ depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
+ saturated := k.saturation() < depth
+ log.Trace(fmt.Sprintf("%08x: healthy: knowNNs: %v, gotNNs: %v, saturated: %v\n", k.base, knownn, gotnn, saturated))
+ return &Health{
+ KnowNN: knownn,
+ CountKnowNN: countknownn,
+ MissingKnowNN: culpritsknownn,
+ ConnectNN: gotnn,
+ CountConnectNN: countgotnn,
+ MissingConnectNN: culpritsgotnn,
+ Saturated: saturated,
+ Hive: k.string(),
}
- return strings.Join(ebss, ", ")
}
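Putting PeerPot and Healthy together: a sketch of the strict health predicate, assuming addrs holds the overlay address of every node in the network; the assertHealth helper added to kademlia_test.go below applies the same predicate.

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/network"
)

// isHealthy evaluates the strict health predicate for one node; addrs
// must contain the overlay address of every node in the network.
func isHealthy(k *network.Kademlia, addrs [][]byte) bool {
	ppmap := network.NewPeerPotMap(k.NeighbourhoodSize, addrs)
	h := k.Healthy(ppmap[common.Bytes2Hex(k.BaseAddr())])
	return h.KnowNN && h.ConnectNN && h.CountKnowNN > 0
}
```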
diff --git a/swarm/network/kademlia_test.go b/swarm/network/kademlia_test.go
index d2e051f45..fcb277fde 100644
--- a/swarm/network/kademlia_test.go
+++ b/swarm/network/kademlia_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The go-ethereum Authors
+// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -25,6 +25,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/protocols"
"github.com/ethereum/go-ethereum/swarm/pot"
)
@@ -38,12 +41,17 @@ func testKadPeerAddr(s string) *BzzAddr {
return &BzzAddr{OAddr: a, UAddr: a}
}
-func newTestKademlia(b string) *Kademlia {
+func newTestKademliaParams() *KadParams {
params := NewKadParams()
+ // TODO why is this 1?
params.MinBinSize = 1
- params.MinProxBinSize = 2
+ params.NeighbourhoodSize = 2
+ return params
+}
+
+func newTestKademlia(b string) *Kademlia {
base := pot.NewAddressFromString(b)
- return NewKademlia(base, params)
+ return NewKademlia(base, newTestKademliaParams())
}
func newTestKadPeer(k *Kademlia, s string, lightNode bool) *Peer {
@@ -73,8 +81,178 @@ func Register(k *Kademlia, regs ...string) {
}
}
+// tests the validity of neighborhood depth calculations
+//
+// in particular, it tests that if there are one or more consecutive
+// empty bins between the base and the farthest "nearest neighbour" peer,
+// then the depth should be set at the shallowest of those empty bins
+//
+// TODO: Make test adapt to change in NeighbourhoodSize
+func TestNeighbourhoodDepth(t *testing.T) {
+ baseAddressBytes := RandomAddr().OAddr
+ kad := NewKademlia(baseAddressBytes, NewKadParams())
+
+ baseAddress := pot.NewAddressFromBytes(baseAddressBytes)
+
+ // generate the peers
+ var peers []*Peer
+ for i := 0; i < 7; i++ {
+ addr := pot.RandomAddressAt(baseAddress, i)
+ peers = append(peers, newTestDiscoveryPeer(addr, kad))
+ }
+ var sevenPeers []*Peer
+ for i := 0; i < 2; i++ {
+ addr := pot.RandomAddressAt(baseAddress, 7)
+ sevenPeers = append(sevenPeers, newTestDiscoveryPeer(addr, kad))
+ }
+
+ testNum := 0
+ // first try with empty kademlia
+ depth := kad.NeighbourhoodDepth()
+ if depth != 0 {
+ t.Fatalf("%d expected depth 0, was %d", testNum, depth)
+ }
+ testNum++
+
+ // add one peer on 7
+ kad.On(sevenPeers[0])
+ depth = kad.NeighbourhoodDepth()
+ if depth != 0 {
+ t.Fatalf("%d expected depth 0, was %d", testNum, depth)
+ }
+ testNum++
+
+ // add a second on 7
+ kad.On(sevenPeers[1])
+ depth = kad.NeighbourhoodDepth()
+ if depth != 0 {
+ t.Fatalf("%d expected depth 0, was %d", testNum, depth)
+ }
+ testNum++
+
+ // add from 0 to 6
+ for i, p := range peers {
+ kad.On(p)
+ depth = kad.NeighbourhoodDepth()
+ if depth != i+1 {
+ t.Fatalf("%d.%d expected depth %d, was %d", i+1, testNum, i, depth)
+ }
+ }
+ testNum++
+
+ kad.Off(sevenPeers[1])
+ depth = kad.NeighbourhoodDepth()
+ if depth != 6 {
+ t.Fatalf("%d expected depth 6, was %d", testNum, depth)
+ }
+ testNum++
+
+ kad.Off(peers[4])
+ depth = kad.NeighbourhoodDepth()
+ if depth != 4 {
+ t.Fatalf("%d expected depth 4, was %d", testNum, depth)
+ }
+ testNum++
+
+ kad.Off(peers[3])
+ depth = kad.NeighbourhoodDepth()
+ if depth != 3 {
+ t.Fatalf("%d expected depth 3, was %d", testNum, depth)
+ }
+ testNum++
+}
+
+// TestHealthStrict tests the simplest definition of health
+// which is whether we are connected to all the neighbours we know of
+func TestHealthStrict(t *testing.T) {
+
+ // base address is all ones
+ // no peers
+ // unhealthy (and lonely)
+ k := newTestKademlia("11111111")
+ assertHealth(t, k, false, false)
+
+ // know one peer but not connected
+ // unhealthy
+ Register(k, "11100000")
+ log.Trace(k.String())
+ assertHealth(t, k, false, false)
+
+ // know one peer and connected
+ // healthy
+ On(k, "11100000")
+ assertHealth(t, k, true, false)
+
+ // know two peers, only one connected
+ // unhealthy
+ Register(k, "11111100")
+ log.Trace(k.String())
+ assertHealth(t, k, false, false)
+
+ // know two peers and connected to both
+ // healthy
+ On(k, "11111100")
+ assertHealth(t, k, true, false)
+
+ // know three peers, connected to the two deepest
+ // healthy
+ Register(k, "00000000")
+ log.Trace(k.String())
+ assertHealth(t, k, true, false)
+
+ // know three peers, connected to all three
+ // healthy
+ On(k, "00000000")
+ assertHealth(t, k, true, false)
+
+ // add fourth peer deeper than current depth
+ // unhealthy
+ Register(k, "11110000")
+ log.Trace(k.String())
+ assertHealth(t, k, false, false)
+
+ // connected to three deepest peers
+ // healthy
+ On(k, "11110000")
+ assertHealth(t, k, true, false)
+
+ // add additional peer in same bin as deepest peer
+ // unhealthy
+ Register(k, "11111101")
+ log.Trace(k.String())
+ assertHealth(t, k, false, false)
+
+ // four deepest of five peers connected
+ // healthy
+ On(k, "11111101")
+ assertHealth(t, k, true, false)
+}
+
+func assertHealth(t *testing.T, k *Kademlia, expectHealthy bool, expectSaturation bool) {
+ t.Helper()
+ kid := common.Bytes2Hex(k.BaseAddr())
+ addrs := [][]byte{k.BaseAddr()}
+ k.EachAddr(nil, 255, func(addr *BzzAddr, po int) bool {
+ addrs = append(addrs, addr.Address())
+ return true
+ })
+
+ pp := NewPeerPotMap(k.NeighbourhoodSize, addrs)
+ healthParams := k.Healthy(pp[kid])
+
+ // definition of health, all conditions must be true:
+ // - we know at least one peer
+ // - we know all neighbors
+ // - we are connected to all known neighbors
+ health := healthParams.KnowNN && healthParams.ConnectNN && healthParams.CountKnowNN > 0
+ if expectHealthy != health {
+ t.Fatalf("expected kademlia health %v, is %v\n%v", expectHealthy, health, k.String())
+ }
+}
+
func testSuggestPeer(k *Kademlia, expAddr string, expPo int, expWant bool) error {
addr, o, want := k.SuggestPeer()
+ log.Trace("suggestpeer return", "a", addr, "o", o, "want", want)
if binStr(addr) != expAddr {
return fmt.Errorf("incorrect peer address suggested. expected %v, got %v", expAddr, binStr(addr))
}
@@ -94,6 +272,7 @@ func binStr(a *BzzAddr) string {
return pot.ToBin(a.Address())[:8]
}
+// TODO explain why this bug occurred and how it should have been mitigated
func TestSuggestPeerBug(t *testing.T) {
// 2 row gap, unsaturated proxbin, no callables -> want PO 0
k := newTestKademlia("00000000")
@@ -113,72 +292,98 @@ func TestSuggestPeerBug(t *testing.T) {
}
func TestSuggestPeerFindPeers(t *testing.T) {
+ t.Skip("The SuggestPeers implementation seems to have weaknesses exposed by the change in the new depth calculation. The results are no longer predictable")
+
+ testnum := 0
+ // test 0
// 2 row gap, unsaturated proxbin, no callables -> want PO 0
k := newTestKademlia("00000000")
On(k, "00100000")
err := testSuggestPeer(k, "", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 1
// 2 row gap, saturated proxbin, no callables -> want PO 0
On(k, "00010000")
err = testSuggestPeer(k, "", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 2
// 1 row gap (1 less), saturated proxbin, no callables -> want PO 1
On(k, "10000000")
err = testSuggestPeer(k, "", 1, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 3
// no gap (1 less), saturated proxbin, no callables -> do not want more
On(k, "01000000", "00100001")
err = testSuggestPeer(k, "", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 4
// oversaturated proxbin, > do not want more
On(k, "00100001")
err = testSuggestPeer(k, "", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 5
// reintroduce gap, disconnected peer callable
Off(k, "01000000")
+ log.Trace(k.String())
err = testSuggestPeer(k, "01000000", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 6
// second time disconnected peer not callable
// with reasonably set Interval
- err = testSuggestPeer(k, "", 1, true)
+ log.Trace("foo")
+ log.Trace(k.String())
+ err = testSuggestPeer(k, "", 1, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 7
// on and off again, peer callable again
On(k, "01000000")
Off(k, "01000000")
+ log.Trace(k.String())
err = testSuggestPeer(k, "01000000", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
- On(k, "01000000")
+ // test 8
// new closer peer appears, it is immediately wanted
+ On(k, "01000000")
Register(k, "00010001")
err = testSuggestPeer(k, "00010001", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 9
// PO1 disconnects
On(k, "00010001")
log.Info(k.String())
@@ -187,70 +392,94 @@ func TestSuggestPeerFindPeers(t *testing.T) {
// second time, gap filling
err = testSuggestPeer(k, "01000000", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 10
On(k, "01000000")
+ log.Info(k.String())
err = testSuggestPeer(k, "", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 11
k.MinBinSize = 2
+ log.Info(k.String())
err = testSuggestPeer(k, "", 0, true)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 12
Register(k, "01000001")
+ log.Info(k.String())
err = testSuggestPeer(k, "01000001", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 13
On(k, "10000001")
log.Trace(fmt.Sprintf("Kad:\n%v", k.String()))
err = testSuggestPeer(k, "", 1, true)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 14
On(k, "01000001")
err = testSuggestPeer(k, "", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 15
k.MinBinSize = 3
Register(k, "10000010")
err = testSuggestPeer(k, "10000010", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 16
On(k, "10000010")
err = testSuggestPeer(k, "", 1, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 17
On(k, "01000010")
err = testSuggestPeer(k, "", 2, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 18
On(k, "00100010")
err = testSuggestPeer(k, "", 3, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
+ // test 19
On(k, "00010010")
err = testSuggestPeer(k, "", 0, false)
if err != nil {
- t.Fatal(err.Error())
+ t.Fatalf("%d %v", testnum, err.Error())
}
+ testnum++
}
@@ -376,7 +605,7 @@ func TestKademliaHiveString(t *testing.T) {
Register(k, "10000000", "10000001")
k.MaxProxDisplay = 8
h := k.String()
- expH := "\n=========================================================================\nMon Feb 27 12:10:28 UTC 2017 KΛÐΞMLIΛ hive: queen's address: 000000\npopulation: 2 (4), MinProxBinSize: 2, MinBinSize: 1, MaxBinSize: 4\n000 0 | 2 8100 (0) 8000 (0)\n============ DEPTH: 1 ==========================================\n001 1 4000 | 1 4000 (0)\n002 1 2000 | 1 2000 (0)\n003 0 | 0\n004 0 | 0\n005 0 | 0\n006 0 | 0\n007 0 | 0\n========================================================================="
+ expH := "\n=========================================================================\nMon Feb 27 12:10:28 UTC 2017 KΛÐΞMLIΛ hive: queen's address: 000000\npopulation: 2 (4), NeighbourhoodSize: 2, MinBinSize: 1, MaxBinSize: 4\n============ DEPTH: 0 ==========================================\n000 0 | 2 8100 (0) 8000 (0)\n001 1 4000 | 1 4000 (0)\n002 1 2000 | 1 2000 (0)\n003 0 | 0\n004 0 | 0\n005 0 | 0\n006 0 | 0\n007 0 | 0\n========================================================================="
if expH[104:] != h[104:] {
t.Fatalf("incorrect hive output. expected %v, got %v", expH, h)
}
@@ -386,27 +615,28 @@ func TestKademliaHiveString(t *testing.T) {
// the SuggestPeer and Healthy methods for provided hex-encoded addresses.
// Argument pivotAddr is the address of the kademlia.
func testKademliaCase(t *testing.T, pivotAddr string, addrs ...string) {
- addr := common.FromHex(pivotAddr)
- addrs = append(addrs, pivotAddr)
+
+ t.Skip("this test relies on SuggestPeer which is now not reliable. See description in TestSuggestPeerFindPeers")
+ addr := common.Hex2Bytes(pivotAddr)
+ var byteAddrs [][]byte
+ for _, ahex := range addrs {
+ byteAddrs = append(byteAddrs, common.Hex2Bytes(ahex))
+ }
k := NewKademlia(addr, NewKadParams())
- as := make([][]byte, len(addrs))
- for i, a := range addrs {
- as[i] = common.FromHex(a)
- }
-
- for _, a := range as {
+ // register all addresses, skipping the pivot's own address
+ for _, a := range byteAddrs {
if bytes.Equal(a, addr) {
continue
}
p := &BzzAddr{OAddr: a, UAddr: a}
if err := k.Register(p); err != nil {
- t.Fatal(err)
+ t.Fatalf("a %x addr %x: %v", a, addr, err)
}
}
- ppmap := NewPeerPotMap(2, as)
+ ppmap := NewPeerPotMap(k.NeighbourhoodSize, byteAddrs)
pp := ppmap[pivotAddr]
@@ -419,7 +649,7 @@ func testKademliaCase(t *testing.T, pivotAddr string, addrs ...string) {
}
h := k.Healthy(pp)
- if !(h.GotNN && h.KnowNN && h.Full) {
+ if !(h.ConnectNN && h.KnowNN && h.CountKnowNN > 0) {
t.Fatalf("not healthy: %#v\n%v", h, k.String())
}
}
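// Illustrative sketch of the PeerPot lookup pattern used above, assuming
// NewPeerPotMap keys entries by the hex-encoded overlay address; the
// helper name is hypothetical:
func peerPotFor(k *Kademlia, addrs [][]byte) *PeerPot {
	ppmap := NewPeerPotMap(k.NeighbourhoodSize, addrs)
	return ppmap[common.Bytes2Hex(k.BaseAddr())]
}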
@@ -432,7 +662,7 @@ in higher level tests for streaming. They were generated randomly.
=========================================================================
Mon Apr 9 12:18:24 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 7efef1
-population: 9 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
+population: 9 (49), NeighbourhoodSize: 2, MinBinSize: 2, MaxBinSize: 4
000 2 d7e5 ec56 | 18 ec56 (0) d7e5 (0) d9e0 (0) c735 (0)
001 2 18f1 3176 | 14 18f1 (0) 10bb (0) 10d1 (0) 0421 (0)
002 2 52aa 47cd | 11 52aa (0) 51d9 (0) 5161 (0) 5130 (0)
@@ -515,7 +745,7 @@ in higher level tests for streaming. They were generated randomly.
=========================================================================
Mon Apr 9 18:43:48 UTC 2018 KΛÐΞMLIΛ hive: queen's address: bc7f3b
-population: 9 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
+population: 9 (49), NeighbourhoodSize: 2, MinBinSize: 2, MaxBinSize: 4
000 2 0f49 67ff | 28 0f49 (0) 0211 (0) 07b2 (0) 0703 (0)
001 2 e84b f3a4 | 13 f3a4 (0) e84b (0) e58b (0) e60b (0)
002 1 8dba | 1 8dba (0)
@@ -549,7 +779,7 @@ in higher level tests for streaming. They were generated randomly.
=========================================================================
Mon Apr 9 19:04:35 UTC 2018 KΛÐΞMLIΛ hive: queen's address: b4822e
-population: 8 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
+population: 8 (49), NeighbourhoodSize: 2, MinBinSize: 2, MaxBinSize: 4
000 2 786c 774b | 29 774b (0) 786c (0) 7a79 (0) 7d2f (0)
001 2 d9de cf19 | 10 cf19 (0) d9de (0) d2ff (0) d2a2 (0)
002 2 8ca1 8d74 | 5 8d74 (0) 8ca1 (0) 9793 (0) 9f51 (0)
@@ -583,7 +813,7 @@ in higher level tests for streaming. They were generated randomly.
=========================================================================
Mon Apr 9 19:16:25 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 9a90fe
-population: 8 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
+population: 8 (49), NeighbourhoodSize: 2, MinBinSize: 2, MaxBinSize: 4
000 2 72ef 4e6c | 24 0b1e (0) 0d66 (0) 17f5 (0) 17e8 (0)
001 2 fc2b fa47 | 13 fa47 (0) fc2b (0) fffd (0) ecef (0)
002 2 b847 afa8 | 6 afa8 (0) ad77 (0) bb7c (0) b847 (0)
@@ -618,7 +848,7 @@ in higher level tests for streaming. They were generated randomly.
=========================================================================
Mon Apr 9 19:25:18 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 5dd5c7
-population: 13 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
+population: 13 (49), NeighbourhoodSize: 2, MinBinSize: 2, MaxBinSize: 4
000 2 e528 fad0 | 22 fad0 (0) e528 (0) e3bb (0) ed13 (0)
001 3 3f30 18e0 1dd3 | 7 3f30 (0) 23db (0) 10b6 (0) 18e0 (0)
002 4 7c54 7804 61e4 60f9 | 10 61e4 (0) 60f9 (0) 636c (0) 7186 (0)
@@ -644,3 +874,17 @@ func TestKademliaCase5(t *testing.T) {
"78fafa0809929a1279ece089a51d12457c2d8416dff859aeb2ccc24bb50df5ec", "1dd39b1257e745f147cbbc3cadd609ccd6207c41056dbc4254bba5d2527d3ee5", "5f61dd66d4d94aec8fcc3ce0e7885c7edf30c43143fa730e2841c5d28e3cd081", "8aa8b0472cb351d967e575ad05c4b9f393e76c4b01ef4b3a54aac5283b78abc9", "4502f385152a915b438a6726ce3ea9342e7a6db91a23c2f6bee83a885ed7eb82", "718677a504249db47525e959ef1784bed167e1c46f1e0275b9c7b588e28a3758", "7c54c6ed1f8376323896ed3a4e048866410de189e9599dd89bf312ca4adb96b5", "18e03bd3378126c09e799a497150da5c24c895aedc84b6f0dbae41fc4bac081a", "23db76ac9e6e58d9f5395ca78252513a7b4118b4155f8462d3d5eec62486cadc", "40ae0e8f065e96c7adb7fa39505136401f01780481e678d718b7f6dbb2c906ec", "c1539998b8bae19d339d6bbb691f4e9daeb0e86847545229e80fe0dffe716e92", "ed139d73a2699e205574c08722ca9f030ad2d866c662f1112a276b91421c3cb9", "5bdb19584b7a36d09ca689422ef7e6bb681b8f2558a6b2177a8f7c812f631022", "636c9de7fe234ffc15d67a504c69702c719f626c17461d3f2918e924cd9d69e2", "de4455413ff9335c440d52458c6544191bd58a16d85f700c1de53b62773064ea", "de1963310849527acabc7885b6e345a56406a8f23e35e436b6d9725e69a79a83", "a80a50a467f561210a114cba6c7fb1489ed43a14d61a9edd70e2eb15c31f074d", "7804f12b8d8e6e4b375b242058242068a3809385e05df0e64973cde805cf729c", "60f9aa320c02c6f2e6370aa740cf7cea38083fa95fca8c99552cda52935c1520", "d8da963602390f6c002c00ce62a84b514edfce9ebde035b277a957264bb54d21", "8463d93256e026fe436abad44697152b9a56ac8e06a0583d318e9571b83d073c", "9a3f78fcefb9a05e40a23de55f6153d7a8b9d973ede43a380bf46bb3b3847de1", "e3bb576f4b3760b9ca6bff59326f4ebfc4a669d263fb7d67ab9797adea54ed13", "4d5cdbd6dcca5bdf819a0fe8d175dc55cc96f088d37462acd5ea14bc6296bdbe", "5a0ed28de7b5258c727cb85447071c74c00a5fbba9e6bc0393bc51944d04ab2a", "61e4ddb479c283c638f4edec24353b6cc7a3a13b930824aad016b0996ca93c47", "7e3610868acf714836cafaaa7b8c009a9ac6e3a6d443e5586cf661530a204ee2", "d74b244d4345d2c86e30a097105e4fb133d53c578320285132a952cdaa64416e", "cfeed57d0f935bfab89e3f630a7c97e0b1605f0724d85a008bbfb92cb47863a8", "580837af95055670e20d494978f60c7f1458dc4b9e389fc7aa4982b2aca3bce3", "df55c0c49e6c8a83d82dfa1c307d3bf6a20e18721c80d8ec4f1f68dc0a137ced", "5f149c51ce581ba32a285439a806c063ced01ccd4211cd024e6a615b8f216f95", "1eb76b00aeb127b10dd1b7cd4c3edeb4d812b5a658f0feb13e85c4d2b7c6fe06", "7a56ba7c3fb7cbfb5561a46a75d95d7722096b45771ec16e6fa7bbfab0b35dfe", "4bae85ad88c28470f0015246d530adc0cd1778bdd5145c3c6b538ee50c4e04bd", "afd1892e2a7145c99ec0ebe9ded0d3fec21089b277a68d47f45961ec5e39e7e0", "953138885d7b36b0ef79e46030f8e61fd7037fbe5ce9e0a94d728e8c8d7eab86", "de761613ef305e4f628cb6bf97d7b7dc69a9d513dc233630792de97bcda777a6", "3f3087280063d09504c084bbf7fdf984347a72b50d097fd5b086ffabb5b3fb4c", "7d18a94bb1ebfdef4d3e454d2db8cb772f30ca57920dd1e402184a9e598581a0", "a7d6fbdc9126d9f10d10617f49fb9f5474ffe1b229f76b7dd27cebba30eccb5d", "fad0246303618353d1387ec10c09ee991eb6180697ed3470ed9a6b377695203d", "1cf66e09ea51ee5c23df26615a9e7420be2ac8063f28f60a3bc86020e94fe6f3", "8269cdaa153da7c358b0b940791af74d7c651cd4d3f5ed13acfe6d0f2c539e7f", "90d52eaaa60e74bf1c79106113f2599471a902d7b1c39ac1f55b20604f453c09", "9788fd0c09190a3f3d0541f68073a2f44c2fcc45bb97558a7c319f36c25a75b3", "10b68fc44157ecfdae238ee6c1ce0333f906ad04d1a4cb1505c8e35c3c87fbb0", "e5284117fdf3757920475c786e0004cb00ba0932163659a89b36651a01e57394", "403ad51d911e113dcd5f9ff58c94f6d278886a2a4da64c3ceca2083282c92de3",
)
}
+
+func newTestDiscoveryPeer(addr pot.Address, kad *Kademlia) *Peer {
+ rw := &p2p.MsgPipeRW{}
+ p := p2p.NewPeer(enode.ID{}, "foo", []p2p.Cap{})
+ pp := protocols.NewPeer(p, rw, &protocols.Spec{})
+ bp := &BzzPeer{
+ Peer: pp,
+ BzzAddr: &BzzAddr{
+ OAddr: addr.Bytes(),
+ UAddr: []byte(fmt.Sprintf("%x", addr[:])),
+ },
+ }
+ return NewPeer(bp, kad)
+}
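// Hypothetical usage of the helper above, for illustration only (this call
// site is an assumption, not part of the change):
//
//	k := NewKademlia(pot.RandomAddress().Bytes(), NewKadParams())
//	p := newTestDiscoveryPeer(pot.RandomAddress(), k)
//	k.On(p)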
diff --git a/swarm/network/networkid_test.go b/swarm/network/networkid_test.go
index d1d359de6..99890118f 100644
--- a/swarm/network/networkid_test.go
+++ b/swarm/network/networkid_test.go
@@ -92,11 +92,10 @@ func TestNetworkID(t *testing.T) {
if kademlias[node].addrs.Size() != len(netIDGroup)-1 {
t.Fatalf("Kademlia size has not expected peer size. Kademlia size: %d, expected size: %d", kademlias[node].addrs.Size(), len(netIDGroup)-1)
}
- kademlias[node].EachAddr(nil, 0, func(addr *BzzAddr, _ int, _ bool) bool {
+ kademlias[node].EachAddr(nil, 0, func(addr *BzzAddr, _ int) bool {
found := false
for _, nd := range netIDGroup {
- p := nd.Bytes()
- if bytes.Equal(p, addr.Address()) {
+ if bytes.Equal(kademlias[nd].BaseAddr(), addr.Address()) {
found = true
}
}
@@ -189,7 +188,7 @@ func newServices() adapters.Services {
return k
}
params := NewKadParams()
- params.MinProxBinSize = 2
+ params.NeighbourhoodSize = 2
params.MaxBinSize = 3
params.MinBinSize = 1
params.MaxRetries = 1000
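// The EachAddr/EachConn callbacks drop the nearest-neighbour flag in this
// change; a minimal sketch of the new iterator shape (the helper name is
// hypothetical):
func countKnownAddrs(k *network.Kademlia) (n int) {
	k.EachAddr(nil, 255, func(addr *network.BzzAddr, po int) bool {
		n++
		return true // keep iterating across all proximity orders
	})
	return n
}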
diff --git a/swarm/network/protocol.go b/swarm/network/protocol.go
index 66ae94a88..a4b29239c 100644
--- a/swarm/network/protocol.go
+++ b/swarm/network/protocol.go
@@ -35,8 +35,6 @@ import (
const (
DefaultNetworkID = 3
- // ProtocolMaxMsgSize maximum allowed message size
- ProtocolMaxMsgSize = 10 * 1024 * 1024
// timeout for waiting
bzzHandshakeTimeout = 3000 * time.Millisecond
)
@@ -44,7 +42,7 @@ const (
// BzzSpec is the spec of the generic swarm handshake
var BzzSpec = &protocols.Spec{
Name: "bzz",
- Version: 7,
+ Version: 8,
MaxMsgSize: 10 * 1024 * 1024,
Messages: []interface{}{
HandshakeMsg{},
@@ -54,7 +52,7 @@ var BzzSpec = &protocols.Spec{
// DiscoverySpec is the spec for the bzz discovery subprotocols
var DiscoverySpec = &protocols.Spec{
Name: "hive",
- Version: 6,
+ Version: 8,
MaxMsgSize: 10 * 1024 * 1024,
Messages: []interface{}{
peersMsg{},
@@ -250,11 +248,6 @@ func NewBzzPeer(p *protocols.Peer) *BzzPeer {
return &BzzPeer{Peer: p, BzzAddr: NewAddr(p.Node())}
}
-// LastActive returns the time the peer was last active
-func (p *BzzPeer) LastActive() time.Time {
- return p.lastActive
-}
-
// ID returns the peer's underlay node identifier.
func (p *BzzPeer) ID() enode.ID {
// This is here to resolve a method tie: both protocols.Peer and BzzAddr are embedded
diff --git a/swarm/network/protocol_test.go b/swarm/network/protocol_test.go
index f0d266628..58477a7b8 100644
--- a/swarm/network/protocol_test.go
+++ b/swarm/network/protocol_test.go
@@ -20,7 +20,6 @@ import (
"flag"
"fmt"
"os"
- "sync"
"testing"
"github.com/ethereum/go-ethereum/log"
@@ -31,7 +30,7 @@ import (
)
const (
- TestProtocolVersion = 7
+ TestProtocolVersion = 8
TestProtocolNetworkID = 3
)
@@ -44,31 +43,7 @@ func init() {
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
}
-type testStore struct {
- sync.Mutex
-
- values map[string][]byte
-}
-
-func (t *testStore) Load(key string) ([]byte, error) {
- t.Lock()
- defer t.Unlock()
- v, ok := t.values[key]
- if !ok {
- return nil, fmt.Errorf("key not found: %s", key)
- }
- return v, nil
-}
-
-func (t *testStore) Save(key string, v []byte) error {
- t.Lock()
- defer t.Unlock()
- t.values[key] = v
- return nil
-}
-
func HandshakeMsgExchange(lhs, rhs *HandshakeMsg, id enode.ID) []p2ptest.Exchange {
-
return []p2ptest.Exchange{
{
Expects: []p2ptest.Expect{
diff --git a/swarm/network/simulation/bucket.go b/swarm/network/simulation/bucket.go
index bd15ea2ab..49a1f4309 100644
--- a/swarm/network/simulation/bucket.go
+++ b/swarm/network/simulation/bucket.go
@@ -21,7 +21,7 @@ import "github.com/ethereum/go-ethereum/p2p/enode"
// BucketKey is the type that should be used for keys in simulation buckets.
type BucketKey string
-// NodeItem returns an item set in ServiceFunc function for a particualar node.
+// NodeItem returns an item set in the ServiceFunc function for a particular node.
func (s *Simulation) NodeItem(id enode.ID, key interface{}) (value interface{}, ok bool) {
s.mu.Lock()
defer s.mu.Unlock()
diff --git a/swarm/network/simulation/bucket_test.go b/swarm/network/simulation/bucket_test.go
index 69df19bfe..2273d35a2 100644
--- a/swarm/network/simulation/bucket_test.go
+++ b/swarm/network/simulation/bucket_test.go
@@ -24,7 +24,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)
-// TestServiceBucket tests all bucket functionalities using subtests.
+// TestServiceBucket tests all bucket functionality using subtests.
// It constructs a simulation of two nodes by adding items to their buckets
// in ServiceFunc constructor, then by SetNodeItem. Testing UpNodesItems
// is done by stopping one node and validating availability of its items.
diff --git a/swarm/network/simulation/connect.go b/swarm/network/simulation/connect.go
deleted file mode 100644
index 8b2aa1bfa..000000000
--- a/swarm/network/simulation/connect.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package simulation
-
-import (
- "strings"
-
- "github.com/ethereum/go-ethereum/p2p/enode"
-)
-
-// ConnectToPivotNode connects the node with provided NodeID
-// to the pivot node, already set by Simulation.SetPivotNode method.
-// It is useful when constructing a star network topology
-// when simulation adds and removes nodes dynamically.
-func (s *Simulation) ConnectToPivotNode(id enode.ID) (err error) {
- pid := s.PivotNodeID()
- if pid == nil {
- return ErrNoPivotNode
- }
- return s.connect(*pid, id)
-}
-
-// ConnectToLastNode connects the node with provided NodeID
-// to the last node that is up, and avoiding connection to self.
-// It is useful when constructing a chain network topology
-// when simulation adds and removes nodes dynamically.
-func (s *Simulation) ConnectToLastNode(id enode.ID) (err error) {
- ids := s.UpNodeIDs()
- l := len(ids)
- if l < 2 {
- return nil
- }
- lid := ids[l-1]
- if lid == id {
- lid = ids[l-2]
- }
- return s.connect(lid, id)
-}
-
-// ConnectToRandomNode connects the node with provieded NodeID
-// to a random node that is up.
-func (s *Simulation) ConnectToRandomNode(id enode.ID) (err error) {
- n := s.RandomUpNode(id)
- if n == nil {
- return ErrNodeNotFound
- }
- return s.connect(n.ID, id)
-}
-
-// ConnectNodesFull connects all nodes one to another.
-// It provides a complete connectivity in the network
-// which should be rarely needed.
-func (s *Simulation) ConnectNodesFull(ids []enode.ID) (err error) {
- if ids == nil {
- ids = s.UpNodeIDs()
- }
- l := len(ids)
- for i := 0; i < l; i++ {
- for j := i + 1; j < l; j++ {
- err = s.connect(ids[i], ids[j])
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// ConnectNodesChain connects all nodes in a chain topology.
-// If ids argument is nil, all nodes that are up will be connected.
-func (s *Simulation) ConnectNodesChain(ids []enode.ID) (err error) {
- if ids == nil {
- ids = s.UpNodeIDs()
- }
- l := len(ids)
- for i := 0; i < l-1; i++ {
- err = s.connect(ids[i], ids[i+1])
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// ConnectNodesRing connects all nodes in a ring topology.
-// If ids argument is nil, all nodes that are up will be connected.
-func (s *Simulation) ConnectNodesRing(ids []enode.ID) (err error) {
- if ids == nil {
- ids = s.UpNodeIDs()
- }
- l := len(ids)
- if l < 2 {
- return nil
- }
- for i := 0; i < l-1; i++ {
- err = s.connect(ids[i], ids[i+1])
- if err != nil {
- return err
- }
- }
- return s.connect(ids[l-1], ids[0])
-}
-
-// ConnectNodesStar connects all nodes in a star topology
-// with the center at provided NodeID.
-// If ids argument is nil, all nodes that are up will be connected.
-func (s *Simulation) ConnectNodesStar(id enode.ID, ids []enode.ID) (err error) {
- if ids == nil {
- ids = s.UpNodeIDs()
- }
- l := len(ids)
- for i := 0; i < l; i++ {
- if id == ids[i] {
- continue
- }
- err = s.connect(id, ids[i])
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// ConnectNodesStarPivot connects all nodes in a star topology
-// with the center at already set pivot node.
-// If ids argument is nil, all nodes that are up will be connected.
-func (s *Simulation) ConnectNodesStarPivot(ids []enode.ID) (err error) {
- id := s.PivotNodeID()
- if id == nil {
- return ErrNoPivotNode
- }
- return s.ConnectNodesStar(*id, ids)
-}
-
-// connect connects two nodes but ignores already connected error.
-func (s *Simulation) connect(oneID, otherID enode.ID) error {
- return ignoreAlreadyConnectedErr(s.Net.Connect(oneID, otherID))
-}
-
-func ignoreAlreadyConnectedErr(err error) error {
- if err == nil || strings.Contains(err.Error(), "already connected") {
- return nil
- }
- return err
-}
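// The topology helpers deleted above now live on p2p/simulations.Network;
// a minimal sketch of a replacement call site, assuming the Net-based API
// used elsewhere in this patch (the helper name is hypothetical):
func connectRing(sim *simulation.Simulation) error {
	return sim.Net.ConnectNodesRing(sim.UpNodeIDs())
}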
diff --git a/swarm/network/simulation/connect_test.go b/swarm/network/simulation/connect_test.go
deleted file mode 100644
index 6c94b3a01..000000000
--- a/swarm/network/simulation/connect_test.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package simulation
-
-import (
- "testing"
-
- "github.com/ethereum/go-ethereum/p2p/enode"
-)
-
-func TestConnectToPivotNode(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- pid, err := sim.AddNode()
- if err != nil {
- t.Fatal(err)
- }
-
- sim.SetPivotNode(pid)
-
- id, err := sim.AddNode()
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- err = sim.ConnectToPivotNode(id)
- if err != nil {
- t.Fatal(err)
- }
-
- if sim.Net.GetConn(id, pid) == nil {
- t.Error("node did not connect to pivot node")
- }
-}
-
-func TestConnectToLastNode(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- n := 10
-
- ids, err := sim.AddNodes(n)
- if err != nil {
- t.Fatal(err)
- }
-
- id, err := sim.AddNode()
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- err = sim.ConnectToLastNode(id)
- if err != nil {
- t.Fatal(err)
- }
-
- for _, i := range ids[:n-2] {
- if sim.Net.GetConn(id, i) != nil {
- t.Error("node connected to the node that is not the last")
- }
- }
-
- if sim.Net.GetConn(id, ids[n-1]) == nil {
- t.Error("node did not connect to the last node")
- }
-}
-
-func TestConnectToRandomNode(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- n := 10
-
- ids, err := sim.AddNodes(n)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- err = sim.ConnectToRandomNode(ids[0])
- if err != nil {
- t.Fatal(err)
- }
-
- var cc int
- for i := 0; i < n; i++ {
- for j := i + 1; j < n; j++ {
- if sim.Net.GetConn(ids[i], ids[j]) != nil {
- cc++
- }
- }
- }
-
- if cc != 1 {
- t.Errorf("expected one connection, got %v", cc)
- }
-}
-
-func TestConnectNodesFull(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- ids, err := sim.AddNodes(12)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- err = sim.ConnectNodesFull(ids)
- if err != nil {
- t.Fatal(err)
- }
-
- testFull(t, sim, ids)
-}
-
-func testFull(t *testing.T, sim *Simulation, ids []enode.ID) {
- n := len(ids)
- var cc int
- for i := 0; i < n; i++ {
- for j := i + 1; j < n; j++ {
- if sim.Net.GetConn(ids[i], ids[j]) != nil {
- cc++
- }
- }
- }
-
- want := n * (n - 1) / 2
-
- if cc != want {
- t.Errorf("expected %v connection, got %v", want, cc)
- }
-}
-
-func TestConnectNodesChain(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- ids, err := sim.AddNodes(10)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- err = sim.ConnectNodesChain(ids)
- if err != nil {
- t.Fatal(err)
- }
-
- testChain(t, sim, ids)
-}
-
-func testChain(t *testing.T, sim *Simulation, ids []enode.ID) {
- n := len(ids)
- for i := 0; i < n; i++ {
- for j := i + 1; j < n; j++ {
- c := sim.Net.GetConn(ids[i], ids[j])
- if i == j-1 {
- if c == nil {
- t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
- }
- } else {
- if c != nil {
- t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
- }
- }
- }
- }
-}
-
-func TestConnectNodesRing(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- ids, err := sim.AddNodes(10)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- err = sim.ConnectNodesRing(ids)
- if err != nil {
- t.Fatal(err)
- }
-
- testRing(t, sim, ids)
-}
-
-func testRing(t *testing.T, sim *Simulation, ids []enode.ID) {
- n := len(ids)
- for i := 0; i < n; i++ {
- for j := i + 1; j < n; j++ {
- c := sim.Net.GetConn(ids[i], ids[j])
- if i == j-1 || (i == 0 && j == n-1) {
- if c == nil {
- t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
- }
- } else {
- if c != nil {
- t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
- }
- }
- }
- }
-}
-
-func TestConnectToNodesStar(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- ids, err := sim.AddNodes(10)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- centerIndex := 2
-
- err = sim.ConnectNodesStar(ids[centerIndex], ids)
- if err != nil {
- t.Fatal(err)
- }
-
- testStar(t, sim, ids, centerIndex)
-}
-
-func testStar(t *testing.T, sim *Simulation, ids []enode.ID, centerIndex int) {
- n := len(ids)
- for i := 0; i < n; i++ {
- for j := i + 1; j < n; j++ {
- c := sim.Net.GetConn(ids[i], ids[j])
- if i == centerIndex || j == centerIndex {
- if c == nil {
- t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
- }
- } else {
- if c != nil {
- t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
- }
- }
- }
- }
-}
-
-func TestConnectToNodesStarPivot(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- ids, err := sim.AddNodes(10)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(sim.Net.Conns) > 0 {
- t.Fatal("no connections should exist after just adding nodes")
- }
-
- pivotIndex := 4
-
- sim.SetPivotNode(ids[pivotIndex])
-
- err = sim.ConnectNodesStarPivot(ids)
- if err != nil {
- t.Fatal(err)
- }
-
- testStar(t, sim, ids, pivotIndex)
-}
diff --git a/swarm/network/simulation/events_test.go b/swarm/network/simulation/events_test.go
index 0c185d977..529844816 100644
--- a/swarm/network/simulation/events_test.go
+++ b/swarm/network/simulation/events_test.go
@@ -59,7 +59,7 @@ func TestPeerEvents(t *testing.T) {
}
}()
- err = sim.ConnectNodesChain(sim.NodeIDs())
+ err = sim.Net.ConnectNodesChain(sim.NodeIDs())
if err != nil {
t.Fatal(err)
}
@@ -81,6 +81,7 @@ func TestPeerEventsTimeout(t *testing.T) {
events := sim.PeerEvents(ctx, sim.NodeIDs())
done := make(chan struct{})
+ errC := make(chan error)
go func() {
for e := range events {
if e.Error == context.Canceled {
@@ -90,14 +91,16 @@ func TestPeerEventsTimeout(t *testing.T) {
close(done)
return
} else {
- t.Fatal(e.Error)
+ errC <- e.Error
}
}
}()
select {
case <-time.After(time.Second):
- t.Error("no context deadline received")
+ t.Fatal("no context deadline received")
+ case err := <-errC:
+ t.Fatal(err)
case <-done:
// all good, context deadline detected
}
diff --git a/swarm/network/simulation/example_test.go b/swarm/network/simulation/example_test.go
index bacc64d53..9d1492979 100644
--- a/swarm/network/simulation/example_test.go
+++ b/swarm/network/simulation/example_test.go
@@ -31,8 +31,9 @@ import (
// Every node can have a Kademlia associated using the node bucket under
// BucketKeyKademlia key. This allows to use WaitTillHealthy to block until
-// all nodes have the their Kadmlias healthy.
+// all nodes have their Kademlias healthy.
func ExampleSimulation_WaitTillHealthy() {
+
sim := simulation.New(map[string]simulation.ServiceFunc{
"bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
addr := network.NewAddr(ctx.Config.Node())
@@ -60,7 +61,7 @@ func ExampleSimulation_WaitTillHealthy() {
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
- ill, err := sim.WaitTillHealthy(ctx, 2)
+ ill, err := sim.WaitTillHealthy(ctx)
if err != nil {
// inspect the latest detected not healthy kademlias
for id, kad := range ill {
@@ -71,6 +72,7 @@ func ExampleSimulation_WaitTillHealthy() {
}
// continue with the test
+
}
// Watch all peer events in the simulation network, by receiving from a channel.
diff --git a/swarm/network/simulation/http_test.go b/swarm/network/simulation/http_test.go
index 775cf9219..dffd03a03 100644
--- a/swarm/network/simulation/http_test.go
+++ b/swarm/network/simulation/http_test.go
@@ -73,7 +73,8 @@ func TestSimulationWithHTTPServer(t *testing.T) {
//this time the timeout should be long enough so that it doesn't kick in too early
ctx, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel2()
- go sendRunSignal(t)
+ errC := make(chan error, 1)
+ go triggerSimulationRun(t, errC)
result = sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
log.Debug("This run waits for the run signal from `frontend`...")
//ensure with a Sleep that simulation doesn't terminate before the signal is received
@@ -83,10 +84,13 @@ func TestSimulationWithHTTPServer(t *testing.T) {
if result.Error != nil {
t.Fatal(result.Error)
}
+ if err := <-errC; err != nil {
+ t.Fatal(err)
+ }
log.Debug("Test terminated successfully")
}
-func sendRunSignal(t *testing.T) {
+func triggerSimulationRun(t *testing.T, errC chan error) {
//We need to first wait for the sim HTTP server to start running...
time.Sleep(2 * time.Second)
//then we can send the signal
@@ -94,16 +98,13 @@ func sendRunSignal(t *testing.T) {
log.Debug("Sending run signal to simulation: POST /runsim...")
resp, err := http.Post(fmt.Sprintf("http://localhost%s/runsim", DefaultHTTPSimAddr), "application/json", nil)
if err != nil {
- t.Fatalf("Request failed: %v", err)
+ errC <- fmt.Errorf("Request failed: %v", err)
+ return
}
- defer func() {
- err := resp.Body.Close()
- if err != nil {
- log.Error("Error closing response body", "err", err)
- }
- }()
log.Debug("Signal sent")
if resp.StatusCode != http.StatusOK {
- t.Fatalf("err %s", resp.Status)
+ errC <- fmt.Errorf("err %s", resp.Status)
+ return
}
+ errC <- resp.Body.Close()
}
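// The change above moves failure reporting out of the goroutine, since
// t.Fatal must only be called from the test goroutine; a minimal sketch of
// the pattern (the helper name is hypothetical):
func runAsync(t *testing.T, f func() error) {
	t.Helper()
	errC := make(chan error, 1)
	go func() {
		errC <- f() // report instead of calling t.Fatal off the test goroutine
	}()
	if err := <-errC; err != nil {
		t.Fatal(err)
	}
}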
diff --git a/swarm/network/simulation/kademlia.go b/swarm/network/simulation/kademlia.go
index f895181d9..6d8d0e0a2 100644
--- a/swarm/network/simulation/kademlia.go
+++ b/swarm/network/simulation/kademlia.go
@@ -28,20 +28,22 @@ import (
)
// BucketKeyKademlia is the key to be used for storing the kademlia
-// instance for particuar node, usually inside the ServiceFunc function.
+// instance for particular node, usually inside the ServiceFunc function.
var BucketKeyKademlia BucketKey = "kademlia"
// WaitTillHealthy is blocking until the health of all kademlias is true.
// If error is not nil, a map of kademlia that was found not healthy is returned.
-func (s *Simulation) WaitTillHealthy(ctx context.Context, kadMinProxSize int) (ill map[enode.ID]*network.Kademlia, err error) {
+// TODO: Check correctness since the change in kademlia depth calculation logic
+func (s *Simulation) WaitTillHealthy(ctx context.Context) (ill map[enode.ID]*network.Kademlia, err error) {
// Prepare PeerPot map for checking Kademlia health
var ppmap map[string]*network.PeerPot
kademlias := s.kademlias()
addrs := make([][]byte, 0, len(kademlias))
+ // TODO verify that all kademlias have the same params
for _, k := range kademlias {
addrs = append(addrs, k.BaseAddr())
}
- ppmap = network.NewPeerPotMap(kadMinProxSize, addrs)
+ ppmap = network.NewPeerPotMap(s.neighbourhoodSize, addrs)
// Wait for healthy Kademlia on every node before checking files
ticker := time.NewTicker(200 * time.Millisecond)
@@ -65,10 +67,10 @@ func (s *Simulation) WaitTillHealthy(ctx context.Context, kadMinProxSize int) (i
h := k.Healthy(pp)
//print info
log.Debug(k.String())
- log.Debug("kademlia", "empty bins", pp.EmptyBins, "gotNN", h.GotNN, "knowNN", h.KnowNN, "full", h.Full)
- log.Debug("kademlia", "health", h.GotNN && h.KnowNN && h.Full, "addr", hex.EncodeToString(k.BaseAddr()), "node", id)
- log.Debug("kademlia", "ill condition", !h.GotNN || !h.Full, "addr", hex.EncodeToString(k.BaseAddr()), "node", id)
- if !h.GotNN || !h.Full {
+ log.Debug("kademlia", "connectNN", h.ConnectNN, "knowNN", h.KnowNN)
+ log.Debug("kademlia", "health", h.ConnectNN && h.KnowNN, "addr", hex.EncodeToString(k.BaseAddr()), "node", id)
+ log.Debug("kademlia", "ill condition", !h.ConnectNN, "addr", hex.EncodeToString(k.BaseAddr()), "node", id)
+ if !h.ConnectNN {
ill[id] = k
}
}
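// A sketch of the updated call site: the kadMinProxSize argument is gone,
// and the neighbourhood size comes from the Simulation itself (the helper
// name is hypothetical):
func waitHealthy(sim *simulation.Simulation) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	_, err := sim.WaitTillHealthy(ctx)
	return err
}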
diff --git a/swarm/network/simulation/kademlia_test.go b/swarm/network/simulation/kademlia_test.go
index 285644a0f..36b244d3d 100644
--- a/swarm/network/simulation/kademlia_test.go
+++ b/swarm/network/simulation/kademlia_test.go
@@ -28,11 +28,12 @@ import (
)
func TestWaitTillHealthy(t *testing.T) {
+ t.Skip("WaitTillHealthy depends on discovery, which relies on a reliable SuggestPeer, which is not reliable")
+
sim := New(map[string]ServiceFunc{
"bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
addr := network.NewAddr(ctx.Config.Node())
hp := network.NewHiveParams()
- hp.Discovery = false
config := &network.BzzConfig{
OverlayAddr: addr.Over(),
UnderlayAddr: addr.Under(),
@@ -54,7 +55,7 @@ func TestWaitTillHealthy(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
- ill, err := sim.WaitTillHealthy(ctx, 2)
+ ill, err := sim.WaitTillHealthy(ctx)
if err != nil {
for id, kad := range ill {
t.Log("Node", id)
diff --git a/swarm/network/simulation/node.go b/swarm/network/simulation/node.go
index a916d3fc2..08eb83524 100644
--- a/swarm/network/simulation/node.go
+++ b/swarm/network/simulation/node.go
@@ -127,7 +127,7 @@ func (s *Simulation) AddNodesAndConnectFull(count int, opts ...AddNodeOption) (i
if err != nil {
return nil, err
}
- err = s.ConnectNodesFull(ids)
+ err = s.Net.ConnectNodesFull(ids)
if err != nil {
return nil, err
}
@@ -145,7 +145,7 @@ func (s *Simulation) AddNodesAndConnectChain(count int, opts ...AddNodeOption) (
if err != nil {
return nil, err
}
- err = s.ConnectToLastNode(id)
+ err = s.Net.ConnectToLastNode(id)
if err != nil {
return nil, err
}
@@ -154,7 +154,7 @@ func (s *Simulation) AddNodesAndConnectChain(count int, opts ...AddNodeOption) (
return nil, err
}
ids = append([]enode.ID{id}, ids...)
- err = s.ConnectNodesChain(ids)
+ err = s.Net.ConnectNodesChain(ids)
if err != nil {
return nil, err
}
@@ -171,7 +171,7 @@ func (s *Simulation) AddNodesAndConnectRing(count int, opts ...AddNodeOption) (i
if err != nil {
return nil, err
}
- err = s.ConnectNodesRing(ids)
+ err = s.Net.ConnectNodesRing(ids)
if err != nil {
return nil, err
}
@@ -188,16 +188,16 @@ func (s *Simulation) AddNodesAndConnectStar(count int, opts ...AddNodeOption) (i
if err != nil {
return nil, err
}
- err = s.ConnectNodesStar(ids[0], ids[1:])
+ err = s.Net.ConnectNodesStar(ids[1:], ids[0])
if err != nil {
return nil, err
}
return ids, nil
}
-//UploadSnapshot uploads a snapshot to the simulation
-//This method tries to open the json file provided, applies the config to all nodes
-//and then loads the snapshot into the Simulation network
+// UploadSnapshot uploads a snapshot to the simulation.
+// This method tries to open the JSON file provided, applies the config to all nodes
+// and then loads the snapshot into the Simulation network.
func (s *Simulation) UploadSnapshot(snapshotFile string, opts ...AddNodeOption) error {
f, err := os.Open(snapshotFile)
if err != nil {
@@ -241,25 +241,6 @@ func (s *Simulation) UploadSnapshot(snapshotFile string, opts ...AddNodeOption)
return nil
}
-// SetPivotNode sets the NodeID of the network's pivot node.
-// Pivot node is just a specific node that should be treated
-// differently then other nodes in test. SetPivotNode and
-// PivotNodeID are just a convenient functions to set and
-// retrieve it.
-func (s *Simulation) SetPivotNode(id enode.ID) {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.pivotNodeID = &id
-}
-
-// PivotNodeID returns NodeID of the pivot node set by
-// Simulation.SetPivotNode method.
-func (s *Simulation) PivotNodeID() (id *enode.ID) {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.pivotNodeID
-}
-
// StartNode starts a node by NodeID.
func (s *Simulation) StartNode(id enode.ID) (err error) {
return s.Net.Start(id)
@@ -267,27 +248,26 @@ func (s *Simulation) StartNode(id enode.ID) (err error) {
// StartRandomNode starts a random node.
func (s *Simulation) StartRandomNode() (id enode.ID, err error) {
- n := s.randomDownNode()
+ n := s.Net.GetRandomDownNode()
if n == nil {
return id, ErrNodeNotFound
}
- return n.ID, s.Net.Start(n.ID)
+ return n.ID(), s.Net.Start(n.ID())
}
// StartRandomNodes starts random nodes.
func (s *Simulation) StartRandomNodes(count int) (ids []enode.ID, err error) {
ids = make([]enode.ID, 0, count)
- downIDs := s.DownNodeIDs()
for i := 0; i < count; i++ {
- n := s.randomNode(downIDs, ids...)
+ n := s.Net.GetRandomDownNode()
if n == nil {
return nil, ErrNodeNotFound
}
- err = s.Net.Start(n.ID)
+ err = s.Net.Start(n.ID())
if err != nil {
return nil, err
}
- ids = append(ids, n.ID)
+ ids = append(ids, n.ID())
}
return ids, nil
}
@@ -299,27 +279,26 @@ func (s *Simulation) StopNode(id enode.ID) (err error) {
// StopRandomNode stops a random node.
func (s *Simulation) StopRandomNode() (id enode.ID, err error) {
- n := s.RandomUpNode()
+ n := s.Net.GetRandomUpNode()
if n == nil {
return id, ErrNodeNotFound
}
- return n.ID, s.Net.Stop(n.ID)
+ return n.ID(), s.Net.Stop(n.ID())
}
// StopRandomNodes stops random nodes.
func (s *Simulation) StopRandomNodes(count int) (ids []enode.ID, err error) {
ids = make([]enode.ID, 0, count)
- upIDs := s.UpNodeIDs()
for i := 0; i < count; i++ {
- n := s.randomNode(upIDs, ids...)
+ n := s.Net.GetRandomUpNode()
if n == nil {
return nil, ErrNodeNotFound
}
- err = s.Net.Stop(n.ID)
+ err = s.Net.Stop(n.ID())
if err != nil {
return nil, err
}
- ids = append(ids, n.ID)
+ ids = append(ids, n.ID())
}
return ids, nil
}
@@ -328,35 +307,3 @@ func (s *Simulation) StopRandomNodes(count int) (ids []enode.ID, err error) {
func init() {
rand.Seed(time.Now().UnixNano())
}
-
-// RandomUpNode returns a random SimNode that is up.
-// Arguments are NodeIDs for nodes that should not be returned.
-func (s *Simulation) RandomUpNode(exclude ...enode.ID) *adapters.SimNode {
- return s.randomNode(s.UpNodeIDs(), exclude...)
-}
-
-// randomDownNode returns a random SimNode that is not up.
-func (s *Simulation) randomDownNode(exclude ...enode.ID) *adapters.SimNode {
- return s.randomNode(s.DownNodeIDs(), exclude...)
-}
-
-// randomNode returns a random SimNode from the slice of NodeIDs.
-func (s *Simulation) randomNode(ids []enode.ID, exclude ...enode.ID) *adapters.SimNode {
- for _, e := range exclude {
- var i int
- for _, id := range ids {
- if id == e {
- ids = append(ids[:i], ids[i+1:]...)
- } else {
- i++
- }
- }
- }
- l := len(ids)
- if l == 0 {
- return nil
- }
- n := s.Net.GetNode(ids[rand.Intn(l)])
- node, _ := n.Node.(*adapters.SimNode)
- return node
-}
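// Random node selection moves to p2p/simulations.Network in this change; a
// minimal sketch of the new pattern (the helper name is hypothetical):
func stopOneRandom(sim *simulation.Simulation) error {
	n := sim.Net.GetRandomUpNode()
	if n == nil {
		return simulation.ErrNodeNotFound
	}
	return sim.Net.Stop(n.ID())
}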
diff --git a/swarm/network/simulation/node_test.go b/swarm/network/simulation/node_test.go
index 086ab606f..dc9189c91 100644
--- a/swarm/network/simulation/node_test.go
+++ b/swarm/network/simulation/node_test.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/swarm/network"
)
@@ -160,6 +161,41 @@ func TestAddNodeWithService(t *testing.T) {
}
}
+func TestAddNodeMultipleServices(t *testing.T) {
+ sim := New(map[string]ServiceFunc{
+ "noop1": noopServiceFunc,
+ "noop2": noopService2Func,
+ })
+ defer sim.Close()
+
+ id, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ n := sim.Net.GetNode(id).Node.(*adapters.SimNode)
+ if n.Service("noop1") == nil {
+ t.Error("service noop1 not found on node")
+ }
+ if n.Service("noop2") == nil {
+ t.Error("service noop2 not found on node")
+ }
+}
+
+func TestAddNodeDuplicateServiceError(t *testing.T) {
+ sim := New(map[string]ServiceFunc{
+ "noop1": noopServiceFunc,
+ "noop2": noopServiceFunc,
+ })
+ defer sim.Close()
+
+ wantErr := "duplicate service: *simulation.noopService"
+ _, err := sim.AddNode()
+ if err.Error() != wantErr {
+ t.Errorf("got error %q, want %q", err, wantErr)
+ }
+}
+
func TestAddNodes(t *testing.T) {
sim := New(noopServiceFuncMap)
defer sim.Close()
@@ -193,7 +229,7 @@ func TestAddNodesAndConnectFull(t *testing.T) {
t.Fatal(err)
}
- testFull(t, sim, ids)
+ simulations.VerifyFull(t, sim.Net, ids)
}
func TestAddNodesAndConnectChain(t *testing.T) {
@@ -212,7 +248,7 @@ func TestAddNodesAndConnectChain(t *testing.T) {
t.Fatal(err)
}
- testChain(t, sim, sim.UpNodeIDs())
+ simulations.VerifyChain(t, sim.Net, sim.UpNodeIDs())
}
func TestAddNodesAndConnectRing(t *testing.T) {
@@ -224,7 +260,7 @@ func TestAddNodesAndConnectRing(t *testing.T) {
t.Fatal(err)
}
- testRing(t, sim, ids)
+ simulations.VerifyRing(t, sim.Net, ids)
}
func TestAddNodesAndConnectStar(t *testing.T) {
@@ -236,7 +272,7 @@ func TestAddNodesAndConnectStar(t *testing.T) {
t.Fatal(err)
}
- testStar(t, sim, ids, 0)
+ simulations.VerifyStar(t, sim.Net, ids, 0)
}
//To test that uploading a snapshot works
@@ -278,45 +314,6 @@ func TestUploadSnapshot(t *testing.T) {
log.Debug("Done.")
}
-func TestPivotNode(t *testing.T) {
- sim := New(noopServiceFuncMap)
- defer sim.Close()
-
- id, err := sim.AddNode()
- if err != nil {
- t.Fatal(err)
- }
-
- id2, err := sim.AddNode()
- if err != nil {
- t.Fatal(err)
- }
-
- if sim.PivotNodeID() != nil {
- t.Error("expected no pivot node")
- }
-
- sim.SetPivotNode(id)
-
- pid := sim.PivotNodeID()
-
- if pid == nil {
- t.Error("pivot node not set")
- } else if *pid != id {
- t.Errorf("expected pivot node %s, got %s", id, *pid)
- }
-
- sim.SetPivotNode(id2)
-
- pid = sim.PivotNodeID()
-
- if pid == nil {
- t.Error("pivot node not set")
- } else if *pid != id2 {
- t.Errorf("expected pivot node %s, got %s", id2, *pid)
- }
-}
-
func TestStartStopNode(t *testing.T) {
sim := New(noopServiceFuncMap)
defer sim.Close()
diff --git a/swarm/network/simulation/service.go b/swarm/network/simulation/service.go
index 819602e9e..7dd4dc6d8 100644
--- a/swarm/network/simulation/service.go
+++ b/swarm/network/simulation/service.go
@@ -39,7 +39,7 @@ func (s *Simulation) Service(name string, id enode.ID) node.Service {
// RandomService returns a single Service by name on a
// randomly chosen node that is up.
func (s *Simulation) RandomService(name string) node.Service {
- n := s.RandomUpNode()
+ n := s.Net.GetRandomUpNode().Node.(*adapters.SimNode)
if n == nil {
return nil
}
diff --git a/swarm/network/simulation/simulation.go b/swarm/network/simulation/simulation.go
index f6d3ce229..e18d19a67 100644
--- a/swarm/network/simulation/simulation.go
+++ b/swarm/network/simulation/simulation.go
@@ -28,12 +28,12 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+ "github.com/ethereum/go-ethereum/swarm/network"
)
// Common errors that are returned by functions in this package.
var (
ErrNodeNotFound = errors.New("node not found")
- ErrNoPivotNode = errors.New("no pivot node set")
)
// Simulation provides methods on network, nodes and services
@@ -43,13 +43,13 @@ type Simulation struct {
// of p2p/simulations.Network.
Net *simulations.Network
- serviceNames []string
- cleanupFuncs []func()
- buckets map[enode.ID]*sync.Map
- pivotNodeID *enode.ID
- shutdownWG sync.WaitGroup
- done chan struct{}
- mu sync.RWMutex
+ serviceNames []string
+ cleanupFuncs []func()
+ buckets map[enode.ID]*sync.Map
+ shutdownWG sync.WaitGroup
+ done chan struct{}
+ mu sync.RWMutex
+ neighbourhoodSize int
httpSrv *http.Server //attach a HTTP server via SimulationOptions
handler *simulations.Server //HTTP handler for the server
@@ -66,16 +66,23 @@ type Simulation struct {
// after network shutdown.
type ServiceFunc func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error)
-// New creates a new Simulation instance with new
-// simulations.Network initialized with provided services.
+// New creates a new simulation instance.
+// The services map must have unique keys as service names, and
+// every ServiceFunc must return a node.Service of a unique type.
+// This restriction is required by the node.Node.Start() function,
+// which is used to start the node.Service returned by each ServiceFunc.
func New(services map[string]ServiceFunc) (s *Simulation) {
s = &Simulation{
- buckets: make(map[enode.ID]*sync.Map),
- done: make(chan struct{}),
+ buckets: make(map[enode.ID]*sync.Map),
+ done: make(chan struct{}),
+ neighbourhoodSize: network.NewKadParams().NeighbourhoodSize,
}
adapterServices := make(map[string]adapters.ServiceFunc, len(services))
for name, serviceFunc := range services {
+ // Scope these variables correctly,
+ // as they will be accessed later in the adapterServices[name] function.
+ name, serviceFunc := name, serviceFunc
s.serviceNames = append(s.serviceNames, name)
adapterServices[name] = func(ctx *adapters.ServiceContext) (node.Service, error) {
b := new(sync.Map)
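// Why the rebinding above matters, as a standalone illustration: range
// variables are reused across iterations, so without the per-iteration
// copies every adapter closure would observe the final name/serviceFunc.
func captureExample() []func() string {
	var fns []func() string
	for _, name := range []string{"noop1", "noop2"} {
		name := name // rebind so each closure captures its own copy
		fns = append(fns, func() string { return name })
	}
	return fns // fns[0]() == "noop1", fns[1]() == "noop2"
}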
diff --git a/swarm/network/simulation/simulation_test.go b/swarm/network/simulation/simulation_test.go
index eed09bf50..f837f9382 100644
--- a/swarm/network/simulation/simulation_test.go
+++ b/swarm/network/simulation/simulation_test.go
@@ -26,10 +26,9 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
- "github.com/ethereum/go-ethereum/rpc"
- colorable "github.com/mattn/go-colorable"
+ "github.com/mattn/go-colorable"
)
var (
@@ -178,30 +177,27 @@ var noopServiceFuncMap = map[string]ServiceFunc{
}
// a helper function for most basic noop service
-func noopServiceFunc(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
+func noopServiceFunc(_ *adapters.ServiceContext, _ *sync.Map) (node.Service, func(), error) {
return newNoopService(), nil, nil
}
-// noopService is the service that does not do anything
-// but implements node.Service interface.
-type noopService struct{}
-
func newNoopService() node.Service {
return &noopService{}
}
-func (t *noopService) Protocols() []p2p.Protocol {
- return []p2p.Protocol{}
+// a helper function for the most basic noop service
+// of a different type than noopService, to test
+// multiple services on one node.
+func noopService2Func(_ *adapters.ServiceContext, _ *sync.Map) (node.Service, func(), error) {
+ return new(noopService2), nil, nil
}
-func (t *noopService) APIs() []rpc.API {
- return []rpc.API{}
+// noopService2 is a service that does nothing
+// but implements the node.Service interface.
+type noopService2 struct {
+ simulations.NoopService
}
-func (t *noopService) Start(server *p2p.Server) error {
- return nil
-}
-
-func (t *noopService) Stop() error {
- return nil
+type noopService struct {
+ simulations.NoopService
}
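// A sketch of why distinct wrapper types are needed: node.Node.Start
// rejects duplicate service types, so each noop service embeds the shared
// simulations.NoopService while remaining its own Go type. A further
// (hypothetical) service would follow the same shape:
type noopService3 struct {
	simulations.NoopService // provides the no-op node.Service methods
}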
diff --git a/swarm/network/simulations/discovery/discovery_test.go b/swarm/network/simulations/discovery/discovery_test.go
index cd5456b73..e5121c477 100644
--- a/swarm/network/simulations/discovery/discovery_test.go
+++ b/swarm/network/simulations/discovery/discovery_test.go
@@ -31,6 +31,7 @@ import (
"testing"
"time"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
@@ -45,7 +46,7 @@ import (
// serviceName is used with the exec adapter so the exec'd binary knows which
// service to execute
const serviceName = "discovery"
-const testMinProxBinSize = 2
+const testNeighbourhoodSize = 2
const discoveryPersistenceDatadir = "discovery_persistence_test_store"
var discoveryPersistencePath = path.Join(os.TempDir(), discoveryPersistenceDatadir)
@@ -156,6 +157,7 @@ func testDiscoverySimulationSimAdapter(t *testing.T, nodes, conns int) {
}
func testDiscoverySimulation(t *testing.T, nodes, conns int, adapter adapters.NodeAdapter) {
+ t.Skip("discovery tests depend on suggestpeer, which is unreliable after kademlia depth change.")
startedAt := time.Now()
result, err := discoverySimulation(nodes, conns, adapter)
if err != nil {
@@ -183,6 +185,7 @@ func testDiscoverySimulation(t *testing.T, nodes, conns int, adapter adapters.No
}
func testDiscoveryPersistenceSimulation(t *testing.T, nodes, conns int, adapter adapters.NodeAdapter) map[int][]byte {
+ t.Skip("discovery tests depend on suggestpeer, which is unreliable after kademlia depth change.")
persistenceEnabled = true
discoveryEnabled = true
@@ -265,7 +268,7 @@ func discoverySimulation(nodes, conns int, adapter adapters.NodeAdapter) (*simul
wg.Wait()
log.Debug(fmt.Sprintf("nodes: %v", len(addrs)))
// construct the peer pot, so that kademlia health can be checked
- ppmap := network.NewPeerPotMap(testMinProxBinSize, addrs)
+ ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, addrs)
check := func(ctx context.Context, id enode.ID) (bool, error) {
select {
case <-ctx.Done():
@@ -281,12 +284,13 @@ func discoverySimulation(nodes, conns int, adapter adapters.NodeAdapter) (*simul
if err != nil {
return false, fmt.Errorf("error getting node client: %s", err)
}
+
healthy := &network.Health{}
- if err := client.Call(&healthy, "hive_healthy", ppmap[id.String()]); err != nil {
+ if err := client.Call(&healthy, "hive_healthy", ppmap); err != nil {
return false, fmt.Errorf("error getting node health: %s", err)
}
- log.Debug(fmt.Sprintf("node %4s healthy: got nearest neighbours: %v, know nearest neighbours: %v, saturated: %v\n%v", id, healthy.GotNN, healthy.KnowNN, healthy.Full, healthy.Hive))
- return healthy.KnowNN && healthy.GotNN && healthy.Full, nil
+ log.Info(fmt.Sprintf("node %4s healthy: connected nearest neighbours: %v, know nearest neighbours: %v,\n\n%v", id, healthy.ConnectNN, healthy.KnowNN, healthy.Hive))
+ return healthy.KnowNN && healthy.ConnectNN, nil
}
// 64 nodes ~ 1min
@@ -371,6 +375,7 @@ func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapt
if err := triggerChecks(trigger, net, node.ID()); err != nil {
return nil, fmt.Errorf("error triggering checks for node %s: %s", node.ID().TerminalString(), err)
}
+ // TODO we shouldn't be equating underaddr and overaddr like this, as they are not the same in production
ids[i] = node.ID()
a := ids[i].Bytes()
@@ -379,7 +384,6 @@ func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapt
// run a simulation which connects the 10 nodes in a ring and waits
// for full peer discovery
- ppmap := network.NewPeerPotMap(testMinProxBinSize, addrs)
var restartTime time.Time
@@ -400,12 +404,21 @@ func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapt
}
healthy := &network.Health{}
addr := id.String()
- if err := client.Call(&healthy, "hive_healthy", ppmap[addr]); err != nil {
+ ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, addrs)
+ if err := client.Call(&healthy, "hive_healthy", ppmap); err != nil {
return fmt.Errorf("error getting node health: %s", err)
}
- log.Info(fmt.Sprintf("NODE: %s, IS HEALTHY: %t", addr, healthy.GotNN && healthy.KnowNN && healthy.Full))
- if !healthy.GotNN || !healthy.Full {
+ log.Info(fmt.Sprintf("NODE: %s, IS HEALTHY: %t", addr, healthy.ConnectNN && healthy.KnowNN && healthy.CountKnowNN > 0))
+ var nodeStr string
+ if err := client.Call(&nodeStr, "hive_string"); err != nil {
+ return fmt.Errorf("error getting node string %s", err)
+ }
+ log.Info(nodeStr)
+ for _, a := range addrs {
+ log.Info(common.Bytes2Hex(a))
+ }
+ if !healthy.ConnectNN || healthy.CountKnowNN == 0 {
isHealthy = false
break
}
@@ -479,12 +492,14 @@ func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapt
return false, fmt.Errorf("error getting node client: %s", err)
}
healthy := &network.Health{}
- if err := client.Call(&healthy, "hive_healthy", ppmap[id.String()]); err != nil {
+ ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, addrs)
+
+ if err := client.Call(&healthy, "hive_healthy", ppmap); err != nil {
return false, fmt.Errorf("error getting node health: %s", err)
}
- log.Info(fmt.Sprintf("node %4s healthy: got nearest neighbours: %v, know nearest neighbours: %v, saturated: %v", id, healthy.GotNN, healthy.KnowNN, healthy.Full))
+ log.Info(fmt.Sprintf("node %4s healthy: got nearest neighbours: %v, know nearest neighbours: %v", id, healthy.ConnectNN, healthy.KnowNN))
- return healthy.KnowNN && healthy.GotNN && healthy.Full, nil
+ return healthy.KnowNN && healthy.ConnectNN, nil
}
// 64 nodes ~ 1min
@@ -551,7 +566,7 @@ func newService(ctx *adapters.ServiceContext) (node.Service, error) {
addr := network.NewAddr(ctx.Config.Node())
kp := network.NewKadParams()
- kp.MinProxBinSize = testMinProxBinSize
+ kp.NeighbourhoodSize = testNeighbourhoodSize
if ctx.Config.Reachable != nil {
kp.Reachable = func(o *network.BzzAddr) bool {
diff --git a/swarm/network/simulations/overlay.go b/swarm/network/simulations/overlay.go
index 284ae6398..63938809e 100644
--- a/swarm/network/simulations/overlay.go
+++ b/swarm/network/simulations/overlay.go
@@ -86,7 +86,7 @@ func (s *Simulation) NewService(ctx *adapters.ServiceContext) (node.Service, err
addr := network.NewAddr(node)
kp := network.NewKadParams()
- kp.MinProxBinSize = 2
+ kp.NeighbourhoodSize = 2
kp.MaxBinSize = 4
kp.MinBinSize = 1
kp.MaxRetries = 1000
diff --git a/swarm/network/stream/common_test.go b/swarm/network/stream/common_test.go
index e0a7f7e12..29b917d39 100644
--- a/swarm/network/stream/common_test.go
+++ b/swarm/network/stream/common_test.go
@@ -35,7 +35,6 @@ import (
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/network/simulation"
- "github.com/ethereum/go-ethereum/swarm/pot"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/testutil"
@@ -57,7 +56,7 @@ var (
bucketKeyRegistry = simulation.BucketKey("registry")
chunkSize = 4096
- pof = pot.DefaultPof(256)
+ pof = network.Pof
)
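
`network.Pof` is the same proximity-order function the tests previously constructed locally via `pot.DefaultPof(256)`: it returns the index of the first bit at which two overlay addresses differ, i.e. the length of their shared bit prefix, capped at 256. A self-contained illustration of that calculation (not the library code):

    package main

    import "fmt"

    // proximity returns the index of the first differing bit between a and b,
    // i.e. the length of their shared prefix in bits (the proximity order).
    func proximity(a, b []byte) int {
        for i := 0; i < len(a) && i < len(b); i++ {
            if x := a[i] ^ b[i]; x != 0 {
                for j := 0; j < 8; j++ {
                    if x&(0x80>>uint(j)) != 0 {
                        return i*8 + j
                    }
                }
            }
        }
        return len(a) * 8 // identical up to the shorter length
    }

    func main() {
        fmt.Println(proximity([]byte{0x80, 0x00}, []byte{0x00, 0x00})) // 0
        fmt.Println(proximity([]byte{0xff, 0x00}, []byte{0xff, 0x80})) // 8
    }
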
func init() {
diff --git a/swarm/network/stream/delivery.go b/swarm/network/stream/delivery.go
index 64d754336..e1a13fe8d 100644
--- a/swarm/network/stream/delivery.go
+++ b/swarm/network/stream/delivery.go
@@ -19,7 +19,6 @@ package stream
import (
"context"
"errors"
-
"fmt"
"github.com/ethereum/go-ethereum/metrics"
@@ -39,6 +38,7 @@ const (
var (
processReceivedChunksCount = metrics.NewRegisteredCounter("network.stream.received_chunks.count", nil)
handleRetrieveRequestMsgCount = metrics.NewRegisteredCounter("network.stream.handle_retrieve_request_msg.count", nil)
+ retrieveChunkFail = metrics.NewRegisteredCounter("network.stream.retrieve_chunks_fail.count", nil)
requestFromPeersCount = metrics.NewRegisteredCounter("network.stream.request_from_peers.count", nil)
requestFromPeersEachCount = metrics.NewRegisteredCounter("network.stream.request_from_peers_each.count", nil)
@@ -169,7 +169,8 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
go func() {
chunk, err := d.chunkStore.Get(ctx, req.Addr)
if err != nil {
- log.Warn("ChunkStore.Get can not retrieve chunk", "peer", sp.ID().String(), "addr", req.Addr, "hopcount", req.HopCount, "err", err)
+ retrieveChunkFail.Inc(1)
+ log.Debug("ChunkStore.Get can not retrieve chunk", "peer", sp.ID().String(), "addr", req.Addr, "hopcount", req.HopCount, "err", err)
return
}
if req.SkipCheck {
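
The new `retrieveChunkFail` counter follows the usual go-ethereum metrics idiom: register once at package scope, increment on the failure path, and let the metrics subsystem handle aggregation. A minimal runnable sketch:

    package main

    import "github.com/ethereum/go-ethereum/metrics"

    // Registered once at package scope; safe to increment from many goroutines.
    var retrieveChunkFail = metrics.NewRegisteredCounter("network.stream.retrieve_chunks_fail.count", nil)

    func main() {
        retrieveChunkFail.Inc(1) // bump on each failed ChunkStore.Get
    }
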
@@ -243,7 +244,7 @@ func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) (
return nil, nil, fmt.Errorf("source peer %v not found", spID.String())
}
} else {
- d.kad.EachConn(req.Addr[:], 255, func(p *network.Peer, po int, nn bool) bool {
+ d.kad.EachConn(req.Addr[:], 255, func(p *network.Peer, po int) bool {
id := p.ID()
if p.LightNode {
// skip light nodes
@@ -255,7 +256,7 @@ func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) (
}
sp = d.getPeer(id)
if sp == nil {
- log.Warn("Delivery.RequestFromPeers: peer not found", "id", id)
+ //log.Warn("Delivery.RequestFromPeers: peer not found", "id", id)
return true
}
spID = &id
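
Note that the `EachConn` callback shrank from `func(*network.Peer, int, bool)` to `func(*network.Peer, int)`: the nearest-neighbour flag is gone, and callers that still need it can compare the peer's `po` against `kad.NeighbourhoodDepth()`. The iteration shape after this change, sketched with names from this diff:

    kad.EachConn(req.Addr[:], 255, func(p *network.Peer, po int) bool {
        if p.LightNode {
            return true // skip light nodes, as above
        }
        // nearest-neighbour status, if needed: po >= kad.NeighbourhoodDepth()
        return true // keep iterating; return false to stop
    })
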
diff --git a/swarm/network/stream/delivery_test.go b/swarm/network/stream/delivery_test.go
index a6173a389..70d3829b3 100644
--- a/swarm/network/stream/delivery_test.go
+++ b/swarm/network/stream/delivery_test.go
@@ -19,9 +19,11 @@ package stream
import (
"bytes"
"context"
+ "errors"
"fmt"
"os"
"sync"
+ "sync/atomic"
"testing"
"time"
@@ -442,17 +444,17 @@ func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
}
func TestDeliveryFromNodes(t *testing.T) {
- testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
- testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
- testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
- testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
- testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
- testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
- testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
- testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
+ testDeliveryFromNodes(t, 2, dataChunkCount, true)
+ testDeliveryFromNodes(t, 2, dataChunkCount, false)
+ testDeliveryFromNodes(t, 4, dataChunkCount, true)
+ testDeliveryFromNodes(t, 4, dataChunkCount, false)
+ testDeliveryFromNodes(t, 8, dataChunkCount, true)
+ testDeliveryFromNodes(t, 8, dataChunkCount, false)
+ testDeliveryFromNodes(t, 16, dataChunkCount, true)
+ testDeliveryFromNodes(t, 16, dataChunkCount, false)
}
-func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
+func testDeliveryFromNodes(t *testing.T, nodes, chunkCount int, skipCheck bool) {
sim := simulation.New(map[string]simulation.ServiceFunc{
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
node := ctx.Config.Node()
@@ -500,10 +502,11 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
log.Info("Starting simulation")
ctx := context.Background()
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
nodeIDs := sim.UpNodeIDs()
//determine the pivot node to be the first node of the simulation
- sim.SetPivotNode(nodeIDs[0])
+ pivot := nodeIDs[0]
+
//distribute chunks of a random file into Stores of nodes 1 to nodes
//we will do this by creating a file store with an underlying round-robin store:
//the file store will create a hash for the uploaded file, but every chunk will be
@@ -517,7 +520,7 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
//...iterate the buckets...
for id, bucketVal := range lStores {
//...and remove the one which is the pivot node
- if id == *sim.PivotNodeID() {
+ if id == pivot {
continue
}
//the other ones are added to the array...
@@ -540,25 +543,25 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
}
log.Debug("Waiting for kademlia")
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ // TODO this does not seem to be correct usage of the function, as the simulation may have no kademlias
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
return err
}
//get the pivot node's filestore
- item, ok := sim.NodeItem(*sim.PivotNodeID(), bucketKeyFileStore)
+ item, ok := sim.NodeItem(pivot, bucketKeyFileStore)
if !ok {
return fmt.Errorf("No filestore")
}
pivotFileStore := item.(*storage.FileStore)
log.Debug("Starting retrieval routine")
+ retErrC := make(chan error)
go func() {
// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
// we must wait for the peer connections to have started before requesting
n, err := readAll(pivotFileStore, fileHash)
log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
- if err != nil {
- t.Fatalf("requesting chunks action error: %v", err)
- }
+ retErrC <- err
}()
log.Debug("Watching for disconnections")
@@ -568,11 +571,19 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
simulation.NewPeerEventsFilter().Drop(),
)
+ var disconnected atomic.Value
go func() {
for d := range disconnections {
if d.Error != nil {
log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
- t.Fatal(d.Error)
+ disconnected.Store(true)
+ }
+ }
+ }()
+ defer func() {
+ if err != nil {
+ if yes, ok := disconnected.Load().(bool); ok && yes {
+ err = errors.New("disconnect events received")
}
}
}()
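
This pattern recurs throughout the file: `t.Fatal` (and `b.Fatal`) may only be called from the goroutine running the test, so the watcher now records the drop in an `atomic.Value` and a deferred closure folds it into the run function's named `err` return; the retrieval goroutine likewise reports through `retErrC` instead of failing directly. Reduced to a self-contained sketch:

    package main

    import (
        "errors"
        "fmt"
        "sync/atomic"
    )

    type dropEvent struct{ Error error }

    // run records drops from a watcher goroutine and converts them into an
    // error via the named return value, instead of calling t.Fatal off-thread.
    func run(disconnections <-chan dropEvent) (err error) {
        var disconnected atomic.Value
        go func() {
            for d := range disconnections {
                if d.Error != nil {
                    disconnected.Store(true)
                }
            }
        }()
        defer func() {
            if err != nil {
                if yes, ok := disconnected.Load().(bool); ok && yes {
                    err = errors.New("disconnect events received")
                }
            }
        }()
        // ... the simulation body runs here and may set err ...
        return err
    }

    func main() { fmt.Println(run(make(chan dropEvent))) }
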
@@ -593,6 +604,9 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
if !success {
return fmt.Errorf("Test failed, chunks not available on all nodes")
}
+ if err := <-retErrC; err != nil {
+ t.Fatalf("requesting chunks: %v", err)
+ }
log.Debug("Test terminated successfully")
return nil
})
@@ -607,7 +621,7 @@ func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
b.Run(
fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
func(b *testing.B) {
- benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
+ benchmarkDeliveryFromNodes(b, i, chunks, true)
},
)
}
@@ -620,14 +634,14 @@ func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
b.Run(
fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
func(b *testing.B) {
- benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
+ benchmarkDeliveryFromNodes(b, i, chunks, false)
},
)
}
}
}
-func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
+func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck bool) {
sim := simulation.New(map[string]simulation.ServiceFunc{
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
node := ctx.Config.Node()
@@ -673,7 +687,7 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skip
}
ctx := context.Background()
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
nodeIDs := sim.UpNodeIDs()
node := nodeIDs[len(nodeIDs)-1]
@@ -690,7 +704,7 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skip
}
netStore := item.(*storage.NetStore)
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
return err
}
@@ -700,11 +714,19 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skip
simulation.NewPeerEventsFilter().Drop(),
)
+ var disconnected atomic.Value
go func() {
for d := range disconnections {
if d.Error != nil {
log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
- b.Fatal(d.Error)
+ disconnected.Store(true)
+ }
+ }
+ }()
+ defer func() {
+ if err != nil {
+ if yes, ok := disconnected.Load().(bool); ok && yes {
+ err = errors.New("disconnect events received")
}
}
}()
diff --git a/swarm/network/stream/intervals/store_test.go b/swarm/network/stream/intervals/store_test.go
index 0ab14c065..a36814b71 100644
--- a/swarm/network/stream/intervals/store_test.go
+++ b/swarm/network/stream/intervals/store_test.go
@@ -17,14 +17,11 @@
package intervals
import (
- "errors"
"testing"
"github.com/ethereum/go-ethereum/swarm/state"
)
-var ErrNotFound = errors.New("not found")
-
// TestInmemoryStore tests basic functionality of InmemoryStore.
func TestInmemoryStore(t *testing.T) {
testStore(t, state.NewInmemoryStore())
diff --git a/swarm/network/stream/intervals_test.go b/swarm/network/stream/intervals_test.go
index defb6df50..8f2bed9d6 100644
--- a/swarm/network/stream/intervals_test.go
+++ b/swarm/network/stream/intervals_test.go
@@ -19,9 +19,11 @@ package stream
import (
"context"
"encoding/binary"
+ "errors"
"fmt"
"os"
"sync"
+ "sync/atomic"
"testing"
"time"
@@ -52,6 +54,7 @@ func TestIntervalsLiveAndHistory(t *testing.T) {
}
func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
+
nodes := 2
chunkCount := dataChunkCount
externalStreamName := "externalStream"
@@ -112,11 +115,11 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
t.Fatal(err)
}
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
nodeIDs := sim.UpNodeIDs()
storer := nodeIDs[0]
checker := nodeIDs[1]
@@ -161,11 +164,19 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
return err
}
+ var disconnected atomic.Value
go func() {
for d := range disconnections {
if d.Error != nil {
log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
- t.Fatal(d.Error)
+ disconnected.Store(true)
+ }
+ }
+ }()
+ defer func() {
+ if err != nil {
+ if yes, ok := disconnected.Load().(bool); ok && yes {
+ err = errors.New("disconnect events received")
}
}
}()
diff --git a/swarm/network/stream/messages.go b/swarm/network/stream/messages.go
index eb1b2983e..b293724cc 100644
--- a/swarm/network/stream/messages.go
+++ b/swarm/network/stream/messages.go
@@ -336,7 +336,7 @@ func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg)
// launch in go routine since GetBatch blocks until new hashes arrive
go func() {
if err := p.SendOfferedHashes(s, req.From, req.To); err != nil {
- log.Warn("SendOfferedHashes error", "err", err)
+ log.Warn("SendOfferedHashes error", "peer", p.ID().TerminalString(), "err", err)
}
}()
// go p.SendOfferedHashes(s, req.From, req.To)
diff --git a/swarm/network/stream/snapshot_retrieval_test.go b/swarm/network/stream/snapshot_retrieval_test.go
index 5ea0b1511..d345ac8d0 100644
--- a/swarm/network/stream/snapshot_retrieval_test.go
+++ b/swarm/network/stream/snapshot_retrieval_test.go
@@ -197,7 +197,7 @@ func runFileRetrievalTest(nodeCount int) error {
if err != nil {
return err
}
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
return err
}
@@ -277,17 +277,17 @@ func runRetrievalTest(chunkCount int, nodeCount int) error {
}
//this is the node selected for upload
- node := sim.RandomUpNode()
- item, ok := sim.NodeItem(node.ID, bucketKeyStore)
+ node := sim.Net.GetRandomUpNode()
+ item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
if !ok {
return fmt.Errorf("No localstore")
}
lstore := item.(*storage.LocalStore)
- conf.hashes, err = uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
+ conf.hashes, err = uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
if err != nil {
return err
}
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
return err
}
diff --git a/swarm/network/stream/snapshot_sync_test.go b/swarm/network/stream/snapshot_sync_test.go
index 4e56f71b5..6af19c12a 100644
--- a/swarm/network/stream/snapshot_sync_test.go
+++ b/swarm/network/stream/snapshot_sync_test.go
@@ -21,6 +21,7 @@ import (
"os"
"runtime"
"sync"
+ "sync/atomic"
"testing"
"time"
@@ -105,43 +106,6 @@ func TestSyncingViaGlobalSync(t *testing.T) {
}
}
-func TestSyncingViaDirectSubscribe(t *testing.T) {
- if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
- t.Skip("Flaky on mac on travis")
- }
- //if nodes/chunks have been provided via commandline,
- //run the tests with these values
- if *nodes != 0 && *chunks != 0 {
- log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
- err := testSyncingViaDirectSubscribe(t, *chunks, *nodes)
- if err != nil {
- t.Fatal(err)
- }
- } else {
- var nodeCnt []int
- var chnkCnt []int
- //if the `longrunning` flag has been provided
- //run more test combinations
- if *longrunning {
- chnkCnt = []int{1, 8, 32, 256, 1024}
- nodeCnt = []int{32, 16}
- } else {
- //default test
- chnkCnt = []int{4, 32}
- nodeCnt = []int{32, 16}
- }
- for _, chnk := range chnkCnt {
- for _, n := range nodeCnt {
- log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chnk, n))
- err := testSyncingViaDirectSubscribe(t, chnk, n)
- if err != nil {
- t.Fatal(err)
- }
- }
- }
- }
-}
-
var simServiceMap = map[string]simulation.ServiceFunc{
"streamer": streamerFunc,
}
@@ -203,7 +167,7 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancelSimRun()
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
t.Fatal(err)
}
@@ -213,11 +177,13 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
simulation.NewPeerEventsFilter().Drop(),
)
+ var disconnected atomic.Value
go func() {
for d := range disconnections {
- log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
- t.Fatal("unexpected disconnect")
- cancelSimRun()
+ if d.Error != nil {
+ log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
+ disconnected.Store(true)
+ }
}
}()
@@ -226,6 +192,9 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
if result.Error != nil {
t.Fatal(result.Error)
}
+ if yes, ok := disconnected.Load().(bool); ok && yes {
+ t.Fatal("disconnect events received")
+ }
log.Info("Simulation ended")
}
@@ -246,20 +215,20 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
//get the node at that index
//this is the node selected for upload
- node := sim.RandomUpNode()
- item, ok := sim.NodeItem(node.ID, bucketKeyStore)
+ node := sim.Net.GetRandomUpNode()
+ item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
if !ok {
return fmt.Errorf("No localstore")
}
lstore := item.(*storage.LocalStore)
- hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
+ hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
if err != nil {
return err
}
for _, h := range hashes {
evt := &simulations.Event{
Type: EventTypeChunkCreated,
- Node: sim.Net.GetNode(node.ID),
+ Node: sim.Net.GetNode(node.ID()),
Data: h.String(),
}
sim.Net.Events().Send(evt)
@@ -317,229 +286,6 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
})
}
-/*
-The test generates the given number of chunks
-
-For every chunk generated, the nearest node addresses
-are identified, we verify that the nodes closer to the
-chunk addresses actually do have the chunks in their local stores.
-
-The test loads a snapshot file to construct the swarm network,
-assuming that the snapshot file identifies a healthy
-kademlia network. The snapshot should have 'streamer' in its service list.
-*/
-func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int) error {
- sim := simulation.New(map[string]simulation.ServiceFunc{
- "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
- n := ctx.Config.Node()
- addr := network.NewAddr(n)
- store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
- if err != nil {
- return nil, nil, err
- }
- bucket.Store(bucketKeyStore, store)
- localStore := store.(*storage.LocalStore)
- netStore, err := storage.NewNetStore(localStore, nil)
- if err != nil {
- return nil, nil, err
- }
- kad := network.NewKademlia(addr.Over(), network.NewKadParams())
- delivery := NewDelivery(kad, netStore)
- netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New
-
- r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- Retrieval: RetrievalDisabled,
- Syncing: SyncingRegisterOnly,
- }, nil)
- bucket.Store(bucketKeyRegistry, r)
-
- fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
- bucket.Store(bucketKeyFileStore, fileStore)
-
- cleanup = func() {
- os.RemoveAll(datadir)
- netStore.Close()
- r.Close()
- }
-
- return r, cleanup, nil
-
- },
- })
- defer sim.Close()
-
- ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
- defer cancelSimRun()
-
- conf := &synctestConfig{}
- //map of discover ID to indexes of chunks expected at that ID
- conf.idToChunksMap = make(map[enode.ID][]int)
- //map of overlay address to discover ID
- conf.addrToIDMap = make(map[string]enode.ID)
- //array where the generated chunk hashes will be stored
- conf.hashes = make([]storage.Address, 0)
-
- err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
- if err != nil {
- return err
- }
-
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
- return err
- }
-
- disconnections := sim.PeerEvents(
- context.Background(),
- sim.NodeIDs(),
- simulation.NewPeerEventsFilter().Drop(),
- )
-
- go func() {
- for d := range disconnections {
- log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
- t.Fatal("unexpected disconnect")
- cancelSimRun()
- }
- }()
-
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
- nodeIDs := sim.UpNodeIDs()
- for _, n := range nodeIDs {
- //get the kademlia overlay address from this ID
- a := n.Bytes()
- //append it to the array of all overlay addresses
- conf.addrs = append(conf.addrs, a)
- //the proximity calculation is on overlay addr,
- //the p2p/simulations check func triggers on enode.ID,
- //so we need to know which overlay addr maps to which nodeID
- conf.addrToIDMap[string(a)] = n
- }
-
- var subscriptionCount int
-
- filter := simulation.NewPeerEventsFilter().ReceivedMessages().Protocol("stream").MsgCode(4)
- eventC := sim.PeerEvents(ctx, nodeIDs, filter)
-
- for j, node := range nodeIDs {
- log.Trace(fmt.Sprintf("Start syncing subscriptions: %d", j))
- //start syncing!
- item, ok := sim.NodeItem(node, bucketKeyRegistry)
- if !ok {
- return fmt.Errorf("No registry")
- }
- registry := item.(*Registry)
-
- var cnt int
- cnt, err = startSyncing(registry, conf)
- if err != nil {
- return err
- }
- //increment the number of subscriptions we need to wait for
- //by the count returned from startSyncing (SYNC subscriptions)
- subscriptionCount += cnt
- }
-
- for e := range eventC {
- if e.Error != nil {
- return e.Error
- }
- subscriptionCount--
- if subscriptionCount == 0 {
- break
- }
- }
- //select a random node for upload
- node := sim.RandomUpNode()
- item, ok := sim.NodeItem(node.ID, bucketKeyStore)
- if !ok {
- return fmt.Errorf("No localstore")
- }
- lstore := item.(*storage.LocalStore)
- hashes, err := uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
- if err != nil {
- return err
- }
- conf.hashes = append(conf.hashes, hashes...)
- mapKeysToNodes(conf)
-
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
- return err
- }
-
- var globalStore mock.GlobalStorer
- if *useMockStore {
- globalStore = mockmem.NewGlobalStore()
- }
- // File retrieval check is repeated until all uploaded files are retrieved from all nodes
- // or until the timeout is reached.
- REPEAT:
- for {
- for _, id := range nodeIDs {
- //for each expected chunk, check if it is in the local store
- localChunks := conf.idToChunksMap[id]
- for _, ch := range localChunks {
- //get the real chunk by the index in the index array
- chunk := conf.hashes[ch]
- log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
- //check if the expected chunk is indeed in the localstore
- var err error
- if *useMockStore {
- //use the globalStore if the mockStore should be used; in that case,
- //the complete localStore stack is bypassed for getting the chunk
- _, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
- } else {
- //use the actual localstore
- item, ok := sim.NodeItem(id, bucketKeyStore)
- if !ok {
- return fmt.Errorf("Error accessing localstore")
- }
- lstore := item.(*storage.LocalStore)
- _, err = lstore.Get(ctx, chunk)
- }
- if err != nil {
- log.Debug(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
- // Do not get crazy with logging the warn message
- time.Sleep(500 * time.Millisecond)
- continue REPEAT
- }
- log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
- }
- }
- return nil
- }
- })
-
- if result.Error != nil {
- return result.Error
- }
-
- log.Info("Simulation ended")
- return nil
-}
-
-//the server func to start syncing
-//issues `RequestSubscriptionMsg` to peers, based on po, by iterating over
-//the kademlia's `EachBin` function.
-//returns the number of subscriptions requested
-func startSyncing(r *Registry, conf *synctestConfig) (int, error) {
- var err error
- kad := r.delivery.kad
- subCnt := 0
- //iterate over each bin and solicit needed subscription to bins
- kad.EachBin(r.addr[:], pof, 0, func(conn *network.Peer, po int) bool {
- //identify begin and start index of the bin(s) we want to subscribe to
- subCnt++
- err = r.RequestSubscription(conf.addrToIDMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), true), NewRange(0, 0), High)
- if err != nil {
- log.Error(fmt.Sprintf("Error in RequestSubsciption! %v", err))
- return false
- }
- return true
-
- })
- return subCnt, nil
-}
-
//map chunk keys to addresses which are responsible
func mapKeysToNodes(conf *synctestConfig) {
nodemap := make(map[string][]int)
@@ -551,9 +297,7 @@ func mapKeysToNodes(conf *synctestConfig) {
np, _, _ = pot.Add(np, a, pof)
}
- var kadMinProxSize = 2
-
- ppmap := network.NewPeerPotMap(kadMinProxSize, conf.addrs)
+ ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, conf.addrs)
//for each address, run EachNeighbour on the chunk hashes pot to identify closest nodes
log.Trace(fmt.Sprintf("Generated hash chunk(s): %v", conf.hashes))
diff --git a/swarm/network/stream/stream.go b/swarm/network/stream/stream.go
index 32e107823..fb571c856 100644
--- a/swarm/network/stream/stream.go
+++ b/swarm/network/stream/stream.go
@@ -33,7 +33,6 @@ import (
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
- "github.com/ethereum/go-ethereum/swarm/pot"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
)
@@ -48,31 +47,36 @@ const (
HashSize = 32
)
-//Enumerate options for syncing and retrieval
+// Enumerate options for syncing and retrieval
type SyncingOption int
type RetrievalOption int
-//Syncing options
+// Syncing options
const (
- //Syncing disabled
+ // Syncing disabled
SyncingDisabled SyncingOption = iota
- //Register the client and the server but not subscribe
+ // Register the client and the server but not subscribe
SyncingRegisterOnly
- //Both client and server funcs are registered, subscribe sent automatically
+ // Both client and server funcs are registered, subscribe sent automatically
SyncingAutoSubscribe
)
const (
- //Retrieval disabled. Used mostly for tests to isolate syncing features (i.e. syncing only)
+ // Retrieval disabled. Used mostly for tests to isolate syncing features (i.e. syncing only)
RetrievalDisabled RetrievalOption = iota
- //Only the client side of the retrieve request is registered.
- //(light nodes do not serve retrieve requests)
- //once the client is registered, subscription to retrieve request stream is always sent
+ // Only the client side of the retrieve request is registered.
+ // (light nodes do not serve retrieve requests)
+ // once the client is registered, subscription to retrieve request stream is always sent
RetrievalClientOnly
- //Both client and server funcs are registered, subscribe sent automatically
+ // Both client and server funcs are registered, subscribe sent automatically
RetrievalEnabled
)
+// subscriptionFunc determines how subscriptions are performed:
+// in production we actually subscribe to peers, but tests may substitute other behavior
+// (see TestRequestPeerSubscriptions in streamer_test.go)
+var subscriptionFunc func(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool = doRequestSubscription
+
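
Exposing the subscription step as a package-level function variable is a lightweight test seam: production code keeps `doRequestSubscription`, and a test can swap in a recorder as long as it restores the default afterwards. A sketch of the intended use inside this package's tests (`recorded` is a hypothetical map[enode.ID][]int owned by the test):

    defer func() { subscriptionFunc = doRequestSubscription }() // restore the default
    subscriptionFunc = func(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
        recorded[p.ID()] = append(recorded[p.ID()], int(bin)) // record instead of subscribing
        return true
    }
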
// Registry registry for outgoing and incoming streamer constructors
type Registry struct {
addr enode.ID
@@ -86,7 +90,7 @@ type Registry struct {
peers map[enode.ID]*Peer
delivery *Delivery
intervalsStore state.Store
- autoRetrieval bool //automatically subscribe to retrieve request stream
+ autoRetrieval bool // automatically subscribe to retrieve request stream
maxPeerServers int
spec *protocols.Spec //this protocol's spec
balance protocols.Balance //implements protocols.Balance, for accounting
@@ -96,8 +100,8 @@ type Registry struct {
// RegistryOptions holds optional values for NewRegistry constructor.
type RegistryOptions struct {
SkipCheck bool
- Syncing SyncingOption //Defines syncing behavior
- Retrieval RetrievalOption //Defines retrieval behavior
+ Syncing SyncingOption // Defines syncing behavior
+ Retrieval RetrievalOption // Defines retrieval behavior
SyncUpdateDelay time.Duration
MaxPeerServers int // The limit of servers for each peer in registry
}
@@ -110,7 +114,7 @@ func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.Sy
if options.SyncUpdateDelay <= 0 {
options.SyncUpdateDelay = 15 * time.Second
}
- //check if retriaval has been disabled
+ // check if retrieval has been disabled
retrieval := options.Retrieval != RetrievalDisabled
streamer := &Registry{
@@ -125,12 +129,13 @@ func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.Sy
maxPeerServers: options.MaxPeerServers,
balance: balance,
}
+
streamer.setupSpec()
streamer.api = NewAPI(streamer)
delivery.getPeer = streamer.getPeer
- //if retrieval is enabled, register the server func, so that retrieve requests will be served (non-light nodes only)
+ // if retrieval is enabled, register the server func, so that retrieve requests will be served (non-light nodes only)
if options.Retrieval == RetrievalEnabled {
streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, live bool) (Server, error) {
if !live {
@@ -140,20 +145,20 @@ func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.Sy
})
}
- //if retrieval is not disabled, register the client func (both light nodes and normal nodes can issue retrieve requests)
+ // if retrieval is not disabled, register the client func (both light nodes and normal nodes can issue retrieve requests)
if options.Retrieval != RetrievalDisabled {
streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) {
return NewSwarmSyncerClient(p, syncChunkStore, NewStream(swarmChunkServerStreamName, t, live))
})
}
- //If syncing is not disabled, the syncing functions are registered (both client and server)
+ // If syncing is not disabled, the syncing functions are registered (both client and server)
if options.Syncing != SyncingDisabled {
RegisterSwarmSyncerServer(streamer, syncChunkStore)
RegisterSwarmSyncerClient(streamer, syncChunkStore)
}
- //if syncing is set to automatically subscribe to the syncing stream, start the subscription process
+ // if syncing is set to automatically subscribe to the syncing stream, start the subscription process
if options.Syncing == SyncingAutoSubscribe {
// latestIntC function ensures that
// - receiving from the in chan is not blocked by processing inside the for loop
@@ -235,13 +240,17 @@ func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.Sy
return streamer
}
-//we need to construct a spec instance per node instance
+// This is an accounted protocol, therefore we need to provide a pricing Hook to the spec
+// For simulations to be able to run multiple nodes and not override the hook's balance,
+// we need to construct a spec instance per node instance
func (r *Registry) setupSpec() {
- //first create the "bare" spec
+ // first create the "bare" spec
r.createSpec()
- //if balance is nil, this node has been started without swap support (swapEnabled flag is false)
+ // now create the pricing object
+ r.createPriceOracle()
+ // if balance is nil, this node has been started without swap support (swapEnabled flag is false)
if r.balance != nil && !reflect.ValueOf(r.balance).IsNil() {
- //swap is enabled, so setup the hook
+ // swap is enabled, so setup the hook
r.spec.Hook = protocols.NewAccounting(r.balance, r.prices)
}
}
@@ -388,14 +397,6 @@ func (r *Registry) Quit(peerId enode.ID, s Stream) error {
return peer.Send(context.TODO(), msg)
}
-func (r *Registry) NodeInfo() interface{} {
- return nil
-}
-
-func (r *Registry) PeerInfo(id enode.ID) interface{} {
- return nil
-}
-
func (r *Registry) Close() error {
return r.intervalsStore.Close()
}
@@ -471,24 +472,8 @@ func (r *Registry) updateSyncing() {
}
r.peersMu.RUnlock()
- // request subscriptions for all nodes and bins
- kad.EachBin(r.addr[:], pot.DefaultPof(256), 0, func(p *network.Peer, bin int) bool {
- log.Debug(fmt.Sprintf("Requesting subscription by: registry %s from peer %s for bin: %d", r.addr, p.ID(), bin))
-
- // bin is always less then 256 and it is safe to convert it to type uint8
- stream := NewStream("SYNC", FormatSyncBinKey(uint8(bin)), true)
- if streams, ok := subs[p.ID()]; ok {
- // delete live and history streams from the map, so that it won't be removed with a Quit request
- delete(streams, stream)
- delete(streams, getHistoryStream(stream))
- }
- err := r.RequestSubscription(p.ID(), stream, NewRange(0, 0), High)
- if err != nil {
- log.Debug("Request subscription", "err", err, "peer", p.ID(), "stream", stream)
- return false
- }
- return true
- })
+ // start requesting subscriptions from peers
+ r.requestPeerSubscriptions(kad, subs)
// remove SYNC servers that do not need to be subscribed
for id, streams := range subs {
@@ -509,6 +494,66 @@ func (r *Registry) updateSyncing() {
}
}
+// requestPeerSubscriptions calls on each live peer in the kademlia table
+// and sends a `RequestSubscription` to peers according to their bin
+// and their relationship with kademlia's depth.
+// Also check `TestRequestPeerSubscriptions` in order to understand the
+// expected behavior.
+// The function expects:
+// * the kademlia
+// * a map of subscriptions
+// * the actual function to subscribe
+// (in case of the test, it doesn't do real subscriptions)
+func (r *Registry) requestPeerSubscriptions(kad *network.Kademlia, subs map[enode.ID]map[Stream]struct{}) {
+
+ var startPo int
+ var endPo int
+ var ok bool
+
+ // kademlia's depth
+ kadDepth := kad.NeighbourhoodDepth()
+ // request subscriptions for all nodes and bins
+ // nil as base takes the node's base; we need to pass 255 as `EachConn` runs
+ // from deepest bins backwards
+ kad.EachConn(nil, 255, func(p *network.Peer, po int) bool {
+ //if the peer's bin is shallower than the kademlia depth,
+ //only the peer's bin should be subscribed
+ if po < kadDepth {
+ startPo = po
+ endPo = po
+ } else {
+ //if the peer's bin is equal or deeper than the kademlia depth,
+ //each bin from the depth up to k.MaxProxDisplay should be subscribed
+ startPo = kadDepth
+ endPo = kad.MaxProxDisplay
+ }
+
+ for bin := startPo; bin <= endPo; bin++ {
+ //do the actual subscription
+ ok = subscriptionFunc(r, p, uint8(bin), subs)
+ }
+ return ok
+ })
+}
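
Concretely, for a kademlia of depth d the rules above give: a peer in bin po < d yields the single subscription range [po, po], while a peer with po >= d yields [d, MaxProxDisplay]. A self-contained sketch of just the bin-range computation (a hypothetical helper, not part of the library):

    package main

    import "fmt"

    // subscriptionRange mirrors the bin selection in requestPeerSubscriptions.
    func subscriptionRange(po, depth, maxProxDisplay int) (start, end int) {
        if po < depth {
            return po, po // shallow peer: only its own bin
        }
        return depth, maxProxDisplay // deep peer: every bin from depth upward
    }

    func main() {
        fmt.Println(subscriptionRange(3, 5, 16)) // 3 3
        fmt.Println(subscriptionRange(6, 5, 16)) // 5 16
    }
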
+
+// doRequestSubscription sends the actual RequestSubscription to the peer
+func doRequestSubscription(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
+ log.Debug("Requesting subscription by registry:", "registry", r.addr, "peer", p.ID(), "bin", bin)
+ // bin is always less than 256 and it is safe to convert it to type uint8
+ stream := NewStream("SYNC", FormatSyncBinKey(bin), true)
+ if streams, ok := subs[p.ID()]; ok {
+ // delete live and history streams from the map, so that it won't be removed with a Quit request
+ delete(streams, stream)
+ delete(streams, getHistoryStream(stream))
+ }
+ err := r.RequestSubscription(p.ID(), stream, NewRange(0, 0), High)
+ if err != nil {
+ log.Debug("Request subscription", "err", err, "peer", p.ID(), "stream", stream)
+ return false
+ }
+ return true
+}
+
func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := protocols.NewPeer(p, rw, r.spec)
bp := network.NewBzzPeer(peer)
@@ -541,11 +586,11 @@ func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error {
return p.handleWantedHashesMsg(ctx, msg)
case *ChunkDeliveryMsgRetrieval:
- //handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
+ // handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))
case *ChunkDeliveryMsgSyncing:
- //handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
+ // handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))
case *RetrieveRequestMsg:
@@ -734,9 +779,9 @@ func (c *clientParams) clientCreated() {
close(c.clientCreatedC)
}
-//GetSpec returns the streamer spec to callers
-//This used to be a global variable but for simulations with
-//multiple nodes its fields (notably the Hook) would be overwritten
+// GetSpec returns the streamer spec to callers
+// This used to be a global variable but for simulations with
+// multiple nodes its fields (notably the Hook) would be overwritten
func (r *Registry) GetSpec() *protocols.Spec {
return r.spec
}
@@ -764,6 +809,52 @@ func (r *Registry) createSpec() {
r.spec = spec
}
+// An accountable message needs some meta information attached to it
+// in order to evaluate the correct price
+type StreamerPrices struct {
+ priceMatrix map[reflect.Type]*protocols.Price
+ registry *Registry
+}
+
+// Price implements the accounting interface and returns the price for a specific message
+func (sp *StreamerPrices) Price(msg interface{}) *protocols.Price {
+ t := reflect.TypeOf(msg).Elem()
+ return sp.priceMatrix[t]
+}
+
+// Instead of hardcoding the price, get it
+// through a function - it could be quite complex in the future
+func (sp *StreamerPrices) getRetrieveRequestMsgPrice() uint64 {
+ return uint64(1)
+}
+
+// Instead of hardcoding the price, get it
+// through a function - it could be quite complex in the future
+func (sp *StreamerPrices) getChunkDeliveryMsgRetrievalPrice() uint64 {
+ return uint64(1)
+}
+
+// createPriceOracle sets up a matrix which can be queried to get
+// the price for a message via the Price method
+func (r *Registry) createPriceOracle() {
+ sp := &StreamerPrices{
+ registry: r,
+ }
+ sp.priceMatrix = map[reflect.Type]*protocols.Price{
+ reflect.TypeOf(ChunkDeliveryMsgRetrieval{}): {
+ Value: sp.getChunkDeliveryMsgRetrievalPrice(), // arbitrary price for now
+ PerByte: true,
+ Payer: protocols.Receiver,
+ },
+ reflect.TypeOf(RetrieveRequestMsg{}): {
+ Value: sp.getRetrieveRequestMsgPrice(), // arbitrary price for now
+ PerByte: false,
+ Payer: protocols.Sender,
+ },
+ }
+ r.prices = sp
+}
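
With the matrix in place, code within the package prices a message through the `Price` hook; per this diff, retrieve requests cost a flat 1 unit paid by the sender, while retrieval deliveries are priced per byte and paid by the receiver:

    price := r.prices.Price(&RetrieveRequestMsg{})
    // -> &protocols.Price{Value: 1, PerByte: false, Payer: protocols.Sender}
    price = r.prices.Price(&ChunkDeliveryMsgRetrieval{})
    // -> per-byte price of 1, paid by the receiver of the delivery
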
+
func (r *Registry) Protocols() []p2p.Protocol {
return []p2p.Protocol{
{
diff --git a/swarm/network/stream/streamer_test.go b/swarm/network/stream/streamer_test.go
index 16c74d3b3..cdaeb92d0 100644
--- a/swarm/network/stream/streamer_test.go
+++ b/swarm/network/stream/streamer_test.go
@@ -20,12 +20,17 @@ import (
"bytes"
"context"
"errors"
+ "fmt"
"strconv"
"testing"
"time"
- "github.com/ethereum/go-ethereum/crypto/sha3"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/p2p/enode"
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
+ "github.com/ethereum/go-ethereum/swarm/network"
+ "golang.org/x/crypto/sha3"
)
func TestStreamerSubscribe(t *testing.T) {
@@ -921,3 +926,191 @@ func TestMaxPeerServersWithoutUnsubscribe(t *testing.T) {
}
}
}
+
+//TestHasPriceImplementation checks that the Registry has a
+//`Price` interface implementation
+func TestHasPriceImplementation(t *testing.T) {
+ _, r, _, teardown, err := newStreamerTester(t, &RegistryOptions{
+ Retrieval: RetrievalDisabled,
+ Syncing: SyncingDisabled,
+ })
+ defer teardown()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if r.prices == nil {
+ t.Fatal("No prices implementation available for the stream protocol")
+ }
+
+ pricesInstance, ok := r.prices.(*StreamerPrices)
+ if !ok {
+ t.Fatal("`Registry` does not have the expected Prices instance")
+ }
+ price := pricesInstance.Price(&ChunkDeliveryMsgRetrieval{})
+ if price == nil || price.Value == 0 || price.Value != pricesInstance.getChunkDeliveryMsgRetrievalPrice() {
+ t.Fatal("No prices set for chunk delivery msg")
+ }
+
+ price = pricesInstance.Price(&RetrieveRequestMsg{})
+ if price == nil || price.Value == 0 || price.Value != pricesInstance.getRetrieveRequestMsgPrice() {
+ t.Fatal("No prices set for chunk delivery msg")
+ }
+}
+
+/*
+TestRequestPeerSubscriptions is a unit test for stream's pull sync subscriptions.
+
+The test does:
+ * assign each connected peer to a bin map
+ * build up a known kademlia in advance
+ * run the EachConn function, which returns supposed subscription bins
+ * store all supposed bins per peer in a map
+ * check that all peers have the expected subscriptions
+
+This kad table and its peers are copied from network.TestKademliaCase1,
+it represents an edge case but for the purpose of testing the
+syncing subscriptions it is just fine.
+
+Addresses used in this test are discovered as part of the simulation network
+in higher level tests for streaming. They were generated randomly.
+
+The resulting kademlia looks like this:
+=========================================================================
+Fri Dec 21 20:02:39 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 7efef1
+population: 12 (12), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
+000 2 8196 835f | 2 8196 (0) 835f (0)
+001 2 2690 28f0 | 2 2690 (0) 28f0 (0)
+002 2 4d72 4a45 | 2 4d72 (0) 4a45 (0)
+003 1 646e | 1 646e (0)
+004 3 769c 76d1 7656 | 3 769c (0) 76d1 (0) 7656 (0)
+============ DEPTH: 5 ==========================================
+005 1 7a48 | 1 7a48 (0)
+006 1 7cbd | 1 7cbd (0)
+007 0 | 0
+008 0 | 0
+009 0 | 0
+010 0 | 0
+011 0 | 0
+012 0 | 0
+013 0 | 0
+014 0 | 0
+015 0 | 0
+=========================================================================
+*/
+func TestRequestPeerSubscriptions(t *testing.T) {
+ // the pivot address; this is the actual kademlia node
+ pivotAddr := "7efef1c41d77f843ad167be95f6660567eb8a4a59f39240000cce2e0d65baf8e"
+
+ // a map of bin number to addresses from the given kademlia
+ binMap := make(map[int][]string)
+ binMap[0] = []string{
+ "835fbbf1d16ba7347b6e2fc552d6e982148d29c624ea20383850df3c810fa8fc",
+ "81968a2d8fb39114342ee1da85254ec51e0608d7f0f6997c2a8354c260a71009",
+ }
+ binMap[1] = []string{
+ "28f0bc1b44658548d6e05dd16d4c2fe77f1da5d48b6774bc4263b045725d0c19",
+ "2690a910c33ee37b91eb6c4e0731d1d345e2dc3b46d308503a6e85bbc242c69e",
+ }
+ binMap[2] = []string{
+ "4a45f1fc63e1a9cb9dfa44c98da2f3d20c2923e5d75ff60b2db9d1bdb0c54d51",
+ "4d72a04ddeb851a68cd197ef9a92a3e2ff01fbbff638e64929dd1a9c2e150112",
+ }
+ binMap[3] = []string{
+ "646e9540c84f6a2f9cf6585d45a4c219573b4fd1b64a3c9a1386fc5cf98c0d4d",
+ }
+ binMap[4] = []string{
+ "7656caccdc79cd8d7ce66d415cc96a718e8271c62fb35746bfc2b49faf3eebf3",
+ "76d1e83c71ca246d042e37ff1db181f2776265fbcfdc890ce230bfa617c9c2f0",
+ "769ce86aa90b518b7ed382f9fdacfbed93574e18dc98fe6c342e4f9f409c2d5a",
+ }
+ binMap[5] = []string{
+ "7a48f75f8ca60487ae42d6f92b785581b40b91f2da551ae73d5eae46640e02e8",
+ }
+ binMap[6] = []string{
+ "7cbd42350bde8e18ae5b955b5450f8e2cef3419f92fbf5598160c60fd78619f0",
+ }
+
+ // create the pivot's kademlia
+ addr := common.FromHex(pivotAddr)
+ k := network.NewKademlia(addr, network.NewKadParams())
+
+ // construct the peers and the kademlia
+ for _, binaddrs := range binMap {
+ for _, a := range binaddrs {
+ addr := common.FromHex(a)
+ k.On(network.NewPeer(&network.BzzPeer{BzzAddr: &network.BzzAddr{OAddr: addr}}, k))
+ }
+ }
+
+ // TODO: check kad table is same
+ // currently k.String() prints date so it will never be the same :)
+ // --> implement JSON representation of kad table
+ log.Debug(k.String())
+
+ // simulate that we would do subscriptions: just store the bin numbers
+ fakeSubscriptions := make(map[string][]int)
+ //after the test, we need to reset the subscriptionFunc to the default
+ defer func() { subscriptionFunc = doRequestSubscription }()
+ // define the function which should run for each connection
+ // instead of doing real subscriptions, we just store the bin numbers
+ subscriptionFunc = func(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
+ // get the peer ID
+ peerstr := fmt.Sprintf("%x", p.Over())
+ // create the array of bins per peer
+ if _, ok := fakeSubscriptions[peerstr]; !ok {
+ fakeSubscriptions[peerstr] = make([]int, 0)
+ }
+ // store the (fake) bin subscription
+ log.Debug(fmt.Sprintf("Adding fake subscription for peer %s with bin %d", peerstr, bin))
+ fakeSubscriptions[peerstr] = append(fakeSubscriptions[peerstr], int(bin))
+ return true
+ }
+ // create just a simple Registry object in order to be able to call...
+ r := &Registry{}
+ r.requestPeerSubscriptions(k, nil)
+ // calculate the kademlia depth
+ kdepth := k.NeighbourhoodDepth()
+
+ // now, check that all peers have the expected (fake) subscriptions
+ // iterate the bin map
+ for bin, peers := range binMap {
+ // for every peer...
+ for _, peer := range peers {
+ // ...get its (fake) subscriptions
+ fakeSubsForPeer := fakeSubscriptions[peer]
+ // if the peer's bin is shallower than the kademlia depth...
+ if bin < kdepth {
+ // (iterate all (fake) subscriptions)
+ for _, subbin := range fakeSubsForPeer {
+ // ...only the peer's bin should be "subscribed"
+ // (and thus have only one subscription)
+ if subbin != bin || len(fakeSubsForPeer) != 1 {
+ t.Fatalf("Did not get expected subscription for bin < depth; bin of peer %s: %d, subscription: %d", peer, bin, subbin)
+ }
+ }
+ } else { //if the peer's bin is equal or higher than the kademlia depth...
+ // (iterate all (fake) subscriptions)
+ for i, subbin := range fakeSubsForPeer {
+ // ...each bin from the peer's bin number up to k.MaxProxDisplay should be "subscribed"
+ // as we start from depth we can use the iteration index to check
+ if subbin != i+kdepth {
+ t.Fatalf("Did not get expected subscription for bin > depth; bin of peer %s: %d, subscription: %d", peer, bin, subbin)
+ }
+ // the last "subscription" should be k.MaxProxDisplay
+ if i == len(fakeSubsForPeer)-1 && subbin != k.MaxProxDisplay {
+ t.Fatalf("Expected last subscription to be: %d, but is: %d", k.MaxProxDisplay, subbin)
+ }
+ }
+ }
+ }
+ }
+
+ // print some output
+ for p, subs := range fakeSubscriptions {
+ log.Debug(fmt.Sprintf("Peer %s has the following fake subscriptions: ", p))
+ for _, bin := range subs {
+ log.Debug(fmt.Sprintf("%d,", bin))
+ }
+ }
+}
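
Applied to the kademlia table in the comment above (depth 5), and assuming the default `MaxProxDisplay` of 16: peer 646e sits in bin 3 < 5, so it must end up with exactly one fake subscription, for bin 3; peers 7a48 (bin 5) and 7cbd (bin 6) sit at or beyond the depth, so each must end up with subscriptions for every bin from 5 through 16.
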
diff --git a/swarm/network/stream/syncer.go b/swarm/network/stream/syncer.go
index 4bfbac8b0..4fb8b9342 100644
--- a/swarm/network/stream/syncer.go
+++ b/swarm/network/stream/syncer.go
@@ -127,19 +127,9 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6
// SwarmSyncerClient
type SwarmSyncerClient struct {
- sessionAt uint64
- nextC chan struct{}
- sessionRoot storage.Address
- sessionReader storage.LazySectionReader
- retrieveC chan *storage.Chunk
- storeC chan *storage.Chunk
- store storage.SyncChunkStore
- // chunker storage.Chunker
- currentRoot storage.Address
- requestFunc func(chunk *storage.Chunk)
- end, start uint64
- peer *Peer
- stream Stream
+ store storage.SyncChunkStore
+ peer *Peer
+ stream Stream
}
// NewSwarmSyncerClient is a constructor for provable data exchange syncer
@@ -209,46 +199,6 @@ func (s *SwarmSyncerClient) BatchDone(stream Stream, from uint64, hashes []byte,
return nil
}
-func (s *SwarmSyncerClient) TakeoverProof(stream Stream, from uint64, hashes []byte, root storage.Address) (*TakeoverProof, error) {
- // for provable syncer currentRoot is non-zero length
- // TODO: reenable this with putter/getter
- // if s.chunker != nil {
- // if from > s.sessionAt { // for live syncing currentRoot is always updated
- // //expRoot, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC, s.storeC)
- // expRoot, _, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC)
- // if err != nil {
- // return nil, err
- // }
- // if !bytes.Equal(root, expRoot) {
- // return nil, fmt.Errorf("HandoverProof mismatch")
- // }
- // s.currentRoot = root
- // } else {
- // expHashes := make([]byte, len(hashes))
- // _, err := s.sessionReader.ReadAt(expHashes, int64(s.end*HashSize))
- // if err != nil && err != io.EOF {
- // return nil, err
- // }
- // if !bytes.Equal(expHashes, hashes) {
- // return nil, errors.New("invalid proof")
- // }
- // }
- // return nil, nil
- // }
- s.end += uint64(len(hashes)) / HashSize
- takeover := &Takeover{
- Stream: stream,
- Start: s.start,
- End: s.end,
- Root: root,
- }
- // serialise and sign
- return &TakeoverProof{
- Takeover: takeover,
- Sig: nil,
- }, nil
-}
-
func (s *SwarmSyncerClient) Close() {}
// base for parsing and formating sync bin key
diff --git a/swarm/network/stream/syncer_test.go b/swarm/network/stream/syncer_test.go
index 5764efc92..014ec9a98 100644
--- a/swarm/network/stream/syncer_test.go
+++ b/swarm/network/stream/syncer_test.go
@@ -18,11 +18,13 @@ package stream
import (
"context"
+ "errors"
"fmt"
"io/ioutil"
"math"
"os"
"sync"
+ "sync/atomic"
"testing"
"time"
@@ -43,10 +45,10 @@ import (
const dataChunkCount = 200
func TestSyncerSimulation(t *testing.T) {
- testSyncBetweenNodes(t, 2, 1, dataChunkCount, true, 1)
- testSyncBetweenNodes(t, 4, 1, dataChunkCount, true, 1)
- testSyncBetweenNodes(t, 8, 1, dataChunkCount, true, 1)
- testSyncBetweenNodes(t, 16, 1, dataChunkCount, true, 1)
+ testSyncBetweenNodes(t, 2, dataChunkCount, true, 1)
+ testSyncBetweenNodes(t, 4, dataChunkCount, true, 1)
+ testSyncBetweenNodes(t, 8, dataChunkCount, true, 1)
+ testSyncBetweenNodes(t, 16, dataChunkCount, true, 1)
}
func createMockStore(globalStore mock.GlobalStorer, id enode.ID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
@@ -67,7 +69,8 @@ func createMockStore(globalStore mock.GlobalStorer, id enode.ID, addr *network.B
return lstore, datadir, nil
}
-func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool, po uint8) {
+func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, po uint8) {
+
sim := simulation.New(map[string]simulation.ServiceFunc{
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
var store storage.ChunkStore
@@ -128,7 +131,7 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
if err != nil {
t.Fatal(err)
}
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
nodeIDs := sim.UpNodeIDs()
nodeIndex := make(map[enode.ID]int)
@@ -142,11 +145,19 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
simulation.NewPeerEventsFilter().Drop(),
)
+ var disconnected atomic.Value
go func() {
for d := range disconnections {
if d.Error != nil {
log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
- t.Fatal(d.Error)
+ disconnected.Store(true)
+ }
+ }
+ }()
+ defer func() {
+ if err != nil {
+ if yes, ok := disconnected.Load().(bool); ok && yes {
+ err = errors.New("disconnect events received")
}
}
}()
@@ -178,7 +189,7 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
}
}
// here we distribute chunks of a random file into stores 1...nodes
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
return err
}
@@ -232,3 +243,170 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
t.Fatal(result.Error)
}
}
+
+//TestSameVersionID just checks that if the version is not changed,
+//then streamer peers see each other
+func TestSameVersionID(t *testing.T) {
+ //test version ID
+ v := uint(1)
+ sim := simulation.New(map[string]simulation.ServiceFunc{
+ "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
+ var store storage.ChunkStore
+ var datadir string
+
+ node := ctx.Config.Node()
+ addr := network.NewAddr(node)
+
+ store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ bucket.Store(bucketKeyStore, store)
+ cleanup = func() {
+ store.Close()
+ os.RemoveAll(datadir)
+ }
+ localStore := store.(*storage.LocalStore)
+ netStore, err := storage.NewNetStore(localStore, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ bucket.Store(bucketKeyDB, netStore)
+ kad := network.NewKademlia(addr.Over(), network.NewKadParams())
+ delivery := NewDelivery(kad, netStore)
+ netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
+
+ bucket.Store(bucketKeyDelivery, delivery)
+
+ r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
+ Retrieval: RetrievalDisabled,
+ Syncing: SyncingAutoSubscribe,
+ }, nil)
+ //assign to each node the same version ID
+ r.spec.Version = v
+
+ bucket.Store(bucketKeyRegistry, r)
+
+ return r, cleanup, nil
+
+ },
+ })
+ defer sim.Close()
+
+ //connect just two nodes
+ log.Info("Adding nodes to simulation")
+ _, err := sim.AddNodesAndConnectChain(2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ log.Info("Starting simulation")
+ ctx := context.Background()
+ //make sure they have time to connect
+ time.Sleep(200 * time.Millisecond)
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ //get the pivot node's filestore
+ nodes := sim.UpNodeIDs()
+
+ item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
+ if !ok {
+ return fmt.Errorf("No filestore")
+ }
+ registry := item.(*Registry)
+
+ //the peers should connect, thus getting the peer should not return nil
+ if registry.getPeer(nodes[1]) == nil {
+ t.Fatal("Expected the peer to not be nil, but it is")
+ }
+ return nil
+ })
+ if result.Error != nil {
+ t.Fatal(result.Error)
+ }
+ log.Info("Simulation ended")
+}
+
+//TestDifferentVersionID proves that if the streamer protocol version doesn't match,
+//then the peers are not connected at streamer level
+func TestDifferentVersionID(t *testing.T) {
+ //create a variable to hold the version ID
+ v := uint(0)
+ sim := simulation.New(map[string]simulation.ServiceFunc{
+ "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
+ var store storage.ChunkStore
+ var datadir string
+
+ node := ctx.Config.Node()
+ addr := network.NewAddr(node)
+
+ store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ bucket.Store(bucketKeyStore, store)
+ cleanup = func() {
+ store.Close()
+ os.RemoveAll(datadir)
+ }
+ localStore := store.(*storage.LocalStore)
+ netStore, err := storage.NewNetStore(localStore, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ bucket.Store(bucketKeyDB, netStore)
+ kad := network.NewKademlia(addr.Over(), network.NewKadParams())
+ delivery := NewDelivery(kad, netStore)
+ netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
+
+ bucket.Store(bucketKeyDelivery, delivery)
+
+ r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
+ Retrieval: RetrievalDisabled,
+ Syncing: SyncingAutoSubscribe,
+ }, nil)
+
+ //increase the version ID for each node
+ v++
+ r.spec.Version = v
+
+ bucket.Store(bucketKeyRegistry, r)
+
+ return r, cleanup, nil
+
+ },
+ })
+ defer sim.Close()
+
+ //connect the nodes
+ log.Info("Adding nodes to simulation")
+ _, err := sim.AddNodesAndConnectChain(2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ log.Info("Starting simulation")
+ ctx := context.Background()
+ //make sure they have time to connect
+ time.Sleep(200 * time.Millisecond)
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ //get the pivot node's filestore
+ nodes := sim.UpNodeIDs()
+
+ item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
+ if !ok {
+ return fmt.Errorf("No filestore")
+ }
+ registry := item.(*Registry)
+
+ //getting the other peer should fail due to the different version numbers
+ if registry.getPeer(nodes[1]) != nil {
+ t.Fatal("Expected the peer to be nil, but it is not")
+ }
+ return nil
+ })
+ if result.Error != nil {
+ t.Fatal(result.Error)
+ }
+ log.Info("Simulation ended")
+
+}
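
Both version tests lean on devp2p's protocol matching: peers only run a subprotocol when its name and version agree on both sides, so bumping `r.spec.Version` per node is enough to keep the streamer handshake from completing while the underlying p2p connection still succeeds. The knob being turned, as added above:

    v := uint(0)
    // inside each node's ServiceFunc:
    r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
        Retrieval: RetrievalDisabled,
        Syncing:   SyncingAutoSubscribe,
    }, nil)
    v++                // give every node a different version...
    r.spec.Version = v // ...so the streamer protocol never matches
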
diff --git a/swarm/network/stream/visualized_snapshot_sync_sim_test.go b/swarm/network/stream/visualized_snapshot_sync_sim_test.go
index 437c17e5e..18b4c8fb0 100644
--- a/swarm/network/stream/visualized_snapshot_sync_sim_test.go
+++ b/swarm/network/stream/visualized_snapshot_sync_sim_test.go
@@ -19,16 +19,27 @@
package stream
import (
+ "bytes"
"context"
+ "errors"
"fmt"
+ "io"
+ "os"
+ "sync"
"testing"
"time"
+ "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
- "github.com/ethereum/go-ethereum/p2p/discover"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/protocols"
"github.com/ethereum/go-ethereum/p2p/simulations"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+ "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/swarm/log"
+ "github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/network/simulation"
+ "github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
)
@@ -61,19 +72,19 @@ func setupSim(serviceMap map[string]simulation.ServiceFunc) (int, int, *simulati
func watchSim(sim *simulation.Simulation) (context.Context, context.CancelFunc) {
ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
panic(err)
}
disconnections := sim.PeerEvents(
context.Background(),
sim.NodeIDs(),
- simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
+ simulation.NewPeerEventsFilter().Drop(),
)
go func() {
for d := range disconnections {
- log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
+ log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
panic("unexpected disconnect")
cancelSimRun()
}
@@ -84,6 +95,7 @@ func watchSim(sim *simulation.Simulation) (context.Context, context.CancelFunc)
//This test requests bogus hashes into the network
func TestNonExistingHashesWithServer(t *testing.T) {
+
nodeCount, _, sim := setupSim(retrievalSimServiceMap)
defer sim.Close()
@@ -101,7 +113,7 @@ func TestNonExistingHashesWithServer(t *testing.T) {
result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
//check on the node's FileStore (netstore)
- id := sim.RandomUpNode().ID
+ id := sim.Net.GetRandomUpNode().ID()
item, ok := sim.NodeItem(id, bucketKeyFileStore)
if !ok {
t.Fatalf("No filestore")
@@ -142,6 +154,61 @@ func sendSimTerminatedEvent(sim *simulation.Simulation) {
//It also sends some custom events so that the frontend
//can visualize messages like SendOfferedMsg, WantedHashesMsg, DeliveryMsg
func TestSnapshotSyncWithServer(t *testing.T) {
+ //t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
+
+ //define a wrapper object to be able to pass around data
+ wrapper := &netWrapper{}
+
+ nodeCount := *nodes
+ chunkCount := *chunks
+
+ if nodeCount == 0 || chunkCount == 0 {
+ nodeCount = 32
+ chunkCount = 1
+ }
+
+ log.Info(fmt.Sprintf("Running the simulation with %d nodes and %d chunks", nodeCount, chunkCount))
+
+ sim := simulation.New(map[string]simulation.ServiceFunc{
+ "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
+ n := ctx.Config.Node()
+ addr := network.NewAddr(n)
+ store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ bucket.Store(bucketKeyStore, store)
+ localStore := store.(*storage.LocalStore)
+ netStore, err := storage.NewNetStore(localStore, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ kad := network.NewKademlia(addr.Over(), network.NewKadParams())
+ delivery := NewDelivery(kad, netStore)
+ netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New
+
+ r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
+ Retrieval: RetrievalDisabled,
+ Syncing: SyncingAutoSubscribe,
+ SyncUpdateDelay: 3 * time.Second,
+ }, nil)
+
+ tr := &testRegistry{
+ Registry: r,
+ w: wrapper,
+ }
+
+ bucket.Store(bucketKeyRegistry, tr)
+
+ cleanup = func() {
+ netStore.Close()
+ tr.Close()
+ os.RemoveAll(datadir)
+ }
+
+ return tr, cleanup, nil
+ },
+ }).WithServer(":8888") //start with the HTTP server
-	nodeCount, chunkCount, sim := setupSim(simServiceMap)
defer sim.Close()
@@ -150,12 +217,13 @@ func TestSnapshotSyncWithServer(t *testing.T) {
conf := &synctestConfig{}
//map of discover ID to indexes of chunks expected at that ID
- conf.idToChunksMap = make(map[discover.NodeID][]int)
+ conf.idToChunksMap = make(map[enode.ID][]int)
//map of overlay address to discover ID
- conf.addrToIDMap = make(map[string]discover.NodeID)
+ conf.addrToIDMap = make(map[string]enode.ID)
//array where the generated chunk hashes will be stored
conf.hashes = make([]storage.Address, 0)
-
+ //pass the network to the wrapper object
+ wrapper.setNetwork(sim.Net)
err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
if err != nil {
panic(err)
@@ -164,49 +232,6 @@ func TestSnapshotSyncWithServer(t *testing.T) {
ctx, cancelSimRun := watchSim(sim)
defer cancelSimRun()
- //setup filters in the event feed
- offeredHashesFilter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(1)
- wantedFilter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(2)
- deliveryFilter := simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("stream").MsgCode(6)
- eventC := sim.PeerEvents(ctx, sim.UpNodeIDs(), offeredHashesFilter, wantedFilter, deliveryFilter)
-
- quit := make(chan struct{})
-
- go func() {
- for e := range eventC {
- select {
- case <-quit:
- fmt.Println("quitting event loop")
- return
- default:
- }
- if e.Error != nil {
- t.Fatal(e.Error)
- }
- if *e.Event.MsgCode == uint64(1) {
- evt := &simulations.Event{
- Type: EventTypeChunkOffered,
- Node: sim.Net.GetNode(e.NodeID),
- Control: false,
- }
- sim.Net.Events().Send(evt)
- } else if *e.Event.MsgCode == uint64(2) {
- evt := &simulations.Event{
- Type: EventTypeChunkWanted,
- Node: sim.Net.GetNode(e.NodeID),
- Control: false,
- }
- sim.Net.Events().Send(evt)
- } else if *e.Event.MsgCode == uint64(6) {
- evt := &simulations.Event{
- Type: EventTypeChunkDelivered,
- Node: sim.Net.GetNode(e.NodeID),
- Control: false,
- }
- sim.Net.Events().Send(evt)
- }
- }
- }()
//run the sim
result := runSim(conf, ctx, sim, chunkCount)
@@ -215,11 +240,150 @@ func TestSnapshotSyncWithServer(t *testing.T) {
Type: EventTypeSimTerminated,
Control: false,
}
- sim.Net.Events().Send(evt)
+ go sim.Net.Events().Send(evt)
if result.Error != nil {
panic(result.Error)
}
- close(quit)
log.Info("Simulation ended")
}
+
+//testRegistry embeds Registry;
+//it allows us to replace the protocol's run function
+type testRegistry struct {
+ *Registry
+ w *netWrapper
+}
+
+//Protocols replaces the protocol's run function
+func (tr *testRegistry) Protocols() []p2p.Protocol {
+ regProto := tr.Registry.Protocols()
+	//replace the `stream` protocol's run function with the testRegistry's one
+ regProto[0].Run = tr.runProto
+ return regProto
+}
+
+//runProto is the overriding protocol run function for this test
+func (tr *testRegistry) runProto(p *p2p.Peer, rw p2p.MsgReadWriter) error {
+ //create a custom rw message ReadWriter
+ testRw := &testMsgReadWriter{
+ MsgReadWriter: rw,
+ Peer: p,
+ w: tr.w,
+ Registry: tr.Registry,
+ }
+ //now run the actual upper layer `Registry`'s protocol function
+ return tr.runProtocol(p, testRw)
+}
+
+//testMsgReadWriter is a custom message ReadWriter;
+//it allows us to consume a message twice
+type testMsgReadWriter struct {
+ *Registry
+ p2p.MsgReadWriter
+ *p2p.Peer
+ w *netWrapper
+}
+
+//netWrapper is a wrapper object that lets us pass data around
+type netWrapper struct {
+ net *simulations.Network
+}
+
+//setNetwork stores the network in the wrapper for later use (inside the custom rw)
+func (w *netWrapper) setNetwork(n *simulations.Network) {
+ w.net = n
+}
+
+//getNetwork returns the network from the wrapper (used inside the custom rw)
+func (w *netWrapper) getNetwork() *simulations.Network {
+ return w.net
+}
+
+// ReadMsg reads a message from the underlying MsgReadWriter and emits a
+// "message received" event
+//we do this because we are interested in the Payload of the message for custom use
+//in this test, but messages can only be consumed once (stream io.Reader)
+func (ev *testMsgReadWriter) ReadMsg() (p2p.Msg, error) {
+ //read the message from the underlying rw
+ msg, err := ev.MsgReadWriter.ReadMsg()
+ if err != nil {
+ return msg, err
+ }
+
+	//ignore message codes we do not need to inspect
+ subCodes := []uint64{1, 2, 10}
+ found := false
+ for _, c := range subCodes {
+ if c == msg.Code {
+ found = true
+ }
+ }
+ //just return if not a msg code we are interested in
+ if !found {
+ return msg, nil
+ }
+
+	//we use an io.TeeReader so that we can read the message twice:
+	//the Payload is an io.Reader, so once we read from it, the actual protocol
+	//handler cannot access it anymore.
+	//But we need that handler to be able to consume the message as normal,
+	//as if we had done nothing with it here
+ var buf bytes.Buffer
+ tee := io.TeeReader(msg.Payload, &buf)
+
+ mcp := &p2p.Msg{
+ Code: msg.Code,
+ Size: msg.Size,
+ ReceivedAt: msg.ReceivedAt,
+ Payload: tee,
+ }
+ //assign the copy for later use
+ msg.Payload = &buf
+
+ //now let's look into the message
+ var wmsg protocols.WrappedMsg
+ err = mcp.Decode(&wmsg)
+ if err != nil {
+ log.Error(err.Error())
+ return msg, err
+ }
+ //create a new message from the code
+ val, ok := ev.Registry.GetSpec().NewMsg(mcp.Code)
+ if !ok {
+		return msg, fmt.Errorf("invalid message code: %v", msg.Code)
+ }
+ //decode it
+ if err := rlp.DecodeBytes(wmsg.Payload, val); err != nil {
+		return msg, fmt.Errorf("decoding error <= %v: %v", msg, err)
+ }
+ //now for every message type we are interested in, create a custom event and send it
+ var evt *simulations.Event
+ switch val := val.(type) {
+ case *OfferedHashesMsg:
+ evt = &simulations.Event{
+ Type: EventTypeChunkOffered,
+ Node: ev.w.getNetwork().GetNode(ev.ID()),
+ Control: false,
+ Data: val.Hashes,
+ }
+ case *WantedHashesMsg:
+ evt = &simulations.Event{
+ Type: EventTypeChunkWanted,
+ Node: ev.w.getNetwork().GetNode(ev.ID()),
+ Control: false,
+ }
+ case *ChunkDeliveryMsgSyncing:
+ evt = &simulations.Event{
+ Type: EventTypeChunkDelivered,
+ Node: ev.w.getNetwork().GetNode(ev.ID()),
+ Control: false,
+ Data: val.Addr.String(),
+ }
+ }
+ if evt != nil {
+ //send custom event to feed; frontend will listen to it and display
+ ev.w.getNetwork().Events().Send(evt)
+ }
+ return msg, nil
+}
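
// A standalone sketch of the io.TeeReader trick used in ReadMsg above: a
// message payload is a one-shot stream, so every read from the tee also fills
// a buffer, and the buffered copy then stands in for the consumed payload.
// Names here are illustrative only.
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	payload := io.Reader(strings.NewReader("one-shot stream"))

	var buf bytes.Buffer
	tee := io.TeeReader(payload, &buf) // reading from tee copies into buf

	peeked, _ := ioutil.ReadAll(tee) // inspection pass drains the original
	fmt.Printf("peeked:   %s\n", peeked)

	consumed, _ := ioutil.ReadAll(&buf) // the handler reads the buffered copy
	fmt.Printf("consumed: %s\n", consumed)
}
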
diff --git a/swarm/network_test.go b/swarm/network_test.go
index d84f28147..71d4b8f16 100644
--- a/swarm/network_test.go
+++ b/swarm/network_test.go
@@ -259,6 +259,7 @@ type testSwarmNetworkOptions struct {
// - May wait for Kademlia on every node to be healthy.
// - Checking if a file is retrievable from all nodes.
func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwarmNetworkStep) {
+
if o == nil {
o = new(testSwarmNetworkOptions)
}
@@ -352,7 +353,7 @@ func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwa
}
if *waitKademlia {
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ if _, err := sim.WaitTillHealthy(ctx); err != nil {
return err
}
}
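
// Call-site sketch for the changed health check: the old minimum-bin-size
// argument is gone and WaitTillHealthy derives what "healthy" means from the
// simulation's own kademlias. A compilable helper, assuming a ready sim:
package swarmtest

import (
	"context"
	"time"

	"github.com/ethereum/go-ethereum/swarm/network/simulation"
)

// waitHealthy blocks until every node's kademlia reports healthy,
// or until the timeout elapses.
func waitHealthy(sim *simulation.Simulation) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	_, err := sim.WaitTillHealthy(ctx)
	return err
}
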
diff --git a/swarm/pot/address.go b/swarm/pot/address.go
index 728dac14e..91cada2e8 100644
--- a/swarm/pot/address.go
+++ b/swarm/pot/address.go
@@ -41,10 +41,6 @@ func NewAddressFromBytes(b []byte) Address {
return Address(h)
}
-func (a Address) IsZero() bool {
- return a.Bin() == zerosBin
-}
-
func (a Address) String() string {
return fmt.Sprintf("%x", a[:])
}
@@ -166,7 +162,6 @@ func ToBytes(v Val) []byte {
}
// DefaultPof returns a proximity order comparison operator function
-// where all
func DefaultPof(max int) func(one, other Val, pos int) (int, bool) {
return func(one, other Val, pos int) (int, bool) {
po, eq := proximityOrder(ToBytes(one), ToBytes(other), pos)
@@ -178,6 +173,9 @@ func DefaultPof(max int) func(one, other Val, pos int) (int, bool) {
}
}
+// proximityOrder returns two parameters:
+// 1. relative proximity order of the arguments one & other;
+// 2. boolean indicating whether the full match occurred (one == other).
func proximityOrder(one, other []byte, pos int) (int, bool) {
for i := pos / 8; i < len(one); i++ {
if one[i] == other[i] {
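
// A self-contained sketch of what proximityOrder computes: the index of the
// first bit at which two byte slices differ, i.e. the length of their common
// prefix in bits (the basis of DefaultPof, minus the pos/max bookkeeping).
package main

import "fmt"

func proximity(one, other []byte) (po int, eq bool) {
	for i := 0; i < len(one); i++ {
		if one[i] == other[i] {
			continue
		}
		x := one[i] ^ other[i]
		for j := 0; j < 8; j++ {
			if x&(0x80>>uint(j)) != 0 {
				return i*8 + j, false
			}
		}
	}
	return len(one) * 8, true
}

func main() {
	fmt.Println(proximity([]byte{0xFF}, []byte{0xFF})) // 8 true  (full match)
	fmt.Println(proximity([]byte{0xF0}, []byte{0xFF})) // 4 false (differ at bit 4)
	fmt.Println(proximity([]byte{0x80}, []byte{0x00})) // 0 false (differ at bit 0)
}
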
diff --git a/swarm/pot/pot.go b/swarm/pot/pot.go
index dfda84804..7e3967f3f 100644
--- a/swarm/pot/pot.go
+++ b/swarm/pot/pot.go
@@ -144,13 +144,10 @@ func add(t *Pot, val Val, pof Pof) (*Pot, int, bool) {
return r, po, found
}
-// Remove called on (v) deletes v from the Pot and returns
-// the proximity order of v and a boolean value indicating
-// if the value was found
-// Remove called on (t, v) returns a new Pot that contains all the elements of t
-// minus the value v, using the applicative remove
-// the second return value is the proximity order of the inserted element
-// the third is boolean indicating if the item was found
+// Remove deletes element v from the Pot t and returns three parameters:
+// 1. new Pot that contains all the elements of t minus the element v;
+// 2. proximity order of the removed element v;
+// 3. boolean indicating whether the item was found.
func Remove(t *Pot, v Val, pof Pof) (*Pot, int, bool) {
return remove(t, v, pof)
}
@@ -161,10 +158,7 @@ func remove(t *Pot, val Val, pof Pof) (r *Pot, po int, found bool) {
if found {
size--
if size == 0 {
- r = &Pot{
- po: t.po,
- }
- return r, po, true
+ return &Pot{}, po, true
}
i := len(t.bins) - 1
last := t.bins[i]
@@ -201,7 +195,7 @@ func remove(t *Pot, val Val, pof Pof) (r *Pot, po int, found bool) {
}
bins = append(bins, t.bins[j:]...)
r = &Pot{
- pin: val,
+ pin: t.pin,
size: size,
po: t.po,
bins: bins,
@@ -453,64 +447,50 @@ func union(t0, t1 *Pot, pof Pof) (*Pot, int) {
return n, common
}
-// Each called with (f) is a synchronous iterator over the bins of a node
-// respecting an ordering
-// proximity > pinnedness
-func (t *Pot) Each(f func(Val, int) bool) bool {
+// Each is a synchronous iterator over the elements of pot with function f.
+func (t *Pot) Each(f func(Val) bool) bool {
return t.each(f)
}
-func (t *Pot) each(f func(Val, int) bool) bool {
- var next bool
- for _, n := range t.bins {
- if n == nil {
- return true
- }
- next = n.each(f)
- if !next {
- return false
- }
- }
- if t.size == 0 {
+// each is a synchronous iterator over the elements of pot with function f.
+// the iteration ends if the function returns false or there are no more elements.
+func (t *Pot) each(f func(Val) bool) bool {
+ if t == nil || t.size == 0 {
return false
}
- return f(t.pin, t.po)
-}
-
-// EachFrom called with (f, start) is a synchronous iterator over the elements of a Pot
-// within the inclusive range starting from proximity order start
-// the function argument is passed the value and the proximity order wrt the root pin
-// it does NOT include the pinned item of the root
-// respecting an ordering
-// proximity > pinnedness
-// the iteration ends if the function return false or there are no more elements
-// end of a po range can be implemented since po is passed to the function
-func (t *Pot) EachFrom(f func(Val, int) bool, po int) bool {
- return t.eachFrom(f, po)
-}
-
-func (t *Pot) eachFrom(f func(Val, int) bool, po int) bool {
- var next bool
- _, lim := t.getPos(po)
- for i := lim; i < len(t.bins); i++ {
- n := t.bins[i]
- next = n.each(f)
- if !next {
+ for _, n := range t.bins {
+ if !n.each(f) {
return false
}
}
- return f(t.pin, t.po)
+ return f(t.pin)
+}
+
+// eachFrom is a synchronous iterator over the elements of pot with function f,
+// starting from a certain proximity order po, which is passed as the second parameter.
+// the iteration ends if the function returns false or there are no more elements.
+func (t *Pot) eachFrom(f func(Val) bool, po int) bool {
+ if t == nil || t.size == 0 {
+ return false
+ }
+ _, beg := t.getPos(po)
+ for i := beg; i < len(t.bins); i++ {
+ if !t.bins[i].each(f) {
+ return false
+ }
+ }
+ return f(t.pin)
}
// EachBin iterates over bins of the pivot node and offers iterators to the caller on each
// subtree passing the proximity order and the size
// the iteration continues until the function's return value is false
// or there are no more subtries
-func (t *Pot) EachBin(val Val, pof Pof, po int, f func(int, int, func(func(val Val, i int) bool) bool) bool) {
+func (t *Pot) EachBin(val Val, pof Pof, po int, f func(int, int, func(func(val Val) bool) bool) bool) {
t.eachBin(val, pof, po, f)
}
-func (t *Pot) eachBin(val Val, pof Pof, po int, f func(int, int, func(func(val Val, i int) bool) bool) bool) {
+func (t *Pot) eachBin(val Val, pof Pof, po int, f func(int, int, func(func(val Val) bool) bool) bool) {
if t == nil || t.size == 0 {
return
}
@@ -530,8 +510,8 @@ func (t *Pot) eachBin(val Val, pof Pof, po int, f func(int, int, func(func(val V
}
if lim == len(t.bins) {
if spr >= po {
- f(spr, 1, func(g func(Val, int) bool) bool {
- return g(t.pin, spr)
+ f(spr, 1, func(g func(Val) bool) bool {
+ return g(t.pin)
})
}
return
@@ -545,9 +525,9 @@ func (t *Pot) eachBin(val Val, pof Pof, po int, f func(int, int, func(func(val V
size += n.size
}
if spr >= po {
- if !f(spr, t.size-size, func(g func(Val, int) bool) bool {
- return t.eachFrom(func(v Val, j int) bool {
- return g(v, spr)
+ if !f(spr, t.size-size, func(g func(Val) bool) bool {
+ return t.eachFrom(func(v Val) bool {
+ return g(v)
}, spo)
}) {
return
@@ -595,7 +575,7 @@ func (t *Pot) eachNeighbour(val Val, pof Pof, f func(Val, int) bool) bool {
}
for i := l - 1; i > ir; i-- {
- next = t.bins[i].each(func(v Val, _ int) bool {
+ next = t.bins[i].each(func(v Val) bool {
return f(v, po)
})
if !next {
@@ -605,7 +585,7 @@ func (t *Pot) eachNeighbour(val Val, pof Pof, f func(Val, int) bool) bool {
for i := il - 1; i >= 0; i-- {
n := t.bins[i]
- next = n.each(func(v Val, _ int) bool {
+ next = n.each(func(v Val) bool {
return f(v, n.po)
})
if !next {
@@ -719,7 +699,7 @@ func (t *Pot) eachNeighbourAsync(val Val, pof Pof, max int, maxPos int, f func(V
wg.Add(m)
}
go func(pn *Pot, pm int) {
- pn.each(func(v Val, _ int) bool {
+ pn.each(func(v Val) bool {
if wg != nil {
defer wg.Done()
}
@@ -746,7 +726,7 @@ func (t *Pot) eachNeighbourAsync(val Val, pof Pof, max int, maxPos int, f func(V
wg.Add(m)
}
go func(pn *Pot, pm int) {
- pn.each(func(v Val, _ int) bool {
+ pn.each(func(v Val) bool {
if wg != nil {
defer wg.Done()
}
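
// A usage sketch for the narrowed iterator API above: Each callbacks now
// receive only the value (no proximity order), and returning false stops the
// iteration early. Single-byte addresses below are zero-padded by
// NewAddressFromBytes.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/pot"
)

func main() {
	pof := pot.DefaultPof(256)
	p := pot.NewPot(pot.NewAddressFromBytes([]byte{0x00}), 0)
	for _, b := range []byte{0x80, 0xc0, 0xe0} {
		p, _, _ = pot.Add(p, pot.NewAddressFromBytes([]byte{b}), pof)
	}
	seen := 0
	p.Each(func(v pot.Val) bool {
		fmt.Printf("element: %v\n", v)
		seen++
		return seen < 2 // stop the iteration after two elements
	})
}
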
diff --git a/swarm/pot/pot_test.go b/swarm/pot/pot_test.go
index aeb23dfc6..83d604919 100644
--- a/swarm/pot/pot_test.go
+++ b/swarm/pot/pot_test.go
@@ -65,14 +65,13 @@ func randomtestAddr(n int, i int) *testAddr {
return newTestAddr(v, i)
}
-func indexes(t *Pot) (i []int, po []int) {
- t.Each(func(v Val, p int) bool {
+func indexes(t *Pot) (i []int) {
+ t.Each(func(v Val) bool {
a := v.(*testAddr)
i = append(i, a.i)
- po = append(po, p)
return true
})
- return i, po
+ return i
}
func testAdd(t *Pot, pof Pof, j int, values ...string) (_ *Pot, n int, f bool) {
@@ -82,6 +81,69 @@ func testAdd(t *Pot, pof Pof, j int, values ...string) (_ *Pot, n int, f bool) {
return t, n, f
}
+// removing a non-existing element from the pot
+func TestPotRemoveNonExisting(t *testing.T) {
+ pof := DefaultPof(8)
+ n := NewPot(newTestAddr("00111100", 0), 0)
+ n, _, _ = Remove(n, newTestAddr("00000101", 0), pof)
+ exp := "00111100"
+ got := Label(n.Pin())
+ if got[:8] != exp {
+ t.Fatalf("incorrect pinned value. Expected %v, got %v", exp, got[:8])
+ }
+}
+
+// this test creates a hierarchical pot tree, in which any child node has
+// child_po = parent_po + 1.
+// then removes a node from the middle of the tree.
+func TestPotRemoveSameBin(t *testing.T) {
+ pof := DefaultPof(8)
+ n := NewPot(newTestAddr("11111111", 0), 0)
+ n, _, _ = testAdd(n, pof, 1, "00000000", "01000000", "01100000", "01110000", "01111000")
+ n, _, _ = Remove(n, newTestAddr("01110000", 0), pof)
+ inds := indexes(n)
+ goti := n.Size()
+ expi := 5
+ if goti != expi {
+ t.Fatalf("incorrect number of elements in Pot. Expected %v, got %v", expi, goti)
+ }
+ inds = indexes(n)
+ got := fmt.Sprintf("%v", inds)
+ exp := "[5 3 2 1 0]"
+ if got != exp {
+ t.Fatalf("incorrect indexes in iteration over Pot. Expected %v, got %v", exp, got)
+ }
+}
+
+// this test creates a flat pot tree (all the elements are leaves of one root),
+// so each element falls into a different bin (has a different po).
+// then removes an arbitrary element from the pot.
+func TestPotRemoveDifferentBins(t *testing.T) {
+ pof := DefaultPof(8)
+ n := NewPot(newTestAddr("11111111", 0), 0)
+ n, _, _ = testAdd(n, pof, 1, "00000000", "10000000", "11000000", "11100000", "11110000")
+ n, _, _ = Remove(n, newTestAddr("11100000", 0), pof)
+ inds := indexes(n)
+ goti := n.Size()
+ expi := 5
+ if goti != expi {
+ t.Fatalf("incorrect number of elements in Pot. Expected %v, got %v", expi, goti)
+ }
+ inds = indexes(n)
+ got := fmt.Sprintf("%v", inds)
+ exp := "[1 2 3 5 0]"
+ if got != exp {
+ t.Fatalf("incorrect indexes in iteration over Pot. Expected %v, got %v", exp, got)
+ }
+ n, _, _ = testAdd(n, pof, 4, "11100000")
+ inds = indexes(n)
+ got = fmt.Sprintf("%v", inds)
+ exp = "[1 2 3 4 5 0]"
+ if got != exp {
+ t.Fatalf("incorrect indexes in iteration over Pot. Expected %v, got %v", exp, got)
+ }
+}
+
func TestPotAdd(t *testing.T) {
pof := DefaultPof(8)
n := NewPot(newTestAddr("00111100", 0), 0)
@@ -105,17 +167,12 @@ func TestPotAdd(t *testing.T) {
if goti != expi {
t.Fatalf("incorrect number of elements in Pot. Expected %v, got %v", expi, goti)
}
- inds, po := indexes(n)
+ inds := indexes(n)
got = fmt.Sprintf("%v", inds)
exp = "[3 4 2]"
if got != exp {
t.Fatalf("incorrect indexes in iteration over Pot. Expected %v, got %v", exp, got)
}
- got = fmt.Sprintf("%v", po)
- exp = "[1 2 0]"
- if got != exp {
- t.Fatalf("incorrect po-s in iteration over Pot. Expected %v, got %v", exp, got)
- }
}
func TestPotRemove(t *testing.T) {
@@ -134,26 +191,25 @@ func TestPotRemove(t *testing.T) {
if goti != expi {
t.Fatalf("incorrect number of elements in Pot. Expected %v, got %v", expi, goti)
}
- inds, po := indexes(n)
+ inds := indexes(n)
got = fmt.Sprintf("%v", inds)
- exp = "[2 4 0]"
+ exp = "[2 4 1]"
if got != exp {
t.Fatalf("incorrect indexes in iteration over Pot. Expected %v, got %v", exp, got)
}
- got = fmt.Sprintf("%v", po)
- exp = "[1 3 0]"
+	n, _, _ = Remove(n, newTestAddr("00111100", 0), pof) // remove the same element again
+ inds = indexes(n)
+ got = fmt.Sprintf("%v", inds)
if got != exp {
- t.Fatalf("incorrect po-s in iteration over Pot. Expected %v, got %v", exp, got)
+ t.Fatalf("incorrect indexes in iteration over Pot. Expected %v, got %v", exp, got)
}
- // remove again
- n, _, _ = Remove(n, newTestAddr("00111100", 0), pof)
- inds, _ = indexes(n)
+ n, _, _ = Remove(n, newTestAddr("00000000", 0), pof) // remove the first element
+ inds = indexes(n)
got = fmt.Sprintf("%v", inds)
exp = "[2 4]"
if got != exp {
t.Fatalf("incorrect indexes in iteration over Pot. Expected %v, got %v", exp, got)
}
-
}
func TestPotSwap(t *testing.T) {
@@ -202,7 +258,7 @@ func TestPotSwap(t *testing.T) {
})
}
sum := 0
- n.Each(func(v Val, i int) bool {
+ n.Each(func(v Val) bool {
if v == nil {
return true
}
diff --git a/swarm/pss/api.go b/swarm/pss/api.go
index eba7bb722..4556d7b7c 100644
--- a/swarm/pss/api.go
+++ b/swarm/pss/api.go
@@ -51,7 +51,7 @@ func NewAPI(ps *Pss) *API {
//
// All incoming messages to the node matching this topic will be encapsulated in the APIMsg
// struct and sent to the subscriber
-func (pssapi *API) Receive(ctx context.Context, topic Topic) (*rpc.Subscription, error) {
+func (pssapi *API) Receive(ctx context.Context, topic Topic, raw bool, prox bool) (*rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return nil, fmt.Errorf("Subscribe not supported")
@@ -59,7 +59,7 @@ func (pssapi *API) Receive(ctx context.Context, topic Topic) (*rpc.Subscription,
psssub := notifier.CreateSubscription()
- handler := func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+ hndlr := NewHandler(func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
apimsg := &APIMsg{
Msg: hexutil.Bytes(msg),
Asymmetric: asymmetric,
@@ -69,9 +69,15 @@ func (pssapi *API) Receive(ctx context.Context, topic Topic) (*rpc.Subscription,
log.Warn(fmt.Sprintf("notification on pss sub topic rpc (sub %v) msg %v failed!", psssub.ID, msg))
}
return nil
+ })
+ if raw {
+ hndlr.caps.raw = true
+ }
+ if prox {
+ hndlr.caps.prox = true
}
- deregf := pssapi.Register(&topic, handler)
+ deregf := pssapi.Register(&topic, hndlr)
go func() {
defer deregf()
select {
@@ -86,7 +92,7 @@ func (pssapi *API) Receive(ctx context.Context, topic Topic) (*rpc.Subscription,
}
func (pssapi *API) GetAddress(topic Topic, asymmetric bool, key string) (PssAddress, error) {
- var addr *PssAddress
+ var addr PssAddress
if asymmetric {
peer, ok := pssapi.Pss.pubKeyPool[key][topic]
if !ok {
@@ -101,7 +107,7 @@ func (pssapi *API) GetAddress(topic Topic, asymmetric bool, key string) (PssAddr
addr = peer.address
}
- return *addr, nil
+ return addr, nil
}
// Retrieves the node's base address in hex form
@@ -122,7 +128,7 @@ func (pssapi *API) SetPeerPublicKey(pubkey hexutil.Bytes, topic Topic, addr PssA
if err != nil {
return fmt.Errorf("Cannot unmarshal pubkey: %x", pubkey)
}
- err = pssapi.Pss.SetPeerPublicKey(pk, topic, &addr)
+ err = pssapi.Pss.SetPeerPublicKey(pk, topic, addr)
if err != nil {
return fmt.Errorf("Invalid key: %x", pk)
}
@@ -135,11 +141,11 @@ func (pssapi *API) GetSymmetricKey(symkeyid string) (hexutil.Bytes, error) {
}
func (pssapi *API) GetSymmetricAddressHint(topic Topic, symkeyid string) (PssAddress, error) {
- return *pssapi.Pss.symKeyPool[symkeyid][topic].address, nil
+ return pssapi.Pss.symKeyPool[symkeyid][topic].address, nil
}
func (pssapi *API) GetAsymmetricAddressHint(topic Topic, pubkeyid string) (PssAddress, error) {
- return *pssapi.Pss.pubKeyPool[pubkeyid][topic].address, nil
+ return pssapi.Pss.pubKeyPool[pubkeyid][topic].address, nil
}
func (pssapi *API) StringToTopic(topicstring string) (Topic, error) {
@@ -151,13 +157,26 @@ func (pssapi *API) StringToTopic(topicstring string) (Topic, error) {
}
func (pssapi *API) SendAsym(pubkeyhex string, topic Topic, msg hexutil.Bytes) error {
+ if err := validateMsg(msg); err != nil {
+ return err
+ }
return pssapi.Pss.SendAsym(pubkeyhex, topic, msg[:])
}
func (pssapi *API) SendSym(symkeyhex string, topic Topic, msg hexutil.Bytes) error {
+ if err := validateMsg(msg); err != nil {
+ return err
+ }
return pssapi.Pss.SendSym(symkeyhex, topic, msg[:])
}
+func (pssapi *API) SendRaw(addr hexutil.Bytes, topic Topic, msg hexutil.Bytes) error {
+ if err := validateMsg(msg); err != nil {
+ return err
+ }
+ return pssapi.Pss.SendRaw(PssAddress(addr), topic, msg[:])
+}
+
func (pssapi *API) GetPeerTopics(pubkeyhex string) ([]Topic, error) {
topics, _, err := pssapi.Pss.GetPublickeyPeers(pubkeyhex)
return topics, err
@@ -167,3 +186,10 @@ func (pssapi *API) GetPeerTopics(pubkeyhex string) ([]Topic, error) {
func (pssapi *API) GetPeerAddress(pubkeyhex string, topic Topic) (PssAddress, error) {
return pssapi.Pss.getPeerAddress(pubkeyhex, topic)
}
+
+func validateMsg(msg []byte) error {
+ if len(msg) == 0 {
+ return errors.New("invalid message length")
+ }
+ return nil
+}
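
// A client-side sketch of the extended "receive" subscription: the two new
// boolean arguments opt the subscription in to raw (unencrypted) and prox
// (neighbourhood-addressed) messages, mirroring the handler capability flags
// introduced in this patch. The endpoint and topic string are placeholders.
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/pss"
)

func main() {
	client, err := rpc.Dial("ws://127.0.0.1:8546") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	msgC := make(chan pss.APIMsg)
	topic := pss.BytesToTopic([]byte("demo")) // hypothetical topic
	// raw=true, prox=true: also receive raw and neighbourhood messages
	sub, err := client.Subscribe(context.Background(), "pss", msgC, "receive", topic, true, true)
	if err != nil {
		panic(err)
	}
	defer sub.Unsubscribe()
	for msg := range msgC {
		fmt.Printf("received %d bytes (asymmetric=%v)\n", len(msg.Msg), msg.Asymmetric)
	}
}
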
diff --git a/swarm/pss/client/client.go b/swarm/pss/client/client.go
index d541081d3..5ee387aa7 100644
--- a/swarm/pss/client/client.go
+++ b/swarm/pss/client/client.go
@@ -236,7 +236,7 @@ func (c *Client) RunProtocol(ctx context.Context, proto *p2p.Protocol) error {
topichex := topicobj.String()
msgC := make(chan pss.APIMsg)
c.peerPool[topicobj] = make(map[string]*pssRPCRW)
- sub, err := c.rpc.Subscribe(ctx, "pss", msgC, "receive", topichex)
+ sub, err := c.rpc.Subscribe(ctx, "pss", msgC, "receive", topichex, false, false)
if err != nil {
return fmt.Errorf("pss event subscription failed: %v", err)
}
diff --git a/swarm/pss/client/client_test.go b/swarm/pss/client/client_test.go
index 8f2f0e805..0d6788d67 100644
--- a/swarm/pss/client/client_test.go
+++ b/swarm/pss/client/client_test.go
@@ -238,7 +238,7 @@ func newServices() adapters.Services {
return k
}
params := network.NewKadParams()
- params.MinProxBinSize = 2
+ params.NeighbourhoodSize = 2
params.MaxBinSize = 3
params.MinBinSize = 1
params.MaxRetries = 1000
diff --git a/swarm/pss/forwarding_test.go b/swarm/pss/forwarding_test.go
new file mode 100644
index 000000000..084688439
--- /dev/null
+++ b/swarm/pss/forwarding_test.go
@@ -0,0 +1,356 @@
+package pss
+
+import (
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/protocols"
+ "github.com/ethereum/go-ethereum/swarm/network"
+ "github.com/ethereum/go-ethereum/swarm/pot"
+ whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
+)
+
+type testCase struct {
+ name string
+ recipient []byte
+ peers []pot.Address
+ expected []int
+ exclusive bool
+ nFails int
+ success bool
+ errors string
+}
+
+var testCases []testCase
+
+// the purpose of this test is to verify that the pss.forward() function correctly
+// selects the peers for message forwarding, depending on the message address
+// and kademlia constellation.
+func TestForwardBasic(t *testing.T) {
+ baseAddrBytes := make([]byte, 32)
+ for i := 0; i < len(baseAddrBytes); i++ {
+ baseAddrBytes[i] = 0xFF
+ }
+ var c testCase
+ base := pot.NewAddressFromBytes(baseAddrBytes)
+ var peerAddresses []pot.Address
+ const depth = 10
+ for i := 0; i <= depth; i++ {
+ // add two peers for each proximity order
+ a := pot.RandomAddressAt(base, i)
+ peerAddresses = append(peerAddresses, a)
+ a = pot.RandomAddressAt(base, i)
+ peerAddresses = append(peerAddresses, a)
+ }
+
+	// skip one level, add one peer one level deeper.
+	// as a result, we will have an edge case of three peers in the nearest neighbours' bin.
+ peerAddresses = append(peerAddresses, pot.RandomAddressAt(base, depth+2))
+
+ kad := network.NewKademlia(base[:], network.NewKadParams())
+ ps := createPss(t, kad)
+ addPeers(kad, peerAddresses)
+
+ const firstNearest = depth * 2 // shallowest peer in the nearest neighbours' bin
+ nearestNeighbours := []int{firstNearest, firstNearest + 1, firstNearest + 2}
+ var all []int // indices of all the peers
+ for i := 0; i < len(peerAddresses); i++ {
+ all = append(all, i)
+ }
+
+ for i := 0; i < len(peerAddresses); i++ {
+ // send msg directly to the known peers (recipient address == peer address)
+ c = testCase{
+ name: fmt.Sprintf("Send direct to known, id: [%d]", i),
+ recipient: peerAddresses[i][:],
+ peers: peerAddresses,
+ expected: []int{i},
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+ }
+
+ for i := 0; i < firstNearest; i++ {
+ // send random messages with proximity orders, corresponding to PO of each bin,
+ // with one peer being closer to the recipient address
+ a := pot.RandomAddressAt(peerAddresses[i], 64)
+ c = testCase{
+ name: fmt.Sprintf("Send random to each PO, id: [%d]", i),
+ recipient: a[:],
+ peers: peerAddresses,
+ expected: []int{i},
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+ }
+
+ for i := 0; i < firstNearest; i++ {
+ // send random messages with proximity orders, corresponding to PO of each bin,
+ // with random proximity relative to the recipient address
+ po := i / 2
+ a := pot.RandomAddressAt(base, po)
+ c = testCase{
+			name: fmt.Sprintf("Send random to each PO (random relative proximity), id: [%d]", i),
+ recipient: a[:],
+ peers: peerAddresses,
+ expected: []int{po * 2, po*2 + 1},
+ exclusive: true,
+ }
+ testCases = append(testCases, c)
+ }
+
+ for i := firstNearest; i < len(peerAddresses); i++ {
+ // recipient address falls into the nearest neighbours' bin
+ a := pot.RandomAddressAt(base, i)
+ c = testCase{
+ name: fmt.Sprintf("recipient address falls into the nearest neighbours' bin, id: [%d]", i),
+ recipient: a[:],
+ peers: peerAddresses,
+ expected: nearestNeighbours,
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+ }
+
+ // send msg with proximity order much deeper than the deepest nearest neighbour
+ a2 := pot.RandomAddressAt(base, 77)
+ c = testCase{
+ name: "proximity order much deeper than the deepest nearest neighbour",
+ recipient: a2[:],
+ peers: peerAddresses,
+ expected: nearestNeighbours,
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+
+ // test with partial addresses
+ const part = 12
+
+ for i := 0; i < firstNearest; i++ {
+ // send messages with partial address falling into different proximity orders
+ po := i / 2
+ if i%8 != 0 {
+ c = testCase{
+ name: fmt.Sprintf("partial address falling into different proximity orders, id: [%d]", i),
+ recipient: peerAddresses[i][:i],
+ peers: peerAddresses,
+ expected: []int{po * 2, po*2 + 1},
+ exclusive: true,
+ }
+ testCases = append(testCases, c)
+ }
+ c = testCase{
+ name: fmt.Sprintf("extended partial address falling into different proximity orders, id: [%d]", i),
+ recipient: peerAddresses[i][:part],
+ peers: peerAddresses,
+ expected: []int{po * 2, po*2 + 1},
+ exclusive: true,
+ }
+ testCases = append(testCases, c)
+ }
+
+ for i := firstNearest; i < len(peerAddresses); i++ {
+ // partial address falls into the nearest neighbours' bin
+ c = testCase{
+ name: fmt.Sprintf("partial address falls into the nearest neighbours' bin, id: [%d]", i),
+ recipient: peerAddresses[i][:part],
+ peers: peerAddresses,
+ expected: nearestNeighbours,
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+ }
+
+	// partial address with proximity order deeper than any of the nearest neighbours
+	a3 := pot.RandomAddressAt(base, part)
+	c = testCase{
+		name: "partial address with proximity order deeper than any of the nearest neighbours",
+ recipient: a3[:part],
+ peers: peerAddresses,
+ expected: nearestNeighbours,
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+
+ // special cases where partial address matches a large group of peers
+
+	// zero bytes of address are given; the msg should be delivered to all the peers
+ c = testCase{
+ name: "zero bytes of address is given",
+ recipient: []byte{},
+ peers: peerAddresses,
+ expected: all,
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+
+ // luminous radius of 8 bits, proximity order 8
+ indexAtPo8 := 16
+ c = testCase{
+ name: "luminous radius of 8 bits",
+ recipient: []byte{0xFF},
+ peers: peerAddresses,
+ expected: all[indexAtPo8:],
+ exclusive: false,
+ }
+ testCases = append(testCases, c)
+
+ // luminous radius of 256 bits, proximity order 8
+ a4 := pot.Address{}
+ a4[0] = 0xFF
+ c = testCase{
+ name: "luminous radius of 256 bits",
+ recipient: a4[:],
+ peers: peerAddresses,
+ expected: []int{indexAtPo8, indexAtPo8 + 1},
+ exclusive: true,
+ }
+ testCases = append(testCases, c)
+
+ // check correct behaviour in case send fails
+ for i := 2; i < firstNearest-3; i += 2 {
+ po := i / 2
+ // send random messages with proximity orders, corresponding to PO of each bin,
+ // with different numbers of failed attempts.
+ // msg should be received by only one of the deeper peers.
+ a := pot.RandomAddressAt(base, po)
+ c = testCase{
+			name: fmt.Sprintf("Send with failed attempts, id: [%d]", i),
+ recipient: a[:],
+ peers: peerAddresses,
+ expected: all[i+1:],
+ exclusive: true,
+ nFails: rand.Int()%3 + 2,
+ }
+ testCases = append(testCases, c)
+ }
+
+ for _, c := range testCases {
+ testForwardMsg(t, ps, &c)
+ }
+}
+
+// this function tests the forwarding of a single message. the recipient address is passed as a parameter,
+// along with the addresses of all peers, and the indices of those peers which are expected to receive the message.
+func testForwardMsg(t *testing.T, ps *Pss, c *testCase) {
+ recipientAddr := c.recipient
+ peers := c.peers
+ expected := c.expected
+ exclusive := c.exclusive
+ nFails := c.nFails
+ tries := 0 // number of previous failed tries
+
+ resultMap := make(map[pot.Address]int)
+
+ defer func() { sendFunc = sendMsg }()
+ sendFunc = func(_ *Pss, sp *network.Peer, _ *PssMsg) bool {
+ if tries < nFails {
+ tries++
+ return false
+ }
+ a := pot.NewAddressFromBytes(sp.Address())
+ resultMap[a]++
+ return true
+ }
+
+ msg := newTestMsg(recipientAddr)
+ ps.forward(msg)
+
+ // check test results
+ var fail bool
+ precision := len(recipientAddr)
+ if precision > 4 {
+ precision = 4
+ }
+ s := fmt.Sprintf("test [%s]\nmsg address: %x..., radius: %d", c.name, recipientAddr[:precision], 8*len(recipientAddr))
+
+ // false negatives (expected message didn't reach peer)
+ if exclusive {
+ var cnt int
+ for _, i := range expected {
+ a := peers[i]
+ cnt += resultMap[a]
+ resultMap[a] = 0
+ }
+ if cnt != 1 {
+ s += fmt.Sprintf("\n%d messages received by %d peers with indices: [%v]", cnt, len(expected), expected)
+ fail = true
+ }
+ } else {
+ for _, i := range expected {
+ a := peers[i]
+ received := resultMap[a]
+ if received != 1 {
+ s += fmt.Sprintf("\npeer number %d [%x...] received %d messages", i, a[:4], received)
+ fail = true
+ }
+ resultMap[a] = 0
+ }
+ }
+
+ // false positives (unexpected message reached peer)
+ for k, v := range resultMap {
+ if v != 0 {
+ // find the index of the false positive peer
+ var j int
+ for j = 0; j < len(peers); j++ {
+ if peers[j] == k {
+ break
+ }
+ }
+ s += fmt.Sprintf("\npeer number %d [%x...] received %d messages", j, k[:4], v)
+ fail = true
+ }
+ }
+
+ if fail {
+ t.Fatal(s)
+ }
+}
+
+func addPeers(kad *network.Kademlia, addresses []pot.Address) {
+ for _, a := range addresses {
+ p := newTestDiscoveryPeer(a, kad)
+ kad.On(p)
+ }
+}
+
+func createPss(t *testing.T, kad *network.Kademlia) *Pss {
+	privKey, err := crypto.GenerateKey()
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+ pssp := NewPssParams().WithPrivateKey(privKey)
+ ps, err := NewPss(kad, pssp)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+ return ps
+}
+
+func newTestDiscoveryPeer(addr pot.Address, kad *network.Kademlia) *network.Peer {
+ rw := &p2p.MsgPipeRW{}
+ p := p2p.NewPeer(enode.ID{}, "test", []p2p.Cap{})
+ pp := protocols.NewPeer(p, rw, &protocols.Spec{})
+ bp := &network.BzzPeer{
+ Peer: pp,
+ BzzAddr: &network.BzzAddr{
+ OAddr: addr.Bytes(),
+ UAddr: []byte(fmt.Sprintf("%x", addr[:])),
+ },
+ }
+ return network.NewPeer(bp, kad)
+}
+
+func newTestMsg(addr []byte) *PssMsg {
+ msg := newPssMsg(&msgParams{})
+ msg.To = addr[:]
+ msg.Expire = uint32(time.Now().Add(time.Second * 60).Unix())
+ msg.Payload = &whisper.Envelope{
+ Topic: [4]byte{},
+ Data: []byte("i have nothing to hide"),
+ }
+ return msg
+}
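
// The test above stubs out delivery by reassigning the package-level sendFunc
// variable and restoring it with defer. A generic sketch of that seam pattern
// (all names here are illustrative, not from the patch):
package main

import "fmt"

var send = realSend // production code calls through the variable ("seam")

func realSend(msg string) bool {
	// would hit the network here
	return true
}

func deliver(msg string) {
	if !send(msg) {
		fmt.Println("send failed; would try the next peer")
	}
}

func main() {
	// a test swaps the seam for a recorder and restores it afterwards
	orig := send
	defer func() { send = orig }()
	var got []string
	send = func(msg string) bool {
		got = append(got, msg)
		return true
	}
	deliver("hello")
	fmt.Println("recorded:", got)
}
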
diff --git a/swarm/pss/handshake.go b/swarm/pss/handshake.go
index e3ead77d0..bb67b5156 100644
--- a/swarm/pss/handshake.go
+++ b/swarm/pss/handshake.go
@@ -321,9 +321,7 @@ func (ctl *HandshakeController) handleKeys(pubkeyid string, keymsg *handshakeMsg
for _, key := range keymsg.Keys {
sendsymkey := make([]byte, len(key))
copy(sendsymkey, key)
- var address PssAddress
- copy(address[:], keymsg.From)
- sendsymkeyid, err := ctl.pss.setSymmetricKey(sendsymkey, keymsg.Topic, &address, false, false)
+ sendsymkeyid, err := ctl.pss.setSymmetricKey(sendsymkey, keymsg.Topic, PssAddress(keymsg.From), false, false)
if err != nil {
return err
}
@@ -356,7 +354,7 @@ func (ctl *HandshakeController) handleKeys(pubkeyid string, keymsg *handshakeMsg
func (ctl *HandshakeController) sendKey(pubkeyid string, topic *Topic, keycount uint8) ([]string, error) {
var requestcount uint8
- to := &PssAddress{}
+ to := PssAddress{}
if _, ok := ctl.pss.pubKeyPool[pubkeyid]; !ok {
return []string{}, errors.New("Invalid public key")
} else if psp, ok := ctl.pss.pubKeyPool[pubkeyid][*topic]; ok {
@@ -486,7 +484,7 @@ func (api *HandshakeAPI) Handshake(pubkeyid string, topic Topic, sync bool, flus
// Activate handshake functionality on a topic
func (api *HandshakeAPI) AddHandshake(topic Topic) error {
- api.ctrl.deregisterFuncs[topic] = api.ctrl.pss.Register(&topic, api.ctrl.handler)
+ api.ctrl.deregisterFuncs[topic] = api.ctrl.pss.Register(&topic, NewHandler(api.ctrl.handler))
return nil
}
@@ -564,5 +562,5 @@ func (api *HandshakeAPI) SendSym(symkeyid string, topic Topic, msg hexutil.Bytes
api.ctrl.symKeyIndex[symkeyid].count++
log.Trace("increment symkey send use", "symkeyid", symkeyid, "count", api.ctrl.symKeyIndex[symkeyid].count, "limit", api.ctrl.symKeyIndex[symkeyid].limit, "receiver", common.ToHex(crypto.FromECDSAPub(api.ctrl.pss.PublicKey())))
}
- return
+ return err
}
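
// A registration sketch for the handler shape this patch converges on: plain
// functions are wrapped in pss.NewHandler before Register. The setup mirrors
// createPss from the forwarding test; the topic string is a placeholder.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/pss"
)

func main() {
	privKey, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	kad := network.NewKademlia(make([]byte, 32), network.NewKadParams())
	ps, err := pss.NewPss(kad, pss.NewPssParams().WithPrivateKey(privKey))
	if err != nil {
		panic(err)
	}
	topic := pss.BytesToTopic([]byte("demo"))
	deregister := ps.Register(&topic, pss.NewHandler(func(msg []byte, peer *p2p.Peer, asym bool, keyid string) error {
		fmt.Printf("got %d bytes, asymmetric=%v, key=%s\n", len(msg), asym, keyid)
		return nil
	}))
	defer deregister()
}
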
diff --git a/swarm/pss/handshake_test.go b/swarm/pss/handshake_test.go
index 0fc7e798f..895163f30 100644
--- a/swarm/pss/handshake_test.go
+++ b/swarm/pss/handshake_test.go
@@ -30,6 +30,7 @@ import (
// asymmetrical key exchange between two directly connected peers
// full address, partial address (8 bytes) and empty address
func TestHandshake(t *testing.T) {
+ t.Skip("handshakes are not adapted to current pss core code")
t.Run("32", testHandshake)
t.Run("8", testHandshake)
t.Run("0", testHandshake)
diff --git a/swarm/pss/notify/notify.go b/swarm/pss/notify/notify.go
index 3731fb9db..e9d40dc32 100644
--- a/swarm/pss/notify/notify.go
+++ b/swarm/pss/notify/notify.go
@@ -113,7 +113,7 @@ func NewController(ps *pss.Pss) *Controller {
notifiers: make(map[string]*notifier),
subscriptions: make(map[string]*subscription),
}
- ctrl.pss.Register(&controlTopic, ctrl.Handler)
+ ctrl.pss.Register(&controlTopic, pss.NewHandler(ctrl.Handler))
return ctrl
}
@@ -138,7 +138,7 @@ func (c *Controller) Subscribe(name string, pubkey *ecdsa.PublicKey, address pss
c.mu.Lock()
defer c.mu.Unlock()
msg := NewMsg(MsgCodeStart, name, c.pss.BaseAddr())
- c.pss.SetPeerPublicKey(pubkey, controlTopic, &address)
+ c.pss.SetPeerPublicKey(pubkey, controlTopic, address)
pubkeyId := hexutil.Encode(crypto.FromECDSAPub(pubkey))
smsg, err := rlp.EncodeToBytes(msg)
if err != nil {
@@ -271,7 +271,7 @@ func (c *Controller) addToBin(ntfr *notifier, address []byte) (symKeyId string,
currentBin.count++
symKeyId = currentBin.symKeyId
} else {
- symKeyId, err = c.pss.GenerateSymmetricKey(ntfr.topic, &pssAddress, false)
+ symKeyId, err = c.pss.GenerateSymmetricKey(ntfr.topic, pssAddress, false)
if err != nil {
return "", nil, err
}
@@ -312,7 +312,7 @@ func (c *Controller) handleStartMsg(msg *Msg, keyid string) (err error) {
if err != nil {
return err
}
- err = c.pss.SetPeerPublicKey(pubkey, controlTopic, &pssAddress)
+ err = c.pss.SetPeerPublicKey(pubkey, controlTopic, pssAddress)
if err != nil {
return err
}
@@ -335,8 +335,8 @@ func (c *Controller) handleNotifyWithKeyMsg(msg *Msg) error {
// \TODO keep track of and add actual address
updaterAddr := pss.PssAddress([]byte{})
- c.pss.SetSymmetricKey(symkey, topic, &updaterAddr, true)
- c.pss.Register(&topic, c.Handler)
+ c.pss.SetSymmetricKey(symkey, topic, updaterAddr, true)
+ c.pss.Register(&topic, pss.NewHandler(c.Handler))
return c.subscriptions[msg.namestring].handler(msg.namestring, msg.Payload[:len(msg.Payload)-symKeyLength])
}
diff --git a/swarm/pss/notify/notify_test.go b/swarm/pss/notify/notify_test.go
index d4d383a6b..5c29f68e0 100644
--- a/swarm/pss/notify/notify_test.go
+++ b/swarm/pss/notify/notify_test.go
@@ -121,7 +121,7 @@ func TestStart(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
defer cancel()
rmsgC := make(chan *pss.APIMsg)
- rightSub, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", controlTopic)
+ rightSub, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", controlTopic, false, false)
if err != nil {
t.Fatal(err)
}
@@ -174,7 +174,7 @@ func TestStart(t *testing.T) {
t.Fatalf("expected payload length %d, have %d", len(updateMsg)+symKeyLength, len(dMsg.Payload))
}
- rightSubUpdate, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", rsrcTopic)
+ rightSubUpdate, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", rsrcTopic, false, false)
if err != nil {
t.Fatal(err)
}
@@ -209,7 +209,7 @@ func newServices(allowRaw bool) adapters.Services {
return k
}
params := network.NewKadParams()
- params.MinProxBinSize = 2
+ params.NeighbourhoodSize = 2
params.MaxBinSize = 3
params.MinBinSize = 1
params.MaxRetries = 1000
diff --git a/swarm/pss/protocol_test.go b/swarm/pss/protocol_test.go
index 4ef3e90a0..520c48a20 100644
--- a/swarm/pss/protocol_test.go
+++ b/swarm/pss/protocol_test.go
@@ -92,7 +92,7 @@ func testProtocol(t *testing.T) {
lmsgC := make(chan APIMsg)
lctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
- lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic)
+ lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic, false, false)
if err != nil {
t.Fatal(err)
}
@@ -100,7 +100,7 @@ func testProtocol(t *testing.T) {
rmsgC := make(chan APIMsg)
rctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
- rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+ rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic, false, false)
if err != nil {
t.Fatal(err)
}
@@ -130,6 +130,7 @@ func testProtocol(t *testing.T) {
log.Debug("lnode ok")
case cerr := <-lctx.Done():
t.Fatalf("test message timed out: %v", cerr)
+ return
}
select {
case <-rmsgC:
diff --git a/swarm/pss/pss.go b/swarm/pss/pss.go
index e1e24e1f5..bee64b0df 100644
--- a/swarm/pss/pss.go
+++ b/swarm/pss/pss.go
@@ -23,6 +23,7 @@ import (
"crypto/rand"
"errors"
"fmt"
+ "hash"
"sync"
"time"
@@ -38,6 +39,7 @@ import (
"github.com/ethereum/go-ethereum/swarm/pot"
"github.com/ethereum/go-ethereum/swarm/storage"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
+ "golang.org/x/crypto/sha3"
)
const (
@@ -79,7 +81,7 @@ type senderPeer interface {
// member `protected` prevents garbage collection of the instance
type pssPeer struct {
lastSeen time.Time
- address *PssAddress
+ address PssAddress
protected bool
}
@@ -136,10 +138,10 @@ type Pss struct {
symKeyDecryptCacheCapacity int // max amount of symkeys to keep.
// message handling
- handlers map[Topic]map[*Handler]bool // topic and version based pss payload handlers. See pss.Handle()
- handlersMu sync.RWMutex
- allowRaw bool
- hashPool sync.Pool
+ handlers map[Topic]map[*handler]bool // topic and version based pss payload handlers. See pss.Handle()
+ handlersMu sync.RWMutex
+ hashPool sync.Pool
+ topicHandlerCaps map[Topic]*handlerCaps // caches capabilities of each topic's handlers (see handlerCap* consts in types.go)
// process
quitC chan struct{}
@@ -180,11 +182,12 @@ func NewPss(k *network.Kademlia, params *PssParams) (*Pss, error) {
symKeyDecryptCache: make([]*string, params.SymKeyCacheCapacity),
symKeyDecryptCacheCapacity: params.SymKeyCacheCapacity,
- handlers: make(map[Topic]map[*Handler]bool),
- allowRaw: params.AllowRaw,
+ handlers: make(map[Topic]map[*handler]bool),
+ topicHandlerCaps: make(map[Topic]*handlerCaps),
+
hashPool: sync.Pool{
New: func() interface{} {
- return storage.MakeHashFunc(storage.DefaultHash)()
+ return sha3.NewLegacyKeccak256()
},
},
}
@@ -313,30 +316,54 @@ func (p *Pss) PublicKey() *ecdsa.PublicKey {
//
// Returns a deregister function which needs to be called to
// deregister the handler,
-func (p *Pss) Register(topic *Topic, handler Handler) func() {
+func (p *Pss) Register(topic *Topic, hndlr *handler) func() {
p.handlersMu.Lock()
defer p.handlersMu.Unlock()
handlers := p.handlers[*topic]
if handlers == nil {
- handlers = make(map[*Handler]bool)
+ handlers = make(map[*handler]bool)
p.handlers[*topic] = handlers
+ log.Debug("registered handler", "caps", hndlr.caps)
}
- handlers[&handler] = true
- return func() { p.deregister(topic, &handler) }
+ if hndlr.caps == nil {
+ hndlr.caps = &handlerCaps{}
+ }
+ handlers[hndlr] = true
+ if _, ok := p.topicHandlerCaps[*topic]; !ok {
+ p.topicHandlerCaps[*topic] = &handlerCaps{}
+ }
+ if hndlr.caps.raw {
+ p.topicHandlerCaps[*topic].raw = true
+ }
+ if hndlr.caps.prox {
+ p.topicHandlerCaps[*topic].prox = true
+ }
+ return func() { p.deregister(topic, hndlr) }
}
-func (p *Pss) deregister(topic *Topic, h *Handler) {
+func (p *Pss) deregister(topic *Topic, hndlr *handler) {
	p.handlersMu.Lock()
	defer p.handlersMu.Unlock()
	handlers := p.handlers[*topic]
-	if len(handlers) == 1 {
+	delete(handlers, hndlr)
+	if len(handlers) == 0 {
		delete(p.handlers, *topic)
+		delete(p.topicHandlerCaps, *topic)
		return
	}
-	delete(handlers, h)
+	// topic caps might have changed now that a handler is gone
+	caps := &handlerCaps{}
+	for h := range handlers {
+		if h.caps.raw {
+			caps.raw = true
+		}
+		if h.caps.prox {
+			caps.prox = true
+		}
+	}
+	p.topicHandlerCaps[*topic] = caps
}
// get all registered handlers for respective topics
-func (p *Pss) getHandlers(topic Topic) map[*Handler]bool {
+func (p *Pss) getHandlers(topic Topic) map[*handler]bool {
p.handlersMu.RLock()
defer p.handlersMu.RUnlock()
return p.handlers[topic]
@@ -348,12 +375,11 @@ func (p *Pss) getHandlers(topic Topic) map[*Handler]bool {
// Only passes error to pss protocol handler if payload is not valid pssmsg
func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error {
metrics.GetOrRegisterCounter("pss.handlepssmsg", nil).Inc(1)
-
pssmsg, ok := msg.(*PssMsg)
-
if !ok {
return fmt.Errorf("invalid message type. Expected *PssMsg, got %T ", msg)
}
+ log.Trace("handler", "self", label(p.Kademlia.BaseAddr()), "topic", label(pssmsg.Payload.Topic[:]))
if int64(pssmsg.Expire) < time.Now().Unix() {
metrics.GetOrRegisterCounter("pss.expire", nil).Inc(1)
log.Warn("pss filtered expired message", "from", common.ToHex(p.Kademlia.BaseAddr()), "to", common.ToHex(pssmsg.To))
@@ -365,13 +391,36 @@ func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error {
}
p.addFwdCache(pssmsg)
- if !p.isSelfPossibleRecipient(pssmsg) {
- log.Trace("pss was for someone else :'( ... forwarding", "pss", common.ToHex(p.BaseAddr()))
+ psstopic := Topic(pssmsg.Payload.Topic)
+
+	// raw is the simplest handler capability to check, so check that first
+ var isRaw bool
+ if pssmsg.isRaw() {
+ if _, ok := p.topicHandlerCaps[psstopic]; ok {
+ if !p.topicHandlerCaps[psstopic].raw {
+ log.Debug("No handler for raw message", "topic", psstopic)
+ return nil
+ }
+ }
+ isRaw = true
+ }
+
+	// check if we can be a recipient:
+	// - no prox handler on the topic and the partial address matches, or
+	// - a prox handler on the topic and we are within prox, regardless of partial address match.
+	// store this result so we don't recalculate it for every handler
+ var isProx bool
+ if _, ok := p.topicHandlerCaps[psstopic]; ok {
+ isProx = p.topicHandlerCaps[psstopic].prox
+ }
+ isRecipient := p.isSelfPossibleRecipient(pssmsg, isProx)
+ if !isRecipient {
+ log.Trace("pss was for someone else :'( ... forwarding", "pss", common.ToHex(p.BaseAddr()), "prox", isProx)
return p.enqueue(pssmsg)
}
- log.Trace("pss for us, yay! ... let's process!", "pss", common.ToHex(p.BaseAddr()))
- if err := p.process(pssmsg); err != nil {
+ log.Trace("pss for us, yay! ... let's process!", "pss", common.ToHex(p.BaseAddr()), "prox", isProx, "raw", isRaw, "topic", label(pssmsg.Payload.Topic[:]))
+ if err := p.process(pssmsg, isRaw, isProx); err != nil {
qerr := p.enqueue(pssmsg)
if qerr != nil {
return fmt.Errorf("process fail: processerr %v, queueerr: %v", err, qerr)
@@ -384,23 +433,21 @@ func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error {
// Entry point to processing a message for which the current node can be the intended recipient.
// Attempts symmetric and asymmetric decryption with stored keys.
// Dispatches message to all handlers matching the message topic
-func (p *Pss) process(pssmsg *PssMsg) error {
+func (p *Pss) process(pssmsg *PssMsg, raw bool, prox bool) error {
metrics.GetOrRegisterCounter("pss.process", nil).Inc(1)
var err error
var recvmsg *whisper.ReceivedMessage
var payload []byte
- var from *PssAddress
+ var from PssAddress
var asymmetric bool
var keyid string
- var keyFunc func(envelope *whisper.Envelope) (*whisper.ReceivedMessage, string, *PssAddress, error)
+ var keyFunc func(envelope *whisper.Envelope) (*whisper.ReceivedMessage, string, PssAddress, error)
envelope := pssmsg.Payload
psstopic := Topic(envelope.Topic)
- if pssmsg.isRaw() {
- if !p.allowRaw {
- return errors.New("raw message support disabled")
- }
+
+ if raw {
payload = pssmsg.Payload.Data
} else {
if pssmsg.isSym() {
@@ -422,19 +469,27 @@ func (p *Pss) process(pssmsg *PssMsg) error {
return err
}
}
- p.executeHandlers(psstopic, payload, from, asymmetric, keyid)
+ p.executeHandlers(psstopic, payload, from, raw, prox, asymmetric, keyid)
return nil
}
-func (p *Pss) executeHandlers(topic Topic, payload []byte, from *PssAddress, asymmetric bool, keyid string) {
+func (p *Pss) executeHandlers(topic Topic, payload []byte, from PssAddress, raw bool, prox bool, asymmetric bool, keyid string) {
handlers := p.getHandlers(topic)
peer := p2p.NewPeer(enode.ID{}, fmt.Sprintf("%x", from), []p2p.Cap{})
- for f := range handlers {
- err := (*f)(payload, peer, asymmetric, keyid)
+ for h := range handlers {
+ if !h.caps.raw && raw {
+ log.Warn("norawhandler")
+ continue
+ }
+ if !h.caps.prox && prox {
+ log.Warn("noproxhandler")
+ continue
+ }
+ err := (h.f)(payload, peer, asymmetric, keyid)
if err != nil {
- log.Warn("Pss handler %p failed: %v", f, err)
+ log.Warn("Pss handler failed", "err", err)
}
}
}
@@ -445,9 +500,23 @@ func (p *Pss) isSelfRecipient(msg *PssMsg) bool {
}
// test match of leftmost bytes in given message to node's Kademlia address
-func (p *Pss) isSelfPossibleRecipient(msg *PssMsg) bool {
+func (p *Pss) isSelfPossibleRecipient(msg *PssMsg, prox bool) bool {
local := p.Kademlia.BaseAddr()
- return bytes.Equal(msg.To, local[:len(msg.To)])
+
+	// if a partial address matches, we are a possible recipient regardless of prox;
+	// if it does not and prox is not set, we are surely not
+	if bytes.Equal(msg.To, local[:len(msg.To)]) {
+		return true
+ } else if !prox {
+ return false
+ }
+
+ depth := p.Kademlia.NeighbourhoodDepth()
+ po, _ := network.Pof(p.Kademlia.BaseAddr(), msg.To, 0)
+ log.Trace("selfpossible", "po", po, "depth", depth)
+
+ return depth <= po
}
/////////////////////////////////////////////////////////////////////
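
// A worked sketch of the new proximity rule in isSelfPossibleRecipient: with a
// prox-capable handler, a node accepts a message whose recipient address is at
// least as close to it as its neighbourhood depth, i.e. depth <= po. The po
// helper below mirrors network.Pof semantics for full-length addresses.
package main

import "fmt"

func po(a, b []byte) int {
	for i := range b {
		x := a[i] ^ b[i]
		for j := 0; j < 8; j++ {
			if x&(0x80>>uint(j)) != 0 {
				return i*8 + j
			}
		}
	}
	return len(b) * 8
}

func main() {
	self := []byte{0xff, 0x00}
	depth := 4 // assumed neighbourhood depth, for illustration

	for _, to := range [][]byte{{0x0f, 0x00}, {0xff, 0x80}} {
		p := po(self, to)
		fmt.Printf("to=%x po=%d accept=%v\n", to, p, depth <= p)
	}
	// to=0f00 po=0 accept=false (outside our neighbourhood)
	// to=ff80 po=8 accept=true  (within depth, we may be the recipient)
}
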
@@ -461,7 +530,10 @@ func (p *Pss) isSelfPossibleRecipient(msg *PssMsg) bool {
//
// The value in `address` will be used as a routing hint for the
// public key / topic association
-func (p *Pss) SetPeerPublicKey(pubkey *ecdsa.PublicKey, topic Topic, address *PssAddress) error {
+func (p *Pss) SetPeerPublicKey(pubkey *ecdsa.PublicKey, topic Topic, address PssAddress) error {
+ if err := validateAddress(address); err != nil {
+ return err
+ }
pubkeybytes := crypto.FromECDSAPub(pubkey)
if len(pubkeybytes) == 0 {
return fmt.Errorf("invalid public key: %v", pubkey)
@@ -476,12 +548,12 @@ func (p *Pss) SetPeerPublicKey(pubkey *ecdsa.PublicKey, topic Topic, address *Ps
}
p.pubKeyPool[pubkeyid][topic] = psp
p.pubKeyPoolMu.Unlock()
- log.Trace("added pubkey", "pubkeyid", pubkeyid, "topic", topic, "address", common.ToHex(*address))
+ log.Trace("added pubkey", "pubkeyid", pubkeyid, "topic", topic, "address", address)
return nil
}
// Automatically generate a new symkey for a topic and address hint
-func (p *Pss) GenerateSymmetricKey(topic Topic, address *PssAddress, addToCache bool) (string, error) {
+func (p *Pss) GenerateSymmetricKey(topic Topic, address PssAddress, addToCache bool) (string, error) {
keyid, err := p.w.GenerateSymKey()
if err != nil {
return "", err
@@ -502,11 +574,14 @@ func (p *Pss) GenerateSymmetricKey(topic Topic, address *PssAddress, addToCache
//
// Returns a string id that can be used to retrieve the key bytes
// from the whisper backend (see pss.GetSymmetricKey())
-func (p *Pss) SetSymmetricKey(key []byte, topic Topic, address *PssAddress, addtocache bool) (string, error) {
+func (p *Pss) SetSymmetricKey(key []byte, topic Topic, address PssAddress, addtocache bool) (string, error) {
+ if err := validateAddress(address); err != nil {
+ return "", err
+ }
return p.setSymmetricKey(key, topic, address, addtocache, true)
}
-func (p *Pss) setSymmetricKey(key []byte, topic Topic, address *PssAddress, addtocache bool, protected bool) (string, error) {
+func (p *Pss) setSymmetricKey(key []byte, topic Topic, address PssAddress, addtocache bool, protected bool) (string, error) {
keyid, err := p.w.AddSymKeyDirect(key)
if err != nil {
return "", err
@@ -518,7 +593,7 @@ func (p *Pss) setSymmetricKey(key []byte, topic Topic, address *PssAddress, addt
// adds a symmetric key to the pss key pool, and optionally adds the key
// to the collection of keys used to attempt symmetric decryption of
// incoming messages
-func (p *Pss) addSymmetricKeyToPool(keyid string, topic Topic, address *PssAddress, addtocache bool, protected bool) {
+func (p *Pss) addSymmetricKeyToPool(keyid string, topic Topic, address PssAddress, addtocache bool, protected bool) {
psp := &pssPeer{
address: address,
protected: protected,
@@ -534,7 +609,7 @@ func (p *Pss) addSymmetricKeyToPool(keyid string, topic Topic, address *PssAddre
p.symKeyDecryptCache[p.symKeyDecryptCacheCursor%cap(p.symKeyDecryptCache)] = &keyid
}
key, _ := p.GetSymmetricKey(keyid)
- log.Trace("added symkey", "symkeyid", keyid, "symkey", common.ToHex(key), "topic", topic, "address", fmt.Sprintf("%p", address), "cache", addtocache)
+ log.Trace("added symkey", "symkeyid", keyid, "symkey", common.ToHex(key), "topic", topic, "address", address, "cache", addtocache)
}
// Returns a symmetric key byte seqyence stored in the whisper backend
@@ -555,7 +630,7 @@ func (p *Pss) GetPublickeyPeers(keyid string) (topic []Topic, address []PssAddre
defer p.pubKeyPoolMu.RUnlock()
for t, peer := range p.pubKeyPool[keyid] {
topic = append(topic, t)
- address = append(address, *peer.address)
+ address = append(address, peer.address)
}
return topic, address, nil
@@ -566,7 +641,7 @@ func (p *Pss) getPeerAddress(keyid string, topic Topic) (PssAddress, error) {
defer p.pubKeyPoolMu.RUnlock()
if peers, ok := p.pubKeyPool[keyid]; ok {
if t, ok := peers[topic]; ok {
- return *t.address, nil
+ return t.address, nil
}
}
return nil, fmt.Errorf("peer with pubkey %s, topic %x not found", keyid, topic)
@@ -578,7 +653,7 @@ func (p *Pss) getPeerAddress(keyid string, topic Topic) (PssAddress, error) {
// encapsulating the decrypted message, and the whisper backend id
// of the symmetric key used to decrypt the message.
// It fails if decryption of the message fails or if the message is corrupted
-func (p *Pss) processSym(envelope *whisper.Envelope) (*whisper.ReceivedMessage, string, *PssAddress, error) {
+func (p *Pss) processSym(envelope *whisper.Envelope) (*whisper.ReceivedMessage, string, PssAddress, error) {
metrics.GetOrRegisterCounter("pss.process.sym", nil).Inc(1)
for i := p.symKeyDecryptCacheCursor; i > p.symKeyDecryptCacheCursor-cap(p.symKeyDecryptCache) && i > 0; i-- {
@@ -610,7 +685,7 @@ func (p *Pss) processSym(envelope *whisper.Envelope) (*whisper.ReceivedMessage,
// encapsulating the decrypted message, and the byte representation of
// the public key used to decrypt the message.
// It fails if decryption of message fails, or if the message is corrupted
-func (p *Pss) processAsym(envelope *whisper.Envelope) (*whisper.ReceivedMessage, string, *PssAddress, error) {
+func (p *Pss) processAsym(envelope *whisper.Envelope) (*whisper.ReceivedMessage, string, PssAddress, error) {
metrics.GetOrRegisterCounter("pss.process.asym", nil).Inc(1)
recvmsg, err := envelope.OpenAsymmetric(p.privateKey)
@@ -622,7 +697,7 @@ func (p *Pss) processAsym(envelope *whisper.Envelope) (*whisper.ReceivedMessage,
return nil, "", nil, fmt.Errorf("invalid message")
}
pubkeyid := common.ToHex(crypto.FromECDSAPub(recvmsg.Src))
- var from *PssAddress
+ var from PssAddress
p.pubKeyPoolMu.Lock()
if p.pubKeyPool[pubkeyid][Topic(envelope.Topic)] != nil {
from = p.pubKeyPool[pubkeyid][Topic(envelope.Topic)].address
@@ -684,8 +759,8 @@ func (p *Pss) enqueue(msg *PssMsg) error {
//
// Will fail if raw messages are disallowed
func (p *Pss) SendRaw(address PssAddress, topic Topic, msg []byte) error {
- if !p.allowRaw {
- return errors.New("Raw messages not enabled")
+ if err := validateAddress(address); err != nil {
+ return err
}
pssMsgParams := &msgParams{
raw: true,
@@ -699,7 +774,19 @@ func (p *Pss) SendRaw(address PssAddress, topic Topic, msg []byte) error {
pssMsg.Expire = uint32(time.Now().Add(p.msgTTL).Unix())
pssMsg.Payload = payload
p.addFwdCache(pssMsg)
- return p.enqueue(pssMsg)
+ err := p.enqueue(pssMsg)
+ if err != nil {
+ return err
+ }
+
+ // if we have a prox handler on this topic,
+ // also deliver the message to ourselves
+ if _, ok := p.topicHandlerCaps[topic]; ok {
+ if p.isSelfPossibleRecipient(pssMsg, true) && p.topicHandlerCaps[topic].prox {
+ return p.process(pssMsg, true, true)
+ }
+ }
+ return nil
}
// Send a message using symmetric encryption
@@ -715,11 +802,8 @@ func (p *Pss) SendSym(symkeyid string, topic Topic, msg []byte) error {
p.symKeyPoolMu.Unlock()
if !ok {
return fmt.Errorf("invalid topic '%s' for symkey '%s'", topic.String(), symkeyid)
- } else if psp.address == nil {
- return fmt.Errorf("no address hint for topic '%s' symkey '%s'", topic.String(), symkeyid)
}
- err = p.send(*psp.address, topic, msg, false, symkey)
- return err
+ return p.send(psp.address, topic, msg, false, symkey)
}
// Send a message using asymmetric encryption
@@ -734,13 +818,8 @@ func (p *Pss) SendAsym(pubkeyid string, topic Topic, msg []byte) error {
p.pubKeyPoolMu.Unlock()
if !ok {
return fmt.Errorf("invalid topic '%s' for pubkey '%s'", topic.String(), pubkeyid)
- } else if psp.address == nil {
- return fmt.Errorf("no address hint for topic '%s' pubkey '%s'", topic.String(), pubkeyid)
}
- go func() {
- p.send(*psp.address, topic, msg, true, common.FromHex(pubkeyid))
- }()
- return nil
+ return p.send(psp.address, topic, msg, true, common.FromHex(pubkeyid))
}
// Send is payload agnostic, and will accept any byte slice as payload
@@ -800,71 +879,109 @@ func (p *Pss) send(to []byte, topic Topic, msg []byte, asymmetric bool, key []by
pssMsg.To = to
pssMsg.Expire = uint32(time.Now().Add(p.msgTTL).Unix())
pssMsg.Payload = envelope
- return p.enqueue(pssMsg)
+ err = p.enqueue(pssMsg)
+ if err != nil {
+ return err
+ }
+ if _, ok := p.topicHandlerCaps[topic]; ok {
+ if p.isSelfPossibleRecipient(pssMsg, true) && p.topicHandlerCaps[topic].prox {
+ return p.process(pssMsg, true, true)
+ }
+ }
+ return nil
}
-// Forwards a pss message to the peer(s) closest to the to recipient address in the PssMsg struct
-// The recipient address can be of any length, and the byte slice will be matched to the MSB slice
-// of the peer address of the equivalent length.
+// sendFunc is a helper function that tries to send a message and returns true on success.
+// It is set to sendMsg for production use, and optionally overridden in tests.
+var sendFunc func(p *Pss, sp *network.Peer, msg *PssMsg) bool = sendMsg
+
+// tries to send a message, returns true if successful
+func sendMsg(p *Pss, sp *network.Peer, msg *PssMsg) bool {
+ var isPssEnabled bool
+ info := sp.Info()
+ for _, capability := range info.Caps {
+ if capability == p.capstring {
+ isPssEnabled = true
+ break
+ }
+ }
+ if !isPssEnabled {
+ log.Error("peer doesn't have matching pss capabilities, skipping", "peer", info.Name, "caps", info.Caps)
+ return false
+ }
+
+ // get the protocol peer from the forwarding peer cache
+ p.fwdPoolMu.RLock()
+ pp := p.fwdPool[sp.Info().ID]
+ p.fwdPoolMu.RUnlock()
+
+ err := pp.Send(context.TODO(), msg)
+ if err != nil {
+ metrics.GetOrRegisterCounter("pss.pp.send.error", nil).Inc(1)
+ log.Error(err.Error())
+ }
+
+ return err == nil
+}
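+
+// For example (editor's sketch, not part of this change), a test can count
+// forwards without touching the network by swapping in a stub:
+//
+//	orig := sendFunc
+//	defer func() { sendFunc = orig }()
+//	sendFunc = func(p *Pss, sp *network.Peer, msg *PssMsg) bool { return true }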
+
+// Forwards a pss message to the peer(s) based on recipient address according to the algorithm
+// described below. The recipient address can be of any length, and the byte slice will be matched
+// to the MSB slice of the peer address of the equivalent length.
+//
+// If the recipient address (or partial address) is within the neighbourhood depth of the forwarding
+// node, then it will be forwarded to all the nearest neighbours of the forwarding node. In case of
+// partial address, it should be forwarded to all the peers matching the partial address, if there
+// are any; otherwise only to one peer, closest to the recipient address. In any case, if the message
+// forwarding fails, the node should try to forward it to the next best peer, until the message is
+// successfully forwarded to at least one peer.
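+//
+// Worked example (editor's illustration): with neighbourhood depth 8 and a
+// 2-byte partial recipient address (luminosity of 16 bits), a recipient that
+// shares only 3 leading bits with this node's base address yields a broadcast
+// threshold of 4, so the message is handed to a single peer, the one closest
+// to the recipient address.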
func (p *Pss) forward(msg *PssMsg) error {
metrics.GetOrRegisterCounter("pss.forward", nil).Inc(1)
-
+ sent := 0 // number of successful sends
to := make([]byte, addressLength)
copy(to[:len(msg.To)], msg.To)
+ neighbourhoodDepth := p.Kademlia.NeighbourhoodDepth()
- // send with kademlia
- // find the closest peer to the recipient and attempt to send
- sent := 0
- p.Kademlia.EachConn(to, 256, func(sp *network.Peer, po int, isproxbin bool) bool {
- info := sp.Info()
+ // luminosity is the opposite of darkness: the more bytes are removed from the address, the greater
+ // the darkness and the lower the luminosity. Here luminosity equals the number of bits given in the destination address.
+ luminosityRadius := len(msg.To) * 8
- // check if the peer is running pss
- var ispss bool
- for _, cap := range info.Caps {
- if cap == p.capstring {
- ispss = true
- break
+ // proximity order function matching up to neighbourhoodDepth bits (po <= neighbourhoodDepth)
+ pof := pot.DefaultPof(neighbourhoodDepth)
+
+ // soft threshold for msg broadcast
+ broadcastThreshold, _ := pof(to, p.BaseAddr(), 0)
+ if broadcastThreshold > luminosityRadius {
+ broadcastThreshold = luminosityRadius
+ }
+
+ var onlySendOnce bool // indicates if the message should only be sent to one peer with closest address
+
+ // if measured from the recipient address as opposed to the base address (see the Kademlia.EachConn
+ // call below), then peers that fall in the same proximity bin as the recipient address will appear
+ // [at least] one bit closer, but only if these additional bits are given in the recipient address.
+ if broadcastThreshold < luminosityRadius && broadcastThreshold < neighbourhoodDepth {
+ broadcastThreshold++
+ onlySendOnce = true
+ }
+
+ p.Kademlia.EachConn(to, addressLength*8, func(sp *network.Peer, po int) bool {
+ if po < broadcastThreshold && sent > 0 {
+ return false // stop iterating
+ }
+ if sendFunc(p, sp, msg) {
+ sent++
+ if onlySendOnce {
+ return false
+ }
+ if po == addressLength*8 {
+ // stop iterating if successfully sent to the exact recipient (perfect match of full address)
+ return false
}
}
- if !ispss {
- log.Trace("peer doesn't have matching pss capabilities, skipping", "peer", info.Name, "caps", info.Caps)
- return true
- }
-
- // get the protocol peer from the forwarding peer cache
- sendMsg := fmt.Sprintf("MSG TO %x FROM %x VIA %x", to, p.BaseAddr(), sp.Address())
- p.fwdPoolMu.RLock()
- pp := p.fwdPool[sp.Info().ID]
- p.fwdPoolMu.RUnlock()
-
- // attempt to send the message
- err := pp.Send(context.TODO(), msg)
- if err != nil {
- metrics.GetOrRegisterCounter("pss.pp.send.error", nil).Inc(1)
- log.Error(err.Error())
- return true
- }
- sent++
- log.Trace(fmt.Sprintf("%v: successfully forwarded", sendMsg))
-
- // continue forwarding if:
- // - if the peer is end recipient but the full address has not been disclosed
- // - if the peer address matches the partial address fully
- // - if the peer is in proxbin
- if len(msg.To) < addressLength && bytes.Equal(msg.To, sp.Address()[:len(msg.To)]) {
- log.Trace(fmt.Sprintf("Pss keep forwarding: Partial address + full partial match"))
- return true
- } else if isproxbin {
- log.Trace(fmt.Sprintf("%x is in proxbin, keep forwarding", common.ToHex(sp.Address())))
- return true
- }
- // at this point we stop forwarding, and the state is as follows:
- // - the peer is end recipient and we have full address
- // - we are not in proxbin (directed routing)
- // - partial addresses don't fully match
- return false
+ return true
})
+ // if we failed to send to anyone, re-insert message in the send-queue
if sent == 0 {
log.Debug("unable to forward to any peers")
if err := p.enqueue(msg); err != nil {
@@ -895,6 +1012,10 @@ func (p *Pss) cleanFwdCache() {
}
}
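+// label returns a short hex string of the first two bytes of b, for logging
+// (assumes len(b) >= 2)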
+func label(b []byte) string {
+ return fmt.Sprintf("%04x", b[:2])
+}
+
// add a message to the cache
func (p *Pss) addFwdCache(msg *PssMsg) error {
metrics.GetOrRegisterCounter("pss.addfwdcache", nil).Inc(1)
@@ -934,12 +1055,23 @@ func (p *Pss) checkFwdCache(msg *PssMsg) bool {
// Digest of message
func (p *Pss) digest(msg *PssMsg) pssDigest {
- hasher := p.hashPool.Get().(storage.SwarmHash)
+ return p.digestBytes(msg.serialize())
+}
+
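+// digestBytes hashes the given serialized message into a pssDigest, which is
+// used as the key of the forwarding cache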
+func (p *Pss) digestBytes(msg []byte) pssDigest {
+ hasher := p.hashPool.Get().(hash.Hash)
defer p.hashPool.Put(hasher)
hasher.Reset()
- hasher.Write(msg.serialize())
+ hasher.Write(msg)
digest := pssDigest{}
key := hasher.Sum(nil)
copy(digest[:], key[:digestLength])
return digest
}
+
+func validateAddress(addr PssAddress) error {
+ if len(addr) > addressLength {
+ return errors.New("address too long")
+ }
+ return nil
+}
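+
+// For illustration (editor's note): nil, partial and full-length addresses all
+// pass validation; only an address longer than addressLength (32 bytes for a
+// full overlay address) is rejected.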
diff --git a/swarm/pss/pss_test.go b/swarm/pss/pss_test.go
index 66a90be62..46daa4674 100644
--- a/swarm/pss/pss_test.go
+++ b/swarm/pss/pss_test.go
@@ -48,20 +48,23 @@ import (
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/network"
+ "github.com/ethereum/go-ethereum/swarm/pot"
"github.com/ethereum/go-ethereum/swarm/state"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
)
var (
- initOnce = sync.Once{}
- debugdebugflag = flag.Bool("vv", false, "veryverbose")
- debugflag = flag.Bool("v", false, "verbose")
- longrunning = flag.Bool("longrunning", false, "do run long-running tests")
- w *whisper.Whisper
- wapi *whisper.PublicWhisperAPI
- psslogmain log.Logger
- pssprotocols map[string]*protoCtrl
- useHandshake bool
+ initOnce = sync.Once{}
+ loglevel = flag.Int("loglevel", 2, "logging verbosity")
+ longrunning = flag.Bool("longrunning", false, "do run long-running tests")
+ w *whisper.Whisper
+ wapi *whisper.PublicWhisperAPI
+ psslogmain log.Logger
+ pssprotocols map[string]*protoCtrl
+ useHandshake bool
+ noopHandlerFunc = func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+ return nil
+ }
)
func init() {
@@ -75,16 +78,9 @@ func init() {
func initTest() {
initOnce.Do(
func() {
- loglevel := log.LvlInfo
- if *debugflag {
- loglevel = log.LvlDebug
- } else if *debugdebugflag {
- loglevel = log.LvlTrace
- }
-
psslogmain = log.New("psslog", "*")
hs := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
- hf := log.LvlFilterHandler(loglevel, hs)
+ hf := log.LvlFilterHandler(log.Lvl(*loglevel), hs)
h := log.CallerFileHandler(hf)
log.Root().SetHandler(h)
@@ -280,15 +276,14 @@ func TestAddressMatch(t *testing.T) {
}
pssmsg := &PssMsg{
- To: remoteaddr,
- Payload: &whisper.Envelope{},
+ To: remoteaddr,
}
// differ from first byte
if ps.isSelfRecipient(pssmsg) {
t.Fatalf("isSelfRecipient true but %x != %x", remoteaddr, localaddr)
}
- if ps.isSelfPossibleRecipient(pssmsg) {
+ if ps.isSelfPossibleRecipient(pssmsg, false) {
t.Fatalf("isSelfPossibleRecipient true but %x != %x", remoteaddr[:8], localaddr[:8])
}
@@ -297,7 +292,7 @@ func TestAddressMatch(t *testing.T) {
if ps.isSelfRecipient(pssmsg) {
t.Fatalf("isSelfRecipient true but %x != %x", remoteaddr, localaddr)
}
- if !ps.isSelfPossibleRecipient(pssmsg) {
+ if !ps.isSelfPossibleRecipient(pssmsg, false) {
t.Fatalf("isSelfPossibleRecipient false but %x == %x", remoteaddr[:8], localaddr[:8])
}
@@ -306,13 +301,342 @@ func TestAddressMatch(t *testing.T) {
if !ps.isSelfRecipient(pssmsg) {
t.Fatalf("isSelfRecipient false but %x == %x", remoteaddr, localaddr)
}
- if !ps.isSelfPossibleRecipient(pssmsg) {
+ if !ps.isSelfPossibleRecipient(pssmsg, false) {
t.Fatalf("isSelfPossibleRecipient false but %x == %x", remoteaddr[:8], localaddr[:8])
}
+
+}
+
+// test that a message is handled by the sender if a prox handler exists and the sender is within prox of the message address
+func TestProxShortCircuit(t *testing.T) {
+
+ // sender node address
+ localAddr := network.RandomAddr().Over()
+ localPotAddr := pot.NewAddressFromBytes(localAddr)
+
+ // set up kademlia
+ kadParams := network.NewKadParams()
+ kad := network.NewKademlia(localAddr, kadParams)
+ peerCount := kad.MinBinSize + 1
+
+ // set up pss
+ privKey, err := crypto.GenerateKey()
+ pssp := NewPssParams().WithPrivateKey(privKey)
+ ps, err := NewPss(kad, pssp)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+
+ // create kademlia peers, so we have peers both inside and outside minproxlimit
+ var peers []*network.Peer
+ proxMessageAddress := pot.RandomAddressAt(localPotAddr, peerCount).Bytes()
+ distantMessageAddress := pot.RandomAddressAt(localPotAddr, 0).Bytes()
+
+ for i := 0; i < peerCount; i++ {
+ rw := &p2p.MsgPipeRW{}
+ ptpPeer := p2p.NewPeer(enode.ID{}, "wanna be with me? [ ] yes [ ] no", []p2p.Cap{})
+ protoPeer := protocols.NewPeer(ptpPeer, rw, &protocols.Spec{})
+ peerAddr := pot.RandomAddressAt(localPotAddr, i)
+ bzzPeer := &network.BzzPeer{
+ Peer: protoPeer,
+ BzzAddr: &network.BzzAddr{
+ OAddr: peerAddr.Bytes(),
+ UAddr: []byte(fmt.Sprintf("%x", peerAddr[:])),
+ },
+ }
+ peer := network.NewPeer(bzzPeer, kad)
+ kad.On(peer)
+ peers = append(peers, peer)
+ }
+
+ // create a handler that signals delivery, and register it marking prox capability
+ delivered := make(chan struct{})
+ rawHandlerFunc := func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+ log.Trace("in allowraw handler")
+ delivered <- struct{}{}
+ return nil
+ }
+ topic := BytesToTopic([]byte{0x2a})
+ hndlrProxDereg := ps.Register(&topic, &handler{
+ f: rawHandlerFunc,
+ caps: &handlerCaps{
+ raw: true,
+ prox: true,
+ },
+ })
+ defer hndlrProxDereg()
+
+ // send message too far away for sender to be in prox
+ // reception of this message should time out
+ errC := make(chan error)
+ go func() {
+ err := ps.SendRaw(distantMessageAddress, topic, []byte("foo"))
+ if err != nil {
+ errC <- err
+ }
+ }()
+
+ ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ select {
+ case <-delivered:
+ t.Fatal("raw distant message delivered")
+ case err := <-errC:
+ t.Fatal(err)
+ case <-ctx.Done():
+ }
+
+ // send message that should be within sender prox
+ // this message should be delivered
+ go func() {
+ err := ps.SendRaw(proxMessageAddress, topic, []byte("bar"))
+ if err != nil {
+ errC <- err
+ }
+ }()
+
+ ctx, cancel = context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ select {
+ case <-delivered:
+ case err := <-errC:
+ t.Fatal(err)
+ case <-ctx.Done():
+ t.Fatal("raw timeout")
+ }
+
+ // try the same prox message with sym and asym send
+ proxAddrPss := PssAddress(proxMessageAddress)
+ symKeyId, err := ps.GenerateSymmetricKey(topic, proxAddrPss, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ go func() {
+ err := ps.SendSym(symKeyId, topic, []byte("baz"))
+ if err != nil {
+ errC <- err
+ }
+ }()
+ ctx, cancel = context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ select {
+ case <-delivered:
+ case err := <-errC:
+ t.Fatal(err)
+ case <-ctx.Done():
+ t.Fatal("sym timeout")
+ }
+
+ err = ps.SetPeerPublicKey(&privKey.PublicKey, topic, proxAddrPss)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pubKeyId := hexutil.Encode(crypto.FromECDSAPub(&privKey.PublicKey))
+ go func() {
+ err := ps.SendAsym(pubKeyId, topic, []byte("xyzzy"))
+ if err != nil {
+ errC <- err
+ }
+ }()
+ ctx, cancel = context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ select {
+ case <-delivered:
+ case err := <-errC:
+ t.Fatal(err)
+ case <-ctx.Done():
+ t.Fatal("asym timeout")
+ }
}
-//
-func TestHandlerConditions(t *testing.T) {
+// verify that a node can be set as recipient, regardless of an explicit message address match, if at least one handler of the topic is explicitly set to allow it
+// note that in these tests we use the raw capability on handlers for convenience
+func TestAddressMatchProx(t *testing.T) {
+
+ // recipient node address
+ localAddr := network.RandomAddr().Over()
+ localPotAddr := pot.NewAddressFromBytes(localAddr)
+
+ // set up kademlia
+ kadparams := network.NewKadParams()
+ kad := network.NewKademlia(localAddr, kadparams)
+ nnPeerCount := kad.MinBinSize
+ peerCount := nnPeerCount + 2
+
+ // set up pss
+ privKey, err := crypto.GenerateKey()
+ pssp := NewPssParams().WithPrivateKey(privKey)
+ ps, err := NewPss(kad, pssp)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+
+ // create kademlia peers, so we have peers both inside and outside minproxlimit
+ var peers []*network.Peer
+ for i := 0; i < peerCount; i++ {
+ rw := &p2p.MsgPipeRW{}
+ ptpPeer := p2p.NewPeer(enode.ID{}, "362436 call me anytime", []p2p.Cap{})
+ protoPeer := protocols.NewPeer(ptpPeer, rw, &protocols.Spec{})
+ peerAddr := pot.RandomAddressAt(localPotAddr, i)
+ bzzPeer := &network.BzzPeer{
+ Peer: protoPeer,
+ BzzAddr: &network.BzzAddr{
+ OAddr: peerAddr.Bytes(),
+ UAddr: []byte(fmt.Sprintf("%x", peerAddr[:])),
+ },
+ }
+ peer := network.NewPeer(bzzPeer, kad)
+ kad.On(peer)
+ peers = append(peers, peer)
+ }
+
+ // TODO: create a test in the network package to make a table with n peers where n-m are proxpeers
+ // meanwhile, test for kademlia regressions here, since we are compiling the test parameters from different packages
+ var proxes int
+ var conns int
+ depth := kad.NeighbourhoodDepth()
+ kad.EachConn(nil, peerCount, func(p *network.Peer, po int) bool {
+ conns++
+ if po >= depth {
+ proxes++
+ }
+ return true
+ })
+ if proxes != nnPeerCount {
+ t.Fatalf("expected %d proxpeers, have %d", nnPeerCount, proxes)
+ } else if conns != peerCount {
+ t.Fatalf("expected %d peers total, have %d", peerCount, proxes)
+ }
+
+ // remote address distances from localAddr to try, and the expected outcomes if we use a prox handler
+ remoteDistances := []int{
+ 255,
+ nnPeerCount + 1,
+ nnPeerCount,
+ nnPeerCount - 1,
+ 0,
+ }
+ expects := []bool{
+ true,
+ true,
+ true,
+ false,
+ false,
+ }
+
+ // first, unit test the method that calculates the possible recipient using prox
+ for i, distance := range remoteDistances {
+ pssMsg := newPssMsg(&msgParams{})
+ pssMsg.To = make([]byte, len(localAddr))
+ copy(pssMsg.To, localAddr)
+ var byteIdx = distance / 8
+ pssMsg.To[byteIdx] ^= 1 << uint(7-(distance%8))
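+ // flipping the bit at index 'distance' (counting from the MSB) makes
+ // pssMsg.To match localAddr in exactly 'distance' leading bits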
+ log.Trace(fmt.Sprintf("addrmatch %v", bytes.Equal(pssMsg.To, localAddr)))
+ if ps.isSelfPossibleRecipient(pssMsg, true) != expects[i] {
+ t.Fatalf("expected distance %d to be %v", distance, expects[i])
+ }
+ }
+
+ // we move up to a higher level and test the actual message handler:
+ // for each distance, check if we are a possible recipient when the prox variant is set
+
+ // this handler will increment a counter for every message that gets passed to the handler
+ var receives int
+ rawHandlerFunc := func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+ log.Trace("in allowraw handler")
+ receives++
+ return nil
+ }
+
+ // register it marking prox capability
+ topic := BytesToTopic([]byte{0x2a})
+ hndlrProxDereg := ps.Register(&topic, &handler{
+ f: rawHandlerFunc,
+ caps: &handlerCaps{
+ raw: true,
+ prox: true,
+ },
+ })
+
+ // test the distances
+ var prevReceive int
+ for i, distance := range remoteDistances {
+ remotePotAddr := pot.RandomAddressAt(localPotAddr, distance)
+ remoteAddr := remotePotAddr.Bytes()
+
+ var data [32]byte
+ rand.Read(data[:])
+ pssMsg := newPssMsg(&msgParams{raw: true})
+ pssMsg.To = remoteAddr
+ pssMsg.Expire = uint32(time.Now().Unix() + 4200)
+ pssMsg.Payload = &whisper.Envelope{
+ Topic: whisper.TopicType(topic),
+ Data: data[:],
+ }
+
+ log.Trace("withprox addrs", "local", localAddr, "remote", remoteAddr)
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if (!expects[i] && prevReceive != receives) || (expects[i] && prevReceive == receives) {
+ t.Fatalf("expected distance %d recipient %v when prox is set for handler", distance, expects[i])
+ }
+ prevReceive = receives
+ }
+
+ // now add a non-prox-capable handler and test again
+ ps.Register(&topic, &handler{
+ f: rawHandlerFunc,
+ caps: &handlerCaps{
+ raw: true,
+ },
+ })
+ receives = 0
+ prevReceive = 0
+ for i, distance := range remoteDistances {
+ remotePotAddr := pot.RandomAddressAt(localPotAddr, distance)
+ remoteAddr := remotePotAddr.Bytes()
+
+ var data [32]byte
+ rand.Read(data[:])
+ pssMsg := newPssMsg(&msgParams{raw: true})
+ pssMsg.To = remoteAddr
+ pssMsg.Expire = uint32(time.Now().Unix() + 4200)
+ pssMsg.Payload = &whisper.Envelope{
+ Topic: whisper.TopicType(topic),
+ Data: data[:],
+ }
+
+ log.Trace("withprox addrs", "local", localAddr, "remote", remoteAddr)
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if (!expects[i] && prevReceive != receives) || (expects[i] && prevReceive == receives) {
+ t.Fatalf("expected distance %d recipient %v when prox is set for handler", distance, expects[i])
+ }
+ prevReceive = receives
+ }
+
+ // now deregister the prox-capable handler; none of the messages should be handled anymore
+ hndlrProxDereg()
+ receives = 0
+
+ for _, distance := range remoteDistances {
+ remotePotAddr := pot.RandomAddressAt(localPotAddr, distance)
+ remoteAddr := remotePotAddr.Bytes()
+
+ pssMsg := newPssMsg(&msgParams{raw: true})
+ pssMsg.To = remoteAddr
+ pssMsg.Expire = uint32(time.Now().Unix() + 4200)
+ pssMsg.Payload = &whisper.Envelope{
+ Topic: whisper.TopicType(topic),
+ Data: []byte(remotePotAddr.String()),
+ }
+
+ log.Trace("noprox addrs", "local", localAddr, "remote", remoteAddr)
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if receives != 0 {
+ t.Fatalf("expected distance %d to not be recipient when prox is not set for handler", distance)
+ }
+
+ }
+}
+
+// verify that message queueing happens when it should, and that expired and corrupt messages are dropped
+func TestMessageProcessing(t *testing.T) {
t.Skip("Disabled due to probable faulty logic for outbox expectations")
// setup
@@ -326,13 +650,12 @@ func TestHandlerConditions(t *testing.T) {
ps := newTestPss(privkey, network.NewKademlia(addr, network.NewKadParams()), NewPssParams())
// message should pass
- msg := &PssMsg{
- To: addr,
- Expire: uint32(time.Now().Add(time.Second * 60).Unix()),
- Payload: &whisper.Envelope{
- Topic: [4]byte{},
- Data: []byte{0x66, 0x6f, 0x6f},
- },
+ msg := newPssMsg(&msgParams{})
+ msg.To = addr
+ msg.Expire = uint32(time.Now().Add(time.Second * 60).Unix())
+ msg.Payload = &whisper.Envelope{
+ Topic: [4]byte{},
+ Data: []byte{0x66, 0x6f, 0x6f},
}
if err := ps.handlePssMsg(context.TODO(), msg); err != nil {
t.Fatal(err.Error())
@@ -463,14 +786,14 @@ func TestKeys(t *testing.T) {
copy(addr, network.RandomAddr().Over())
outkey := network.RandomAddr().Over()
topicobj := BytesToTopic([]byte("foo:42"))
- ps.SetPeerPublicKey(&theirprivkey.PublicKey, topicobj, &addr)
- outkeyid, err := ps.SetSymmetricKey(outkey, topicobj, &addr, false)
+ ps.SetPeerPublicKey(&theirprivkey.PublicKey, topicobj, addr)
+ outkeyid, err := ps.SetSymmetricKey(outkey, topicobj, addr, false)
if err != nil {
t.Fatalf("failed to set 'our' outgoing symmetric key")
}
// make a symmetric key that we will send to peer for encrypting messages to us
- inkeyid, err := ps.GenerateSymmetricKey(topicobj, &addr, true)
+ inkeyid, err := ps.GenerateSymmetricKey(topicobj, addr, true)
if err != nil {
t.Fatalf("failed to set 'our' incoming symmetric key")
}
@@ -493,11 +816,12 @@ func TestKeys(t *testing.T) {
// check that the key is stored in the peerpool
psp := ps.symKeyPool[inkeyid][topicobj]
- if psp.address != &addr {
- t.Fatalf("inkey address does not match; %p != %p", psp.address, &addr)
+ if !bytes.Equal(psp.address, addr) {
+ t.Fatalf("inkey address does not match; %p != %p", psp.address, addr)
}
}
+// check that we can retrieve previously added public key entries per topic and peer
func TestGetPublickeyEntries(t *testing.T) {
privkey, err := crypto.GenerateKey()
@@ -557,7 +881,7 @@ OUTER:
}
// forwarding should skip peers that do not have matching pss capabilities
-func TestMismatch(t *testing.T) {
+func TestPeerCapabilityMismatch(t *testing.T) {
// create privkey for forwarder node
privkey, err := crypto.GenerateKey()
@@ -615,6 +939,104 @@ func TestMismatch(t *testing.T) {
}
+// verifies that raw message handlers are only invoked when at least one handler registered for the topic explicitly allows raw messages
+func TestRawAllow(t *testing.T) {
+
+ // set up pss like so many times before
+ privKey, err := crypto.GenerateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+ baseAddr := network.RandomAddr()
+ kad := network.NewKademlia((baseAddr).Over(), network.NewKadParams())
+ ps := newTestPss(privKey, kad, nil)
+ topic := BytesToTopic([]byte{0x2a})
+
+ // create handler innards that increment a counter every time a message hits it
+ var receives int
+ rawHandlerFunc := func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+ log.Trace("in allowraw handler")
+ receives++
+ return nil
+ }
+
+ // wrap this handler function with a handler without raw capability and register it
+ hndlrNoRaw := &handler{
+ f: rawHandlerFunc,
+ }
+ ps.Register(&topic, hndlrNoRaw)
+
+ // test it with a raw message; it should not be handled
+ pssMsg := newPssMsg(&msgParams{
+ raw: true,
+ })
+ pssMsg.To = baseAddr.OAddr
+ pssMsg.Expire = uint32(time.Now().Unix() + 4200)
+ pssMsg.Payload = &whisper.Envelope{
+ Topic: whisper.TopicType(topic),
+ }
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if receives > 0 {
+ t.Fatalf("Expected handler not to be executed with raw cap off")
+ }
+
+ // now wrap the same handler function with raw capabilities and register it
+ hndlrRaw := &handler{
+ f: rawHandlerFunc,
+ caps: &handlerCaps{
+ raw: true,
+ },
+ }
+ deregRawHandler := ps.Register(&topic, hndlrRaw)
+
+ // should work now
+ pssMsg.Payload.Data = []byte("Raw Deal")
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if receives == 0 {
+ t.Fatalf("Expected handler to be executed with raw cap on")
+ }
+
+ // now deregister the raw capable handler
+ prevReceives := receives
+ deregRawHandler()
+
+ // check that raw messages fail again
+ pssMsg.Payload.Data = []byte("Raw Trump")
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if receives != prevReceives {
+ t.Fatalf("Expected handler not to be executed when raw handler is retracted")
+ }
+}
+
+// BELOW HERE ARE TESTS USING THE SIMULATION FRAMEWORK
+
+// tests that the API layer can handle edge case values
+func TestApi(t *testing.T) {
+ clients, err := setupNetwork(2, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ topic := "0xdeadbeef"
+
+ err = clients[0].Call(nil, "pss_sendRaw", "0x", topic, "0x666f6f")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = clients[0].Call(nil, "pss_sendRaw", "0xabcdef", topic, "0x")
+ if err == nil {
+ t.Fatal("expected error on empty msg")
+ }
+
+ overflowAddr := [33]byte{}
+ err = clients[0].Call(nil, "pss_sendRaw", hexutil.Encode(overflowAddr[:]), topic, "0x666f6f")
+ if err == nil {
+ t.Fatal("expected error on send too big address")
+ }
+}
+
+// verifies that nodes can send and receive raw (verbatim) messages
func TestSendRaw(t *testing.T) {
t.Run("32", testSendRaw)
t.Run("8", testSendRaw)
@@ -658,19 +1080,19 @@ func testSendRaw(t *testing.T) {
lmsgC := make(chan APIMsg)
lctx, lcancel := context.WithTimeout(context.Background(), time.Second*10)
defer lcancel()
- lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic)
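+ // editor's note: the two trailing booleans presumably select raw and prox
+ // delivery for the subscription, mirroring the new handlerCaps flags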
+ lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic, true, false)
log.Trace("lsub", "id", lsub)
defer lsub.Unsubscribe()
rmsgC := make(chan APIMsg)
rctx, rcancel := context.WithTimeout(context.Background(), time.Second*10)
defer rcancel()
- rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+ rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic, true, false)
log.Trace("rsub", "id", rsub)
defer rsub.Unsubscribe()
// send and verify delivery
lmsg := []byte("plugh")
- err = clients[1].Call(nil, "pss_sendRaw", loaddrhex, topic, lmsg)
+ err = clients[1].Call(nil, "pss_sendRaw", loaddrhex, topic, hexutil.Encode(lmsg))
if err != nil {
t.Fatal(err)
}
@@ -683,7 +1105,7 @@ func testSendRaw(t *testing.T) {
t.Fatalf("test message (left) timed out: %v", cerr)
}
rmsg := []byte("xyzzy")
- err = clients[0].Call(nil, "pss_sendRaw", roaddrhex, topic, rmsg)
+ err = clients[0].Call(nil, "pss_sendRaw", roaddrhex, topic, hexutil.Encode(rmsg))
if err != nil {
t.Fatal(err)
}
@@ -757,13 +1179,13 @@ func testSendSym(t *testing.T) {
lmsgC := make(chan APIMsg)
lctx, lcancel := context.WithTimeout(context.Background(), time.Second*10)
defer lcancel()
- lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic)
+ lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic, false, false)
log.Trace("lsub", "id", lsub)
defer lsub.Unsubscribe()
rmsgC := make(chan APIMsg)
rctx, rcancel := context.WithTimeout(context.Background(), time.Second*10)
defer rcancel()
- rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+ rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic, false, false)
log.Trace("rsub", "id", rsub)
defer rsub.Unsubscribe()
@@ -872,13 +1294,13 @@ func testSendAsym(t *testing.T) {
lmsgC := make(chan APIMsg)
lctx, lcancel := context.WithTimeout(context.Background(), time.Second*10)
defer lcancel()
- lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic)
+ lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic, false, false)
log.Trace("lsub", "id", lsub)
defer lsub.Unsubscribe()
rmsgC := make(chan APIMsg)
rctx, rcancel := context.WithTimeout(context.Background(), time.Second*10)
defer rcancel()
- rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+ rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic, false, false)
log.Trace("rsub", "id", rsub)
defer rsub.Unsubscribe()
@@ -1037,7 +1459,7 @@ func testNetwork(t *testing.T) {
msgC := make(chan APIMsg)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
- sub, err := rpcclient.Subscribe(ctx, "pss", msgC, "receive", topic)
+ sub, err := rpcclient.Subscribe(ctx, "pss", msgC, "receive", topic, false, false)
if err != nil {
t.Fatal(err)
}
@@ -1209,7 +1631,7 @@ func TestDeduplication(t *testing.T) {
rmsgC := make(chan APIMsg)
rctx, cancel := context.WithTimeout(context.Background(), time.Second*1)
defer cancel()
- rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+ rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic, false, false)
log.Trace("rsub", "id", rsub)
defer rsub.Unsubscribe()
@@ -1274,7 +1696,7 @@ func benchmarkSymKeySend(b *testing.B) {
topic := BytesToTopic([]byte("foo"))
to := make(PssAddress, 32)
copy(to[:], network.RandomAddr().Over())
- symkeyid, err := ps.GenerateSymmetricKey(topic, &to, true)
+ symkeyid, err := ps.GenerateSymmetricKey(topic, to, true)
if err != nil {
b.Fatalf("could not generate symkey: %v", err)
}
@@ -1282,7 +1704,7 @@ func benchmarkSymKeySend(b *testing.B) {
if err != nil {
b.Fatalf("could not retrieve symkey: %v", err)
}
- ps.SetSymmetricKey(symkey, topic, &to, false)
+ ps.SetSymmetricKey(symkey, topic, to, false)
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -1318,7 +1740,7 @@ func benchmarkAsymKeySend(b *testing.B) {
topic := BytesToTopic([]byte("foo"))
to := make(PssAddress, 32)
copy(to[:], network.RandomAddr().Over())
- ps.SetPeerPublicKey(&privkey.PublicKey, topic, &to)
+ ps.SetPeerPublicKey(&privkey.PublicKey, topic, to)
b.ResetTimer()
for i := 0; i < b.N; i++ {
ps.SendAsym(common.ToHex(crypto.FromECDSAPub(&privkey.PublicKey)), topic, msg)
@@ -1367,7 +1789,7 @@ func benchmarkSymkeyBruteforceChangeaddr(b *testing.B) {
for i := 0; i < int(keycount); i++ {
to := make(PssAddress, 32)
copy(to[:], network.RandomAddr().Over())
- keyid, err = ps.GenerateSymmetricKey(topic, &to, true)
+ keyid, err = ps.GenerateSymmetricKey(topic, to, true)
if err != nil {
b.Fatalf("cant generate symkey #%d: %v", i, err)
}
@@ -1392,8 +1814,8 @@ func benchmarkSymkeyBruteforceChangeaddr(b *testing.B) {
if err != nil {
b.Fatalf("could not generate whisper envelope: %v", err)
}
- ps.Register(&topic, func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
- return nil
+ ps.Register(&topic, &handler{
+ f: noopHandlerFunc,
})
pssmsgs = append(pssmsgs, &PssMsg{
To: to,
@@ -1402,7 +1824,7 @@ func benchmarkSymkeyBruteforceChangeaddr(b *testing.B) {
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
- if err := ps.process(pssmsgs[len(pssmsgs)-(i%len(pssmsgs))-1]); err != nil {
+ if err := ps.process(pssmsgs[len(pssmsgs)-(i%len(pssmsgs))-1], false, false); err != nil {
b.Fatalf("pss processing failed: %v", err)
}
}
@@ -1449,7 +1871,7 @@ func benchmarkSymkeyBruteforceSameaddr(b *testing.B) {
topic := BytesToTopic([]byte("foo"))
for i := 0; i < int(keycount); i++ {
copy(addr[i], network.RandomAddr().Over())
- keyid, err = ps.GenerateSymmetricKey(topic, &addr[i], true)
+ keyid, err = ps.GenerateSymmetricKey(topic, addr[i], true)
if err != nil {
b.Fatalf("cant generate symkey #%d: %v", i, err)
}
@@ -1476,15 +1898,15 @@ func benchmarkSymkeyBruteforceSameaddr(b *testing.B) {
if err != nil {
b.Fatalf("could not generate whisper envelope: %v", err)
}
- ps.Register(&topic, func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
- return nil
+ ps.Register(&topic, &handler{
+ f: noopHandlerFunc,
})
pssmsg := &PssMsg{
To: addr[len(addr)-1][:],
Payload: env,
}
for i := 0; i < b.N; i++ {
- if err := ps.process(pssmsg); err != nil {
+ if err := ps.process(pssmsg, false, false); err != nil {
b.Fatalf("pss processing failed: %v", err)
}
}
@@ -1543,7 +1965,7 @@ func newServices(allowRaw bool) adapters.Services {
return k
}
params := network.NewKadParams()
- params.MinProxBinSize = 2
+ params.NeighbourhoodSize = 2
params.MaxBinSize = 3
params.MinBinSize = 1
params.MaxRetries = 1000
@@ -1581,7 +2003,12 @@ func newServices(allowRaw bool) adapters.Services {
if useHandshake {
SetHandshakeController(ps, NewHandshakeParams())
}
- ps.Register(&PingTopic, pp.Handle)
+ ps.Register(&PingTopic, &handler{
+ f: pp.Handle,
+ caps: &handlerCaps{
+ raw: true,
+ },
+ })
ps.addAPI(rpc.API{
Namespace: "psstest",
Version: "0.3",
@@ -1618,7 +2045,7 @@ func newTestPss(privkey *ecdsa.PrivateKey, kad *network.Kademlia, ppextra *PssPa
// set up routing if kademlia is not passed to us
if kad == nil {
kp := network.NewKadParams()
- kp.MinProxBinSize = 3
+ kp.NeighbourhoodSize = 3
kad = network.NewKademlia(nid[:], kp)
}
@@ -1645,12 +2072,13 @@ func NewAPITest(ps *Pss) *APITest {
return &APITest{Pss: ps}
}
-func (apitest *APITest) SetSymKeys(pubkeyid string, recvsymkey []byte, sendsymkey []byte, limit uint16, topic Topic, to PssAddress) ([2]string, error) {
- recvsymkeyid, err := apitest.SetSymmetricKey(recvsymkey, topic, &to, true)
+func (apitest *APITest) SetSymKeys(pubkeyid string, recvsymkey []byte, sendsymkey []byte, limit uint16, topic Topic, to hexutil.Bytes) ([2]string, error) {
+
+ recvsymkeyid, err := apitest.SetSymmetricKey(recvsymkey, topic, PssAddress(to), true)
if err != nil {
return [2]string{}, err
}
- sendsymkeyid, err := apitest.SetSymmetricKey(sendsymkey, topic, &to, false)
+ sendsymkeyid, err := apitest.SetSymmetricKey(sendsymkey, topic, PssAddress(to), false)
if err != nil {
return [2]string{}, err
}
diff --git a/swarm/pss/types.go b/swarm/pss/types.go
index 56c2c51dc..ba963067c 100644
--- a/swarm/pss/types.go
+++ b/swarm/pss/types.go
@@ -159,9 +159,39 @@ func (msg *PssMsg) String() string {
}
// Signature for a message handler function for a PssMsg
-//
// Implementations of this type are passed to Pss.Register together with a topic,
-type Handler func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error
+type HandlerFunc func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error
+
+type handlerCaps struct {
+ raw bool
+ prox bool
+}
+
+// handler defines code to be executed upon reception of content.
+type handler struct {
+ f HandlerFunc
+ caps *handlerCaps
+}
+
+// NewHandler returns a new message handler
+func NewHandler(f HandlerFunc) *handler {
+ return &handler{
+ f: f,
+ caps: &handlerCaps{},
+ }
+}
+
+// WithRaw is a chainable method that allows raw messages to be handled.
+func (h *handler) WithRaw() *handler {
+ h.caps.raw = true
+ return h
+}
+
+// WithProxBin is a chainable method that enables handling of messages addressed to the node's neighbourhood, using the kademlia depth as reference
+func (h *handler) WithProxBin() *handler {
+ h.caps.prox = true
+ return h
+}
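+
+// Example (editor's sketch): registering a handler that accepts raw messages
+// and neighbourhood (prox) delivery:
+//
+//	topic := BytesToTopic([]byte("mytopic"))
+//	dereg := ps.Register(&topic, NewHandler(func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+//		return nil
+//	}).WithRaw().WithProxBin())
+//	defer dereg()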
// the stateStore handles saving and loading PSS peers and their corresponding keys
// it is currently unimplemented
diff --git a/swarm/shed/db.go b/swarm/shed/db.go
new file mode 100644
index 000000000..d4e5d1b23
--- /dev/null
+++ b/swarm/shed/db.go
@@ -0,0 +1,329 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package shed provides simple abstraction components to compose
+// more complex operations on storage data organized in fields and indexes.
+//
+// The only type which holds logical information about swarm storage chunk
+// data and metadata is Item. This part is not generalized, mostly for
+// performance reasons.
+package shed
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/swarm/log"
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+const (
+ openFileLimit = 128 // The limit for LevelDB OpenFilesCacheCapacity.
+ writePauseWarningThrottler = 1 * time.Minute
+)
+
+// DB provides abstractions over LevelDB in order to
+// implement complex structures using fields and ordered indexes.
+// It provides a schema functionality to store fields and indexes
+// information about naming and types.
+type DB struct {
+ ldb *leveldb.DB
+
+ compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction
+ compReadMeter metrics.Meter // Meter for measuring the data read during compaction
+ compWriteMeter metrics.Meter // Meter for measuring the data written during compaction
+ writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
+ writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction
+ diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read
+ diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written
+
+ quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
+}
+
+// NewDB constructs a new DB and validates the schema
+// if it exists in database on the given path.
+// metricsPrefix is used for metrics collection for the given DB.
+func NewDB(path string, metricsPrefix string) (db *DB, err error) {
+ ldb, err := leveldb.OpenFile(path, &opt.Options{
+ OpenFilesCacheCapacity: openFileLimit,
+ })
+ if err != nil {
+ return nil, err
+ }
+ db = &DB{
+ ldb: ldb,
+ }
+
+ if _, err = db.getSchema(); err != nil {
+ if err == leveldb.ErrNotFound {
+ // save schema with initialized default fields
+ if err = db.putSchema(schema{
+ Fields: make(map[string]fieldSpec),
+ Indexes: make(map[byte]indexSpec),
+ }); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, err
+ }
+ }
+
+ // Configure meters for DB
+ db.configure(metricsPrefix)
+
+ // Create a quit channel for the periodic metrics collector and run it
+ db.quitChan = make(chan chan error)
+
+ go db.meter(10 * time.Second)
+
+ return db, nil
+}
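+
+// A minimal usage sketch (editor's illustration; path and field name are
+// hypothetical):
+//
+//	db, err := NewDB("/tmp/shed-example", "swarm/shed/")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer db.Close()
+//	f, err := db.NewStringField("example-field")
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = f.Put("hello")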
+
+// Put wraps LevelDB Put method to increment metrics counter.
+func (db *DB) Put(key []byte, value []byte) (err error) {
+ err = db.ldb.Put(key, value, nil)
+ if err != nil {
+ metrics.GetOrRegisterCounter("DB.putFail", nil).Inc(1)
+ return err
+ }
+ metrics.GetOrRegisterCounter("DB.put", nil).Inc(1)
+ return nil
+}
+
+// Get wraps LevelDB Get method to increment metrics counter.
+func (db *DB) Get(key []byte) (value []byte, err error) {
+ value, err = db.ldb.Get(key, nil)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ metrics.GetOrRegisterCounter("DB.getNotFound", nil).Inc(1)
+ } else {
+ metrics.GetOrRegisterCounter("DB.getFail", nil).Inc(1)
+ }
+ return nil, err
+ }
+ metrics.GetOrRegisterCounter("DB.get", nil).Inc(1)
+ return value, nil
+}
+
+// Delete wraps LevelDB Delete method to increment metrics counter.
+func (db *DB) Delete(key []byte) (err error) {
+ err = db.ldb.Delete(key, nil)
+ if err != nil {
+ metrics.GetOrRegisterCounter("DB.deleteFail", nil).Inc(1)
+ return err
+ }
+ metrics.GetOrRegisterCounter("DB.delete", nil).Inc(1)
+ return nil
+}
+
+// NewIterator wraps LevelDB NewIterator method to increment metrics counter.
+func (db *DB) NewIterator() iterator.Iterator {
+ metrics.GetOrRegisterCounter("DB.newiterator", nil).Inc(1)
+
+ return db.ldb.NewIterator(nil, nil)
+}
+
+// WriteBatch wraps LevelDB Write method to increment metrics counter.
+func (db *DB) WriteBatch(batch *leveldb.Batch) (err error) {
+ err = db.ldb.Write(batch, nil)
+ if err != nil {
+ metrics.GetOrRegisterCounter("DB.writebatchFail", nil).Inc(1)
+ return err
+ }
+ metrics.GetOrRegisterCounter("DB.writebatch", nil).Inc(1)
+ return nil
+}
+
+// Close closes LevelDB database.
+func (db *DB) Close() (err error) {
+ close(db.quitChan)
+ return db.ldb.Close()
+}
+
+// Configure configures the database metrics collectors
+func (db *DB) configure(prefix string) {
+ // Initialize all the metrics collector at the requested prefix
+ db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
+ db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
+ db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
+ db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
+ db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)
+ db.writeDelayMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
+ db.writeDelayNMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)
+}
+
+func (db *DB) meter(refresh time.Duration) {
+ // Create the counters to store current and previous compaction values
+ compactions := make([][]float64, 2)
+ for i := 0; i < 2; i++ {
+ compactions[i] = make([]float64, 3)
+ }
+ // Create storage for iostats.
+ var iostats [2]float64
+
+ // Create storage and warning log tracer for write delay.
+ var (
+ delaystats [2]int64
+ lastWritePaused time.Time
+ )
+
+ var (
+ errc chan error
+ merr error
+ )
+
+ // Iterate ad infinitum and collect the stats
+ for i := 1; errc == nil && merr == nil; i++ {
+ // Retrieve the database stats
+ stats, err := db.ldb.GetProperty("leveldb.stats")
+ if err != nil {
+ log.Error("Failed to read database stats", "err", err)
+ merr = err
+ continue
+ }
+ // Find the compaction table, skip the header
+ lines := strings.Split(stats, "\n")
+ for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
+ lines = lines[1:]
+ }
+ if len(lines) <= 3 {
+ log.Error("Compaction table not found")
+ merr = errors.New("compaction table not found")
+ continue
+ }
+ lines = lines[3:]
+
+ // Iterate over all the table rows, and accumulate the entries
+ for j := 0; j < len(compactions[i%2]); j++ {
+ compactions[i%2][j] = 0
+ }
+ for _, line := range lines {
+ parts := strings.Split(line, "|")
+ if len(parts) != 6 {
+ break
+ }
+ for idx, counter := range parts[3:] {
+ value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
+ if err != nil {
+ log.Error("Compaction entry parsing failed", "err", err)
+ merr = err
+ continue
+ }
+ compactions[i%2][idx] += value
+ }
+ }
+ // Update all the requested meters
+ if db.compTimeMeter != nil {
+ db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
+ }
+ if db.compReadMeter != nil {
+ db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
+ }
+ if db.compWriteMeter != nil {
+ db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
+ }
+
+ // Retrieve the write delay statistic
+ writedelay, err := db.ldb.GetProperty("leveldb.writedelay")
+ if err != nil {
+ log.Error("Failed to read database write delay statistic", "err", err)
+ merr = err
+ continue
+ }
+ var (
+ delayN int64
+ delayDuration string
+ duration time.Duration
+ paused bool
+ )
+ if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
+ log.Error("Write delay statistic not found")
+ merr = err
+ continue
+ }
+ duration, err = time.ParseDuration(delayDuration)
+ if err != nil {
+ log.Error("Failed to parse delay duration", "err", err)
+ merr = err
+ continue
+ }
+ if db.writeDelayNMeter != nil {
+ db.writeDelayNMeter.Mark(delayN - delaystats[0])
+ }
+ if db.writeDelayMeter != nil {
+ db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
+ }
+ // If a warning that the db is performing compaction has been displayed, any subsequent
+ // warnings will be withheld for one minute so as not to overwhelm the user.
+ if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
+ time.Now().After(lastWritePaused.Add(writePauseWarningThrottler)) {
+ log.Warn("Database compacting, degraded performance")
+ lastWritePaused = time.Now()
+ }
+ delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()
+
+ // Retrieve the database iostats.
+ ioStats, err := db.ldb.GetProperty("leveldb.iostats")
+ if err != nil {
+ log.Error("Failed to read database iostats", "err", err)
+ merr = err
+ continue
+ }
+ var nRead, nWrite float64
+ parts := strings.Split(ioStats, " ")
+ if len(parts) < 2 {
+ log.Error("Bad syntax of ioStats", "ioStats", ioStats)
+ merr = fmt.Errorf("bad syntax of ioStats %s", ioStats)
+ continue
+ }
+ if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
+ log.Error("Bad syntax of read entry", "entry", parts[0])
+ merr = err
+ continue
+ }
+ if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
+ log.Error("Bad syntax of write entry", "entry", parts[1])
+ merr = err
+ continue
+ }
+ if db.diskReadMeter != nil {
+ db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
+ }
+ if db.diskWriteMeter != nil {
+ db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
+ }
+ iostats[0], iostats[1] = nRead, nWrite
+
+ // Sleep a bit, then repeat the stats collection
+ select {
+ case errc = <-db.quitChan:
+ // Quit requesting, stop hammering the database
+ case <-time.After(refresh):
+ // Timeout, gather a new set of stats
+ }
+ }
+
+ if errc == nil {
+ errc = <-db.quitChan
+ }
+ errc <- merr
+}
diff --git a/swarm/shed/db_test.go b/swarm/shed/db_test.go
new file mode 100644
index 000000000..65fdac4a6
--- /dev/null
+++ b/swarm/shed/db_test.go
@@ -0,0 +1,110 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+// TestNewDB constructs a new DB
+// and validates if the schema is initialized properly.
+func TestNewDB(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ s, err := db.getSchema()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if s.Fields == nil {
+ t.Error("schema fields are empty")
+ }
+ if len(s.Fields) != 0 {
+ t.Errorf("got schema fields length %v, want %v", len(s.Fields), 0)
+ }
+ if s.Indexes == nil {
+ t.Error("schema indexes are empty")
+ }
+ if len(s.Indexes) != 0 {
+ t.Errorf("got schema indexes length %v, want %v", len(s.Indexes), 0)
+ }
+}
+
+// TestDB_persistence creates one DB, saves a field and closes that DB.
+// Then, it constructs another DB and tries to retrieve the saved value.
+func TestDB_persistence(t *testing.T) {
+ dir, err := ioutil.TempDir("", "shed-test-persistence")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ db, err := NewDB(dir, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ stringField, err := db.NewStringField("preserve-me")
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := "persistent value"
+ err = stringField.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = db.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ db2, err := NewDB(dir, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ stringField2, err := db2.NewStringField("preserve-me")
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := stringField2.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+}
+
+// newTestDB is a helper function that constructs a
+// temporary database and returns a cleanup function that must
+// be called to remove the data.
+func newTestDB(t *testing.T) (db *DB, cleanupFunc func()) {
+ t.Helper()
+
+ dir, err := ioutil.TempDir("", "shed-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ cleanupFunc = func() { os.RemoveAll(dir) }
+ db, err = NewDB(dir, "")
+ if err != nil {
+ cleanupFunc()
+ t.Fatal(err)
+ }
+ return db, cleanupFunc
+}
diff --git a/swarm/shed/example_store_test.go b/swarm/shed/example_store_test.go
new file mode 100644
index 000000000..9a83855e7
--- /dev/null
+++ b/swarm/shed/example_store_test.go
@@ -0,0 +1,332 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed_test
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "time"
+
+ "github.com/ethereum/go-ethereum/swarm/shed"
+ "github.com/ethereum/go-ethereum/swarm/storage"
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// Store holds fields and indexes (including their encoding functions)
+// and defines operations on them by composing data from them.
+// It implements storage.ChunkStore interface.
+// It is just an example without any support for parallel operations
+// or real world implementation.
+type Store struct {
+ db *shed.DB
+
+ // fields and indexes
+ schemaName shed.StringField
+ sizeCounter shed.Uint64Field
+ accessCounter shed.Uint64Field
+ retrievalIndex shed.Index
+ accessIndex shed.Index
+ gcIndex shed.Index
+}
+
+// New returns a new Store. All fields and indexes are initialized
+// and possible conflicts with the schema from an existing database are
+// checked automatically.
+func New(path string) (s *Store, err error) {
+ db, err := shed.NewDB(path, "")
+ if err != nil {
+ return nil, err
+ }
+ s = &Store{
+ db: db,
+ }
+ // Identify current storage schema by arbitrary name.
+ s.schemaName, err = db.NewStringField("schema-name")
+ if err != nil {
+ return nil, err
+ }
+ // Global, ever-incrementing counter of chunk accesses.
+ s.accessCounter, err = db.NewUint64Field("access-counter")
+ if err != nil {
+ return nil, err
+ }
+ // Index storing actual chunk address, data and store timestamp.
+ s.retrievalIndex, err = db.NewIndex("Address->StoreTimestamp|Data", shed.IndexFuncs{
+ EncodeKey: func(fields shed.Item) (key []byte, err error) {
+ return fields.Address, nil
+ },
+ DecodeKey: func(key []byte) (e shed.Item, err error) {
+ e.Address = key
+ return e, nil
+ },
+ EncodeValue: func(fields shed.Item) (value []byte, err error) {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
+ value = append(b, fields.Data...)
+ return value, nil
+ },
+ DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
+ e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
+ e.Data = value[8:]
+ return e, nil
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ // Index storing access timestamp for a particular address.
+ // It is needed in order to update gc index keys for iteration order.
+ s.accessIndex, err = db.NewIndex("Address->AccessTimestamp", shed.IndexFuncs{
+ EncodeKey: func(fields shed.Item) (key []byte, err error) {
+ return fields.Address, nil
+ },
+ DecodeKey: func(key []byte) (e shed.Item, err error) {
+ e.Address = key
+ return e, nil
+ },
+ EncodeValue: func(fields shed.Item) (value []byte, err error) {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(fields.AccessTimestamp))
+ return b, nil
+ },
+ DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
+ e.AccessTimestamp = int64(binary.BigEndian.Uint64(value))
+ return e, nil
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ // Index with keys ordered by access timestamp for garbage collection prioritization.
+ s.gcIndex, err = db.NewIndex("AccessTimestamp|StoredTimestamp|Address->nil", shed.IndexFuncs{
+ EncodeKey: func(fields shed.Item) (key []byte, err error) {
+ b := make([]byte, 16, 16+len(fields.Address))
+ binary.BigEndian.PutUint64(b[:8], uint64(fields.AccessTimestamp))
+ binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
+ key = append(b, fields.Address...)
+ return key, nil
+ },
+ DecodeKey: func(key []byte) (e shed.Item, err error) {
+ e.AccessTimestamp = int64(binary.BigEndian.Uint64(key[:8]))
+ e.StoreTimestamp = int64(binary.BigEndian.Uint64(key[8:16]))
+ e.Address = key[16:]
+ return e, nil
+ },
+ EncodeValue: func(fields shed.Item) (value []byte, err error) {
+ return nil, nil
+ },
+ DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
+ return e, nil
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ return s, nil
+}
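+
+// The resulting gc index key layout (editor's note) is:
+//
+//	bytes 0..7    big-endian uint64 access timestamp
+//	bytes 8..15   big-endian uint64 store timestamp
+//	bytes 16..    chunk address
+//
+// so LevelDB's lexicographic key iteration yields least recently accessed
+// chunks first.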
+
+// Put stores the chunk and sets its store timestamp.
+func (s *Store) Put(_ context.Context, ch storage.Chunk) (err error) {
+ return s.retrievalIndex.Put(shed.Item{
+ Address: ch.Address(),
+ Data: ch.Data(),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ })
+}
+
+// Get retrieves a chunk with the provided address.
+// It updates access and gc indexes by removing the previous
+// items from them and adding new items as keys of index entries
+// are changed.
+func (s *Store) Get(_ context.Context, addr storage.Address) (c storage.Chunk, err error) {
+ batch := new(leveldb.Batch)
+
+ // Get the chunk data and storage timestamp.
+ item, err := s.retrievalIndex.Get(shed.Item{
+ Address: addr,
+ })
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ return nil, storage.ErrChunkNotFound
+ }
+ return nil, err
+ }
+
+ // Get the chunk access timestamp.
+ accessItem, err := s.accessIndex.Get(shed.Item{
+ Address: addr,
+ })
+ switch err {
+ case nil:
+ // Remove gc index entry if access timestamp is found.
+		err = s.gcIndex.DeleteInBatch(batch, shed.Item{
+			Address:         item.Address,
+			StoreTimestamp:  item.StoreTimestamp,
+			AccessTimestamp: accessItem.AccessTimestamp,
+ })
+ if err != nil {
+ return nil, err
+ }
+ case leveldb.ErrNotFound:
+ // Access timestamp is not found. Do not do anything.
+		// This is the first get request.
+ default:
+ return nil, err
+ }
+
+ // Specify new access timestamp
+ accessTimestamp := time.Now().UTC().UnixNano()
+
+ // Put new access timestamp in access index.
+ err = s.accessIndex.PutInBatch(batch, shed.Item{
+ Address: addr,
+ AccessTimestamp: accessTimestamp,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Put new access timestamp in gc index.
+ err = s.gcIndex.PutInBatch(batch, shed.Item{
+ Address: item.Address,
+ AccessTimestamp: accessTimestamp,
+ StoreTimestamp: item.StoreTimestamp,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Increment access counter.
+ // Currently this information is not used anywhere.
+ _, err = s.accessCounter.IncInBatch(batch)
+ if err != nil {
+ return nil, err
+ }
+
+ // Write the batch.
+ err = s.db.WriteBatch(batch)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return the chunk.
+ return storage.NewChunk(item.Address, item.Data), nil
+}
+
+// CollectGarbage is an example of index iteration.
+// It provides no reliable garbage collection functionality.
+func (s *Store) CollectGarbage() (err error) {
+ const maxTrashSize = 100
+ maxRounds := 10 // arbitrary number, needs to be calculated
+
+ // Run a few gc rounds.
+ for roundCount := 0; roundCount < maxRounds; roundCount++ {
+ var garbageCount int
+		// New batch for a new gc round.
+ trash := new(leveldb.Batch)
+ // Iterate through all index items and break when needed.
+ err = s.gcIndex.Iterate(func(item shed.Item) (stop bool, err error) {
+ // Remove the chunk.
+ err = s.retrievalIndex.DeleteInBatch(trash, item)
+ if err != nil {
+ return false, err
+ }
+ // Remove the element in gc index.
+ err = s.gcIndex.DeleteInBatch(trash, item)
+ if err != nil {
+ return false, err
+ }
+ // Remove the relation in access index.
+ err = s.accessIndex.DeleteInBatch(trash, item)
+ if err != nil {
+ return false, err
+ }
+ garbageCount++
+ if garbageCount >= maxTrashSize {
+ return true, nil
+ }
+ return false, nil
+ }, nil)
+ if err != nil {
+ return err
+ }
+ if garbageCount == 0 {
+ return nil
+ }
+ err = s.db.WriteBatch(trash)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
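+
+// With the constants above, a single CollectGarbage call removes at most
+// maxRounds * maxTrashSize = 10 * 100 = 1000 chunks, flushing deletions in
+// batches of up to 100 items so that no single LevelDB write grows unbounded.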
+
+// GetSchema is an example of retrieving a simple
+// string from a database field.
+func (s *Store) GetSchema() (name string, err error) {
+ name, err = s.schemaName.Get()
+ if err == leveldb.ErrNotFound {
+ return "", nil
+ }
+ return name, err
+}
+
+// PutSchema is an example of storing a simple
+// string in a database field.
+func (s *Store) PutSchema(name string) (err error) {
+ return s.schemaName.Put(name)
+}
+
+// Close closes the underlying database.
+func (s *Store) Close() error {
+ return s.db.Close()
+}
+
+// Example_store constructs a simple storage implementation using the shed package.
+func Example_store() {
+ dir, err := ioutil.TempDir("", "ephemeral")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ s, err := New(dir)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer s.Close()
+
+ ch := storage.GenerateRandomChunk(1024)
+ err = s.Put(context.Background(), ch)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ got, err := s.Get(context.Background(), ch.Address())
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Println(bytes.Equal(got.Data(), ch.Data()))
+
+	// Output: true
+}
diff --git a/swarm/shed/field_string.go b/swarm/shed/field_string.go
new file mode 100644
index 000000000..a7e8f0c75
--- /dev/null
+++ b/swarm/shed/field_string.go
@@ -0,0 +1,66 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// StringField is the simplest field implementation
+// that stores an arbitrary string under a specific LevelDB key.
+type StringField struct {
+ db *DB
+ key []byte
+}
+
+// NewStringField returns a new instance of StringField.
+// It validates its name and type against the database schema.
+func (db *DB) NewStringField(name string) (f StringField, err error) {
+ key, err := db.schemaFieldKey(name, "string")
+ if err != nil {
+ return f, err
+ }
+ return StringField{
+ db: db,
+ key: key,
+ }, nil
+}
+
+// Get returns a string value from the database.
+// If the value is not found, an empty string is returned
+// and no error.
+func (f StringField) Get() (val string, err error) {
+ b, err := f.db.Get(f.key)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ return "", nil
+ }
+ return "", err
+ }
+ return string(b), nil
+}
+
+// Put stores a string in the database.
+func (f StringField) Put(val string) (err error) {
+ return f.db.Put(f.key, []byte(val))
+}
+
+// PutInBatch stores a string in a batch that can be
+// saved later in the database.
+func (f StringField) PutInBatch(batch *leveldb.Batch, val string) {
+ batch.Put(f.key, []byte(val))
+}
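+
+// A minimal usage sketch (illustrative only; "status" is a hypothetical
+// field name, not one defined in this package):
+//
+//	status, err := db.NewStringField("status")
+//	if err != nil {
+//		// handle error
+//	}
+//	if err := status.Put("active"); err != nil {
+//		// handle error
+//	}
+//	val, err := status.Get() // "" and a nil error when the field is unset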
diff --git a/swarm/shed/field_string_test.go b/swarm/shed/field_string_test.go
new file mode 100644
index 000000000..4215075bc
--- /dev/null
+++ b/swarm/shed/field_string_test.go
@@ -0,0 +1,110 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "testing"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// TestStringField validates put and get operations
+// of the StringField.
+func TestStringField(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ simpleString, err := db.NewStringField("simple-string")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run("get empty", func(t *testing.T) {
+ got, err := simpleString.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := ""
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+ })
+
+ t.Run("put", func(t *testing.T) {
+ want := "simple string value"
+ err = simpleString.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := simpleString.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ want := "overwritten string value"
+ err = simpleString.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := simpleString.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+ })
+ })
+
+ t.Run("put in batch", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ want := "simple string batch value"
+ simpleString.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := simpleString.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ want := "overwritten string batch value"
+ simpleString.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := simpleString.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+ })
+ })
+}
diff --git a/swarm/shed/field_struct.go b/swarm/shed/field_struct.go
new file mode 100644
index 000000000..90daee7fc
--- /dev/null
+++ b/swarm/shed/field_struct.go
@@ -0,0 +1,71 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// StructField is a helper to store complex structure by
+// encoding it in RLP format.
+type StructField struct {
+ db *DB
+ key []byte
+}
+
+// NewStructField returns a new StructField.
+// It validates its name and type against the database schema.
+func (db *DB) NewStructField(name string) (f StructField, err error) {
+ key, err := db.schemaFieldKey(name, "struct-rlp")
+ if err != nil {
+ return f, err
+ }
+ return StructField{
+ db: db,
+ key: key,
+ }, nil
+}
+
+// Get unmarshals data from the database to a provided val.
+// If the data is not found leveldb.ErrNotFound is returned.
+func (f StructField) Get(val interface{}) (err error) {
+ b, err := f.db.Get(f.key)
+ if err != nil {
+ return err
+ }
+ return rlp.DecodeBytes(b, val)
+}
+
+// Put marshals provided val and saves it to the database.
+func (f StructField) Put(val interface{}) (err error) {
+ b, err := rlp.EncodeToBytes(val)
+ if err != nil {
+ return err
+ }
+ return f.db.Put(f.key, b)
+}
+
+// PutInBatch marshals provided val and puts it into the batch.
+func (f StructField) PutInBatch(batch *leveldb.Batch, val interface{}) (err error) {
+ b, err := rlp.EncodeToBytes(val)
+ if err != nil {
+ return err
+ }
+ batch.Put(f.key, b)
+ return nil
+}
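+
+// A minimal usage sketch (illustrative only; the struct and the field name
+// "stats" are hypothetical). Note that RLP encodes only exported fields:
+//
+//	type stats struct {
+//		ChunkCount uint64
+//	}
+//
+//	field, err := db.NewStructField("stats")
+//	if err != nil {
+//		// handle error
+//	}
+//	if err := field.Put(stats{ChunkCount: 42}); err != nil {
+//		// handle error
+//	}
+//	var s stats
+//	err = field.Get(&s) // leveldb.ErrNotFound if nothing was ever put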
diff --git a/swarm/shed/field_struct_test.go b/swarm/shed/field_struct_test.go
new file mode 100644
index 000000000..cc0be0186
--- /dev/null
+++ b/swarm/shed/field_struct_test.go
@@ -0,0 +1,127 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "testing"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// TestStructField validates put and get operations
+// of the StructField.
+func TestStructField(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ complexField, err := db.NewStructField("complex-field")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ type complexStructure struct {
+ A string
+ }
+
+ t.Run("get empty", func(t *testing.T) {
+ var s complexStructure
+ err := complexField.Get(&s)
+ if err != leveldb.ErrNotFound {
+ t.Fatalf("got error %v, want %v", err, leveldb.ErrNotFound)
+ }
+ want := ""
+ if s.A != want {
+ t.Errorf("got string %q, want %q", s.A, want)
+ }
+ })
+
+ t.Run("put", func(t *testing.T) {
+ want := complexStructure{
+ A: "simple string value",
+ }
+ err = complexField.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var got complexStructure
+ err = complexField.Get(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got.A != want.A {
+ t.Errorf("got string %q, want %q", got.A, want.A)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ want := complexStructure{
+ A: "overwritten string value",
+ }
+ err = complexField.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var got complexStructure
+ err = complexField.Get(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got.A != want.A {
+ t.Errorf("got string %q, want %q", got.A, want.A)
+ }
+ })
+ })
+
+ t.Run("put in batch", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ want := complexStructure{
+ A: "simple string batch value",
+ }
+ complexField.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var got complexStructure
+ err := complexField.Get(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got.A != want.A {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ want := complexStructure{
+ A: "overwritten string batch value",
+ }
+ complexField.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var got complexStructure
+ err := complexField.Get(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got.A != want.A {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+ })
+ })
+}
diff --git a/swarm/shed/field_uint64.go b/swarm/shed/field_uint64.go
new file mode 100644
index 000000000..0417583ac
--- /dev/null
+++ b/swarm/shed/field_uint64.go
@@ -0,0 +1,146 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "encoding/binary"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// Uint64Field provides a way to have a simple counter in the database.
+// It transparently encodes a uint64 type value to bytes.
+type Uint64Field struct {
+ db *DB
+ key []byte
+}
+
+// NewUint64Field returns a new Uint64Field.
+// It validates its name and type against the database schema.
+func (db *DB) NewUint64Field(name string) (f Uint64Field, err error) {
+ key, err := db.schemaFieldKey(name, "uint64")
+ if err != nil {
+ return f, err
+ }
+ return Uint64Field{
+ db: db,
+ key: key,
+ }, nil
+}
+
+// Get retrieves a uint64 value from the database.
+// If the value is not found in the database, a 0 value
+// is returned with no error.
+func (f Uint64Field) Get() (val uint64, err error) {
+ b, err := f.db.Get(f.key)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ return 0, nil
+ }
+ return 0, err
+ }
+ return binary.BigEndian.Uint64(b), nil
+}
+
+// Put encodes a uint64 value and stores it in the database.
+func (f Uint64Field) Put(val uint64) (err error) {
+ return f.db.Put(f.key, encodeUint64(val))
+}
+
+// PutInBatch stores a uint64 value in a batch
+// that can be saved later in the database.
+func (f Uint64Field) PutInBatch(batch *leveldb.Batch, val uint64) {
+ batch.Put(f.key, encodeUint64(val))
+}
+
+// Inc increments a uint64 value in the database.
+// This operation is not goroutine safe.
+func (f Uint64Field) Inc() (val uint64, err error) {
+ val, err = f.Get()
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ val = 0
+ } else {
+ return 0, err
+ }
+ }
+ val++
+ return val, f.Put(val)
+}
+
+// IncInBatch increments a uint64 value in the batch
+// by retrieving a value from the database, not the same batch.
+// This operation is not goroutine safe.
+func (f Uint64Field) IncInBatch(batch *leveldb.Batch) (val uint64, err error) {
+ val, err = f.Get()
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ val = 0
+ } else {
+ return 0, err
+ }
+ }
+ val++
+ f.PutInBatch(batch, val)
+ return val, nil
+}
+
+// Dec decrements a uint64 value in the database.
+// This operation is not goroutine safe.
+// The field is protected from underflowing below zero.
+func (f Uint64Field) Dec() (val uint64, err error) {
+ val, err = f.Get()
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ val = 0
+ } else {
+ return 0, err
+ }
+ }
+ if val != 0 {
+ val--
+ }
+ return val, f.Put(val)
+}
+
+// DecInBatch decrements a uint64 value in the batch
+// by retrieving a value from the database, not the same batch.
+// This operation is not goroutine safe.
+// The field is protected from underflowing below zero.
+func (f Uint64Field) DecInBatch(batch *leveldb.Batch) (val uint64, err error) {
+ val, err = f.Get()
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ val = 0
+ } else {
+ return 0, err
+ }
+ }
+ if val != 0 {
+ val--
+ }
+ f.PutInBatch(batch, val)
+ return val, nil
+}
+
+// encodeUint64 transforms a uint64 value into an
+// 8-byte-long slice in big-endian encoding.
+func encodeUint64(val uint64) (b []byte) {
+ b = make([]byte, 8)
+ binary.BigEndian.PutUint64(b, val)
+ return b
+}
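+
+// A minimal usage sketch (illustrative only; "counter" mirrors the field
+// name used in the tests):
+//
+//	counter, err := db.NewUint64Field("counter")
+//	if err != nil {
+//		// handle error
+//	}
+//	n, err := counter.Inc() // 1 on the first call, 2 on the second, ...
+//	n, err = counter.Dec()  // decrements, but never below 0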
diff --git a/swarm/shed/field_uint64_test.go b/swarm/shed/field_uint64_test.go
new file mode 100644
index 000000000..9462b56dd
--- /dev/null
+++ b/swarm/shed/field_uint64_test.go
@@ -0,0 +1,300 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "testing"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// TestUint64Field validates put and get operations
+// of the Uint64Field.
+func TestUint64Field(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ counter, err := db.NewUint64Field("counter")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run("get empty", func(t *testing.T) {
+ got, err := counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var want uint64
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ })
+
+ t.Run("put", func(t *testing.T) {
+ var want uint64 = 42
+ err = counter.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ var want uint64 = 84
+ err = counter.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ })
+ })
+
+ t.Run("put in batch", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ var want uint64 = 42
+ counter.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ var want uint64 = 84
+ counter.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ })
+ })
+}
+
+// TestUint64Field_Inc validates Inc operation
+// of the Uint64Field.
+func TestUint64Field_Inc(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ counter, err := db.NewUint64Field("counter")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var want uint64 = 1
+ got, err := counter.Inc()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ want = 2
+ got, err = counter.Inc()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+}
+
+// TestUint64Field_IncInBatch validates IncInBatch operation
+// of the Uint64Field.
+func TestUint64Field_IncInBatch(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ counter, err := db.NewUint64Field("counter")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ batch := new(leveldb.Batch)
+ var want uint64 = 1
+ got, err := counter.IncInBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err = counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ batch2 := new(leveldb.Batch)
+ want = 2
+ got, err = counter.IncInBatch(batch2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ err = db.WriteBatch(batch2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err = counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+}
+
+// TestUint64Field_Dec validates Dec operation
+// of the Uint64Field.
+func TestUint64Field_Dec(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ counter, err := db.NewUint64Field("counter")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test overflow protection
+ var want uint64
+ got, err := counter.Dec()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ want = 32
+ err = counter.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want = 31
+ got, err = counter.Dec()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+}
+
+// TestUint64Field_DecInBatch validates DecInBatch operation
+// of the Uint64Field.
+func TestUint64Field_DecInBatch(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ counter, err := db.NewUint64Field("counter")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ batch := new(leveldb.Batch)
+ var want uint64
+ got, err := counter.DecInBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err = counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ batch2 := new(leveldb.Batch)
+ want = 42
+ counter.PutInBatch(batch2, want)
+ err = db.WriteBatch(batch2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err = counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ batch3 := new(leveldb.Batch)
+ want = 41
+ got, err = counter.DecInBatch(batch3)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ err = db.WriteBatch(batch3)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err = counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+}
diff --git a/swarm/shed/index.go b/swarm/shed/index.go
new file mode 100644
index 000000000..df88b1b62
--- /dev/null
+++ b/swarm/shed/index.go
@@ -0,0 +1,306 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "bytes"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// Item holds fields relevant to Swarm Chunk data and metadata.
+// All information required for swarm storage and operations
+// on that storage must be defined here.
+// This structure is logically connected to swarm storage,
+// the only part of this package that is not generalized,
+// mostly for performance reasons.
+//
+// Item is a type that is used for retrieving, storing and encoding
+// chunk data and metadata. It is passed as an argument to Index encoding
+// functions, and to the get and put functions.
+// It is also returned, with additional data, from the get function
+// call, and it is the argument in the iterator function definition.
+type Item struct {
+ Address []byte
+ Data []byte
+ AccessTimestamp int64
+ StoreTimestamp int64
+ // UseMockStore is a pointer to identify
+ // an unset state of the field in Join function.
+ UseMockStore *bool
+}
+
+// Merge is a helper method that constructs a new
+// Item by filling the zero-valued fields of the receiver
+// with the corresponding values from another Item.
+func (i Item) Merge(i2 Item) (new Item) {
+ if i.Address == nil {
+ i.Address = i2.Address
+ }
+ if i.Data == nil {
+ i.Data = i2.Data
+ }
+ if i.AccessTimestamp == 0 {
+ i.AccessTimestamp = i2.AccessTimestamp
+ }
+ if i.StoreTimestamp == 0 {
+ i.StoreTimestamp = i2.StoreTimestamp
+ }
+ if i.UseMockStore == nil {
+ i.UseMockStore = i2.UseMockStore
+ }
+ return i
+}
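+
+// For example, merging Item{Address: a} with Item{Address: b, Data: d}
+// yields Item{Address: a, Data: d}: fields already set on the receiver
+// are kept, and zero-valued ones are filled from the argument.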
+
+// Index represents a set of LevelDB key value pairs that have a common
+// prefix. It holds functions for encoding and decoding keys and values
+// to provide transparent actions on saved data which include:
+// - getting a particular Item
+// - saving a particular Item
+// - iterating over sorted LevelDB keys
+// It implements the IndexIteratorInterface interface.
+type Index struct {
+ db *DB
+ prefix []byte
+ encodeKeyFunc func(fields Item) (key []byte, err error)
+ decodeKeyFunc func(key []byte) (e Item, err error)
+ encodeValueFunc func(fields Item) (value []byte, err error)
+ decodeValueFunc func(keyFields Item, value []byte) (e Item, err error)
+}
+
+// IndexFuncs structure defines functions for encoding and decoding
+// LevelDB keys and values for a specific index.
+type IndexFuncs struct {
+ EncodeKey func(fields Item) (key []byte, err error)
+ DecodeKey func(key []byte) (e Item, err error)
+ EncodeValue func(fields Item) (value []byte, err error)
+ DecodeValue func(keyFields Item, value []byte) (e Item, err error)
+}
+
+// NewIndex returns a new Index instance with defined name and
+// encoding functions. The name must be unique and is validated
+// against the database schema, which assigns it a key prefix byte.
+func (db *DB) NewIndex(name string, funcs IndexFuncs) (f Index, err error) {
+ id, err := db.schemaIndexPrefix(name)
+ if err != nil {
+ return f, err
+ }
+ prefix := []byte{id}
+ return Index{
+ db: db,
+ prefix: prefix,
+ // This function adjusts Index LevelDB key
+ // by appending the provided index id byte.
+ // This is needed to avoid collisions between keys of different
+ // indexes as all index ids are unique.
+ encodeKeyFunc: func(e Item) (key []byte, err error) {
+ key, err = funcs.EncodeKey(e)
+ if err != nil {
+ return nil, err
+ }
+ return append(append(make([]byte, 0, len(key)+1), prefix...), key...), nil
+ },
+ // This function reverses the encodeKeyFunc constructed key
+ // to transparently work with index keys without their index ids.
+ // It assumes that index keys are prefixed with only one byte.
+ decodeKeyFunc: func(key []byte) (e Item, err error) {
+ return funcs.DecodeKey(key[1:])
+ },
+ encodeValueFunc: funcs.EncodeValue,
+ decodeValueFunc: funcs.DecodeValue,
+ }, nil
+}
+
+// Get accepts key fields represented as Item to retrieve a
+// value from the index and returns the maximum available information
+// from the index represented as another Item.
+func (f Index) Get(keyFields Item) (out Item, err error) {
+ key, err := f.encodeKeyFunc(keyFields)
+ if err != nil {
+ return out, err
+ }
+ value, err := f.db.Get(key)
+ if err != nil {
+ return out, err
+ }
+ out, err = f.decodeValueFunc(keyFields, value)
+ if err != nil {
+ return out, err
+ }
+ return out.Merge(keyFields), nil
+}
+
+// Put accepts Item to encode information from it
+// and save it to the database.
+func (f Index) Put(i Item) (err error) {
+ key, err := f.encodeKeyFunc(i)
+ if err != nil {
+ return err
+ }
+ value, err := f.encodeValueFunc(i)
+ if err != nil {
+ return err
+ }
+ return f.db.Put(key, value)
+}
+
+// PutInBatch is the same as the Put method, but it
+// saves the key/value pair to the batch instead of
+// directly to the database.
+func (f Index) PutInBatch(batch *leveldb.Batch, i Item) (err error) {
+ key, err := f.encodeKeyFunc(i)
+ if err != nil {
+ return err
+ }
+ value, err := f.encodeValueFunc(i)
+ if err != nil {
+ return err
+ }
+ batch.Put(key, value)
+ return nil
+}
+
+// Delete accepts Item to remove a key/value pair
+// from the database based on its fields.
+func (f Index) Delete(keyFields Item) (err error) {
+ key, err := f.encodeKeyFunc(keyFields)
+ if err != nil {
+ return err
+ }
+ return f.db.Delete(key)
+}
+
+// DeleteInBatch is the same as Delete, except that the operation
+// is performed on the batch instead of on the database.
+func (f Index) DeleteInBatch(batch *leveldb.Batch, keyFields Item) (err error) {
+ key, err := f.encodeKeyFunc(keyFields)
+ if err != nil {
+ return err
+ }
+ batch.Delete(key)
+ return nil
+}
+
+// IndexIterFunc is a callback on every Item that is decoded
+// by iterating over Index keys.
+// By returning true for the stop variable, the iteration will
+// stop, and by returning an error, that error will be
+// propagated to the calling Iterate method on Index.
+type IndexIterFunc func(item Item) (stop bool, err error)
+
+// IterateOptions defines optional parameters for Iterate function.
+type IterateOptions struct {
+ // StartFrom is the Item to start the iteration from.
+ StartFrom *Item
+	// If SkipStartFromItem is true, the StartFrom item will not
+	// be included in the iteration.
+ SkipStartFromItem bool
+	// Iterate over items whose keys have a common prefix.
+ Prefix []byte
+}
+
+// Iterate iterates over the keys of the Index.
+// If IterateOptions is nil, the iteration is over all keys.
+func (f Index) Iterate(fn IndexIterFunc, options *IterateOptions) (err error) {
+ if options == nil {
+ options = new(IterateOptions)
+ }
+ // construct a prefix with Index prefix and optional common key prefix
+ prefix := append(f.prefix, options.Prefix...)
+ // start from the prefix
+ startKey := prefix
+ if options.StartFrom != nil {
+ // start from the provided StartFrom Item key value
+ startKey, err = f.encodeKeyFunc(*options.StartFrom)
+ if err != nil {
+ return err
+ }
+ }
+ it := f.db.NewIterator()
+ defer it.Release()
+
+ // move the cursor to the start key
+ ok := it.Seek(startKey)
+ if !ok {
+ // stop iterator if seek has failed
+ return it.Error()
+ }
+ if options.SkipStartFromItem && bytes.Equal(startKey, it.Key()) {
+ // skip the start from Item if it is the first key
+ // and it is explicitly configured to skip it
+ ok = it.Next()
+ }
+ for ; ok; ok = it.Next() {
+ key := it.Key()
+ if !bytes.HasPrefix(key, prefix) {
+ break
+ }
+		// create a copy of the key byte slice so as not to share leveldb's underlying array
+ keyItem, err := f.decodeKeyFunc(append([]byte(nil), key...))
+ if err != nil {
+ return err
+ }
+		// create a copy of the value byte slice so as not to share leveldb's underlying array
+ valueItem, err := f.decodeValueFunc(keyItem, append([]byte(nil), it.Value()...))
+ if err != nil {
+ return err
+ }
+ stop, err := fn(keyItem.Merge(valueItem))
+ if err != nil {
+ return err
+ }
+ if stop {
+ break
+ }
+ }
+ return it.Error()
+}
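+
+// A minimal iteration sketch (illustrative only; assumes an existing
+// index whose keys are chunk addresses):
+//
+//	err := index.Iterate(func(item Item) (stop bool, err error) {
+//		fmt.Printf("%x\n", item.Address)
+//		return false, nil
+//	}, &IterateOptions{
+//		Prefix: []byte("some-prefix"),
+//	})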
+
+// Count returns the number of items in the index.
+func (f Index) Count() (count int, err error) {
+ it := f.db.NewIterator()
+ defer it.Release()
+
+ for ok := it.Seek(f.prefix); ok; ok = it.Next() {
+ key := it.Key()
+ if key[0] != f.prefix[0] {
+ break
+ }
+ count++
+ }
+ return count, it.Error()
+}
+
+// CountFrom returns the number of items in the index,
+// starting from the key encoded from the provided Item.
+func (f Index) CountFrom(start Item) (count int, err error) {
+ startKey, err := f.encodeKeyFunc(start)
+ if err != nil {
+ return 0, err
+ }
+ it := f.db.NewIterator()
+ defer it.Release()
+
+ for ok := it.Seek(startKey); ok; ok = it.Next() {
+ key := it.Key()
+ if key[0] != f.prefix[0] {
+ break
+ }
+ count++
+ }
+ return count, it.Error()
+}
diff --git a/swarm/shed/index_test.go b/swarm/shed/index_test.go
new file mode 100644
index 000000000..97d7c91f4
--- /dev/null
+++ b/swarm/shed/index_test.go
@@ -0,0 +1,781 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// Index functions for the index that is used in tests in this file.
+var retrievalIndexFuncs = IndexFuncs{
+ EncodeKey: func(fields Item) (key []byte, err error) {
+ return fields.Address, nil
+ },
+ DecodeKey: func(key []byte) (e Item, err error) {
+ e.Address = key
+ return e, nil
+ },
+ EncodeValue: func(fields Item) (value []byte, err error) {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
+ value = append(b, fields.Data...)
+ return value, nil
+ },
+ DecodeValue: func(keyItem Item, value []byte) (e Item, err error) {
+ e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
+ e.Data = value[8:]
+ return e, nil
+ },
+}
+
+// TestIndex validates put, get and delete functions of the Index implementation.
+func TestIndex(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run("put", func(t *testing.T) {
+ want := Item{
+ Address: []byte("put-hash"),
+ Data: []byte("DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ err := index.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(Item{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkItem(t, got, want)
+
+ t.Run("overwrite", func(t *testing.T) {
+ want := Item{
+ Address: []byte("put-hash"),
+ Data: []byte("New DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ err = index.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(Item{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkItem(t, got, want)
+ })
+ })
+
+ t.Run("put in batch", func(t *testing.T) {
+ want := Item{
+ Address: []byte("put-in-batch-hash"),
+ Data: []byte("DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ batch := new(leveldb.Batch)
+ index.PutInBatch(batch, want)
+ err := db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(Item{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkItem(t, got, want)
+
+ t.Run("overwrite", func(t *testing.T) {
+ want := Item{
+ Address: []byte("put-in-batch-hash"),
+ Data: []byte("New DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ batch := new(leveldb.Batch)
+ index.PutInBatch(batch, want)
+			err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(Item{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkItem(t, got, want)
+ })
+ })
+
+ t.Run("put in batch twice", func(t *testing.T) {
+ // ensure that the last item of items with the same db keys
+ // is actually saved
+ batch := new(leveldb.Batch)
+ address := []byte("put-in-batch-twice-hash")
+
+ // put the first item
+ index.PutInBatch(batch, Item{
+ Address: address,
+ Data: []byte("DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ })
+
+ want := Item{
+ Address: address,
+ Data: []byte("New DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+ // then put the item that will produce the same key
+ // but different value in the database
+ index.PutInBatch(batch, want)
+		err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(Item{
+ Address: address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkItem(t, got, want)
+ })
+
+ t.Run("delete", func(t *testing.T) {
+ want := Item{
+ Address: []byte("delete-hash"),
+ Data: []byte("DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ err := index.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(Item{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkItem(t, got, want)
+
+ err = index.Delete(Item{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wantErr := leveldb.ErrNotFound
+ got, err = index.Get(Item{
+ Address: want.Address,
+ })
+ if err != wantErr {
+ t.Fatalf("got error %v, want %v", err, wantErr)
+ }
+ })
+
+ t.Run("delete in batch", func(t *testing.T) {
+ want := Item{
+ Address: []byte("delete-in-batch-hash"),
+ Data: []byte("DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ err := index.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(Item{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkItem(t, got, want)
+
+ batch := new(leveldb.Batch)
+ index.DeleteInBatch(batch, Item{
+ Address: want.Address,
+ })
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wantErr := leveldb.ErrNotFound
+ got, err = index.Get(Item{
+ Address: want.Address,
+ })
+ if err != wantErr {
+ t.Fatalf("got error %v, want %v", err, wantErr)
+ }
+ })
+}
+
+// TestIndex_Iterate validates index Iterate
+// functions for correctness.
+func TestIndex_Iterate(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ items := []Item{
+ {
+ Address: []byte("iterate-hash-01"),
+ Data: []byte("data80"),
+ },
+ {
+ Address: []byte("iterate-hash-03"),
+ Data: []byte("data22"),
+ },
+ {
+ Address: []byte("iterate-hash-05"),
+ Data: []byte("data41"),
+ },
+ {
+ Address: []byte("iterate-hash-02"),
+ Data: []byte("data84"),
+ },
+ {
+ Address: []byte("iterate-hash-06"),
+ Data: []byte("data1"),
+ },
+ }
+ batch := new(leveldb.Batch)
+ for _, i := range items {
+ index.PutInBatch(batch, i)
+ }
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ item04 := Item{
+ Address: []byte("iterate-hash-04"),
+ Data: []byte("data0"),
+ }
+ err = index.Put(item04)
+ if err != nil {
+ t.Fatal(err)
+ }
+ items = append(items, item04)
+
+ sort.SliceStable(items, func(i, j int) bool {
+ return bytes.Compare(items[i].Address, items[j].Address) < 0
+ })
+
+ t.Run("all", func(t *testing.T) {
+ var i int
+ err := index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ i++
+ return false, nil
+ }, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+
+ t.Run("start from", func(t *testing.T) {
+ startIndex := 2
+ i := startIndex
+ err := index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ i++
+ return false, nil
+ }, &IterateOptions{
+ StartFrom: &items[startIndex],
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+
+ t.Run("skip start from", func(t *testing.T) {
+ startIndex := 2
+ i := startIndex + 1
+ err := index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ i++
+ return false, nil
+ }, &IterateOptions{
+ StartFrom: &items[startIndex],
+ SkipStartFromItem: true,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+
+ t.Run("stop", func(t *testing.T) {
+ var i int
+ stopIndex := 3
+ var count int
+ err := index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ count++
+ if i == stopIndex {
+ return true, nil
+ }
+ i++
+ return false, nil
+ }, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ wantItemsCount := stopIndex + 1
+ if count != wantItemsCount {
+ t.Errorf("got %v items, expected %v", count, wantItemsCount)
+ }
+ })
+
+ t.Run("no overflow", func(t *testing.T) {
+ secondIndex, err := db.NewIndex("second-index", retrievalIndexFuncs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ secondItem := Item{
+ Address: []byte("iterate-hash-10"),
+ Data: []byte("data-second"),
+ }
+ err = secondIndex.Put(secondItem)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var i int
+ err = index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ i++
+ return false, nil
+ }, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ i = 0
+ err = secondIndex.Iterate(func(item Item) (stop bool, err error) {
+ if i > 1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ checkItem(t, item, secondItem)
+ i++
+ return false, nil
+ }, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+}
+
+// TestIndex_Iterate_withPrefix validates index Iterate
+// function for correctness.
+func TestIndex_Iterate_withPrefix(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ allItems := []Item{
+ {Address: []byte("want-hash-00"), Data: []byte("data80")},
+ {Address: []byte("skip-hash-01"), Data: []byte("data81")},
+ {Address: []byte("skip-hash-02"), Data: []byte("data82")},
+ {Address: []byte("skip-hash-03"), Data: []byte("data83")},
+ {Address: []byte("want-hash-04"), Data: []byte("data84")},
+ {Address: []byte("want-hash-05"), Data: []byte("data85")},
+ {Address: []byte("want-hash-06"), Data: []byte("data86")},
+ {Address: []byte("want-hash-07"), Data: []byte("data87")},
+ {Address: []byte("want-hash-08"), Data: []byte("data88")},
+ {Address: []byte("want-hash-09"), Data: []byte("data89")},
+ {Address: []byte("skip-hash-10"), Data: []byte("data90")},
+ }
+ batch := new(leveldb.Batch)
+ for _, i := range allItems {
+ index.PutInBatch(batch, i)
+ }
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ prefix := []byte("want")
+
+ items := make([]Item, 0)
+ for _, item := range allItems {
+ if bytes.HasPrefix(item.Address, prefix) {
+ items = append(items, item)
+ }
+ }
+ sort.SliceStable(items, func(i, j int) bool {
+ return bytes.Compare(items[i].Address, items[j].Address) < 0
+ })
+
+ t.Run("with prefix", func(t *testing.T) {
+ var i int
+ err := index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ i++
+ return false, nil
+ }, &IterateOptions{
+ Prefix: prefix,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if i != len(items) {
+ t.Errorf("got %v items, want %v", i, len(items))
+ }
+ })
+
+ t.Run("with prefix and start from", func(t *testing.T) {
+ startIndex := 2
+ var count int
+ i := startIndex
+ err := index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ i++
+ count++
+ return false, nil
+ }, &IterateOptions{
+ StartFrom: &items[startIndex],
+ Prefix: prefix,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ wantCount := len(items) - startIndex
+ if count != wantCount {
+ t.Errorf("got %v items, want %v", count, wantCount)
+ }
+ })
+
+ t.Run("with prefix and skip start from", func(t *testing.T) {
+ startIndex := 2
+ var count int
+ i := startIndex + 1
+ err := index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ i++
+ count++
+ return false, nil
+ }, &IterateOptions{
+ StartFrom: &items[startIndex],
+ SkipStartFromItem: true,
+ Prefix: prefix,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ wantCount := len(items) - startIndex - 1
+ if count != wantCount {
+ t.Errorf("got %v items, want %v", count, wantCount)
+ }
+ })
+
+ t.Run("stop", func(t *testing.T) {
+ var i int
+ stopIndex := 3
+ var count int
+ err := index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ count++
+ if i == stopIndex {
+ return true, nil
+ }
+ i++
+ return false, nil
+ }, &IterateOptions{
+ Prefix: prefix,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ wantItemsCount := stopIndex + 1
+ if count != wantItemsCount {
+ t.Errorf("got %v items, expected %v", count, wantItemsCount)
+ }
+ })
+
+ t.Run("no overflow", func(t *testing.T) {
+ secondIndex, err := db.NewIndex("second-index", retrievalIndexFuncs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ secondItem := Item{
+ Address: []byte("iterate-hash-10"),
+ Data: []byte("data-second"),
+ }
+ err = secondIndex.Put(secondItem)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var i int
+ err = index.Iterate(func(item Item) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkItem(t, item, want)
+ i++
+ return false, nil
+ }, &IterateOptions{
+ Prefix: prefix,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if i != len(items) {
+ t.Errorf("got %v items, want %v", i, len(items))
+ }
+ })
+}
+
+// TestIndex_count tests if Index.Count and Index.CountFrom
+// returns the correct number of items.
+func TestIndex_count(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ items := []Item{
+ {
+ Address: []byte("iterate-hash-01"),
+ Data: []byte("data80"),
+ },
+ {
+ Address: []byte("iterate-hash-02"),
+ Data: []byte("data84"),
+ },
+ {
+ Address: []byte("iterate-hash-03"),
+ Data: []byte("data22"),
+ },
+ {
+ Address: []byte("iterate-hash-04"),
+ Data: []byte("data41"),
+ },
+ {
+ Address: []byte("iterate-hash-05"),
+ Data: []byte("data1"),
+ },
+ }
+ batch := new(leveldb.Batch)
+ for _, i := range items {
+ index.PutInBatch(batch, i)
+ }
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run("Count", func(t *testing.T) {
+ got, err := index.Count()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := len(items)
+ if got != want {
+ t.Errorf("got %v items count, want %v", got, want)
+ }
+ })
+
+ t.Run("CountFrom", func(t *testing.T) {
+ got, err := index.CountFrom(Item{
+ Address: items[1].Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := len(items) - 1
+ if got != want {
+ t.Errorf("got %v items count, want %v", got, want)
+ }
+ })
+
+ // update the index with another item
+ t.Run("add item", func(t *testing.T) {
+ item04 := Item{
+ Address: []byte("iterate-hash-06"),
+ Data: []byte("data0"),
+ }
+ err = index.Put(item04)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ count := len(items) + 1
+
+ t.Run("Count", func(t *testing.T) {
+ got, err := index.Count()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := count
+ if got != want {
+ t.Errorf("got %v items count, want %v", got, want)
+ }
+ })
+
+ t.Run("CountFrom", func(t *testing.T) {
+ got, err := index.CountFrom(Item{
+ Address: items[1].Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := count - 1
+ if got != want {
+ t.Errorf("got %v items count, want %v", got, want)
+ }
+ })
+ })
+
+ // delete some items
+ t.Run("delete items", func(t *testing.T) {
+ deleteCount := 3
+
+ for _, item := range items[:deleteCount] {
+ err := index.Delete(item)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ count := len(items) + 1 - deleteCount
+
+ t.Run("Count", func(t *testing.T) {
+ got, err := index.Count()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := count
+ if got != want {
+ t.Errorf("got %v items count, want %v", got, want)
+ }
+ })
+
+ t.Run("CountFrom", func(t *testing.T) {
+ got, err := index.CountFrom(Item{
+ Address: items[deleteCount+1].Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := count - 1
+ if got != want {
+ t.Errorf("got %v items count, want %v", got, want)
+ }
+ })
+ })
+}
+
+// checkItem is a test helper function that compares if two Index items are the same.
+func checkItem(t *testing.T, got, want Item) {
+ t.Helper()
+
+ if !bytes.Equal(got.Address, want.Address) {
+ t.Errorf("got hash %q, expected %q", string(got.Address), string(want.Address))
+ }
+ if !bytes.Equal(got.Data, want.Data) {
+ t.Errorf("got data %q, expected %q", string(got.Data), string(want.Data))
+ }
+ if got.StoreTimestamp != want.StoreTimestamp {
+ t.Errorf("got store timestamp %v, expected %v", got.StoreTimestamp, want.StoreTimestamp)
+ }
+ if got.AccessTimestamp != want.AccessTimestamp {
+ t.Errorf("got access timestamp %v, expected %v", got.AccessTimestamp, want.AccessTimestamp)
+ }
+}
diff --git a/swarm/shed/schema.go b/swarm/shed/schema.go
new file mode 100644
index 000000000..cfb7c6d64
--- /dev/null
+++ b/swarm/shed/schema.go
@@ -0,0 +1,134 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+)
+
+var (
+ // LevelDB key value for storing the schema.
+ keySchema = []byte{0}
+	// LevelDB key prefix for all field types.
+ // LevelDB keys will be constructed by appending name values to this prefix.
+ keyPrefixFields byte = 1
+ // LevelDB key prefix from which indexing keys start.
+ // Every index has its own key prefix and this value defines the first one.
+	keyPrefixIndexStart byte = 2 // Q: or maybe a higher number like 7, to have more space for potential specific prefixes
+)
+
+// schema is used to serialize known database structure information.
+type schema struct {
+ Fields map[string]fieldSpec `json:"fields"` // keys are field names
+ Indexes map[byte]indexSpec `json:"indexes"` // keys are index prefix bytes
+}
+
+// fieldSpec holds information about a particular field.
+// It does not need a Name field as the name is contained in the
+// schema.Fields map key.
+type fieldSpec struct {
+ Type string `json:"type"`
+}
+
+// indexSpec holds information about a particular index.
+// It does not contain an index type, as indexes do not have types.
+type indexSpec struct {
+ Name string `json:"name"`
+}
+
+// schemaFieldKey retrieves the complete LevelDB key for
+// a particular field from the schema definition.
+func (db *DB) schemaFieldKey(name, fieldType string) (key []byte, err error) {
+ if name == "" {
+ return nil, errors.New("field name can not be blank")
+ }
+ if fieldType == "" {
+ return nil, errors.New("field type can not be blank")
+ }
+ s, err := db.getSchema()
+ if err != nil {
+ return nil, err
+ }
+ var found bool
+ for n, f := range s.Fields {
+ if n == name {
+ if f.Type != fieldType {
+ return nil, fmt.Errorf("field %q of type %q stored as %q in db", name, fieldType, f.Type)
+ }
+			found = true
+			break
+ }
+ }
+ if !found {
+ s.Fields[name] = fieldSpec{
+ Type: fieldType,
+ }
+ err := db.putSchema(s)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return append([]byte{keyPrefixFields}, []byte(name)...), nil
+}
+
+// schemaIndexPrefix retrieves the complete LevelDB prefix for
+// a particular index.
+func (db *DB) schemaIndexPrefix(name string) (id byte, err error) {
+ if name == "" {
+ return 0, errors.New("index name can not be blank")
+ }
+ s, err := db.getSchema()
+ if err != nil {
+ return 0, err
+ }
+ nextID := keyPrefixIndexStart
+ for i, f := range s.Indexes {
+ if i >= nextID {
+ nextID = i + 1
+ }
+ if f.Name == name {
+ return i, nil
+ }
+ }
+ id = nextID
+ s.Indexes[id] = indexSpec{
+ Name: name,
+ }
+ return id, db.putSchema(s)
+}
+
+// getSchema retrieves the complete schema from
+// the database.
+func (db *DB) getSchema() (s schema, err error) {
+ b, err := db.Get(keySchema)
+ if err != nil {
+ return s, err
+ }
+ err = json.Unmarshal(b, &s)
+ return s, err
+}
+
+// putSchema stores the complete schema to
+// the database.
+func (db *DB) putSchema(s schema) (err error) {
+ b, err := json.Marshal(s)
+ if err != nil {
+ return err
+ }
+ return db.Put(keySchema, b)
+}
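+
+// To illustrate the key space defined by the constants above: the
+// serialized schema lives under the single-byte key 0x00, a field named
+// "schema-name" is stored under the key 0x01 followed by "schema-name",
+// and indexes are assigned the prefix bytes 0x02, 0x03, and so on, in
+// order of creation.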
diff --git a/swarm/shed/schema_test.go b/swarm/shed/schema_test.go
new file mode 100644
index 000000000..a0c1838c8
--- /dev/null
+++ b/swarm/shed/schema_test.go
@@ -0,0 +1,126 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "bytes"
+ "testing"
+)
+
+// TestDB_schemaFieldKey validates correctness of schemaFieldKey.
+func TestDB_schemaFieldKey(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ t.Run("empty name or type", func(t *testing.T) {
+ _, err := db.schemaFieldKey("", "")
+ if err == nil {
+ t.Errorf("error not returned, but expected")
+ }
+ _, err = db.schemaFieldKey("", "type")
+ if err == nil {
+ t.Errorf("error not returned, but expected")
+ }
+
+ _, err = db.schemaFieldKey("test", "")
+ if err == nil {
+ t.Errorf("error not returned, but expected")
+ }
+ })
+
+ t.Run("same field", func(t *testing.T) {
+ key1, err := db.schemaFieldKey("test", "undefined")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ key2, err := db.schemaFieldKey("test", "undefined")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(key1, key2) {
+ t.Errorf("schema keys for the same field name are not the same: %q, %q", string(key1), string(key2))
+ }
+ })
+
+ t.Run("different fields", func(t *testing.T) {
+ key1, err := db.schemaFieldKey("test1", "undefined")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ key2, err := db.schemaFieldKey("test2", "undefined")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if bytes.Equal(key1, key2) {
+ t.Error("schema keys for the same field name are the same, but must not be")
+ }
+ })
+
+ t.Run("same field name different types", func(t *testing.T) {
+ _, err := db.schemaFieldKey("the-field", "one-type")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.schemaFieldKey("the-field", "another-type")
+ if err == nil {
+ t.Errorf("error not returned, but expected")
+ }
+ })
+}
+
+// TestDB_schemaIndexPrefix validates correctness of schemaIndexPrefix.
+func TestDB_schemaIndexPrefix(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ t.Run("same name", func(t *testing.T) {
+ id1, err := db.schemaIndexPrefix("test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ id2, err := db.schemaIndexPrefix("test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if id1 != id2 {
+ t.Errorf("schema keys for the same field name are not the same: %v, %v", id1, id2)
+ }
+ })
+
+ t.Run("different names", func(t *testing.T) {
+ id1, err := db.schemaIndexPrefix("test1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ id2, err := db.schemaIndexPrefix("test2")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if id1 == id2 {
+ t.Error("schema ids for the same index name are the same, but must not be")
+ }
+ })
+}
diff --git a/swarm/state.go b/swarm/state.go
deleted file mode 100644
index 1984ab031..000000000
--- a/swarm/state.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package swarm
-
-type Voidstore struct {
-}
-
-func (self Voidstore) Load(string) ([]byte, error) {
- return nil, nil
-}
-
-func (self Voidstore) Save(string, []byte) error {
- return nil
-}
diff --git a/swarm/state/dbstore.go b/swarm/state/dbstore.go
index fc5dd8f7c..147e34b23 100644
--- a/swarm/state/dbstore.go
+++ b/swarm/state/dbstore.go
@@ -28,9 +28,6 @@ import (
// ErrNotFound is returned when no results are returned from the database
var ErrNotFound = errors.New("ErrorNotFound")
-// ErrInvalidArgument is returned when the argument type does not match the expected type
-var ErrInvalidArgument = errors.New("ErrorInvalidArgument")
-
// Store defines methods required to get, set, delete values for different keys
// and close the underlying resources.
type Store interface {
diff --git a/swarm/storage/chunker.go b/swarm/storage/chunker.go
index 40292e88f..a8bfe2d1c 100644
--- a/swarm/storage/chunker.go
+++ b/swarm/storage/chunker.go
@@ -22,6 +22,7 @@ import (
"fmt"
"io"
"sync"
+ "time"
"github.com/ethereum/go-ethereum/metrics"
ch "github.com/ethereum/go-ethereum/swarm/chunk"
@@ -64,10 +65,6 @@ If all is well it is possible to implement this by simply composing readers so t
The hashing itself does use extra copies and allocation though, since it does need it.
*/
-var (
- errAppendOppNotSuported = errors.New("Append operation not supported")
-)
-
type ChunkerParams struct {
chunkSize int64
hashSize int64
@@ -98,7 +95,6 @@ type TreeChunker struct {
ctx context.Context
branches int64
- hashFunc SwarmHasher
dataSize int64
data io.Reader
// calculated
@@ -364,10 +360,6 @@ func (tc *TreeChunker) runWorker(ctx context.Context) {
}()
}
-func (tc *TreeChunker) Append() (Address, func(), error) {
- return nil, nil, errAppendOppNotSuported
-}
-
// LazyChunkReader implements LazySectionReader
type LazyChunkReader struct {
ctx context.Context
@@ -410,18 +402,16 @@ func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, e
log.Debug("lazychunkreader.size", "addr", r.addr)
if r.chunkData == nil {
+ startTime := time.Now()
chunkData, err := r.getter.Get(cctx, Reference(r.addr))
if err != nil {
+ metrics.GetOrRegisterResettingTimer("lcr.getter.get.err", nil).UpdateSince(startTime)
return 0, err
}
+ metrics.GetOrRegisterResettingTimer("lcr.getter.get", nil).UpdateSince(startTime)
r.chunkData = chunkData
- s := r.chunkData.Size()
- log.Debug("lazychunkreader.size", "key", r.addr, "size", s)
- if s < 0 {
- return 0, errors.New("corrupt size")
- }
- return int64(s), nil
}
+
s := r.chunkData.Size()
log.Debug("lazychunkreader.size", "key", r.addr, "size", s)
@@ -542,8 +532,10 @@ func (r *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeS
wg.Add(1)
go func(j int64) {
childAddress := chunkData[8+j*r.hashSize : 8+(j+1)*r.hashSize]
+ startTime := time.Now()
chunkData, err := r.getter.Get(r.ctx, Reference(childAddress))
if err != nil {
+ metrics.GetOrRegisterResettingTimer("lcr.getter.get.err", nil).UpdateSince(startTime)
log.Debug("lazychunkreader.join", "key", fmt.Sprintf("%x", childAddress), "err", err)
select {
case errC <- fmt.Errorf("chunk %v-%v not found; key: %s", off, off+treeSize, fmt.Sprintf("%x", childAddress)):
@@ -551,6 +543,7 @@ func (r *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeS
}
return
}
+ metrics.GetOrRegisterResettingTimer("lcr.getter.get", nil).UpdateSince(startTime)
if l := len(chunkData); l < 9 {
select {
case errC <- fmt.Errorf("chunk %v-%v incomplete; key: %s, data length %v", off, off+treeSize, fmt.Sprintf("%x", childAddress), l):
diff --git a/swarm/storage/chunker_test.go b/swarm/storage/chunker_test.go
index 1f847edcb..9a1259444 100644
--- a/swarm/storage/chunker_test.go
+++ b/swarm/storage/chunker_test.go
@@ -24,8 +24,8 @@ import (
"io"
"testing"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/swarm/testutil"
+ "golang.org/x/crypto/sha3"
)
/*
@@ -142,7 +142,7 @@ func TestSha3ForCorrectness(t *testing.T) {
io.LimitReader(bytes.NewReader(input[8:]), int64(size))
- rawSha3 := sha3.NewKeccak256()
+ rawSha3 := sha3.NewLegacyKeccak256()
rawSha3.Reset()
rawSha3.Write(input)
rawSha3Output := rawSha3.Sum(nil)
diff --git a/swarm/storage/common_test.go b/swarm/storage/common_test.go
index af104a5ae..bcc29d8cc 100644
--- a/swarm/storage/common_test.go
+++ b/swarm/storage/common_test.go
@@ -179,8 +179,9 @@ func testStoreCorrect(m ChunkStore, n int, chunksize int64, t *testing.T) {
return fmt.Errorf("key does not match retrieved chunk Address")
}
hasher := MakeHashFunc(DefaultHash)()
- hasher.ResetWithLength(chunk.SpanBytes())
- hasher.Write(chunk.Payload())
+ data := chunk.Data()
+ hasher.ResetWithLength(data[:8])
+ hasher.Write(data[8:])
exp := hasher.Sum(nil)
if !bytes.Equal(h, exp) {
return fmt.Errorf("key is not hash of chunk data")
diff --git a/swarm/storage/database.go b/swarm/storage/database.go
index e25fce31f..12367b905 100644
--- a/swarm/storage/database.go
+++ b/swarm/storage/database.go
@@ -64,16 +64,6 @@ func (db *LDBDatabase) Delete(key []byte) error {
return db.db.Delete(key, nil)
}
-func (db *LDBDatabase) LastKnownTD() []byte {
- data, _ := db.Get([]byte("LTD"))
-
- if len(data) == 0 {
- data = []byte{0x0}
- }
-
- return data
-}
-
func (db *LDBDatabase) NewIterator() iterator.Iterator {
metrics.GetOrRegisterCounter("ldbdatabase.newiterator", nil).Inc(1)
diff --git a/swarm/storage/encryption/encryption_test.go b/swarm/storage/encryption/encryption_test.go
index 0c0d0508c..3b4f8a4e3 100644
--- a/swarm/storage/encryption/encryption_test.go
+++ b/swarm/storage/encryption/encryption_test.go
@@ -22,13 +22,13 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/swarm/testutil"
+ "golang.org/x/crypto/sha3"
)
var expectedTransformedHex = "352187af3a843decc63ceca6cb01ea39dbcf77caf0a8f705f5c30d557044ceec9392b94a79376f1e5c10cd0c0f2a98e5353bf22b3ea4fdac6677ee553dec192e3db64e179d0474e96088fb4abd2babd67de123fb398bdf84d818f7bda2c1ab60b3ea0e0569ae54aa969658eb4844e6960d2ff44d7c087ee3aaffa1c0ee5df7e50b615f7ad90190f022934ad5300c7d1809bfe71a11cc04cece5274eb97a5f20350630522c1dbb7cebaf4f97f84e03f5cfd88f2b48880b25d12f4d5e75c150f704ef6b46c72e07db2b705ac3644569dccd22fd8f964f6ef787fda63c46759af334e6f665f70eac775a7017acea49f3c7696151cb1b9434fa4ac27fb803921ffb5ec58dafa168098d7d5b97e384be3384cf5bc235c3d887fef89fe76c0065f9b8d6ad837b442340d9e797b46ef5709ea3358bc415df11e4830de986ef0f1c418ffdcc80e9a3cda9bea0ab5676c0d4240465c43ba527e3b4ea50b4f6255b510e5d25774a75449b0bd71e56c537ade4fcf0f4d63c99ae1dbb5a844971e2c19941b8facfcfc8ee3056e7cb3c7114c5357e845b52f7103cb6e00d2308c37b12baa5b769e1cc7b00fc06f2d16e70cc27a82cb9c1a4e40cb0d43907f73df2c9db44f1b51a6b0bc6d09f77ac3be14041fae3f9df2da42df43ae110904f9ecee278030185254d7c6e918a5512024d047f77a992088cb3190a6587aa54d0c7231c1cd2e455e0d4c07f74bece68e29cd8ba0190c0bcfb26d24634af5d91a81ef5d4dd3d614836ce942ddbf7bb1399317f4c03faa675f325f18324bf9433844bfe5c4cc04130c8d5c329562b7cd66e72f7355de8f5375a72202971613c32bd7f3fcdcd51080758cd1d0a46dbe8f0374381dbc359f5864250c63dde8131cbd7c98ae2b0147d6ea4bf65d1443d511b18e6d608bbb46ac036353b4c51df306a10a6f6939c38629a5c18aaf89cac04bd3ad5156e6b92011c88341cb08551bab0a89e6a46538f5af33b86121dba17e3a434c273f385cd2e8cb90bdd32747d8425d929ccbd9b0815c73325988855549a8489dfd047daf777aaa3099e54cf997175a5d9e1edfe363e3b68c70e02f6bf4fcde6a0f3f7d0e7e98bde1a72ae8b6cd27b32990680cc4a04fc467f41c5adcaddabfc71928a3f6872c360c1d765260690dd28b269864c8e380d9c92ef6b89b0094c8f9bb22608b4156381b19b920e9583c9616ce5693b4d2a6c689f02e6a91584a8e501e107403d2689dd0045269dd9946c0e969fb656a3b39d84a798831f5f9290f163eb2f97d3ae25071324e95e2256d9c1e56eb83c26397855323edc202d56ad05894333b7f0ed3c1e4734782eb8bd5477242fd80d7a89b12866f85cfae476322f032465d6b1253993033fccd4723530630ab97a1566460af9c90c9da843c229406e65f3fa578bd6bf04dee9b6153807ddadb8ceefc5c601a8ab26023c67b1ab1e8e0f29ce94c78c308005a781853e7a2e0e51738939a657c987b5e611f32f47b5ff461c52e63e0ea390515a8e1f5393dae54ea526934b5f310b76e3fa050e40718cb4c8a20e58946d6ee1879f08c52764422fe542b3240e75eccb7aa75b1f8a651e37a3bc56b0932cdae0e985948468db1f98eb4b77b82081ea25d8a762db00f7898864984bd80e2f3f35f236bf57291dec28f550769943bcfb6f884b7687589b673642ef7fe5d7d5a87d3eca5017f83ccb9a3310520474479464cb3f433440e7e2f1e28c0aef700a45848573409e7ab66e0cfd4fe5d2147ace81bc65fd8891f6245cd69246bbf5c27830e5ab882dd1d02aba34ff6ca9af88df00fd602892f02fedbdc65dedec203faf3f8ff4a97314e0ddb58b9ab756a61a562597f4088b445fcc3b28a708ca7b1485dcd791b779fbf2b3ef1ec5c6205f595fbe45a02105034147e5a146089c200a49dae33ae051a08ea5f974a21540aaeffa7f9d9e3d35478016fb27b871036eb27217a5b834b461f535752fb5f1c8dded3ae14ce3a2ef6639e2fe41939e3509e46e347a95d50b2080f1ba42c804b290ddc912c952d1cec3f2661369f738feacc0dbf1ea27429c644e45f9e26f30c341acd34c7519b2a1663e334621691e810767e9918c2c547b2e23cce915f97d26aac8d0d2fcd3edb7986ad4e2b8a852edebad534cb6c0e9f0797d3563e5409d7e068e48356c67ce519246cd9c560e881453df97cbba562018811e6cf8c327f399d1d1253ab47a19f4a0ccc7c6d86a9603e0551da310ea595d71305c4aad96819120a92cdbaf1f77ec8df9cc7c838c0d4de1e8692dd81da38268d1d71324bcffdafbe5122e4b81828e021e936d83ae8021eac592aa52cd296b5ce392c7173d622f8e07d18f59bb1b08ba15211af6703463b09b593af3c37735296816d9f2e7a369354a5374ea3955e14ca8ac56d5bfe4aef7a21bd825d6ae85530bee5d2aaaa4914981b3dfdb2e92ec2a27c83d74b59e84ff5c056f7d8945745f2efc3dcf28f288c6cd8383
700fb2312f7001f24dd40015e436ae23e052fe9070ea9535b9c989898a9bda3d5382cf10e432fae6ccf0c825b3e6436edd3a9f8846e5606f8563931b5f29ba407c5236e5730225dda211a8504ec1817bc935e1fd9a532b648c502df302ed2063aed008fd5676131ac9e95998e9447b02bd29d77e38fcfd2959f2de929b31970335eb2a74348cc6918bc35b9bf749eab0fe304c946cd9e1ca284e6853c42646e60b6b39e0d3fb3c260abfc5c1b4ca3c3770f344118ca7c7f5c1ad1f123f8f369cd60afc3cdb3e9e81968c5c9fa7c8b014ffe0508dd4f0a2a976d5d1ca8fc9ad7a237d92cfe7b41413d934d6e142824b252699397e48e4bac4e91ebc10602720684bd0863773c548f9a2f9724245e47b129ecf65afd7252aac48c8a8d6fd3d888af592a01fb02dc71ed7538a700d3d16243e4621e0fcf9f8ed2b4e11c9fa9a95338bb1dac74a7d9bc4eb8cbf900b634a2a56469c00f5994e4f0934bdb947640e6d67e47d0b621aacd632bfd3c800bd7d93bd329f494a90e06ed51535831bd6e07ac1b4b11434ef3918fa9511813a002913f33f836454798b8d1787fea9a4c4743ba091ed192ed92f4d33e43a226bf9503e1a83a16dd340b3cbbf38af6db0d99201da8de529b4225f3d2fa2aad6621afc6c79ef3537720591edfc681ae6d00ede53ed724fc71b23b90d2e9b7158aaee98d626a4fe029107df2cb5f90147e07ebe423b1519d848af18af365c71bfd0665db46be493bbe99b79a188de0cf3594aef2299f0324075bdce9eb0b87bc29d62401ba4fd6ae48b1ba33261b5b845279becf38ee03e3dc5c45303321c5fac96fd02a3ad8c9e3b02127b320501333c9e6360440d1ad5e64a6239501502dde1a49c9abe33b66098458eee3d611bb06ffcd234a1b9aef4af5021cd61f0de6789f822ee116b5078aae8c129e8391d8987500d322b58edd1595dc570b57341f2df221b94a96ab7fbcf32a8ca9684196455694024623d7ed49f7d66e8dd453c0bae50e0d8b34377b22d0ece059e2c385dfc70b9089fcd27577c51f4d870b5738ee2b68c361a67809c105c7848b68860a829f29930857a9f9d40b14fd2384ac43bafdf43c0661103794c4bd07d1cfdd4681b6aeaefad53d4c1473359bcc5a83b09189352e5bb9a7498dd0effb89c35aad26954551f8b0621374b449bf515630bd3974dca982279733470fdd059aa9c3df403d8f22b38c4709c82d8f12b888e22990350490e16179caf406293cc9e65f116bafcbe96af132f679877061107a2f690a82a8cb46eea57a90abd23798c5937c6fe6b17be3f9bfa01ce117d2c268181b9095bf49f395fea07ca03838de0588c5e2db633e836d64488c1421e653ea52d810d096048c092d0da6e02fa6613890219f51a76148c8588c2487b171a28f17b7a299204874af0131725d793481333be5f08e86ca837a226850b0c1060891603bfecf9e55cddd22c0dbb28d495342d9cc3de8409f72e52a0115141cffe755c74f061c1a770428ccb0ae59536ee6fc074fbfc6cacb51a549d327527e20f8407477e60355863f1153f9ce95641198663c968874e7fdb29407bd771d94fdda8180cbb0358f5874738db705924b8cbe0cd5e1484aeb64542fe8f38667b7c34baf818c63b1e18440e9fba575254d063fd49f24ef26432f4eb323f3836972dca87473e3e9bb26dc3be236c3aae6bc8a6da567442309da0e8450e242fc9db836e2964f2c76a3b80a2c677979882dda7d7ebf62c93664018bcf4ec431fe6b403d49b3b36618b9c07c2d0d4569cb8d52223903debc72ec113955b206c34f1ae5300990ccfc0180f47d91afdb542b6312d12aeff7e19c645dc0b9fe6e3288e9539f6d5870f99882df187bfa6d24d179dfd1dac22212c8b5339f7171a3efc15b760fed8f68538bc5cbd845c2d1ab41f3a6c692820653eaef7930c02fbe6061d93805d73decdbb945572a7c44ed0241982a6e4d2d730898f82b3d9877cb7bca41cc6dcee67aa0c3d6db76f0b0a708ace0031113e48429de5d886c10e9200f68f32263a2fbf44a5992c2459fda7b8796ba796e3a0804fc25992ed2c9a5fe0580a6b809200ecde6caa0364b58be11564dcb9a616766dd7906db5636ee708b0204f38d309466d8d4a162965dd727e29f5a6c133e9b4ed5bafe803e479f9b2a7640c942c4a40b14ac7dc9828546052761a070f6404008f1ec3605836339c3da95a00b4fd81b2cabf88b51d2087d5b83e8c5b69bf96d8c72cbd278dad3bbb42b404b436f84ad688a22948adf60a81090f1e904291503c16e9f54b05fc76c881a5f95f0e732949e95d3f1bae2d3652a14fe0dda2d68879604657171856ef72637def2a96ac47d7b3fe86eb3198f5e0e626f06be86232305f2ae79ffcd2725e48208f9d8d63523f81915acc957563ab627cd6bc68c2a37d59fb0ed77a90aa9d085d6914a8ebada22a2c2d471b5163aeddd799d90fbb10ed6851ace2c4af504b7d572686700a59d6db46d5e42bb83f8e0c0
ffe1dfa6582cc0b34c921ff6e85e83188d24906d5c08bb90069639e713051b3102b53e6f703e8210017878add5df68e6f2b108de279c5490e9eef5590185c4a1c744d4e00d244e1245a8805bd30407b1bc488db44870ccfd75a8af104df78efa2fb7ba31f048a263efdb3b63271fff4922bece9a71187108f65744a24f4947dc556b7440cb4fa45d296bb7f724588d1f245125b21ea063500029bd49650237f53899daf1312809552c81c5827341263cc807a29fe84746170cdfa1ff3838399a5645319bcaff674bb70efccdd88b3d3bb2f2d98111413585dc5d5bd5168f43b3f55e58972a5b2b9b3733febf02f931bd436648cb617c3794841aab961fe41277ab07812e1d3bc4ff6f4350a3e615bfba08c3b9480ef57904d3a16f7e916345202e3f93d11f7a7305170cb8c4eb9ac88ace8bbd1f377bdd5855d3162d6723d4435e84ce529b8f276a8927915ac759a0d04e5ca4a9d3da6291f0333b475df527e99fe38f7a4082662e8125936640c26dd1d17cf284ce6e2b17777a05aa0574f7793a6a062cc6f7263f7ab126b4528a17becfdec49ac0f7d8705aa1704af97fb861faa8a466161b2b5c08a5bacc79fe8500b913d65c8d3c52d1fd52d2ab2c9f52196e712455619c1cd3e0f391b274487944240e2ed8858dd0823c801094310024ae3fe4dd1cf5a2b6487b42cc5937bbafb193ee331d87e378258963d49b9da90899bbb4b88e79f78e866b0213f4719f67da7bcc2fce073c01e87c62ea3cdbcd589cfc41281f2f4c757c742d6d1e"
-var hashFunc = sha3.NewKeccak256
+var hashFunc = sha3.NewLegacyKeccak256
var testKey Key
func init() {
diff --git a/swarm/storage/error.go b/swarm/storage/error.go
index 44261c084..a9d0616fa 100644
--- a/swarm/storage/error.go
+++ b/swarm/storage/error.go
@@ -23,23 +23,15 @@ import (
const (
ErrInit = iota
ErrNotFound
- ErrIO
ErrUnauthorized
ErrInvalidValue
ErrDataOverflow
ErrNothingToReturn
- ErrCorruptData
ErrInvalidSignature
ErrNotSynced
- ErrPeriodDepth
- ErrCnt
)
var (
- ErrChunkNotFound = errors.New("chunk not found")
- ErrFetching = errors.New("chunk still fetching")
- ErrChunkInvalid = errors.New("invalid chunk")
- ErrChunkForward = errors.New("cannot forward")
- ErrChunkUnavailable = errors.New("chunk unavailable")
- ErrChunkTimeout = errors.New("timeout")
+ ErrChunkNotFound = errors.New("chunk not found")
+ ErrChunkInvalid = errors.New("invalid chunk")
)
diff --git a/swarm/storage/feed/handler.go b/swarm/storage/feed/handler.go
index 9e2640282..063d3e92a 100644
--- a/swarm/storage/feed/handler.go
+++ b/swarm/storage/feed/handler.go
@@ -23,7 +23,6 @@ import (
"context"
"fmt"
"sync"
- "time"
"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
@@ -32,12 +31,10 @@ import (
)
type Handler struct {
- chunkStore *storage.NetStore
- HashSize int
- cache map[uint64]*cacheEntry
- cacheLock sync.RWMutex
- storeTimeout time.Duration
- queryMaxPeriods uint32
+ chunkStore *storage.NetStore
+ HashSize int
+ cache map[uint64]*cacheEntry
+ cacheLock sync.RWMutex
}
// HandlerParams pass parameters to the Handler constructor NewHandler
@@ -82,9 +79,8 @@ func (h *Handler) SetStore(store *storage.NetStore) {
// Validate is a chunk validation method
// If it looks like a feed update, the chunk address is checked against the userAddr of the update's signature
// It implements the storage.ChunkValidator interface
-func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
- dataLength := len(data)
- if dataLength < minimumSignedUpdateLength {
+func (h *Handler) Validate(chunk storage.Chunk) bool {
+ if len(chunk.Data()) < minimumSignedUpdateLength {
return false
}
@@ -94,8 +90,8 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
// First, deserialize the chunk
var r Request
- if err := r.fromChunk(chunkAddr, data); err != nil {
- log.Debug("Invalid feed update chunk", "addr", chunkAddr.Hex(), "err", err.Error())
+ if err := r.fromChunk(chunk); err != nil {
+ log.Debug("Invalid feed update chunk", "addr", chunk.Address(), "err", err)
return false
}
@@ -198,7 +194,7 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error)
}
var request Request
- if err := request.fromChunk(chunk.Address(), chunk.Data()); err != nil {
+ if err := request.fromChunk(chunk); err != nil {
return nil, nil
}
if request.Time <= timeLimit {
diff --git a/swarm/storage/feed/handler_test.go b/swarm/storage/feed/handler_test.go
index fb2ef3a6b..2f8a52453 100644
--- a/swarm/storage/feed/handler_test.go
+++ b/swarm/storage/feed/handler_test.go
@@ -40,7 +40,6 @@ var (
}
cleanF func()
subtopicName = "føø.bar"
- hashfunc = storage.MakeHashFunc(storage.DefaultHash)
)
func init() {
@@ -366,7 +365,7 @@ func TestValidator(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if !rh.Validate(chunk.Address(), chunk.Data()) {
+ if !rh.Validate(chunk) {
t.Fatal("Chunk validator fail on update chunk")
}
@@ -375,7 +374,7 @@ func TestValidator(t *testing.T) {
address[0] = 11
address[15] = 99
- if rh.Validate(address, chunk.Data()) {
+ if rh.Validate(storage.NewChunk(address, chunk.Data())) {
t.Fatal("Expected Validate to fail with false chunk address")
}
}
diff --git a/swarm/storage/feed/request.go b/swarm/storage/feed/request.go
index 6968d8b9a..dd91a7cf4 100644
--- a/swarm/storage/feed/request.go
+++ b/swarm/storage/feed/request.go
@@ -171,9 +171,11 @@ func (r *Request) toChunk() (storage.Chunk, error) {
}
// fromChunk populates this structure from chunk data. It does not verify the signature is valid.
-func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error {
+func (r *Request) fromChunk(chunk storage.Chunk) error {
// for update chunk layout see Request definition
+ chunkdata := chunk.Data()
+
//deserialize the feed update portion
if err := r.Update.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil {
return err
@@ -189,7 +191,7 @@ func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error
}
r.Signature = signature
- r.idAddr = updateAddr
+ r.idAddr = chunk.Address()
r.binaryData = chunkdata
return nil
diff --git a/swarm/storage/feed/request_test.go b/swarm/storage/feed/request_test.go
index f5de32b74..c30158fdd 100644
--- a/swarm/storage/feed/request_test.go
+++ b/swarm/storage/feed/request_test.go
@@ -197,7 +197,7 @@ func TestUpdateChunkSerializationErrorChecking(t *testing.T) {
// Test that parseUpdate fails if the chunk is too small
var r Request
- if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1+signatureLength)); err == nil {
+ if err := r.fromChunk(storage.NewChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1+signatureLength))); err == nil {
t.Fatalf("Expected request.fromChunk to fail when chunkData contains less than %d bytes", minimumUpdateDataLength)
}
@@ -226,7 +226,7 @@ func TestUpdateChunkSerializationErrorChecking(t *testing.T) {
compareByteSliceToExpectedHex(t, "chunk", chunk.Data(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019416c206269656e206861636572206a616dc3a173206c652066616c7461207072656d696f5a0ffe0bc27f207cd5b00944c8b9cee93e08b89b5ada777f123ac535189333f174a6a4ca2f43a92c4a477a49d774813c36ce8288552c58e6205b0ac35d0507eb00")
var recovered Request
- recovered.fromChunk(chunk.Address(), chunk.Data())
+ recovered.fromChunk(chunk)
if !reflect.DeepEqual(recovered, r) {
t.Fatal("Expected recovered feed update request to equal the original one")
}
@@ -282,7 +282,7 @@ func TestReverse(t *testing.T) {
// check that we can recover the owner account from the update chunk's signature
var checkUpdate Request
- if err := checkUpdate.fromChunk(chunk.Address(), chunk.Data()); err != nil {
+ if err := checkUpdate.fromChunk(chunk); err != nil {
t.Fatal(err)
}
checkdigest, err := checkUpdate.GetDigest()
diff --git a/swarm/storage/feed/timestampprovider.go b/swarm/storage/feed/timestampprovider.go
index 072dc3a48..fb60cea9c 100644
--- a/swarm/storage/feed/timestampprovider.go
+++ b/swarm/storage/feed/timestampprovider.go
@@ -17,7 +17,6 @@
package feed
import (
- "encoding/binary"
"encoding/json"
"time"
)
@@ -30,32 +29,11 @@ type Timestamp struct {
Time uint64 `json:"time"` // Unix epoch timestamp, in seconds
}
-// 8 bytes uint64 Time
-const timestampLength = 8
-
// timestampProvider interface describes a source of timestamp information
type timestampProvider interface {
Now() Timestamp // returns the current timestamp information
}
-// binaryGet populates the timestamp structure from the given byte slice
-func (t *Timestamp) binaryGet(data []byte) error {
- if len(data) != timestampLength {
- return NewError(ErrCorruptData, "timestamp data has the wrong size")
- }
- t.Time = binary.LittleEndian.Uint64(data[:8])
- return nil
-}
-
-// binaryPut Serializes a Timestamp to a byte slice
-func (t *Timestamp) binaryPut(data []byte) error {
- if len(data) != timestampLength {
- return NewError(ErrCorruptData, "timestamp data has the wrong size")
- }
- binary.LittleEndian.PutUint64(data, t.Time)
- return nil
-}
-
// UnmarshalJSON implements the json.Unmarshaller interface
func (t *Timestamp) UnmarshalJSON(data []byte) error {
return json.Unmarshal(data, &t.Time)
diff --git a/swarm/storage/hasherstore.go b/swarm/storage/hasherstore.go
index ff18e64c7..23b52ee0d 100644
--- a/swarm/storage/hasherstore.go
+++ b/swarm/storage/hasherstore.go
@@ -21,9 +21,9 @@ import (
"fmt"
"sync/atomic"
- "github.com/ethereum/go-ethereum/crypto/sha3"
ch "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/storage/encryption"
+ "golang.org/x/crypto/sha3"
)
type hasherStore struct {
@@ -232,11 +232,11 @@ func (h *hasherStore) decrypt(chunkData ChunkData, key encryption.Key) ([]byte,
}
func (h *hasherStore) newSpanEncryption(key encryption.Key) encryption.Encryption {
- return encryption.New(key, 0, uint32(ch.DefaultSize/h.refSize), sha3.NewKeccak256)
+ return encryption.New(key, 0, uint32(ch.DefaultSize/h.refSize), sha3.NewLegacyKeccak256)
}
func (h *hasherStore) newDataEncryption(key encryption.Key) encryption.Encryption {
- return encryption.New(key, int(ch.DefaultSize), 0, sha3.NewKeccak256)
+ return encryption.New(key, int(ch.DefaultSize), 0, sha3.NewLegacyKeccak256)
}
func (h *hasherStore) storeChunk(ctx context.Context, chunk *chunk) {
diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go
index bd4f6b916..635d33429 100644
--- a/swarm/storage/ldbstore.go
+++ b/swarm/storage/ldbstore.go
@@ -248,10 +248,6 @@ func U64ToBytes(val uint64) []byte {
return data
}
-func (s *LDBStore) updateIndexAccess(index *dpaDBIndex) {
- index.Access = s.accessCnt
-}
-
func getIndexKey(hash Address) []byte {
hashSize := len(hash)
key := make([]byte, hashSize+1)
@@ -777,18 +773,6 @@ func (s *LDBStore) BinIndex(po uint8) uint64 {
return s.bucketCnt[po]
}
-func (s *LDBStore) Size() uint64 {
- s.lock.RLock()
- defer s.lock.RUnlock()
- return s.entryCnt
-}
-
-func (s *LDBStore) CurrentStorageIndex() uint64 {
- s.lock.RLock()
- defer s.lock.RUnlock()
- return s.dataIdx
-}
-
// Put adds a chunk to the database, adding indices and incrementing global counters.
// If it already exists, it merely increments the access count of the existing entry.
// Is thread safe
@@ -810,11 +794,11 @@ func (s *LDBStore) Put(ctx context.Context, chunk Chunk) error {
batch := s.batch
log.Trace("ldbstore.put: s.db.Get", "key", chunk.Address(), "ikey", fmt.Sprintf("%x", ikey))
- idata, err := s.db.Get(ikey)
+ _, err := s.db.Get(ikey)
if err != nil {
s.doPut(chunk, &index, po)
}
- idata = encodeIndex(&index)
+ idata := encodeIndex(&index)
s.batch.Put(ikey, idata)
// add the access-chunkindex index for garbage collection
diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go
index e8b9ae39b..1fe466f93 100644
--- a/swarm/storage/ldbstore_test.go
+++ b/swarm/storage/ldbstore_test.go
@@ -79,14 +79,6 @@ func testPoFunc(k Address) (ret uint8) {
return uint8(Proximity(basekey, k[:]))
}
-func (db *testDbStore) close() {
- db.Close()
- err := os.RemoveAll(db.dir)
- if err != nil {
- panic(err)
- }
-}
-
func testDbStoreRandom(n int, chunksize int64, mock bool, t *testing.T) {
db, cleanup, err := newTestDbStore(mock, true)
defer cleanup()
@@ -453,7 +445,7 @@ func TestLDBStoreAddRemove(t *testing.T) {
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
for i := 0; i < n; i++ {
- ret, err := ldb.Get(nil, chunks[i].Address())
+ ret, err := ldb.Get(context.TODO(), chunks[i].Address())
if i%2 == 0 {
// expect even chunks to be missing
diff --git a/swarm/storage/localstore.go b/swarm/storage/localstore.go
index 111821ff6..956560902 100644
--- a/swarm/storage/localstore.go
+++ b/swarm/storage/localstore.go
@@ -92,7 +92,7 @@ func (ls *LocalStore) isValid(chunk Chunk) bool {
// ls.Validators contains a list of one validator per chunk type.
// if one validator succeeds, then the chunk is valid
for _, v := range ls.Validators {
- if valid = v.Validate(chunk.Address(), chunk.Data()); valid {
+ if valid = v.Validate(chunk); valid {
break
}
}
diff --git a/swarm/storage/localstore_test.go b/swarm/storage/localstore_test.go
index 7a07726d1..7a4162a47 100644
--- a/swarm/storage/localstore_test.go
+++ b/swarm/storage/localstore_test.go
@@ -118,7 +118,7 @@ func TestValidator(t *testing.T) {
type boolTestValidator bool
-func (self boolTestValidator) Validate(addr Address, data []byte) bool {
+func (self boolTestValidator) Validate(chunk Chunk) bool {
return bool(self)
}
diff --git a/swarm/storage/memstore.go b/swarm/storage/memstore.go
index 36b1e00d9..86e5813d1 100644
--- a/swarm/storage/memstore.go
+++ b/swarm/storage/memstore.go
@@ -57,7 +57,7 @@ func (m *MemStore) Get(_ context.Context, addr Address) (Chunk, error) {
if !ok {
return nil, ErrChunkNotFound
}
- return c.(*chunk), nil
+ return c.(Chunk), nil
}
func (m *MemStore) Put(_ context.Context, c Chunk) error {
diff --git a/swarm/storage/mock/db/db.go b/swarm/storage/mock/db/db.go
index 43bfa24f0..73ae199e8 100644
--- a/swarm/storage/mock/db/db.go
+++ b/swarm/storage/mock/db/db.go
@@ -86,6 +86,13 @@ func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error {
return s.db.Write(batch, nil)
}
+// Delete removes the chunk reference for the node with address addr.
+func (s *GlobalStore) Delete(addr common.Address, key []byte) error {
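+ // a single-operation batch keeps Delete on the same write path as Put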
+ batch := new(leveldb.Batch)
+ batch.Delete(nodeDBKey(addr, key))
+ return s.db.Write(batch, nil)
+}
+
// HasKey returns whether a node with addr contains the key.
func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool {
has, err := s.db.Has(nodeDBKey(addr, key), nil)
diff --git a/swarm/storage/mock/mem/mem.go b/swarm/storage/mock/mem/mem.go
index 8878309d0..3a0a2beb8 100644
--- a/swarm/storage/mock/mem/mem.go
+++ b/swarm/storage/mock/mem/mem.go
@@ -83,6 +83,22 @@ func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error {
return nil
}
+// Delete removes the chunk data for the node with address addr.
+func (s *GlobalStore) Delete(addr common.Address, key []byte) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
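+ // count the nodes still referencing the key after removing addr;
+ // the shared chunk data is dropped only when no node holds it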
+ var count int
+ if _, ok := s.nodes[string(key)]; ok {
+ delete(s.nodes[string(key)], addr)
+ count = len(s.nodes[string(key)])
+ }
+ if count == 0 {
+ delete(s.data, string(key))
+ }
+ return nil
+}
+
// HasKey returns whether a node with addr contains the key.
func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool {
s.mu.Lock()
diff --git a/swarm/storage/mock/mock.go b/swarm/storage/mock/mock.go
index 81340f927..626ba3fe1 100644
--- a/swarm/storage/mock/mock.go
+++ b/swarm/storage/mock/mock.go
@@ -70,6 +70,12 @@ func (n *NodeStore) Put(key []byte, data []byte) error {
return n.store.Put(n.addr, key, data)
}
+// Delete removes chunk data for a key for a node that has the address
+// provided on NodeStore initialization.
+func (n *NodeStore) Delete(key []byte) error {
+ return n.store.Delete(n.addr, key)
+}
+
// GlobalStorer defines methods for mock db store
// that stores chunk data for all swarm nodes.
// It is used in tests to construct mock NodeStores
@@ -77,6 +83,7 @@ func (n *NodeStore) Put(key []byte, data []byte) error {
type GlobalStorer interface {
Get(addr common.Address, key []byte) (data []byte, err error)
Put(addr common.Address, key []byte, data []byte) error
+ Delete(addr common.Address, key []byte) error
HasKey(addr common.Address, key []byte) bool
// NewNodeStore creates an instance of NodeStore
// to be used by a single swarm node with
@@ -96,13 +103,6 @@ type Exporter interface {
Export(w io.Writer) (n int, err error)
}
-// ImportExporter is an interface for importing and exporting
-// mock store data to and from a tar archive.
-type ImportExporter interface {
- Importer
- Exporter
-}
-
// ExportedChunk is the structure that is saved in tar archive for
// each chunk as JSON-encoded bytes.
type ExportedChunk struct {
diff --git a/swarm/storage/mock/rpc/rpc.go b/swarm/storage/mock/rpc/rpc.go
index 6e735f698..8cd6c83a7 100644
--- a/swarm/storage/mock/rpc/rpc.go
+++ b/swarm/storage/mock/rpc/rpc.go
@@ -73,6 +73,12 @@ func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error {
return err
}
+// Delete calls the Delete method on the RPC server.
+func (s *GlobalStore) Delete(addr common.Address, key []byte) error {
+ err := s.client.Call(nil, "mockStore_delete", addr, key)
+ return err
+}
+
// HasKey calls a HasKey method to RPC server.
func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool {
var has bool
diff --git a/swarm/storage/mock/test/test.go b/swarm/storage/mock/test/test.go
index 02da3af55..69828b144 100644
--- a/swarm/storage/mock/test/test.go
+++ b/swarm/storage/mock/test/test.go
@@ -72,6 +72,31 @@ func MockStore(t *testing.T, globalStore mock.GlobalStorer, n int) {
}
}
}
+ t.Run("delete", func(t *testing.T) {
+ chunkAddr := storage.Address([]byte("1234567890abcd"))
+ for _, addr := range addrs {
+ err := globalStore.Put(addr, chunkAddr, []byte("data"))
+ if err != nil {
+ t.Fatalf("put data to store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ }
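+ // delete the chunk for the first node only; every other node must still
+ // be able to retrieve it from the global store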
+ firstNodeAddr := addrs[0]
+ if err := globalStore.Delete(firstNodeAddr, chunkAddr); err != nil {
+ t.Fatalf("delete from store %s key %s: %v", firstNodeAddr.Hex(), chunkAddr.Hex(), err)
+ }
+ for i, addr := range addrs {
+ _, err := globalStore.Get(addr, chunkAddr)
+ if i == 0 {
+ if err != mock.ErrNotFound {
+ t.Errorf("get data from store %s key %s: expected mock.ErrNotFound error, got %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("get data from store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ }
+ }
+ })
})
t.Run("NodeStore", func(t *testing.T) {
@@ -114,6 +139,34 @@ func MockStore(t *testing.T, globalStore mock.GlobalStorer, n int) {
}
}
}
+ t.Run("delete", func(t *testing.T) {
+ chunkAddr := storage.Address([]byte("1234567890abcd"))
+ var chosenStore *mock.NodeStore
+ for addr, store := range nodes {
+ if chosenStore == nil {
+ chosenStore = store
+ }
+ err := store.Put(chunkAddr, []byte("data"))
+ if err != nil {
+ t.Fatalf("put data to store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ }
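+ // delete through a single node's store; the chunk must be gone for that
+ // node but remain available to every other node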
+ if err := chosenStore.Delete(chunkAddr); err != nil {
+ t.Fatalf("delete key %s: %v", chunkAddr.Hex(), err)
+ }
+ for addr, store := range nodes {
+ _, err := store.Get(chunkAddr)
+ if store == chosenStore {
+ if err != mock.ErrNotFound {
+ t.Errorf("get data from store %s key %s: expected mock.ErrNotFound error, got %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("get data from store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ }
+ }
+ })
})
}
@@ -143,17 +196,22 @@ func ImportExport(t *testing.T, outStore, inStore mock.GlobalStorer, n int) {
r, w := io.Pipe()
defer r.Close()
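+ // Export runs in a separate goroutine where t.Fatal must not be called,
+ // so its error is sent back over this channel and checked from the main
+ // test goroutine after Import returns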
+ exportErrChan := make(chan error)
go func() {
defer w.Close()
- if _, err := exporter.Export(w); err != nil {
- t.Fatalf("export: %v", err)
- }
+
+ _, err := exporter.Export(w)
+ exportErrChan <- err
}()
if _, err := importer.Import(r); err != nil {
t.Fatalf("import: %v", err)
}
+ if err := <-exportErrChan; err != nil {
+ t.Fatalf("export: %v", err)
+ }
+
for i, addr := range addrs {
chunkAddr := storage.Address(append(addr[:], []byte(strconv.FormatInt(int64(i)+1, 16))...))
data := []byte(strconv.FormatInt(int64(i)+1, 16))
diff --git a/swarm/storage/netstore_test.go b/swarm/storage/netstore_test.go
index 8a09fa5ae..2ed3e0752 100644
--- a/swarm/storage/netstore_test.go
+++ b/swarm/storage/netstore_test.go
@@ -20,6 +20,8 @@ import (
"bytes"
"context"
"crypto/rand"
+ "errors"
+ "fmt"
"io/ioutil"
"sync"
"testing"
@@ -114,19 +116,24 @@ func TestNetStoreGetAndPut(t *testing.T) {
defer cancel()
c := make(chan struct{}) // this channel ensures that the goroutine with the Put does not run earlier than the Get
+ putErrC := make(chan error)
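+ // failures inside the Put goroutine below are reported over putErrC,
+ // since t.Fatal must not be called outside the main test goroutine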
go func() {
<-c // wait for the Get to be called
time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
// check if netStore created a fetcher in the Get call for the unavailable chunk
if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
- t.Fatal("Expected netStore to use a fetcher for the Get call")
+ putErrC <- errors.New("Expected netStore to use a fetcher for the Get call")
+ return
}
err := netStore.Put(ctx, chunk)
if err != nil {
- t.Fatalf("Expected no err got %v", err)
+ putErrC <- fmt.Errorf("Expected no err got %v", err)
+ return
}
+
+ putErrC <- nil
}()
close(c)
@@ -134,6 +141,10 @@ func TestNetStoreGetAndPut(t *testing.T) {
if err != nil {
t.Fatalf("Expected no err got %v", err)
}
+
+ if err := <-putErrC; err != nil {
+ t.Fatal(err)
+ }
// the retrieved chunk should be the same as what we Put
if !bytes.Equal(recChunk.Address(), chunk.Address()) || !bytes.Equal(recChunk.Data(), chunk.Data()) {
t.Fatalf("Different chunk received than what was put")
@@ -200,14 +211,18 @@ func TestNetStoreGetTimeout(t *testing.T) {
defer cancel()
c := make(chan struct{}) // this channel ensures that the goroutine does not run earlier than the Get
+ fetcherErrC := make(chan error)
go func() {
<-c // wait for the Get to be called
time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
// check if netStore created a fetcher in the Get call for the unavailable chunk
if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
- t.Fatal("Expected netStore to use a fetcher for the Get call")
+ fetcherErrC <- errors.New("Expected netStore to use a fetcher for the Get call")
+ return
}
+
+ fetcherErrC <- nil
}()
close(c)
@@ -220,6 +235,10 @@ func TestNetStoreGetTimeout(t *testing.T) {
t.Fatalf("Expected context.DeadLineExceeded err got %v", err)
}
+ if err := <-fetcherErrC; err != nil {
+ t.Fatal(err)
+ }
+
// A fetcher was created, check if it has been removed after timeout
if netStore.fetchers.Len() != 0 {
t.Fatal("Expected netStore to remove the fetcher after timeout")
@@ -243,20 +262,29 @@ func TestNetStoreGetCancel(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
c := make(chan struct{}) // this channel ensures that the goroutine with the cancel does not run earlier than the Get
+ fetcherErrC := make(chan error, 1)
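+ // buffered: the goroutine sends its result and then calls cancel(),
+ // while the main goroutine only receives after Get returns; an
+ // unbuffered send here would deadlock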
go func() {
<-c // wait for the Get to be called
time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
// check if netStore created a fetcher in the Get call for the unavailable chunk
if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
- t.Fatal("Expected netStore to use a fetcher for the Get call")
+ fetcherErrC <- errors.New("Expected netStore to use a fetcher for the Get call")
+ return
}
+
+ fetcherErrC <- nil
cancel()
}()
close(c)
+
// We call Get with an unavailable chunk, so it will create a fetcher and wait for delivery
_, err := netStore.Get(ctx, chunk.Address())
+ if err := <-fetcherErrC; err != nil {
+ t.Fatal(err)
+ }
+
// After the context is cancelled above Get should return with an error
if err != context.Canceled {
t.Fatalf("Expected context.Canceled err got %v", err)
@@ -286,46 +314,55 @@ func TestNetStoreMultipleGetAndPut(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
+ putErrC := make(chan error)
go func() {
// sleep to make sure Put is called after all the Get
time.Sleep(500 * time.Millisecond)
// check if netStore created exactly one fetcher for all Get calls
if netStore.fetchers.Len() != 1 {
- t.Fatal("Expected netStore to use one fetcher for all Get calls")
+ putErrC <- errors.New("Expected netStore to use one fetcher for all Get calls")
+ return
}
err := netStore.Put(ctx, chunk)
if err != nil {
- t.Fatalf("Expected no err got %v", err)
+ putErrC <- fmt.Errorf("Expected no err got %v", err)
+ return
}
+ putErrC <- nil
}()
+ count := 4
// call Get 4 times for the same unavailable chunk. The calls will be blocked until the Put above.
- getWG := sync.WaitGroup{}
- for i := 0; i < 4; i++ {
- getWG.Add(1)
+ errC := make(chan error)
+ for i := 0; i < count; i++ {
go func() {
- defer getWG.Done()
recChunk, err := netStore.Get(ctx, chunk.Address())
if err != nil {
- t.Fatalf("Expected no err got %v", err)
+ errC <- fmt.Errorf("Expected no err got %v", err)
}
if !bytes.Equal(recChunk.Address(), chunk.Address()) || !bytes.Equal(recChunk.Data(), chunk.Data()) {
- t.Fatalf("Different chunk received than what was put")
+ errC <- errors.New("Different chunk received than what was put")
}
+ errC <- nil
}()
}
- finishedC := make(chan struct{})
- go func() {
- getWG.Wait()
- close(finishedC)
- }()
+ if err := <-putErrC; err != nil {
+ t.Fatal(err)
+ }
+
+ timeout := time.After(1 * time.Second)
// The Get calls should return after Put, so no timeout expected
- select {
- case <-finishedC:
- case <-time.After(1 * time.Second):
- t.Fatalf("Timeout waiting for Get calls to return")
+ for i := 0; i < count; i++ {
+ select {
+ case err := <-errC:
+ if err != nil {
+ t.Fatal(err)
+ }
+ case <-timeout:
+ t.Fatalf("Timeout waiting for Get calls to return")
+ }
}
// A fetcher was created, check if it has been removed after cancel
@@ -448,7 +485,7 @@ func TestNetStoreGetCallsOffer(t *testing.T) {
defer cancel()
// We call get for a not available chunk, it will timeout because the chunk is not delivered
- chunk, err := netStore.Get(ctx, chunk.Address())
+ _, err := netStore.Get(ctx, chunk.Address())
if err != context.DeadlineExceeded {
t.Fatalf("Expect error %v got %v", context.DeadlineExceeded, err)
@@ -542,16 +579,12 @@ func TestNetStoreFetchFuncCalledMultipleTimes(t *testing.T) {
t.Fatalf("Expected netStore to have one fetcher for the requested chunk")
}
- // Call wait three times parallelly
- wg := sync.WaitGroup{}
- for i := 0; i < 3; i++ {
- wg.Add(1)
+ // Call wait three times in parallel
+ count := 3
+ errC := make(chan error)
+ for i := 0; i < count; i++ {
go func() {
- err := wait(ctx)
- if err != nil {
- t.Fatalf("Expected no err got %v", err)
- }
- wg.Done()
+ errC <- wait(ctx)
}()
}
@@ -570,7 +603,12 @@ func TestNetStoreFetchFuncCalledMultipleTimes(t *testing.T) {
}
// wait until all wait calls return (because the chunk is delivered)
- wg.Wait()
+ for i := 0; i < count; i++ {
+ err := <-errC
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
// There should be no more fetchers for the delivered chunk
if netStore.fetchers.Len() != 0 {
@@ -606,23 +644,29 @@ func TestNetStoreFetcherLifeCycleWithTimeout(t *testing.T) {
t.Fatalf("Expected netStore to have one fetcher for the requested chunk")
}
- // Call wait three times parallelly
- wg := sync.WaitGroup{}
- for i := 0; i < 3; i++ {
- wg.Add(1)
+ // Call wait three times in parallel
+ count := 3
+ errC := make(chan error)
+ for i := 0; i < count; i++ {
go func() {
- defer wg.Done()
rctx, rcancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer rcancel()
err := wait(rctx)
if err != context.DeadlineExceeded {
- t.Fatalf("Expected err %v got %v", context.DeadlineExceeded, err)
+ errC <- fmt.Errorf("Expected err %v got %v", context.DeadlineExceeded, err)
+ return
}
+ errC <- nil
}()
}
// wait until all wait calls timeout
- wg.Wait()
+ for i := 0; i < count; i++ {
+ err := <-errC
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
// There should be no more fetchers after timeout
if netStore.fetchers.Len() != 0 {
diff --git a/swarm/storage/pyramid.go b/swarm/storage/pyramid.go
index f74eef06b..e5bd7a76a 100644
--- a/swarm/storage/pyramid.go
+++ b/swarm/storage/pyramid.go
@@ -71,11 +71,6 @@ const (
splitTimeout = time.Minute * 5
)
-const (
- DataChunk = 0
- TreeChunk = 1
-)
-
type PyramidSplitterParams struct {
SplitterParams
getter Getter
diff --git a/swarm/storage/types.go b/swarm/storage/types.go
index 42557766e..d79235225 100644
--- a/swarm/storage/types.go
+++ b/swarm/storage/types.go
@@ -23,62 +23,21 @@ import (
"crypto/rand"
"encoding/binary"
"fmt"
- "hash"
"io"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/swarm/bmt"
ch "github.com/ethereum/go-ethereum/swarm/chunk"
+ "golang.org/x/crypto/sha3"
)
const MaxPO = 16
const AddressLength = 32
-type Hasher func() hash.Hash
type SwarmHasher func() SwarmHash
-// Peer is the recorded as Source on the chunk
-// should probably not be here? but network should wrap chunk object
-type Peer interface{}
-
type Address []byte
-func (a Address) Size() uint {
- return uint(len(a))
-}
-
-func (a Address) isEqual(y Address) bool {
- return bytes.Equal(a, y)
-}
-
-func (a Address) bits(i, j uint) uint {
- ii := i >> 3
- jj := i & 7
- if ii >= a.Size() {
- return 0
- }
-
- if jj+j <= 8 {
- return uint((a[ii] >> jj) & ((1 << j) - 1))
- }
-
- res := uint(a[ii] >> jj)
- jj = 8 - jj
- j -= jj
- for j != 0 {
- ii++
- if j < 8 {
- res += uint(a[ii]&((1<>uint8(7-j))&0x01 != 0 {
return i*8 + j
@@ -112,10 +68,6 @@ func Proximity(one, other []byte) (ret int) {
return MaxPO
}
-func IsZeroAddr(addr Address) bool {
- return len(addr) == 0 || bytes.Equal(addr, ZeroAddr)
-}
-
var ZeroAddr = Address(common.Hash{}.Bytes())
func MakeHashFunc(hash string) SwarmHasher {
@@ -123,10 +75,10 @@ func MakeHashFunc(hash string) SwarmHasher {
case "SHA256":
return func() SwarmHash { return &HashWithLength{crypto.SHA256.New()} }
case "SHA3":
- return func() SwarmHash { return &HashWithLength{sha3.NewKeccak256()} }
+ return func() SwarmHash { return &HashWithLength{sha3.NewLegacyKeccak256()} }
case "BMT":
return func() SwarmHash {
- hasher := sha3.NewKeccak256
+ hasher := sha3.NewLegacyKeccak256
hasherSize := hasher().Size()
segmentCount := ch.DefaultSize / hasherSize
pool := bmt.NewTreePool(hasher, segmentCount, bmt.PoolSize)
@@ -184,9 +136,6 @@ func (c AddressCollection) Swap(i, j int) {
// Chunk interface implemented by context.Contexts and data chunks
type Chunk interface {
Address() Address
- Payload() []byte
- SpanBytes() []byte
- Span() int64
Data() []byte
}
@@ -208,25 +157,10 @@ func (c *chunk) Address() Address {
return c.addr
}
-func (c *chunk) SpanBytes() []byte {
- return c.sdata[:8]
-}
-
-func (c *chunk) Span() int64 {
- if c.span == -1 {
- c.span = int64(binary.LittleEndian.Uint64(c.sdata[:8]))
- }
- return c.span
-}
-
func (c *chunk) Data() []byte {
return c.sdata
}
-func (c *chunk) Payload() []byte {
- return c.sdata[8:]
-}
-
// String() for pretty printing
func (self *chunk) String() string {
return fmt.Sprintf("Address: %v TreeSize: %v Chunksize: %v", self.addr.Log(), self.span, len(self.sdata))
@@ -322,12 +256,8 @@ func (c ChunkData) Size() uint64 {
return binary.LittleEndian.Uint64(c[:8])
}
-func (c ChunkData) Data() []byte {
- return c[8:]
-}
-
type ChunkValidator interface {
- Validate(addr Address, data []byte) bool
+ Validate(chunk Chunk) bool
}
// Provides method for validation of content address in chunks
@@ -344,7 +274,8 @@ func NewContentAddressValidator(hasher SwarmHasher) *ContentAddressValidator {
}
// Validate that the given key is a valid content address for the given data
-func (v *ContentAddressValidator) Validate(addr Address, data []byte) bool {
+func (v *ContentAddressValidator) Validate(chunk Chunk) bool {
+ data := chunk.Data()
if l := len(data); l < 9 || l > ch.DefaultSize+8 {
// log.Error("invalid chunk size", "chunk", addr.Hex(), "size", l)
return false
@@ -355,7 +286,7 @@ func (v *ContentAddressValidator) Validate(addr Address, data []byte) bool {
hasher.Write(data[8:])
hash := hasher.Sum(nil)
- return bytes.Equal(hash, addr[:])
+ return bytes.Equal(hash, chunk.Address())
}
type ChunkStore interface {
diff --git a/swarm/storage/types_test.go b/swarm/storage/types_test.go
new file mode 100644
index 000000000..32907bbf4
--- /dev/null
+++ b/swarm/storage/types_test.go
@@ -0,0 +1,186 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package storage
+
+import (
+ "strconv"
+ "testing"
+)
+
+// TestProximity validates the Proximity function with explicit
+// values in a table-driven test. It is highly dependent on
+// the MaxPO constant and validates cases up to MaxPO=32.
+func TestProximity(t *testing.T) {
+ // integer from base2 encoded string
+ bx := func(s string) uint8 {
+ i, err := strconv.ParseUint(s, 2, 8)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return uint8(i)
+ }
+ // cap expected bins at MaxPO
+ limitPO := func(po uint8) uint8 {
+ if po > MaxPO {
+ return MaxPO
+ }
+ return po
+ }
+ base := []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00000000")}
+ for _, tc := range []struct {
+ addr []byte
+ po uint8
+ }{
+ {
+ addr: base,
+ po: MaxPO,
+ },
+ {
+ addr: []byte{bx("10000000"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(0),
+ },
+ {
+ addr: []byte{bx("01000000"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(1),
+ },
+ {
+ addr: []byte{bx("00100000"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(2),
+ },
+ {
+ addr: []byte{bx("00010000"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(3),
+ },
+ {
+ addr: []byte{bx("00001000"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(4),
+ },
+ {
+ addr: []byte{bx("00000100"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(5),
+ },
+ {
+ addr: []byte{bx("00000010"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(6),
+ },
+ {
+ addr: []byte{bx("00000001"), bx("00000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(7),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("10000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(8),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("01000000"), bx("00000000"), bx("00000000")},
+ po: limitPO(9),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00100000"), bx("00000000"), bx("00000000")},
+ po: limitPO(10),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00010000"), bx("00000000"), bx("00000000")},
+ po: limitPO(11),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00001000"), bx("00000000"), bx("00000000")},
+ po: limitPO(12),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000100"), bx("00000000"), bx("00000000")},
+ po: limitPO(13),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000010"), bx("00000000"), bx("00000000")},
+ po: limitPO(14),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000001"), bx("00000000"), bx("00000000")},
+ po: limitPO(15),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("10000000"), bx("00000000")},
+ po: limitPO(16),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("01000000"), bx("00000000")},
+ po: limitPO(17),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00100000"), bx("00000000")},
+ po: limitPO(18),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00010000"), bx("00000000")},
+ po: limitPO(19),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00001000"), bx("00000000")},
+ po: limitPO(20),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000100"), bx("00000000")},
+ po: limitPO(21),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000010"), bx("00000000")},
+ po: limitPO(22),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000001"), bx("00000000")},
+ po: limitPO(23),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("10000000")},
+ po: limitPO(24),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("01000000")},
+ po: limitPO(25),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00100000")},
+ po: limitPO(26),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00010000")},
+ po: limitPO(27),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00001000")},
+ po: limitPO(28),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00000100")},
+ po: limitPO(29),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00000010")},
+ po: limitPO(30),
+ },
+ {
+ addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00000001")},
+ po: limitPO(31),
+ },
+ } {
+ got := uint8(Proximity(base, tc.addr))
+ if got != tc.po {
+ t.Errorf("got %v bin, want %v", got, tc.po)
+ }
+ }
+}
diff --git a/swarm/swap/swap.go b/swarm/swap/swap.go
index 137eb141d..5d636dc20 100644
--- a/swarm/swap/swap.go
+++ b/swarm/swap/swap.go
@@ -91,3 +91,8 @@ func (s *Swap) loadState(peer *protocols.Peer) (err error) {
}
return
}
+
+// Close cleans up the Swap state store.
+func (swap *Swap) Close() {
+ swap.stateStore.Close()
+}
diff --git a/swarm/swarm.go b/swarm/swarm.go
index dc3756d3a..db52675fd 100644
--- a/swarm/swarm.go
+++ b/swarm/swarm.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -66,36 +66,24 @@ var (
// the swarm stack
type Swarm struct {
- config *api.Config // swarm configuration
- api *api.API // high level api layer (fs/manifest)
- dns api.Resolver // DNS registrar
- fileStore *storage.FileStore // distributed preimage archive, the local API to the storage with document level storage/retrieval support
- streamer *stream.Registry
- bzz *network.Bzz // the logistic manager
- backend chequebook.Backend // simple blockchain Backend
- privateKey *ecdsa.PrivateKey
- corsString string
- swapEnabled bool
- netStore *storage.NetStore
- sfs *fuse.SwarmFS // need this to cleanup all the active mounts on node exit
- ps *pss.Pss
- swap *swap.Swap
+ config *api.Config // swarm configuration
+ api *api.API // high level api layer (fs/manifest)
+ dns api.Resolver // DNS registrar
+ fileStore *storage.FileStore // distributed preimage archive, the local API to the storage with document level storage/retrieval support
+ streamer *stream.Registry
+ bzz *network.Bzz // the logistic manager
+ backend chequebook.Backend // simple blockchain Backend
+ privateKey *ecdsa.PrivateKey
+ netStore *storage.NetStore
+ sfs *fuse.SwarmFS // need this to cleanup all the active mounts on node exit
+ ps *pss.Pss
+ swap *swap.Swap
+ stateStore *state.DBStore
+ accountingMetrics *protocols.AccountingMetrics
tracerClose io.Closer
}
-type SwarmAPI struct {
- Api *api.API
- Backend chequebook.Backend
-}
-
-func (self *Swarm) API() *SwarmAPI {
- return &SwarmAPI{
- Api: self.api,
- Backend: self.backend,
- }
-}
-
// creates a new swarm service instance
// implements node.Service
// If mockStore is not nil, it will be used as the storage for chunk data.
@@ -134,7 +122,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
LightNode: config.LightNodeEnabled,
}
- stateStore, err := state.NewDBStore(filepath.Join(config.Path, "state-store.db"))
+ self.stateStore, err = state.NewDBStore(filepath.Join(config.Path, "state-store.db"))
if err != nil {
return
}
@@ -179,6 +167,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
return nil, err
}
self.swap = swap.New(balancesStore)
+ self.accountingMetrics = protocols.SetupAccountingMetrics(10*time.Second, filepath.Join(config.Path, "metrics.db"))
}
var nodeID enode.ID
@@ -203,7 +192,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
SyncUpdateDelay: config.SyncUpdateDelay,
MaxPeerServers: config.MaxStreamPeerServers,
}
- self.streamer = stream.NewRegistry(nodeID, delivery, self.netStore, stateStore, registryOptions, self.swap)
+ self.streamer = stream.NewRegistry(nodeID, delivery, self.netStore, self.stateStore, registryOptions, self.swap)
// Swarm Hash Merklised Chunking for Arbitrary-length Document/File storage
self.fileStore = storage.NewFileStore(self.netStore, self.config.FileStoreParams)
@@ -226,7 +215,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
log.Debug("Setup local storage")
- self.bzz = network.NewBzz(bzzconfig, to, stateStore, self.streamer.GetSpec(), self.streamer.Run)
+ self.bzz = network.NewBzz(bzzconfig, to, self.stateStore, self.streamer.GetSpec(), self.streamer.Run)
// Pss = postal service over swarm (devp2p over bzz)
self.ps, err = pss.NewPss(to, config.Pss)
@@ -446,14 +435,24 @@ func (self *Swarm) Stop() error {
ch.Stop()
ch.Save()
}
-
+ if self.swap != nil {
+ self.swap.Close()
+ }
+ if self.accountingMetrics != nil {
+ self.accountingMetrics.Close()
+ }
if self.netStore != nil {
self.netStore.Close()
}
self.sfs.Stop()
stopCounter.Inc(1)
self.streamer.Stop()
- return self.bzz.Stop()
+
+ err := self.bzz.Stop()
+ if self.stateStore != nil {
+ self.stateStore.Close()
+ }
+ return err
}
// implements the node.Service interface
@@ -466,14 +465,6 @@ func (self *Swarm) Protocols() (protos []p2p.Protocol) {
return
}
-func (self *Swarm) RegisterPssProtocol(spec *protocols.Spec, targetprotocol *p2p.Protocol, options *pss.ProtocolParams) (*pss.Protocol, error) {
- if !pss.IsActiveProtocol {
- return nil, fmt.Errorf("Pss protocols not available (built with !nopssprotocol tag)")
- }
- topic := pss.ProtocolTopic(spec)
- return pss.RegisterProtocol(self.ps, &topic, spec, targetprotocol, options)
-}
-
// implements node.Service
// APIs returns the RPC API descriptors the Swarm implementation offers
func (self *Swarm) APIs() []rpc.API {
@@ -505,6 +496,12 @@ func (self *Swarm) APIs() []rpc.API {
Service: self.sfs,
Public: false,
},
+ {
+ Namespace: "accounting",
+ Version: protocols.AccountingVersion,
+ Service: protocols.NewAccountingApi(self.accountingMetrics),
+ Public: false,
+ },
}
apis = append(apis, self.bzz.APIs()...)
@@ -516,10 +513,6 @@ func (self *Swarm) APIs() []rpc.API {
return apis
}
-func (self *Swarm) Api() *api.API {
- return self.api
-}
-
// SetChequebook ensures that the local chequebook is set up on chain.
func (self *Swarm) SetChequebook(ctx context.Context) error {
err := self.config.Swap.SetChequebook(ctx, self.backend, self.config.Path)
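
Worth noting in the reworked Stop: the state store is now closed only after bzz.Stop() returns, because both the stream registry and the bzz protocols are constructed over self.stateStore above and persist peer state into it during shutdown; closing it earlier would turn those final writes into errors. The swap store and accounting metrics, by contrast, can be closed up front, as nothing touches them once peers are being torn down.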
diff --git a/swarm/version/version.go b/swarm/version/version.go
index 17ef34f5f..831080eb8 100644
--- a/swarm/version/version.go
+++ b/swarm/version/version.go
@@ -21,10 +21,10 @@ import (
)
const (
- VersionMajor = 0 // Major version component of the current release
- VersionMinor = 3 // Minor version component of the current release
- VersionPatch = 7 // Patch version component of the current release
- VersionMeta = "unstable" // Version metadata to append to the version string
+ VersionMajor = 0 // Major version component of the current release
+ VersionMinor = 3 // Minor version component of the current release
+ VersionPatch = 9 // Patch version component of the current release
+ VersionMeta = "stable" // Version metadata to append to the version string
)
// Version holds the textual version string.
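
For reference, these constants compose into the user-visible release string; a sketch of the conventional composition (the actual Version variable lives just below this hunk):

    // "0.3.9-stable" for this release, assuming the usual major.minor.patch-meta layout:
    version := fmt.Sprintf("%d.%d.%d-%s", VersionMajor, VersionMinor, VersionPatch, VersionMeta)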
diff --git a/tests/init.go b/tests/init.go
index f0a4943c1..db0457b6d 100644
--- a/tests/init.go
+++ b/tests/init.go
@@ -86,6 +86,15 @@ var Forks = map[string]*params.ChainConfig{
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(5),
},
+ "ByzantiumToConstantinopleAt5": {
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(5),
+ },
}
// UnsupportedForkError is returned when a test requests a fork that isn't implemented.
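
The new entry models a chain that activates Byzantium at genesis and Constantinople at block 5; a quick sketch of how the transition reads through params.ChainConfig's helpers:

    cfg := Forks["ByzantiumToConstantinopleAt5"]
    fmt.Println(cfg.IsByzantium(big.NewInt(4)))      // true:  active from genesis
    fmt.Println(cfg.IsConstantinople(big.NewInt(4))) // false: before the fork block
    fmt.Println(cfg.IsConstantinople(big.NewInt(5))) // true:  at and after block 5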
diff --git a/tests/state_test.go b/tests/state_test.go
index ad77e4f33..964405382 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -18,10 +18,12 @@ package tests
import (
"bytes"
+ "flag"
"fmt"
"reflect"
"testing"
+ "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/core/vm"
)
@@ -65,8 +67,17 @@ func TestState(t *testing.T) {
// Transactions with gasLimit above this value will not get a VM trace on failure.
const traceErrorLimit = 400000
+// The VM config for state tests that accepts --vm.* command line arguments.
+var testVMConfig = func() vm.Config {
+ vmconfig := vm.Config{}
+ flag.StringVar(&vmconfig.EVMInterpreter, utils.EVMInterpreterFlag.Name, utils.EVMInterpreterFlag.Value, utils.EVMInterpreterFlag.Usage)
+ flag.StringVar(&vmconfig.EWASMInterpreter, utils.EWASMInterpreterFlag.Name, utils.EWASMInterpreterFlag.Value, utils.EWASMInterpreterFlag.Usage)
+ flag.Parse()
+ return vmconfig
+}()
+
func withTrace(t *testing.T, gasLimit uint64, test func(vm.Config) error) {
- err := test(vm.Config{})
+ err := test(testVMConfig)
if err == nil {
return
}
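
With testVMConfig in place, interpreter selection reaches the state tests from the command line. Something along these lines should work, assuming the flags are registered as vm.evm and vm.ewasm (the path is purely illustrative):

    go test ./tests -run TestState -args -vm.ewasm=/path/to/ewasm-interpreter.so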
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 3683aae32..436284196 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -31,10 +31,10 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
// StateTest checks transaction processing without block context.
@@ -248,7 +248,7 @@ func (tx *stTransaction) toMessage(ps stPostState) (core.Message, error) {
}
func rlpHash(x interface{}) (h common.Hash) {
- hw := sha3.NewKeccak256()
+ hw := sha3.NewLegacyKeccak256()
rlp.Encode(hw, x)
hw.Sum(h[:0])
return h
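
The move from the vendored crypto/sha3 to golang.org/x/crypto/sha3 hinges on NewLegacyKeccak256, which keeps Ethereum's pre-standard Keccak padding rather than the FIPS-202 padding of sha3.New256. A self-contained sketch:

    package main

    import (
    	"fmt"

    	"golang.org/x/crypto/sha3"
    )

    func main() {
    	h := sha3.NewLegacyKeccak256() // legacy Keccak-256, as used by Ethereum
    	h.Write([]byte("hello"))
    	fmt.Printf("%x\n", h.Sum(nil)) // differs from sha3.New256, which is FIPS-202
    }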
diff --git a/tests/testdata b/tests/testdata
index 95a309203..c02a2a17c 160000
--- a/tests/testdata
+++ b/tests/testdata
@@ -1 +1 @@
-Subproject commit 95a309203890e6244c6d4353ca411671973c13b5
+Subproject commit c02a2a17c0288a255572b37dc7ec1fcb838b9dbf
diff --git a/trie/database.go b/trie/database.go
index 71190b3f3..739a98add 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -141,7 +141,7 @@ type cachedNode struct {
node node // Cached collapsed trie node, or raw rlp data
size uint16 // Byte size of the useful cached data
- parents uint16 // Number of live nodes referencing this one
+ parents uint32 // Number of live nodes referencing this one
children map[common.Hash]uint16 // External children referenced by this node
flushPrev common.Hash // Previous node in the flush-list
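
Widening parents matters because it is a live-reference count: a uint16 silently wraps once a node gathers more than 65535 referrers, making a heavily shared node look unreferenced. A two-line illustration of the wrap (math.MaxUint16 from the standard library):

    var refs uint16 = math.MaxUint16 // 65535, the old ceiling
    refs++                           // wraps to 0: the node would appear collectable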
diff --git a/trie/hasher.go b/trie/hasher.go
index 7b1d7793f..9d6756b6f 100644
--- a/trie/hasher.go
+++ b/trie/hasher.go
@@ -21,8 +21,8 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
)
type hasher struct {
@@ -57,7 +57,7 @@ var hasherPool = sync.Pool{
New: func() interface{} {
return &hasher{
tmp: make(sliceBuffer, 0, 550), // cap is as large as a full fullNode.
- sha: sha3.NewKeccak256().(keccakState),
+ sha: sha3.NewLegacyKeccak256().(keccakState),
}
},
}
diff --git a/vendor/github.com/karalabe/hid/appveyor.yml b/vendor/github.com/karalabe/hid/appveyor.yml
index f43958747..84b3c95ff 100644
--- a/vendor/github.com/karalabe/hid/appveyor.yml
+++ b/vendor/github.com/karalabe/hid/appveyor.yml
@@ -22,8 +22,8 @@ environment:
install:
- rmdir C:\go /s /q
- - appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.windows-%GOARCH%.zip
- - 7z x go1.8.windows-%GOARCH%.zip -y -oC:\ > NUL
+ - appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.1.windows-%GOARCH%.zip
+ - 7z x go1.10.1.windows-%GOARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version
diff --git a/vendor/github.com/karalabe/hid/hid_disabled.go b/vendor/github.com/karalabe/hid/hid_disabled.go
index 1f4026379..0f266ba58 100644
--- a/vendor/github.com/karalabe/hid/hid_disabled.go
+++ b/vendor/github.com/karalabe/hid/hid_disabled.go
@@ -36,7 +36,7 @@ func (info DeviceInfo) Open() (*Device, error) {
// Close releases the HID USB device handle. On platforms that this file implements
// the method is just a noop.
-func (dev *Device) Close() {}
+func (dev *Device) Close() error { return nil }
// Write sends an output report to a HID device. On platforms that this file
// implements the method just returns an error.
diff --git a/vendor/github.com/karalabe/hid/hid_enabled.go b/vendor/github.com/karalabe/hid/hid_enabled.go
index 419273be6..e95e5792d 100644
--- a/vendor/github.com/karalabe/hid/hid_enabled.go
+++ b/vendor/github.com/karalabe/hid/hid_enabled.go
@@ -41,6 +41,7 @@ package hid
#endif
*/
import "C"
+
import (
"errors"
"runtime"
@@ -57,11 +58,6 @@ import (
// > "subsequent calls will cause the hid manager to release previously enumerated devices"
var enumerateLock sync.Mutex
-func init() {
- // Initialize the HIDAPI library
- C.hid_init()
-}
-
// Supported returns whether this platform is supported by the HID library or not.
// The goal of this method is to allow programmatically handling platforms that do
// not support USB HID and not having to fall back to build constraints.
@@ -113,6 +109,9 @@ func Enumerate(vendorID uint16, productID uint16) []DeviceInfo {
// Open connects to an HID device by its path name.
func (info DeviceInfo) Open() (*Device, error) {
+ enumerateLock.Lock()
+ defer enumerateLock.Unlock()
+
path := C.CString(info.Path)
defer C.free(unsafe.Pointer(path))
@@ -135,7 +134,7 @@ type Device struct {
}
// Close releases the HID USB device handle.
-func (dev *Device) Close() {
+func (dev *Device) Close() error {
dev.lock.Lock()
defer dev.lock.Unlock()
@@ -143,6 +142,7 @@ func (dev *Device) Close() {
C.hid_close(dev.device)
dev.device = nil
}
+ return nil
}
// Write sends an output report to a HID device.
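
With Close returning error in both the enabled and disabled builds, *hid.Device can satisfy io.Closer; a compile-time assertion of the kind a caller might add (hypothetical, not part of this diff):

    var _ io.Closer = (*hid.Device)(nil) // holds now that Close() returns error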
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go
new file mode 100644
index 000000000..bb7d3c422
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go
@@ -0,0 +1,25 @@
+// +build linux
+// +build ppc64 ppc64le
+
+package isatty
+
+import (
+ "unsafe"
+
+ syscall "golang.org/x/sys/unix"
+)
+
+const ioctlReadTermios = syscall.TCGETS
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ var termios syscall.Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal. This is always false on this platform.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
new file mode 100644
index 000000000..f02849c56
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_others.go
@@ -0,0 +1,15 @@
+// +build appengine js
+
+package isatty
+
+// IsTerminal returns true if the file descriptor is a terminal, which
+// is always false on js and appengine classic, a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool {
+ return false
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal. This is always false on this platform.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
index c5940b232..c36ad3235 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
@@ -331,7 +331,6 @@ func (r *Cache) delete(n *Node) bool {
return deleted
}
}
- return false
}
// Nodes returns number of 'cache node' in the map.
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
index 14dddf88d..abf9fb65c 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
@@ -29,7 +29,7 @@ func (bytesComparer) Separator(dst, a, b []byte) []byte {
// Do not shorten if one string is a prefix of the other
} else if c := a[i]; c < 0xff && c+1 < b[i] {
dst = append(dst, a[:i+1]...)
- dst[i]++
+ dst[len(dst)-1]++
return dst
}
return nil
@@ -39,7 +39,7 @@ func (bytesComparer) Successor(dst, b []byte) []byte {
for i, c := range b {
if c != 0xff {
dst = append(dst, b[:i+1]...)
- dst[i]++
+ dst[len(dst)-1]++
return dst
}
}
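
Both hunks fix the same indexing bug: i indexes the input slice, not dst, so whenever dst arrives non-empty the increment lands on a pre-existing byte instead of the byte just appended. A worked example for Separator (values illustrative):

    dst := []byte{'x'}                   // one byte already present on entry
    a, b := []byte("aab"), []byte("aaz") // first differing index is i == 2
    dst = append(dst, a[:3]...)          // dst = "xaab"
    // old: dst[i]++          -> "xabb", corrupting dst[2], an unrelated byte
    // new: dst[len(dst)-1]++ -> "xaac", bumping the appended suffix so that
    //                           "aab" <= "aac" < "aaz" still holds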
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
index 14a28f16f..2c522db23 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
@@ -36,7 +36,7 @@ type Comparer interface {
// by any users of this package.
Name() string
- // Bellow are advanced functions used used to reduce the space requirements
+	// Below are advanced functions used to reduce the space requirements
// for internal data structures such as index blocks.
// Separator appends a sequence of bytes x to dst such that a <= x && x < b,
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db.go b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
index e7ac06541..b27c38d37 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/db.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
@@ -182,7 +182,7 @@ func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
err = s.recover()
if err != nil {
- if !os.IsNotExist(err) || s.o.GetErrorIfMissing() {
+ if !os.IsNotExist(err) || s.o.GetErrorIfMissing() || s.o.GetReadOnly() {
return
}
err = s.create()
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
index 28e50906a..0c1b9a53b 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
@@ -663,7 +663,7 @@ type cCmd interface {
}
type cAuto struct {
- // Note for table compaction, an empty ackC represents it's a compaction waiting command.
+	// Note for table compaction, a non-empty ackC indicates a compaction-waiting command.
ackC chan<- error
}
@@ -777,8 +777,8 @@ func (db *DB) mCompaction() {
func (db *DB) tCompaction() {
var (
- x cCmd
- ackQ, waitQ []cCmd
+ x cCmd
+ waitQ []cCmd
)
defer func() {
@@ -787,10 +787,6 @@ func (db *DB) tCompaction() {
panic(x)
}
}
- for i := range ackQ {
- ackQ[i].ack(ErrClosed)
- ackQ[i] = nil
- }
for i := range waitQ {
waitQ[i].ack(ErrClosed)
waitQ[i] = nil
@@ -821,11 +817,6 @@ func (db *DB) tCompaction() {
waitQ = waitQ[:0]
}
} else {
- for i := range ackQ {
- ackQ[i].ack(nil)
- ackQ[i] = nil
- }
- ackQ = ackQ[:0]
for i := range waitQ {
waitQ[i].ack(nil)
waitQ[i] = nil
@@ -844,9 +835,12 @@ func (db *DB) tCompaction() {
switch cmd := x.(type) {
case cAuto:
if cmd.ackC != nil {
- waitQ = append(waitQ, x)
- } else {
- ackQ = append(ackQ, x)
+ // Check the write pause state before caching it.
+ if db.resumeWrite() {
+ x.ack(nil)
+ } else {
+ waitQ = append(waitQ, x)
+ }
}
case cRange:
x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
index 7ecd960d2..3f0654894 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
@@ -84,7 +84,7 @@ func (db *DB) checkAndCleanFiles() error {
var mfds []storage.FileDesc
for num, present := range tmap {
if !present {
- mfds = append(mfds, storage.FileDesc{storage.TypeTable, num})
+ mfds = append(mfds, storage.FileDesc{Type: storage.TypeTable, Num: num})
db.logf("db@janitor table missing @%d", num)
}
}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
index b16e3a704..96fb0f685 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
@@ -40,11 +40,11 @@ type IteratorSeeker interface {
Seek(key []byte) bool
// Next moves the iterator to the next key/value pair.
- // It returns whether the iterator is exhausted.
+ // It returns false if the iterator is exhausted.
Next() bool
// Prev moves the iterator to the previous key/value pair.
- // It returns whether the iterator is exhausted.
+ // It returns false if the iterator is exhausted.
Prev() bool
}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go
index 44e7d9adc..528b16423 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go
@@ -158,6 +158,12 @@ type Options struct {
// The default value is 8MiB.
BlockCacheCapacity int
+	// BlockCacheEvictRemoved enables forced eviction of cached blocks that belong
+	// to a removed 'sorted table'.
+	//
+	// The default is false.
+ BlockCacheEvictRemoved bool
+
// BlockRestartInterval is the number of keys between restart points for
// delta encoding of keys.
//
@@ -384,6 +390,13 @@ func (o *Options) GetBlockCacheCapacity() int {
return o.BlockCacheCapacity
}
+func (o *Options) GetBlockCacheEvictRemoved() bool {
+ if o == nil {
+ return false
+ }
+ return o.BlockCacheEvictRemoved
+}
+
func (o *Options) GetBlockRestartInterval() int {
if o == nil || o.BlockRestartInterval <= 0 {
return DefaultBlockRestartInterval
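
A minimal usage sketch for the new knob (path illustrative); when it is left false, blocks of deleted tables simply age out of the LRU instead of being dropped eagerly:

    db, err := leveldb.OpenFile("/tmp/demo.ldb", &opt.Options{
    	BlockCacheEvictRemoved: true, // evict cached blocks of removed 'sorted tables'
    })
    if err != nil {
    	log.Fatal(err)
    }
    defer db.Close()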
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
index 92328933c..40cb2cf95 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
@@ -36,7 +36,7 @@ func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf
func (s *session) newTemp() storage.FileDesc {
num := atomic.AddInt64(&s.stTempFileNum, 1) - 1
- return storage.FileDesc{storage.TypeTemp, num}
+ return storage.FileDesc{Type: storage.TypeTemp, Num: num}
}
func (s *session) addFileRef(fd storage.FileDesc, ref int) int {
@@ -190,7 +190,7 @@ func (s *session) recordCommited(rec *sessionRecord) {
// Create a new manifest file; need external synchronization.
func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
- fd := storage.FileDesc{storage.TypeManifest, s.allocFileNum()}
+ fd := storage.FileDesc{Type: storage.TypeManifest, Num: s.allocFileNum()}
writer, err := s.stor.Create(fd)
if err != nil {
return
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table.go
index 81d18a531..1fac60d05 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/table.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table.go
@@ -78,7 +78,7 @@ func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFil
}
func tableFileFromRecord(r atRecord) *tFile {
- return newTableFile(storage.FileDesc{storage.TypeTable, r.num}, r.size, r.imin, r.imax)
+ return newTableFile(storage.FileDesc{Type: storage.TypeTable, Num: r.num}, r.size, r.imin, r.imax)
}
// tFiles hold multiple tFile.
@@ -290,16 +290,17 @@ func (x *tFilesSortByNum) Less(i, j int) bool {
// Table operations.
type tOps struct {
- s *session
- noSync bool
- cache *cache.Cache
- bcache *cache.Cache
- bpool *util.BufferPool
+ s *session
+ noSync bool
+ evictRemoved bool
+ cache *cache.Cache
+ bcache *cache.Cache
+ bpool *util.BufferPool
}
// Creates an empty table and returns table writer.
func (t *tOps) create() (*tWriter, error) {
- fd := storage.FileDesc{storage.TypeTable, t.s.allocFileNum()}
+ fd := storage.FileDesc{Type: storage.TypeTable, Num: t.s.allocFileNum()}
fw, err := t.s.stor.Create(fd)
if err != nil {
return nil, err
@@ -422,7 +423,7 @@ func (t *tOps) remove(f *tFile) {
} else {
t.s.logf("table@remove removed @%d", f.fd.Num)
}
- if t.bcache != nil {
+ if t.evictRemoved && t.bcache != nil {
t.bcache.EvictNS(uint64(f.fd.Num))
}
})
@@ -451,7 +452,7 @@ func newTableOps(s *session) *tOps {
if !s.o.GetDisableBlockCache() {
var bcacher cache.Cacher
if s.o.GetBlockCacheCapacity() > 0 {
- bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
+ bcacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity())
}
bcache = cache.NewCache(bcacher)
}
@@ -459,11 +460,12 @@ func newTableOps(s *session) *tOps {
bpool = util.NewBufferPool(s.o.GetBlockSize() + 5)
}
return &tOps{
- s: s,
- noSync: s.o.GetNoSync(),
- cache: cache.NewCache(cacher),
- bcache: bcache,
- bpool: bpool,
+ s: s,
+ noSync: s.o.GetNoSync(),
+ evictRemoved: s.o.GetBlockCacheEvictRemoved(),
+ cache: cache.NewCache(cacher),
+ bcache: bcache,
+ bpool: bpool,
}
}
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go
index 4f26b49b6..d6f683ba3 100644
--- a/vendor/golang.org/x/crypto/ed25519/ed25519.go
+++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go
@@ -6,7 +6,10 @@
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
-// RFC 8032.
+// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
+// representation includes a public key suffix to make multiple signing
+// operations with the same key more efficient. This package refers to the RFC
+// 8032 private key as the “seed”.
package ed25519
// This code is a port of the public domain, “ref10” implementation of ed25519
@@ -31,6 +34,8 @@ const (
PrivateKeySize = 64
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
SignatureSize = 64
+ // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
+ SeedSize = 32
)
// PublicKey is the type of Ed25519 public keys.
@@ -46,6 +51,15 @@ func (priv PrivateKey) Public() crypto.PublicKey {
return PublicKey(publicKey)
}
+// Seed returns the private key seed corresponding to priv. It is provided for
+// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
+// in this package.
+func (priv PrivateKey) Seed() []byte {
+ seed := make([]byte, SeedSize)
+ copy(seed, priv[:32])
+ return seed
+}
+
// Sign signs the given message with priv.
// Ed25519 performs two passes over messages to be signed and therefore cannot
// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
@@ -61,19 +75,33 @@ func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOp
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) {
+func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
if rand == nil {
rand = cryptorand.Reader
}
- privateKey = make([]byte, PrivateKeySize)
- publicKey = make([]byte, PublicKeySize)
- _, err = io.ReadFull(rand, privateKey[:32])
- if err != nil {
+ seed := make([]byte, SeedSize)
+ if _, err := io.ReadFull(rand, seed); err != nil {
return nil, nil, err
}
- digest := sha512.Sum512(privateKey[:32])
+ privateKey := NewKeyFromSeed(seed)
+ publicKey := make([]byte, PublicKeySize)
+ copy(publicKey, privateKey[32:])
+
+ return publicKey, privateKey, nil
+}
+
+// NewKeyFromSeed calculates a private key from a seed. It will panic if
+// len(seed) is not SeedSize. This function is provided for interoperability
+// with RFC 8032. RFC 8032's private keys correspond to seeds in this
+// package.
+func NewKeyFromSeed(seed []byte) PrivateKey {
+ if l := len(seed); l != SeedSize {
+ panic("ed25519: bad seed length: " + strconv.Itoa(l))
+ }
+
+ digest := sha512.Sum512(seed)
digest[0] &= 248
digest[31] &= 127
digest[31] |= 64
@@ -85,10 +113,11 @@ func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, er
var publicKeyBytes [32]byte
A.ToBytes(&publicKeyBytes)
+ privateKey := make([]byte, PrivateKeySize)
+ copy(privateKey, seed)
copy(privateKey[32:], publicKeyBytes[:])
- copy(publicKey, publicKeyBytes[:])
- return publicKey, privateKey, nil
+ return privateKey
}
// Sign signs the message with privateKey and returns a signature. It will
@@ -171,9 +200,16 @@ func Verify(publicKey PublicKey, message, sig []byte) bool {
edwards25519.ScReduce(&hReduced, &digest)
var R edwards25519.ProjectiveGroupElement
- var b [32]byte
- copy(b[:], sig[32:])
- edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b)
+ var s [32]byte
+ copy(s[:], sig[32:])
+
+ // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
+ // the range [0, order) in order to prevent signature malleability.
+ if !edwards25519.ScMinimal(&s) {
+ return false
+ }
+
+ edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s)
var checkR [32]byte
R.ToBytes(&checkR)
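
The new seed API round-trips with key generation; a small self-contained check (fmt, bytes and log imports assumed):

    pub, priv, err := ed25519.GenerateKey(nil) // nil reader falls back to crypto/rand
    if err != nil {
    	log.Fatal(err)
    }
    priv2 := ed25519.NewKeyFromSeed(priv.Seed()) // rebuild from the RFC 8032 seed
    fmt.Println(bytes.Equal(priv, priv2))        // true
    fmt.Println(bytes.Equal(pub, priv[32:]))     // true: the public key is the suffix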
diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
index 5f8b99478..fd03c252a 100644
--- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
+++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
@@ -4,6 +4,8 @@
package edwards25519
+import "encoding/binary"
+
// This code is a port of the public domain, “ref10” implementation of ed25519
// from SUPERCOP.
@@ -1769,3 +1771,23 @@ func ScReduce(out *[32]byte, s *[64]byte) {
out[30] = byte(s11 >> 9)
out[31] = byte(s11 >> 17)
}
+
+// order is the order of Curve25519 in little-endian form.
+var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000}
+
+// ScMinimal returns true if the given scalar is less than the order of the
+// curve.
+func ScMinimal(scalar *[32]byte) bool {
+ for i := 3; ; i-- {
+ v := binary.LittleEndian.Uint64(scalar[i*8:])
+ if v > order[i] {
+ return false
+ } else if v < order[i] {
+ break
+ } else if i == 0 {
+ return false
+ }
+ }
+
+ return true
+}
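
For reference, order above is the little-endian limb encoding of ℓ = 2^252 + 27742317777372353535851937790883648493, the order of the Ed25519 base-point group. ScMinimal walks the four 64-bit limbs from most significant to least: a limb above the corresponding limb of ℓ rejects the scalar, a limb below accepts it, and reaching i == 0 with all limbs equal (the scalar equals ℓ) rejects it, which is exactly the s < ℓ check RFC 8032 requires.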
diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go
new file mode 100644
index 000000000..6570847f5
--- /dev/null
+++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go
@@ -0,0 +1,264 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package chacha20 implements the core ChaCha20 function as specified
+// in https://tools.ietf.org/html/rfc7539#section-2.3.
+package chacha20
+
+import (
+ "crypto/cipher"
+ "encoding/binary"
+
+ "golang.org/x/crypto/internal/subtle"
+)
+
+// assert that *Cipher implements cipher.Stream
+var _ cipher.Stream = (*Cipher)(nil)
+
+// Cipher is a stateful instance of ChaCha20 using a particular key
+// and nonce. A *Cipher implements the cipher.Stream interface.
+type Cipher struct {
+ key [8]uint32
+ counter uint32 // incremented after each block
+ nonce [3]uint32
+ buf [bufSize]byte // buffer for unused keystream bytes
+ len int // number of unused keystream bytes at end of buf
+}
+
+// New creates a new ChaCha20 stream cipher with the given key and nonce.
+// The initial counter value is set to 0.
+func New(key [8]uint32, nonce [3]uint32) *Cipher {
+ return &Cipher{key: key, nonce: nonce}
+}
+
+// ChaCha20 constants spelling "expand 32-byte k"
+const (
+ j0 uint32 = 0x61707865
+ j1 uint32 = 0x3320646e
+ j2 uint32 = 0x79622d32
+ j3 uint32 = 0x6b206574
+)
+
+func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
+ a += b
+ d ^= a
+ d = (d << 16) | (d >> 16)
+ c += d
+ b ^= c
+ b = (b << 12) | (b >> 20)
+ a += b
+ d ^= a
+ d = (d << 8) | (d >> 24)
+ c += d
+ b ^= c
+ b = (b << 7) | (b >> 25)
+ return a, b, c, d
+}
+
+// XORKeyStream XORs each byte in the given slice with a byte from the
+// cipher's key stream. Dst and src must overlap entirely or not at all.
+//
+// If len(dst) < len(src), XORKeyStream will panic. It is acceptable
+// to pass a dst bigger than src, and in that case, XORKeyStream will
+// only update dst[:len(src)] and will not touch the rest of dst.
+//
+// Multiple calls to XORKeyStream behave as if the concatenation of
+// the src buffers was passed in a single run. That is, Cipher
+// maintains state and does not reset at each XORKeyStream call.
+func (s *Cipher) XORKeyStream(dst, src []byte) {
+ if len(dst) < len(src) {
+ panic("chacha20: output smaller than input")
+ }
+ if subtle.InexactOverlap(dst[:len(src)], src) {
+ panic("chacha20: invalid buffer overlap")
+ }
+
+ // xor src with buffered keystream first
+ if s.len != 0 {
+ buf := s.buf[len(s.buf)-s.len:]
+ if len(src) < len(buf) {
+ buf = buf[:len(src)]
+ }
+ td, ts := dst[:len(buf)], src[:len(buf)] // BCE hint
+ for i, b := range buf {
+ td[i] = ts[i] ^ b
+ }
+ s.len -= len(buf)
+ if s.len != 0 {
+ return
+ }
+ s.buf = [len(s.buf)]byte{} // zero the empty buffer
+ src = src[len(buf):]
+ dst = dst[len(buf):]
+ }
+
+ if len(src) == 0 {
+ return
+ }
+ if haveAsm {
+ if uint64(len(src))+uint64(s.counter)*64 > (1<<38)-64 {
+ panic("chacha20: counter overflow")
+ }
+ s.xorKeyStreamAsm(dst, src)
+ return
+ }
+
+ // set up a 64-byte buffer to pad out the final block if needed
+ // (hoisted out of the main loop to avoid spills)
+ rem := len(src) % 64 // length of final block
+ fin := len(src) - rem // index of final block
+ if rem > 0 {
+ copy(s.buf[len(s.buf)-64:], src[fin:])
+ }
+
+ // pre-calculate most of the first round
+ s1, s5, s9, s13 := quarterRound(j1, s.key[1], s.key[5], s.nonce[0])
+ s2, s6, s10, s14 := quarterRound(j2, s.key[2], s.key[6], s.nonce[1])
+ s3, s7, s11, s15 := quarterRound(j3, s.key[3], s.key[7], s.nonce[2])
+
+ n := len(src)
+ src, dst = src[:n:n], dst[:n:n] // BCE hint
+ for i := 0; i < n; i += 64 {
+ // calculate the remainder of the first round
+ s0, s4, s8, s12 := quarterRound(j0, s.key[0], s.key[4], s.counter)
+
+ // execute the second round
+ x0, x5, x10, x15 := quarterRound(s0, s5, s10, s15)
+ x1, x6, x11, x12 := quarterRound(s1, s6, s11, s12)
+ x2, x7, x8, x13 := quarterRound(s2, s7, s8, s13)
+ x3, x4, x9, x14 := quarterRound(s3, s4, s9, s14)
+
+ // execute the remaining 18 rounds
+ for i := 0; i < 9; i++ {
+ x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
+ x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
+ x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
+ x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
+
+ x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
+ x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
+ x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
+ x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
+ }
+
+ x0 += j0
+ x1 += j1
+ x2 += j2
+ x3 += j3
+
+ x4 += s.key[0]
+ x5 += s.key[1]
+ x6 += s.key[2]
+ x7 += s.key[3]
+ x8 += s.key[4]
+ x9 += s.key[5]
+ x10 += s.key[6]
+ x11 += s.key[7]
+
+ x12 += s.counter
+ x13 += s.nonce[0]
+ x14 += s.nonce[1]
+ x15 += s.nonce[2]
+
+ // increment the counter
+ s.counter += 1
+ if s.counter == 0 {
+ panic("chacha20: counter overflow")
+ }
+
+ // pad to 64 bytes if needed
+ in, out := src[i:], dst[i:]
+ if i == fin {
+ // src[fin:] has already been copied into s.buf before
+ // the main loop
+ in, out = s.buf[len(s.buf)-64:], s.buf[len(s.buf)-64:]
+ }
+ in, out = in[:64], out[:64] // BCE hint
+
+ // XOR the key stream with the source and write out the result
+ xor(out[0:], in[0:], x0)
+ xor(out[4:], in[4:], x1)
+ xor(out[8:], in[8:], x2)
+ xor(out[12:], in[12:], x3)
+ xor(out[16:], in[16:], x4)
+ xor(out[20:], in[20:], x5)
+ xor(out[24:], in[24:], x6)
+ xor(out[28:], in[28:], x7)
+ xor(out[32:], in[32:], x8)
+ xor(out[36:], in[36:], x9)
+ xor(out[40:], in[40:], x10)
+ xor(out[44:], in[44:], x11)
+ xor(out[48:], in[48:], x12)
+ xor(out[52:], in[52:], x13)
+ xor(out[56:], in[56:], x14)
+ xor(out[60:], in[60:], x15)
+ }
+ // copy any trailing bytes out of the buffer and into dst
+ if rem != 0 {
+ s.len = 64 - rem
+ copy(dst[fin:], s.buf[len(s.buf)-64:])
+ }
+}
+
+// Advance discards bytes in the key stream until the next 64 byte block
+// boundary is reached and updates the counter accordingly. If the key
+// stream is already at a block boundary no bytes will be discarded and
+// the counter will be unchanged.
+func (s *Cipher) Advance() {
+ s.len -= s.len % 64
+ if s.len == 0 {
+ s.buf = [len(s.buf)]byte{}
+ }
+}
+
+// XORKeyStream crypts bytes from in to out using the given key and counters.
+// In and out must overlap entirely or not at all. Counter contains the raw
+// ChaCha20 counter bytes (i.e. block counter followed by nonce).
+func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
+ s := Cipher{
+ key: [8]uint32{
+ binary.LittleEndian.Uint32(key[0:4]),
+ binary.LittleEndian.Uint32(key[4:8]),
+ binary.LittleEndian.Uint32(key[8:12]),
+ binary.LittleEndian.Uint32(key[12:16]),
+ binary.LittleEndian.Uint32(key[16:20]),
+ binary.LittleEndian.Uint32(key[20:24]),
+ binary.LittleEndian.Uint32(key[24:28]),
+ binary.LittleEndian.Uint32(key[28:32]),
+ },
+ nonce: [3]uint32{
+ binary.LittleEndian.Uint32(counter[4:8]),
+ binary.LittleEndian.Uint32(counter[8:12]),
+ binary.LittleEndian.Uint32(counter[12:16]),
+ },
+ counter: binary.LittleEndian.Uint32(counter[0:4]),
+ }
+ s.XORKeyStream(out, in)
+}
+
+// HChaCha20 uses the ChaCha20 core to generate a derived key from a key and a
+// nonce. It should only be used as part of the XChaCha20 construction.
+func HChaCha20(key *[8]uint32, nonce *[4]uint32) [8]uint32 {
+ x0, x1, x2, x3 := j0, j1, j2, j3
+ x4, x5, x6, x7 := key[0], key[1], key[2], key[3]
+ x8, x9, x10, x11 := key[4], key[5], key[6], key[7]
+ x12, x13, x14, x15 := nonce[0], nonce[1], nonce[2], nonce[3]
+
+ for i := 0; i < 10; i++ {
+ x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
+ x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
+ x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
+ x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
+
+ x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
+ x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
+ x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
+ x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
+ }
+
+ var out [8]uint32
+ out[0], out[1], out[2], out[3] = x0, x1, x2, x3
+ out[4], out[5], out[6], out[7] = x12, x13, x14, x15
+ return out
+}
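
The package is internal to x/crypto, so it cannot be imported from outside the module; still, a sketch of the intended call pattern (all values illustrative):

    var key [8]uint32   // 256-bit key as eight little-endian words
    var nonce [3]uint32 // 96-bit RFC 7539 nonce
    c := chacha20.New(key, nonce) // block counter starts at 0
    msg := []byte("attack at dawn")
    c.XORKeyStream(msg, msg) // encrypt in place; a fresh Cipher with the same
                             // key and nonce regenerates the keystream to decrypt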
diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go
new file mode 100644
index 000000000..91520d1de
--- /dev/null
+++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !s390x gccgo appengine
+
+package chacha20
+
+const (
+ bufSize = 64
+ haveAsm = false
+)
+
+func (*Cipher) xorKeyStreamAsm(dst, src []byte) {
+ panic("not implemented")
+}
diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go
new file mode 100644
index 000000000..0c1c671c4
--- /dev/null
+++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go
@@ -0,0 +1,30 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build s390x,!gccgo,!appengine
+
+package chacha20
+
+var haveAsm = hasVectorFacility()
+
+const bufSize = 256
+
+// hasVectorFacility reports whether the machine supports the vector
+// facility (vx).
+// Implementation in asm_s390x.s.
+func hasVectorFacility() bool
+
+// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only
+// be called when the vector facility is available.
+// Implementation in asm_s390x.s.
+//go:noescape
+func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int)
+
+func (c *Cipher) xorKeyStreamAsm(dst, src []byte) {
+ xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter, &c.buf, &c.len)
+}
+
+// EXRL targets, DO NOT CALL!
+func mvcSrcToBuf()
+func mvcBufToDst()
diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s
new file mode 100644
index 000000000..98427c5e2
--- /dev/null
+++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s
@@ -0,0 +1,283 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build s390x,!gccgo,!appengine
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// This is an implementation of the ChaCha20 encryption algorithm as
+// specified in RFC 7539. It uses vector instructions to compute
+// 4 keystream blocks in parallel (256 bytes) which are then XORed
+// with the bytes in the input slice.
+
+GLOBL ·constants<>(SB), RODATA|NOPTR, $32
+// BSWAP: swap bytes in each 4-byte element
+DATA ·constants<>+0x00(SB)/4, $0x03020100
+DATA ·constants<>+0x04(SB)/4, $0x07060504
+DATA ·constants<>+0x08(SB)/4, $0x0b0a0908
+DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c
+// J0: [j0, j1, j2, j3]
+DATA ·constants<>+0x10(SB)/4, $0x61707865
+DATA ·constants<>+0x14(SB)/4, $0x3320646e
+DATA ·constants<>+0x18(SB)/4, $0x79622d32
+DATA ·constants<>+0x1c(SB)/4, $0x6b206574
+
+// EXRL targets:
+TEXT ·mvcSrcToBuf(SB), NOFRAME|NOSPLIT, $0
+ MVC $1, (R1), (R8)
+ RET
+
+TEXT ·mvcBufToDst(SB), NOFRAME|NOSPLIT, $0
+ MVC $1, (R8), (R9)
+ RET
+
+#define BSWAP V5
+#define J0 V6
+#define KEY0 V7
+#define KEY1 V8
+#define NONCE V9
+#define CTR V10
+#define M0 V11
+#define M1 V12
+#define M2 V13
+#define M3 V14
+#define INC V15
+#define X0 V16
+#define X1 V17
+#define X2 V18
+#define X3 V19
+#define X4 V20
+#define X5 V21
+#define X6 V22
+#define X7 V23
+#define X8 V24
+#define X9 V25
+#define X10 V26
+#define X11 V27
+#define X12 V28
+#define X13 V29
+#define X14 V30
+#define X15 V31
+
+#define NUM_ROUNDS 20
+
+#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \
+ VAF a1, a0, a0 \
+ VAF b1, b0, b0 \
+ VAF c1, c0, c0 \
+ VAF d1, d0, d0 \
+ VX a0, a2, a2 \
+ VX b0, b2, b2 \
+ VX c0, c2, c2 \
+ VX d0, d2, d2 \
+ VERLLF $16, a2, a2 \
+ VERLLF $16, b2, b2 \
+ VERLLF $16, c2, c2 \
+ VERLLF $16, d2, d2 \
+ VAF a2, a3, a3 \
+ VAF b2, b3, b3 \
+ VAF c2, c3, c3 \
+ VAF d2, d3, d3 \
+ VX a3, a1, a1 \
+ VX b3, b1, b1 \
+ VX c3, c1, c1 \
+ VX d3, d1, d1 \
+ VERLLF $12, a1, a1 \
+ VERLLF $12, b1, b1 \
+ VERLLF $12, c1, c1 \
+ VERLLF $12, d1, d1 \
+ VAF a1, a0, a0 \
+ VAF b1, b0, b0 \
+ VAF c1, c0, c0 \
+ VAF d1, d0, d0 \
+ VX a0, a2, a2 \
+ VX b0, b2, b2 \
+ VX c0, c2, c2 \
+ VX d0, d2, d2 \
+ VERLLF $8, a2, a2 \
+ VERLLF $8, b2, b2 \
+ VERLLF $8, c2, c2 \
+ VERLLF $8, d2, d2 \
+ VAF a2, a3, a3 \
+ VAF b2, b3, b3 \
+ VAF c2, c3, c3 \
+ VAF d2, d3, d3 \
+ VX a3, a1, a1 \
+ VX b3, b1, b1 \
+ VX c3, c1, c1 \
+ VX d3, d1, d1 \
+ VERLLF $7, a1, a1 \
+ VERLLF $7, b1, b1 \
+ VERLLF $7, c1, c1 \
+ VERLLF $7, d1, d1
+
+#define PERMUTE(mask, v0, v1, v2, v3) \
+ VPERM v0, v0, mask, v0 \
+ VPERM v1, v1, mask, v1 \
+ VPERM v2, v2, mask, v2 \
+ VPERM v3, v3, mask, v3
+
+#define ADDV(x, v0, v1, v2, v3) \
+ VAF x, v0, v0 \
+ VAF x, v1, v1 \
+ VAF x, v2, v2 \
+ VAF x, v3, v3
+
+#define XORV(off, dst, src, v0, v1, v2, v3) \
+ VLM off(src), M0, M3 \
+ PERMUTE(BSWAP, v0, v1, v2, v3) \
+ VX v0, M0, M0 \
+ VX v1, M1, M1 \
+ VX v2, M2, M2 \
+ VX v3, M3, M3 \
+ VSTM M0, M3, off(dst)
+
+#define SHUFFLE(a, b, c, d, t, u, v, w) \
+ VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]}
+ VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]}
+ VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]}
+ VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]}
+ VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]}
+ VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]}
+ VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]}
+ VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]}
+
+// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int)
+TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
+ MOVD $·constants<>(SB), R1
+ MOVD dst+0(FP), R2 // R2=&dst[0]
+ LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src)
+ MOVD key+48(FP), R5 // R5=key
+ MOVD nonce+56(FP), R6 // R6=nonce
+ MOVD counter+64(FP), R7 // R7=counter
+ MOVD buf+72(FP), R8 // R8=buf
+ MOVD len+80(FP), R9 // R9=len
+
+ // load BSWAP and J0
+ VLM (R1), BSWAP, J0
+
+ // set up tail buffer
+ ADD $-1, R4, R12
+ MOVBZ R12, R12
+ CMPUBEQ R12, $255, aligned
+ MOVD R4, R1
+ AND $~255, R1
+ MOVD $(R3)(R1*1), R1
+ EXRL $·mvcSrcToBuf(SB), R12
+ MOVD $255, R0
+ SUB R12, R0
+ MOVD R0, (R9) // update len
+
+aligned:
+ // setup
+ MOVD $95, R0
+ VLM (R5), KEY0, KEY1
+ VLL R0, (R6), NONCE
+ VZERO M0
+ VLEIB $7, $32, M0
+ VSRLB M0, NONCE, NONCE
+
+ // initialize counter values
+ VLREPF (R7), CTR
+ VZERO INC
+ VLEIF $1, $1, INC
+ VLEIF $2, $2, INC
+ VLEIF $3, $3, INC
+ VAF INC, CTR, CTR
+ VREPIF $4, INC
+
+chacha:
+ VREPF $0, J0, X0
+ VREPF $1, J0, X1
+ VREPF $2, J0, X2
+ VREPF $3, J0, X3
+ VREPF $0, KEY0, X4
+ VREPF $1, KEY0, X5
+ VREPF $2, KEY0, X6
+ VREPF $3, KEY0, X7
+ VREPF $0, KEY1, X8
+ VREPF $1, KEY1, X9
+ VREPF $2, KEY1, X10
+ VREPF $3, KEY1, X11
+ VLR CTR, X12
+ VREPF $1, NONCE, X13
+ VREPF $2, NONCE, X14
+ VREPF $3, NONCE, X15
+
+ MOVD $(NUM_ROUNDS/2), R1
+
+loop:
+ ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11)
+ ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9)
+
+ ADD $-1, R1
+ BNE loop
+
+ // decrement length
+ ADD $-256, R4
+ BLT tail
+
+continue:
+ // rearrange vectors
+ SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3)
+ ADDV(J0, X0, X1, X2, X3)
+ SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3)
+ ADDV(KEY0, X4, X5, X6, X7)
+ SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3)
+ ADDV(KEY1, X8, X9, X10, X11)
+ VAF CTR, X12, X12
+ SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3)
+ ADDV(NONCE, X12, X13, X14, X15)
+
+ // increment counters
+ VAF INC, CTR, CTR
+
+ // xor keystream with plaintext
+ XORV(0*64, R2, R3, X0, X4, X8, X12)
+ XORV(1*64, R2, R3, X1, X5, X9, X13)
+ XORV(2*64, R2, R3, X2, X6, X10, X14)
+ XORV(3*64, R2, R3, X3, X7, X11, X15)
+
+ // increment pointers
+ MOVD $256(R2), R2
+ MOVD $256(R3), R3
+
+ CMPBNE R4, $0, chacha
+ CMPUBEQ R12, $255, return
+ EXRL $·mvcBufToDst(SB), R12 // len was updated during setup
+
+return:
+ VSTEF $0, CTR, (R7)
+ RET
+
+tail:
+ MOVD R2, R9
+ MOVD R8, R2
+ MOVD R8, R3
+ MOVD $0, R4
+ JMP continue
+
+// func hasVectorFacility() bool
+TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1
+ MOVD $x-24(SP), R1
+ XC $24, 0(R1), 0(R1) // clear the storage
+ MOVD $2, R0 // R0 is the number of double words stored -1
+ WORD $0xB2B01000 // STFLE 0(R1)
+ XOR R0, R0 // reset the value of R0
+ MOVBZ z-8(SP), R1
+ AND $0x40, R1
+ BEQ novector
+
+vectorinstalled:
+ // check if the vector instruction has been enabled
+ VLEIB $0, $0xF, V16
+ VLGVB $0, V16, R1
+ CMPBNE R1, $0xF, novector
+ MOVB $1, ret+0(FP) // have vx
+ RET
+
+novector:
+ MOVB $0, ret+0(FP) // no vx
+ RET
diff --git a/vendor/golang.org/x/crypto/internal/chacha20/xor.go b/vendor/golang.org/x/crypto/internal/chacha20/xor.go
new file mode 100644
index 000000000..9c5ba0b33
--- /dev/null
+++ b/vendor/golang.org/x/crypto/internal/chacha20/xor.go
@@ -0,0 +1,43 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package chacha20
+
+import (
+ "runtime"
+)
+
+// Platforms that have fast unaligned 32-bit little endian accesses.
+const unaligned = runtime.GOARCH == "386" ||
+ runtime.GOARCH == "amd64" ||
+ runtime.GOARCH == "arm64" ||
+ runtime.GOARCH == "ppc64le" ||
+ runtime.GOARCH == "s390x"
+
+// xor reads a little endian uint32 from src, XORs it with u and
+// places the result in little endian byte order in dst.
+func xor(dst, src []byte, u uint32) {
+ _, _ = src[3], dst[3] // eliminate bounds checks
+ if unaligned {
+ // The compiler should optimize this code into
+ // 32-bit unaligned little endian loads and stores.
+ // TODO: delete once the compiler does a reliably
+ // good job with the generic code below.
+ // See issue #25111 for more details.
+ v := uint32(src[0])
+ v |= uint32(src[1]) << 8
+ v |= uint32(src[2]) << 16
+ v |= uint32(src[3]) << 24
+ v ^= u
+ dst[0] = byte(v)
+ dst[1] = byte(v >> 8)
+ dst[2] = byte(v >> 16)
+ dst[3] = byte(v >> 24)
+ } else {
+ dst[0] = src[0] ^ byte(u)
+ dst[1] = src[1] ^ byte(u>>8)
+ dst[2] = src[2] ^ byte(u>>16)
+ dst[3] = src[3] ^ byte(u>>24)
+ }
+}
diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go
new file mode 100644
index 000000000..f38797bfa
--- /dev/null
+++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go
@@ -0,0 +1,32 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+// Package subtle implements functions that are often useful in cryptographic
+// code but require careful thought to use correctly.
+package subtle // import "golang.org/x/crypto/internal/subtle"
+
+import "unsafe"
+
+// AnyOverlap reports whether x and y share memory at any (not necessarily
+// corresponding) index. The memory beyond the slice length is ignored.
+func AnyOverlap(x, y []byte) bool {
+ return len(x) > 0 && len(y) > 0 &&
+ uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
+ uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
+}
+
+// InexactOverlap reports whether x and y share memory at any non-corresponding
+// index. The memory beyond the slice length is ignored. Note that x and y can
+// have different lengths and still not have any inexact overlap.
+//
+// InexactOverlap can be used to implement the requirements of the crypto/cipher
+// AEAD, Block, BlockMode and Stream interfaces.
+func InexactOverlap(x, y []byte) bool {
+ if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
+ return false
+ }
+ return AnyOverlap(x, y)
+}
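
The two predicates in one sketch: exact aliasing is permitted, shifted overlap is not (indices illustrative):

    s := make([]byte, 64)
    fmt.Println(subtle.AnyOverlap(s[0:32], s[16:48]))     // true:  shared memory
    fmt.Println(subtle.InexactOverlap(s[0:32], s[0:32]))  // false: identical start is fine
    fmt.Println(subtle.InexactOverlap(s[0:32], s[1:33]))  // true:  shifted overlap
    fmt.Println(subtle.InexactOverlap(s[0:32], s[32:64])) // false: disjoint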
diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go
new file mode 100644
index 000000000..0cc4a8a64
--- /dev/null
+++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go
@@ -0,0 +1,35 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+// Package subtle implements functions that are often useful in cryptographic
+// code but require careful thought to use correctly.
+package subtle // import "golang.org/x/crypto/internal/subtle"
+
+// This is the Google App Engine standard variant based on reflect
+// because the unsafe package and cgo are disallowed.
+
+import "reflect"
+
+// AnyOverlap reports whether x and y share memory at any (not necessarily
+// corresponding) index. The memory beyond the slice length is ignored.
+func AnyOverlap(x, y []byte) bool {
+ return len(x) > 0 && len(y) > 0 &&
+ reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() &&
+ reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer()
+}
+
+// InexactOverlap reports whether x and y share memory at any non-corresponding
+// index. The memory beyond the slice length is ignored. Note that x and y can
+// have different lengths and still not have any inexact overlap.
+//
+// InexactOverlap can be used to implement the requirements of the crypto/cipher
+// AEAD, Block, BlockMode and Stream interfaces.
+func InexactOverlap(x, y []byte) bool {
+ if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
+ return false
+ }
+ return AnyOverlap(x, y)
+}
diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go
index 68b14c6ae..3e2518600 100644
--- a/vendor/golang.org/x/crypto/openpgp/keys.go
+++ b/vendor/golang.org/x/crypto/openpgp/keys.go
@@ -325,16 +325,14 @@ func ReadEntity(packets *packet.Reader) (*Entity, error) {
if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
packets.Unread(p)
return nil, errors.StructuralError("first packet was not a public/private key")
- } else {
- e.PrimaryKey = &e.PrivateKey.PublicKey
}
+ e.PrimaryKey = &e.PrivateKey.PublicKey
}
if !e.PrimaryKey.PubKeyAlgo.CanSign() {
return nil, errors.StructuralError("primary key cannot be used for signatures")
}
- var current *Identity
var revocations []*packet.Signature
EachPacket:
for {
@@ -347,32 +345,8 @@ EachPacket:
switch pkt := p.(type) {
case *packet.UserId:
- current = new(Identity)
- current.Name = pkt.Id
- current.UserId = pkt
- e.Identities[pkt.Id] = current
-
- for {
- p, err = packets.Next()
- if err == io.EOF {
- return nil, io.ErrUnexpectedEOF
- } else if err != nil {
- return nil, err
- }
-
- sig, ok := p.(*packet.Signature)
- if !ok {
- return nil, errors.StructuralError("user ID packet not followed by self-signature")
- }
-
- if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
- if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
- return nil, errors.StructuralError("user ID self-signature invalid: " + err.Error())
- }
- current.SelfSignature = sig
- break
- }
- current.Signatures = append(current.Signatures, sig)
+ if err := addUserID(e, packets, pkt); err != nil {
+ return nil, err
}
case *packet.Signature:
if pkt.SigType == packet.SigTypeKeyRevocation {
@@ -381,11 +355,9 @@ EachPacket:
// TODO: RFC4880 5.2.1 permits signatures
// directly on keys (eg. to bind additional
// revocation keys).
- } else if current == nil {
- return nil, errors.StructuralError("signature packet found before user id packet")
- } else {
- current.Signatures = append(current.Signatures, pkt)
}
+			// Otherwise, ignore the signature, as it does not follow anything
+			// we would know to attach it to.
case *packet.PrivateKey:
if pkt.IsSubkey == false {
packets.Unread(p)
@@ -426,33 +398,105 @@ EachPacket:
return e, nil
}
+func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error {
+ // Make a new Identity object, that we might wind up throwing away.
+ // We'll only add it if we get a valid self-signature over this
+ // userID.
+ identity := new(Identity)
+ identity.Name = pkt.Id
+ identity.UserId = pkt
+
+ for {
+ p, err := packets.Next()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+
+ sig, ok := p.(*packet.Signature)
+ if !ok {
+ packets.Unread(p)
+ break
+ }
+
+ if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
+ if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
+ return errors.StructuralError("user ID self-signature invalid: " + err.Error())
+ }
+ identity.SelfSignature = sig
+ e.Identities[pkt.Id] = identity
+ } else {
+ identity.Signatures = append(identity.Signatures, sig)
+ }
+ }
+
+ return nil
+}
+
func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
var subKey Subkey
subKey.PublicKey = pub
subKey.PrivateKey = priv
- p, err := packets.Next()
- if err == io.EOF {
- return io.ErrUnexpectedEOF
+
+ for {
+ p, err := packets.Next()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return errors.StructuralError("subkey signature invalid: " + err.Error())
+ }
+
+ sig, ok := p.(*packet.Signature)
+ if !ok {
+ packets.Unread(p)
+ break
+ }
+
+ if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation {
+ return errors.StructuralError("subkey signature with wrong type")
+ }
+
+ if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil {
+ return errors.StructuralError("subkey signature invalid: " + err.Error())
+ }
+
+ switch sig.SigType {
+ case packet.SigTypeSubkeyRevocation:
+ subKey.Sig = sig
+ case packet.SigTypeSubkeyBinding:
+
+ if shouldReplaceSubkeySig(subKey.Sig, sig) {
+ subKey.Sig = sig
+ }
+ }
}
- if err != nil {
- return errors.StructuralError("subkey signature invalid: " + err.Error())
- }
- var ok bool
- subKey.Sig, ok = p.(*packet.Signature)
- if !ok {
+
+ if subKey.Sig == nil {
return errors.StructuralError("subkey packet not followed by signature")
}
- if subKey.Sig.SigType != packet.SigTypeSubkeyBinding && subKey.Sig.SigType != packet.SigTypeSubkeyRevocation {
- return errors.StructuralError("subkey signature with wrong type")
- }
- err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, subKey.Sig)
- if err != nil {
- return errors.StructuralError("subkey signature invalid: " + err.Error())
- }
+
e.Subkeys = append(e.Subkeys, subKey)
+
return nil
}
+func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool {
+ if potentialNewSig == nil {
+ return false
+ }
+
+ if existingSig == nil {
+ return true
+ }
+
+ if existingSig.SigType == packet.SigTypeSubkeyRevocation {
+ return false // never override a revocation signature
+ }
+
+ return potentialNewSig.CreationTime.After(existingSig.CreationTime)
+}
+
const defaultRSAKeyBits = 2048
// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
@@ -487,7 +531,7 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
}
isPrimaryId := true
e.Identities[uid.Id] = &Identity{
- Name: uid.Name,
+ Name: uid.Id,
UserId: uid,
SelfSignature: &packet.Signature{
CreationTime: currentTime,
@@ -501,6 +545,10 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
IssuerKeyId: &e.PrimaryKey.KeyId,
},
}
+ err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config)
+ if err != nil {
+ return nil, err
+ }
// If the user passes in a DefaultHash via packet.Config,
// set the PreferredHash for the SelfSignature.
@@ -508,6 +556,11 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)}
}
+ // Likewise for DefaultCipher.
+ if config != nil && config.DefaultCipher != 0 {
+ e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)}
+ }
+
e.Subkeys = make([]Subkey, 1)
e.Subkeys[0] = Subkey{
PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey),
@@ -525,13 +578,16 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
}
e.Subkeys[0].PublicKey.IsSubkey = true
e.Subkeys[0].PrivateKey.IsSubkey = true
-
+ err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config)
+ if err != nil {
+ return nil, err
+ }
return e, nil
}
-// SerializePrivate serializes an Entity, including private key material, to
-// the given Writer. For now, it must only be used on an Entity returned from
-// NewEntity.
+// SerializePrivate serializes an Entity, including private key material, but
+// excluding signatures from other entities, to the given Writer.
+// Identities and subkeys are re-signed in case they changed since NewEntity.
// If config is nil, sensible defaults will be used.
func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
err = e.PrivateKey.Serialize(w)
@@ -569,8 +625,8 @@ func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error
return nil
}
-// Serialize writes the public part of the given Entity to w. (No private
-// key material will be output).
+// Serialize writes the public part of the given Entity to w, including
+// signatures from other entities. No private key material will be output.
func (e *Entity) Serialize(w io.Writer) error {
err := e.PrimaryKey.Serialize(w)
if err != nil {
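
Because NewEntity now signs the identity and the encryption subkey eagerly, a freshly created entity serializes with valid certifications and no separate signing pass; a minimal sketch:

    e, err := openpgp.NewEntity("Alice", "", "alice@example.com", nil)
    if err != nil {
    	log.Fatal(err)
    }
    var buf bytes.Buffer
    if err := e.Serialize(&buf); err != nil { // public part, self-signatures included
    	log.Fatal(err)
    }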
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
index 266840d05..02b372cf3 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
@@ -42,12 +42,18 @@ func (e *EncryptedKey) parse(r io.Reader) (err error) {
switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
+ if err != nil {
+ return
+ }
case PubKeyAlgoElGamal:
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
if err != nil {
return
}
e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
+ if err != nil {
+ return
+ }
}
_, err = consumeAll(r)
return
@@ -72,7 +78,8 @@ func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
// padding oracle attacks.
switch priv.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- b, err = rsa.DecryptPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), e.encryptedMPI1.bytes)
+ k := priv.PrivateKey.(*rsa.PrivateKey)
+ b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes))
case PubKeyAlgoElGamal:
c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go
index 3eded93f0..5af64c542 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go
@@ -11,10 +11,12 @@ import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
- "golang.org/x/crypto/cast5"
- "golang.org/x/crypto/openpgp/errors"
+ "crypto/rsa"
"io"
"math/big"
+
+ "golang.org/x/crypto/cast5"
+ "golang.org/x/crypto/openpgp/errors"
)
// readFull is the same as io.ReadFull except that reading zero bytes returns
@@ -402,14 +404,16 @@ const (
type PublicKeyAlgorithm uint8
const (
- PubKeyAlgoRSA PublicKeyAlgorithm = 1
- PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
- PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
- PubKeyAlgoElGamal PublicKeyAlgorithm = 16
- PubKeyAlgoDSA PublicKeyAlgorithm = 17
+ PubKeyAlgoRSA PublicKeyAlgorithm = 1
+ PubKeyAlgoElGamal PublicKeyAlgorithm = 16
+ PubKeyAlgoDSA PublicKeyAlgorithm = 17
// RFC 6637, Section 5.
PubKeyAlgoECDH PublicKeyAlgorithm = 18
PubKeyAlgoECDSA PublicKeyAlgorithm = 19
+
+ // Deprecated in RFC 4880, Section 13.5. Use key flags instead.
+ PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
+ PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
)
// CanEncrypt returns true if it's possible to encrypt a message to a public
@@ -500,19 +504,17 @@ func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) {
numBytes := (int(bitLength) + 7) / 8
mpi = make([]byte, numBytes)
_, err = readFull(r, mpi)
- return
-}
-
-// mpiLength returns the length of the given *big.Int when serialized as an
-// MPI.
-func mpiLength(n *big.Int) (mpiLengthInBytes int) {
- mpiLengthInBytes = 2 /* MPI length */
- mpiLengthInBytes += (n.BitLen() + 7) / 8
+ // According to RFC 4880, Section 3.2, we should check that the MPI has
+ // no leading zeroes (at least when not an encrypted MPI?), but this
+ // implementation does generate leading zeroes, so we keep accepting them.
return
}
// writeMPI serializes a big integer to w.
func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) {
+ // Note that we can produce leading zeroes, in violation of RFC 4880 3.2.
+ // Implementations seem to be tolerant of them, and stripping them would
+ // make it complex to guarantee matching re-serialization.
_, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)})
if err == nil {
_, err = w.Write(mpiBytes)
@@ -525,6 +527,18 @@ func writeBig(w io.Writer, i *big.Int) error {
return writeMPI(w, uint16(i.BitLen()), i.Bytes())
}
+// padToKeySize left-pads an MPI with zeroes to match the length of the
+// specified RSA public key.
+func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
+ k := (pub.N.BitLen() + 7) / 8
+ if len(b) >= k {
+ return b
+ }
+ bb := make([]byte, k)
+ copy(bb[len(bb)-len(b):], b)
+ return bb
+}
+
// CompressionAlgo represents the different compression algorithms
// supported by OpenPGP (except for BZIP2, which is not currently
// supported). See Section 9.3 of RFC 4880.
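Why padToKeySize exists: MPI encoding strips leading zero bytes, while crypto/rsa's PKCS#1 v1.5 routines expect input exactly as long as the modulus, so roughly one signature or ciphertext in 256 would otherwise fail by length alone. A runnable sketch of the same left-padding (pad stands in for the unexported helper):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

// pad mirrors the unexported padToKeySize helper: it left-pads b with
// zero bytes up to the byte length of pub's modulus.
func pad(pub *rsa.PublicKey, b []byte) []byte {
	k := (pub.N.BitLen() + 7) / 8
	if len(b) >= k {
		return b
	}
	bb := make([]byte, k)
	copy(bb[len(bb)-len(b):], b)
	return bb
}

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// MPI encoding drops leading zero bytes, so a value whose top byte is
	// zero round-trips one byte short of the 256-byte modulus.
	short := make([]byte, 255)
	fmt.Println(len(pad(&key.PublicKey, short))) // 256
}
```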
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
index 34734cc63..bd31cceac 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
@@ -64,14 +64,19 @@ func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateK
return pk
}
-// NewSignerPrivateKey creates a sign-only PrivateKey from a crypto.Signer that
+// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
// implements RSA or ECDSA.
func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey {
pk := new(PrivateKey)
+ // In general, the public keys should be used as pointers. We still
+ // type-switch on the values, for backwards compatibility.
switch pubkey := signer.Public().(type) {
+ case *rsa.PublicKey:
+ pk.PublicKey = *NewRSAPublicKey(currentTime, pubkey)
case rsa.PublicKey:
pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey)
- pk.PubKeyAlgo = PubKeyAlgoRSASignOnly
+ case *ecdsa.PublicKey:
+ pk.PublicKey = *NewECDSAPublicKey(currentTime, pubkey)
case ecdsa.PublicKey:
pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey)
default:
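The effect of the added pointer cases, as a sketch: a key generated by the standard library satisfies crypto.Signer with a pointer public key, which previously fell through to the panic in the default branch.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// Signer.Public() on a standard-library key returns *ecdsa.PublicKey,
	// a pointer; without the new cases this hit the default branch.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	pk := packet.NewSignerPrivateKey(time.Now(), priv)
	fmt.Println(pk.PubKeyAlgo == packet.PubKeyAlgoECDSA) // true
}
```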
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go
index ead26233d..fcd5f5251 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go
@@ -244,7 +244,12 @@ func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey
}
pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
- pk.ec.p.bitLength = uint16(8 * len(pk.ec.p.bytes))
+
+ // The bit length is 3 (for the 0x04 byte that marks an uncompressed
+ // point) plus the bit lengths of the two field elements (x and y),
+ // each rounded up to a whole number of bytes. See
+ // https://tools.ietf.org/html/rfc6637#section-6
+ fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7
+ pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes)
pk.setFingerPrintAndKeyId()
return pk
@@ -515,7 +520,7 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey)
- err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes)
+ err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes))
if err != nil {
return errors.SignatureError("RSA verification failure")
}
@@ -566,7 +571,7 @@ func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
rsaPublicKey := pk.PublicKey.(*rsa.PublicKey)
- if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil {
+ if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil {
return errors.SignatureError("RSA verification failure")
}
return
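A worked check of the new bit-length formula, as a sketch: the MPI holds 0x04 || x || y, so the value has the three significant bits of 0x04 plus two byte-aligned field elements.

```go
package main

import (
	"crypto/elliptic"
	"fmt"
)

func main() {
	// 0x04 contributes 3 significant bits and each coordinate occupies a
	// whole number of bytes.
	for _, c := range []elliptic.Curve{elliptic.P256(), elliptic.P521()} {
		fieldBits := (c.Params().BitSize + 7) &^ 7
		fmt.Printf("%s: bitLength = %d\n", c.Params().Name, 3+2*fieldBits)
		// P-256: 515 (the old 8*len(bytes) formula gave 520)
		// P-521: 1059 (the old formula gave 1064)
	}
}
```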
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/vendor/golang.org/x/crypto/openpgp/packet/signature.go
index 6ce0cbedb..b2a24a532 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/signature.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/signature.go
@@ -542,7 +542,7 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
r, s, err = ecdsa.Sign(config.Random(), pk, digest)
} else {
var b []byte
- b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, nil)
+ b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
if err == nil {
r, s, err = unwrapECDSASig(b)
}
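What the sig.Hash argument buys, sketched here with a software key: crypto.Hash implements crypto.SignerOpts, so opaque signers (an HSM-backed one, say) can learn which digest algorithm produced the bytes they are signing; the old nil argument withheld that.

```go
package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	var signer crypto.Signer = priv
	digest := sha256.Sum256([]byte("message"))
	// crypto.Hash satisfies crypto.SignerOpts, so passing crypto.SHA256
	// as opts identifies the digest algorithm to the signer.
	sig, err := signer.Sign(rand.Reader, digest[:], crypto.SHA256)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(sig) > 0) // true
}
```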
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
index 96a2b382a..d19ffbc78 100644
--- a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
+++ b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
@@ -80,7 +80,7 @@ func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
// ImageData returns zero or more byte slices, each containing
// JPEG File Interchange Format (JFIF), for each photo in the
-// the user attribute packet.
+// user attribute packet.
func (uat *UserAttribute) ImageData() (imageData [][]byte) {
for _, sp := range uat.Contents {
if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
diff --git a/vendor/golang.org/x/crypto/openpgp/write.go b/vendor/golang.org/x/crypto/openpgp/write.go
index 65a304cc8..4ee71784e 100644
--- a/vendor/golang.org/x/crypto/openpgp/write.go
+++ b/vendor/golang.org/x/crypto/openpgp/write.go
@@ -164,12 +164,12 @@ func hashToHashId(h crypto.Hash) uint8 {
return v
}
-// Encrypt encrypts a message to a number of recipients and, optionally, signs
-// it. hints contains optional information, that is also encrypted, that aids
-// the recipients in processing the message. The resulting WriteCloser must
-// be closed after the contents of the file have been written.
-// If config is nil, sensible defaults will be used.
-func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
+// writeAndSign writes the data as a payload packet and, optionally, signs
+// it. hints contains optional information, also encrypted, that aids the
+// recipients in processing the message. The resulting WriteCloser must be
+// closed after the contents of the file have been written. If config is
+// nil, sensible defaults will be used.
+func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
var signer *packet.PrivateKey
if signed != nil {
signKey, ok := signed.signingKey(config.Now())
@@ -185,6 +185,83 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
}
}
+ var hash crypto.Hash
+ for _, hashId := range candidateHashes {
+ if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
+ hash = h
+ break
+ }
+ }
+
+ // If the hash specified by config is a candidate, we'll use that.
+ if configuredHash := config.Hash(); configuredHash.Available() {
+ for _, hashId := range candidateHashes {
+ if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
+ hash = h
+ break
+ }
+ }
+ }
+
+ if hash == 0 {
+ hashId := candidateHashes[0]
+ name, ok := s2k.HashIdToString(hashId)
+ if !ok {
+ name = "#" + strconv.Itoa(int(hashId))
+ }
+ return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
+ }
+
+ if signer != nil {
+ ops := &packet.OnePassSignature{
+ SigType: packet.SigTypeBinary,
+ Hash: hash,
+ PubKeyAlgo: signer.PubKeyAlgo,
+ KeyId: signer.KeyId,
+ IsLast: true,
+ }
+ if err := ops.Serialize(payload); err != nil {
+ return nil, err
+ }
+ }
+
+ if hints == nil {
+ hints = &FileHints{}
+ }
+
+ w := payload
+ if signer != nil {
+ // If we need to write a signature packet after the literal
+ // data then we need to stop literalData from closing
+ // the payload.
+ w = noOpCloser{w}
+
+ }
+ var epochSeconds uint32
+ if !hints.ModTime.IsZero() {
+ epochSeconds = uint32(hints.ModTime.Unix())
+ }
+ literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
+ if err != nil {
+ return nil, err
+ }
+
+ if signer != nil {
+ return signatureWriter{payload, literalData, hash, hash.New(), signer, config}, nil
+ }
+ return literalData, nil
+}
+
+// Encrypt encrypts a message to a number of recipients and, optionally, signs
+// it. hints contains optional information, also encrypted, that aids
+// the recipients in processing the message. The resulting WriteCloser must
+// be closed after the contents of the file have been written.
+// If config is nil, sensible defaults will be used.
+func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
+ if len(to) == 0 {
+ return nil, errors.InvalidArgumentError("no encryption recipient provided")
+ }
+
// These are the possible ciphers that we'll use for the message.
candidateCiphers := []uint8{
uint8(packet.CipherAES128),
@@ -194,6 +271,7 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
// These are the possible hash functions that we'll use for the signature.
candidateHashes := []uint8{
hashToHashId(crypto.SHA256),
+ hashToHashId(crypto.SHA384),
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA1),
hashToHashId(crypto.RIPEMD160),
@@ -241,33 +319,6 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
}
}
- var hash crypto.Hash
- for _, hashId := range candidateHashes {
- if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
- hash = h
- break
- }
- }
-
- // If the hash specified by config is a candidate, we'll use that.
- if configuredHash := config.Hash(); configuredHash.Available() {
- for _, hashId := range candidateHashes {
- if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
- hash = h
- break
- }
- }
- }
-
- if hash == 0 {
- hashId := candidateHashes[0]
- name, ok := s2k.HashIdToString(hashId)
- if !ok {
- name = "#" + strconv.Itoa(int(hashId))
- }
- return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
- }
-
symKey := make([]byte, cipher.KeySize())
if _, err := io.ReadFull(config.Random(), symKey); err != nil {
return nil, err
@@ -279,49 +330,38 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
}
}
- encryptedData, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)
+ payload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)
if err != nil {
return
}
- if signer != nil {
- ops := &packet.OnePassSignature{
- SigType: packet.SigTypeBinary,
- Hash: hash,
- PubKeyAlgo: signer.PubKeyAlgo,
- KeyId: signer.KeyId,
- IsLast: true,
- }
- if err := ops.Serialize(encryptedData); err != nil {
- return nil, err
- }
+ return writeAndSign(payload, candidateHashes, signed, hints, config)
+}
+
+// Sign signs a message. The resulting WriteCloser must be closed after the
+// contents of the file have been written. hints contains optional information
+// that aids the recipients in processing the message.
+// If config is nil, sensible defaults will be used.
+func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) {
+ if signed == nil {
+ return nil, errors.InvalidArgumentError("no signer provided")
}
- if hints == nil {
- hints = &FileHints{}
+ // These are the possible hash functions that we'll use for the signature.
+ candidateHashes := []uint8{
+ hashToHashId(crypto.SHA256),
+ hashToHashId(crypto.SHA384),
+ hashToHashId(crypto.SHA512),
+ hashToHashId(crypto.SHA1),
+ hashToHashId(crypto.RIPEMD160),
}
-
- w := encryptedData
- if signer != nil {
- // If we need to write a signature packet after the literal
- // data then we need to stop literalData from closing
- // encryptedData.
- w = noOpCloser{encryptedData}
-
+ defaultHashes := candidateHashes[len(candidateHashes)-1:]
+ preferredHashes := signed.primaryIdentity().SelfSignature.PreferredHash
+ if len(preferredHashes) == 0 {
+ preferredHashes = defaultHashes
}
- var epochSeconds uint32
- if !hints.ModTime.IsZero() {
- epochSeconds = uint32(hints.ModTime.Unix())
- }
- literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
- if err != nil {
- return nil, err
- }
-
- if signer != nil {
- return signatureWriter{encryptedData, literalData, hash, hash.New(), signer, config}, nil
- }
- return literalData, nil
+ candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
+ return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, config)
}
// signatureWriter hashes the contents of a message while passing it along to
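The new exported Sign can be exercised like this (a sketch; the entity name is a placeholder, and the DefaultHash preference matters because Sign intersects its candidate hashes with the signer's stated preferences):

```go
package main

import (
	"bytes"
	"crypto"
	_ "crypto/sha256" // register SHA-256, the hash the entity prefers
	"fmt"
	"io"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	cfg := &packet.Config{DefaultHash: crypto.SHA256}
	entity, err := openpgp.NewEntity("Bob", "", "bob@example.com", cfg)
	if err != nil {
		panic(err)
	}
	var out bytes.Buffer
	// Sign wraps out in a noOpCloser and defers to writeAndSign, so the
	// result is a one-pass-signed literal message with no encryption.
	w, err := openpgp.Sign(&out, entity, nil, cfg)
	if err != nil {
		panic(err)
	}
	io.WriteString(w, "signed but not encrypted")
	w.Close() // flushes the trailing signature packet
	fmt.Println(out.Len() > 0) // true
}
```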
diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go
new file mode 100644
index 000000000..f562fa571
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go
@@ -0,0 +1,33 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package poly1305 implements Poly1305 one-time message authentication code as
+specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
+
+Poly1305 is a fast, one-time authentication function. It is infeasible for an
+attacker to generate an authenticator for a message without the key. However, a
+key must only be used for a single message. Authenticating two different
+messages with the same key allows an attacker to forge authenticators for other
+messages with the same key.
+
+Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
+used with a fixed key in order to generate one-time keys from a nonce.
+However, in this package AES isn't used and the one-time key is specified
+directly.
+*/
+package poly1305 // import "golang.org/x/crypto/poly1305"
+
+import "crypto/subtle"
+
+// TagSize is the size, in bytes, of a poly1305 authenticator.
+const TagSize = 16
+
+// Verify returns true if mac is a valid authenticator for m with the given
+// key.
+func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
+ var tmp [16]byte
+ Sum(&tmp, m, key)
+ return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
+}
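Basic usage of the vendored package, as a sketch:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	// The key is one-time: authenticating two messages with it lets an
	// attacker forge tags, as the package comment warns.
	var key [32]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}
	msg := []byte("attack at dawn")
	var tag [poly1305.TagSize]byte
	poly1305.Sum(&tag, msg, &key)
	fmt.Println(poly1305.Verify(&tag, msg, &key)) // true
}
```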
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go
new file mode 100644
index 000000000..4dd72fe79
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go
@@ -0,0 +1,22 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+package poly1305
+
+// This function is implemented in sum_amd64.s
+//go:noescape
+func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
+
+// Sum generates an authenticator for m using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[16]byte, m []byte, key *[32]byte) {
+ var mPtr *byte
+ if len(m) > 0 {
+ mPtr = &m[0]
+ }
+ poly1305(out, mPtr, uint64(len(m)), key)
+}
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s
new file mode 100644
index 000000000..2edae6382
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s
@@ -0,0 +1,125 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+#include "textflag.h"
+
+#define POLY1305_ADD(msg, h0, h1, h2) \
+ ADDQ 0(msg), h0; \
+ ADCQ 8(msg), h1; \
+ ADCQ $1, h2; \
+ LEAQ 16(msg), msg
+
+#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \
+ MOVQ r0, AX; \
+ MULQ h0; \
+ MOVQ AX, t0; \
+ MOVQ DX, t1; \
+ MOVQ r0, AX; \
+ MULQ h1; \
+ ADDQ AX, t1; \
+ ADCQ $0, DX; \
+ MOVQ r0, t2; \
+ IMULQ h2, t2; \
+ ADDQ DX, t2; \
+ \
+ MOVQ r1, AX; \
+ MULQ h0; \
+ ADDQ AX, t1; \
+ ADCQ $0, DX; \
+ MOVQ DX, h0; \
+ MOVQ r1, t3; \
+ IMULQ h2, t3; \
+ MOVQ r1, AX; \
+ MULQ h1; \
+ ADDQ AX, t2; \
+ ADCQ DX, t3; \
+ ADDQ h0, t2; \
+ ADCQ $0, t3; \
+ \
+ MOVQ t0, h0; \
+ MOVQ t1, h1; \
+ MOVQ t2, h2; \
+ ANDQ $3, h2; \
+ MOVQ t2, t0; \
+ ANDQ $0xFFFFFFFFFFFFFFFC, t0; \
+ ADDQ t0, h0; \
+ ADCQ t3, h1; \
+ ADCQ $0, h2; \
+ SHRQ $2, t3, t2; \
+ SHRQ $2, t3; \
+ ADDQ t2, h0; \
+ ADCQ t3, h1; \
+ ADCQ $0, h2
+
+DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
+DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
+GLOBL ·poly1305Mask<>(SB), RODATA, $16
+
+// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
+TEXT ·poly1305(SB), $0-32
+ MOVQ out+0(FP), DI
+ MOVQ m+8(FP), SI
+ MOVQ mlen+16(FP), R15
+ MOVQ key+24(FP), AX
+
+ MOVQ 0(AX), R11
+ MOVQ 8(AX), R12
+ ANDQ ·poly1305Mask<>(SB), R11 // r0
+ ANDQ ·poly1305Mask<>+8(SB), R12 // r1
+ XORQ R8, R8 // h0
+ XORQ R9, R9 // h1
+ XORQ R10, R10 // h2
+
+ CMPQ R15, $16
+ JB bytes_between_0_and_15
+
+loop:
+ POLY1305_ADD(SI, R8, R9, R10)
+
+multiply:
+ POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14)
+ SUBQ $16, R15
+ CMPQ R15, $16
+ JAE loop
+
+bytes_between_0_and_15:
+ TESTQ R15, R15
+ JZ done
+ MOVQ $1, BX
+ XORQ CX, CX
+ XORQ R13, R13
+ ADDQ R15, SI
+
+flush_buffer:
+ SHLQ $8, BX, CX
+ SHLQ $8, BX
+ MOVB -1(SI), R13
+ XORQ R13, BX
+ DECQ SI
+ DECQ R15
+ JNZ flush_buffer
+
+ ADDQ BX, R8
+ ADCQ CX, R9
+ ADCQ $0, R10
+ MOVQ $16, R15
+ JMP multiply
+
+done:
+ MOVQ R8, AX
+ MOVQ R9, BX
+ SUBQ $0xFFFFFFFFFFFFFFFB, AX
+ SBBQ $0xFFFFFFFFFFFFFFFF, BX
+ SBBQ $3, R10
+ CMOVQCS R8, AX
+ CMOVQCS R9, BX
+ MOVQ key+24(FP), R8
+ ADDQ 16(R8), AX
+ ADCQ 24(R8), BX
+
+ MOVQ AX, 0(DI)
+ MOVQ BX, 8(DI)
+ RET
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.go b/vendor/golang.org/x/crypto/poly1305/sum_arm.go
new file mode 100644
index 000000000..5dc321c2f
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/sum_arm.go
@@ -0,0 +1,22 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm,!gccgo,!appengine,!nacl
+
+package poly1305
+
+// This function is implemented in sum_arm.s
+//go:noescape
+func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
+
+// Sum generates an authenticator for m using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[16]byte, m []byte, key *[32]byte) {
+ var mPtr *byte
+ if len(m) > 0 {
+ mPtr = &m[0]
+ }
+ poly1305_auth_armv6(out, mPtr, uint32(len(m)), key)
+}
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.s b/vendor/golang.org/x/crypto/poly1305/sum_arm.s
new file mode 100644
index 000000000..f70b4ac48
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/sum_arm.s
@@ -0,0 +1,427 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm,!gccgo,!appengine,!nacl
+
+#include "textflag.h"
+
+// This code was translated into a form compatible with 5a from the public
+// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305.
+
+DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff
+DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03
+DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff
+DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff
+DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff
+GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20
+
+// Warning: the linker may use R11 to synthesize certain instructions. Please
+// take care and verify that no synthetic instructions use it.
+
+TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0
+ // Needs 16 bytes of stack and 64 bytes of space pointed to by R0. (It
+ // might look like it's only 60 bytes of space but the final four bytes
+ // will be written by another function.) We need to skip over four
+ // bytes of stack because that's saving the value of 'g'.
+ ADD $4, R13, R8
+ MOVM.IB [R4-R7], (R8)
+ MOVM.IA.W (R1), [R2-R5]
+ MOVW $·poly1305_init_constants_armv6<>(SB), R7
+ MOVW R2, R8
+ MOVW R2>>26, R9
+ MOVW R3>>20, g
+ MOVW R4>>14, R11
+ MOVW R5>>8, R12
+ ORR R3<<6, R9, R9
+ ORR R4<<12, g, g
+ ORR R5<<18, R11, R11
+ MOVM.IA (R7), [R2-R6]
+ AND R8, R2, R2
+ AND R9, R3, R3
+ AND g, R4, R4
+ AND R11, R5, R5
+ AND R12, R6, R6
+ MOVM.IA.W [R2-R6], (R0)
+ EOR R2, R2, R2
+ EOR R3, R3, R3
+ EOR R4, R4, R4
+ EOR R5, R5, R5
+ EOR R6, R6, R6
+ MOVM.IA.W [R2-R6], (R0)
+ MOVM.IA.W (R1), [R2-R5]
+ MOVM.IA [R2-R6], (R0)
+ ADD $20, R13, R0
+ MOVM.DA (R0), [R4-R7]
+ RET
+
+#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \
+ MOVBU (offset+0)(Rsrc), Rtmp; \
+ MOVBU Rtmp, (offset+0)(Rdst); \
+ MOVBU (offset+1)(Rsrc), Rtmp; \
+ MOVBU Rtmp, (offset+1)(Rdst); \
+ MOVBU (offset+2)(Rsrc), Rtmp; \
+ MOVBU Rtmp, (offset+2)(Rdst); \
+ MOVBU (offset+3)(Rsrc), Rtmp; \
+ MOVBU Rtmp, (offset+3)(Rdst)
+
+TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0
+ // Needs 24 bytes of stack for saved registers and then 88 bytes of
+ // scratch space after that. We assume that 24 bytes at (R13) have
+ // already been used: four bytes for the link register saved in the
+ // prelude of poly1305_auth_armv6, four bytes for saving the value of g
+ // in that function and 16 bytes of scratch space used around
+ // poly1305_finish_ext_armv6_skip1.
+ ADD $24, R13, R12
+ MOVM.IB [R4-R8, R14], (R12)
+ MOVW R0, 88(R13)
+ MOVW R1, 92(R13)
+ MOVW R2, 96(R13)
+ MOVW R1, R14
+ MOVW R2, R12
+ MOVW 56(R0), R8
+ WORD $0xe1180008 // TST R8, R8 not working see issue 5921
+ EOR R6, R6, R6
+ MOVW.EQ $(1<<24), R6
+ MOVW R6, 84(R13)
+ ADD $116, R13, g
+ MOVM.IA (R0), [R0-R9]
+ MOVM.IA [R0-R4], (g)
+ CMP $16, R12
+ BLO poly1305_blocks_armv6_done
+
+poly1305_blocks_armv6_mainloop:
+ WORD $0xe31e0003 // TST R14, #3 not working see issue 5921
+ BEQ poly1305_blocks_armv6_mainloop_aligned
+ ADD $100, R13, g
+ MOVW_UNALIGNED(R14, g, R0, 0)
+ MOVW_UNALIGNED(R14, g, R0, 4)
+ MOVW_UNALIGNED(R14, g, R0, 8)
+ MOVW_UNALIGNED(R14, g, R0, 12)
+ MOVM.IA (g), [R0-R3]
+ ADD $16, R14
+ B poly1305_blocks_armv6_mainloop_loaded
+
+poly1305_blocks_armv6_mainloop_aligned:
+ MOVM.IA.W (R14), [R0-R3]
+
+poly1305_blocks_armv6_mainloop_loaded:
+ MOVW R0>>26, g
+ MOVW R1>>20, R11
+ MOVW R2>>14, R12
+ MOVW R14, 92(R13)
+ MOVW R3>>8, R4
+ ORR R1<<6, g, g
+ ORR R2<<12, R11, R11
+ ORR R3<<18, R12, R12
+ BIC $0xfc000000, R0, R0
+ BIC $0xfc000000, g, g
+ MOVW 84(R13), R3
+ BIC $0xfc000000, R11, R11
+ BIC $0xfc000000, R12, R12
+ ADD R0, R5, R5
+ ADD g, R6, R6
+ ORR R3, R4, R4
+ ADD R11, R7, R7
+ ADD $116, R13, R14
+ ADD R12, R8, R8
+ ADD R4, R9, R9
+ MOVM.IA (R14), [R0-R4]
+ MULLU R4, R5, (R11, g)
+ MULLU R3, R5, (R14, R12)
+ MULALU R3, R6, (R11, g)
+ MULALU R2, R6, (R14, R12)
+ MULALU R2, R7, (R11, g)
+ MULALU R1, R7, (R14, R12)
+ ADD R4<<2, R4, R4
+ ADD R3<<2, R3, R3
+ MULALU R1, R8, (R11, g)
+ MULALU R0, R8, (R14, R12)
+ MULALU R0, R9, (R11, g)
+ MULALU R4, R9, (R14, R12)
+ MOVW g, 76(R13)
+ MOVW R11, 80(R13)
+ MOVW R12, 68(R13)
+ MOVW R14, 72(R13)
+ MULLU R2, R5, (R11, g)
+ MULLU R1, R5, (R14, R12)
+ MULALU R1, R6, (R11, g)
+ MULALU R0, R6, (R14, R12)
+ MULALU R0, R7, (R11, g)
+ MULALU R4, R7, (R14, R12)
+ ADD R2<<2, R2, R2
+ ADD R1<<2, R1, R1
+ MULALU R4, R8, (R11, g)
+ MULALU R3, R8, (R14, R12)
+ MULALU R3, R9, (R11, g)
+ MULALU R2, R9, (R14, R12)
+ MOVW g, 60(R13)
+ MOVW R11, 64(R13)
+ MOVW R12, 52(R13)
+ MOVW R14, 56(R13)
+ MULLU R0, R5, (R11, g)
+ MULALU R4, R6, (R11, g)
+ MULALU R3, R7, (R11, g)
+ MULALU R2, R8, (R11, g)
+ MULALU R1, R9, (R11, g)
+ ADD $52, R13, R0
+ MOVM.IA (R0), [R0-R7]
+ MOVW g>>26, R12
+ MOVW R4>>26, R14
+ ORR R11<<6, R12, R12
+ ORR R5<<6, R14, R14
+ BIC $0xfc000000, g, g
+ BIC $0xfc000000, R4, R4
+ ADD.S R12, R0, R0
+ ADC $0, R1, R1
+ ADD.S R14, R6, R6
+ ADC $0, R7, R7
+ MOVW R0>>26, R12
+ MOVW R6>>26, R14
+ ORR R1<<6, R12, R12
+ ORR R7<<6, R14, R14
+ BIC $0xfc000000, R0, R0
+ BIC $0xfc000000, R6, R6
+ ADD R14<<2, R14, R14
+ ADD.S R12, R2, R2
+ ADC $0, R3, R3
+ ADD R14, g, g
+ MOVW R2>>26, R12
+ MOVW g>>26, R14
+ ORR R3<<6, R12, R12
+ BIC $0xfc000000, g, R5
+ BIC $0xfc000000, R2, R7
+ ADD R12, R4, R4
+ ADD R14, R0, R0
+ MOVW R4>>26, R12
+ BIC $0xfc000000, R4, R8
+ ADD R12, R6, R9
+ MOVW 96(R13), R12
+ MOVW 92(R13), R14
+ MOVW R0, R6
+ CMP $32, R12
+ SUB $16, R12, R12
+ MOVW R12, 96(R13)
+ BHS poly1305_blocks_armv6_mainloop
+
+poly1305_blocks_armv6_done:
+ MOVW 88(R13), R12
+ MOVW R5, 20(R12)
+ MOVW R6, 24(R12)
+ MOVW R7, 28(R12)
+ MOVW R8, 32(R12)
+ MOVW R9, 36(R12)
+ ADD $48, R13, R0
+ MOVM.DA (R0), [R4-R8, R14]
+ RET
+
+#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \
+ MOVBU.P 1(Rsrc), Rtmp; \
+ MOVBU.P Rtmp, 1(Rdst); \
+ MOVBU.P 1(Rsrc), Rtmp; \
+ MOVBU.P Rtmp, 1(Rdst)
+
+#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \
+ MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \
+ MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp)
+
+// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
+TEXT ·poly1305_auth_armv6(SB), $196-16
+ // The value 196, just above, is the sum of 64 (the size of the context
+ // structure) and 132 (the amount of stack needed).
+ //
+ // At this point, the stack pointer (R13) has been moved down. It
+ // points to the saved link register and there's 196 bytes of free
+ // space above it.
+ //
+ // The stack for this function looks like:
+ //
+ // +---------------------
+ // |
+ // | 64 bytes of context structure
+ // |
+ // +---------------------
+ // |
+ // | 112 bytes for poly1305_blocks_armv6
+ // |
+ // +---------------------
+ // | 16 bytes of final block, constructed at
+ // | poly1305_finish_ext_armv6_skip8
+ // +---------------------
+ // | four bytes of saved 'g'
+ // +---------------------
+ // | lr, saved by prelude <- R13 points here
+ // +---------------------
+ MOVW g, 4(R13)
+
+ MOVW out+0(FP), R4
+ MOVW m+4(FP), R5
+ MOVW mlen+8(FP), R6
+ MOVW key+12(FP), R7
+
+ ADD $136, R13, R0 // 136 = 4 + 4 + 16 + 112
+ MOVW R7, R1
+
+ // poly1305_init_ext_armv6 will write to the stack from R13+4, but
+ // that's ok because none of the other values have been written yet.
+ BL poly1305_init_ext_armv6<>(SB)
+ BIC.S $15, R6, R2
+ BEQ poly1305_auth_armv6_noblocks
+ ADD $136, R13, R0
+ MOVW R5, R1
+ ADD R2, R5, R5
+ SUB R2, R6, R6
+ BL poly1305_blocks_armv6<>(SB)
+
+poly1305_auth_armv6_noblocks:
+ ADD $136, R13, R0
+ MOVW R5, R1
+ MOVW R6, R2
+ MOVW R4, R3
+
+ MOVW R0, R5
+ MOVW R1, R6
+ MOVW R2, R7
+ MOVW R3, R8
+ AND.S R2, R2, R2
+ BEQ poly1305_finish_ext_armv6_noremaining
+ EOR R0, R0
+ ADD $8, R13, R9 // 8 = offset to 16 byte scratch space
+ MOVW R0, (R9)
+ MOVW R0, 4(R9)
+ MOVW R0, 8(R9)
+ MOVW R0, 12(R9)
+ WORD $0xe3110003 // TST R1, #3 not working see issue 5921
+ BEQ poly1305_finish_ext_armv6_aligned
+ WORD $0xe3120008 // TST R2, #8 not working see issue 5921
+ BEQ poly1305_finish_ext_armv6_skip8
+ MOVWP_UNALIGNED(R1, R9, g)
+ MOVWP_UNALIGNED(R1, R9, g)
+
+poly1305_finish_ext_armv6_skip8:
+ WORD $0xe3120004 // TST $4, R2 not working see issue 5921
+ BEQ poly1305_finish_ext_armv6_skip4
+ MOVWP_UNALIGNED(R1, R9, g)
+
+poly1305_finish_ext_armv6_skip4:
+ WORD $0xe3120002 // TST $2, R2 not working see issue 5921
+ BEQ poly1305_finish_ext_armv6_skip2
+ MOVHUP_UNALIGNED(R1, R9, g)
+ B poly1305_finish_ext_armv6_skip2
+
+poly1305_finish_ext_armv6_aligned:
+ WORD $0xe3120008 // TST R2, #8 not working see issue 5921
+ BEQ poly1305_finish_ext_armv6_skip8_aligned
+ MOVM.IA.W (R1), [g-R11]
+ MOVM.IA.W [g-R11], (R9)
+
+poly1305_finish_ext_armv6_skip8_aligned:
+ WORD $0xe3120004 // TST $4, R2 not working see issue 5921
+ BEQ poly1305_finish_ext_armv6_skip4_aligned
+ MOVW.P 4(R1), g
+ MOVW.P g, 4(R9)
+
+poly1305_finish_ext_armv6_skip4_aligned:
+ WORD $0xe3120002 // TST $2, R2 not working see issue 5921
+ BEQ poly1305_finish_ext_armv6_skip2
+ MOVHU.P 2(R1), g
+ MOVH.P g, 2(R9)
+
+poly1305_finish_ext_armv6_skip2:
+ WORD $0xe3120001 // TST $1, R2 not working see issue 5921
+ BEQ poly1305_finish_ext_armv6_skip1
+ MOVBU.P 1(R1), g
+ MOVBU.P g, 1(R9)
+
+poly1305_finish_ext_armv6_skip1:
+ MOVW $1, R11
+ MOVBU R11, 0(R9)
+ MOVW R11, 56(R5)
+ MOVW R5, R0
+ ADD $8, R13, R1
+ MOVW $16, R2
+ BL poly1305_blocks_armv6<>(SB)
+
+poly1305_finish_ext_armv6_noremaining:
+ MOVW 20(R5), R0
+ MOVW 24(R5), R1
+ MOVW 28(R5), R2
+ MOVW 32(R5), R3
+ MOVW 36(R5), R4
+ MOVW R4>>26, R12
+ BIC $0xfc000000, R4, R4
+ ADD R12<<2, R12, R12
+ ADD R12, R0, R0
+ MOVW R0>>26, R12
+ BIC $0xfc000000, R0, R0
+ ADD R12, R1, R1
+ MOVW R1>>26, R12
+ BIC $0xfc000000, R1, R1
+ ADD R12, R2, R2
+ MOVW R2>>26, R12
+ BIC $0xfc000000, R2, R2
+ ADD R12, R3, R3
+ MOVW R3>>26, R12
+ BIC $0xfc000000, R3, R3
+ ADD R12, R4, R4
+ ADD $5, R0, R6
+ MOVW R6>>26, R12
+ BIC $0xfc000000, R6, R6
+ ADD R12, R1, R7
+ MOVW R7>>26, R12
+ BIC $0xfc000000, R7, R7
+ ADD R12, R2, g
+ MOVW g>>26, R12
+ BIC $0xfc000000, g, g
+ ADD R12, R3, R11
+ MOVW $-(1<<26), R12
+ ADD R11>>26, R12, R12
+ BIC $0xfc000000, R11, R11
+ ADD R12, R4, R9
+ MOVW R9>>31, R12
+ SUB $1, R12
+ AND R12, R6, R6
+ AND R12, R7, R7
+ AND R12, g, g
+ AND R12, R11, R11
+ AND R12, R9, R9
+ MVN R12, R12
+ AND R12, R0, R0
+ AND R12, R1, R1
+ AND R12, R2, R2
+ AND R12, R3, R3
+ AND R12, R4, R4
+ ORR R6, R0, R0
+ ORR R7, R1, R1
+ ORR g, R2, R2
+ ORR R11, R3, R3
+ ORR R9, R4, R4
+ ORR R1<<26, R0, R0
+ MOVW R1>>6, R1
+ ORR R2<<20, R1, R1
+ MOVW R2>>12, R2
+ ORR R3<<14, R2, R2
+ MOVW R3>>18, R3
+ ORR R4<<8, R3, R3
+ MOVW 40(R5), R6
+ MOVW 44(R5), R7
+ MOVW 48(R5), g
+ MOVW 52(R5), R11
+ ADD.S R6, R0, R0
+ ADC.S R7, R1, R1
+ ADC.S g, R2, R2
+ ADC.S R11, R3, R3
+ MOVM.IA [R0-R3], (R8)
+ MOVW R5, R12
+ EOR R0, R0, R0
+ EOR R1, R1, R1
+ EOR R2, R2, R2
+ EOR R3, R3, R3
+ EOR R4, R4, R4
+ EOR R5, R5, R5
+ EOR R6, R6, R6
+ EOR R7, R7, R7
+ MOVM.IA.W [R0-R7], (R12)
+ MOVM.IA [R0-R7], (R12)
+ MOVW 4(R13), g
+ RET
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go
new file mode 100644
index 000000000..751eec527
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go
@@ -0,0 +1,14 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build s390x,!go1.11 !arm,!amd64,!s390x gccgo appengine nacl
+
+package poly1305
+
+// Sum generates an authenticator for msg using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) {
+ sumGeneric(out, msg, key)
+}
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ref.go b/vendor/golang.org/x/crypto/poly1305/sum_ref.go
new file mode 100644
index 000000000..c4d59bd09
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/sum_ref.go
@@ -0,0 +1,139 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poly1305
+
+import "encoding/binary"
+
+// sumGeneric generates an authenticator for msg using a one-time key and
+// puts the 16-byte result into out. This is the generic implementation of
+// Sum and should be called if no assembly implementation is available.
+func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
+ var (
+ h0, h1, h2, h3, h4 uint32 // the hash accumulators
+ r0, r1, r2, r3, r4 uint64 // the r part of the key
+ )
+
+ r0 = uint64(binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff)
+ r1 = uint64((binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03)
+ r2 = uint64((binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff)
+ r3 = uint64((binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff)
+ r4 = uint64((binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff)
+
+ R1, R2, R3, R4 := r1*5, r2*5, r3*5, r4*5
+
+ for len(msg) >= TagSize {
+ // h += msg
+ h0 += binary.LittleEndian.Uint32(msg[0:]) & 0x3ffffff
+ h1 += (binary.LittleEndian.Uint32(msg[3:]) >> 2) & 0x3ffffff
+ h2 += (binary.LittleEndian.Uint32(msg[6:]) >> 4) & 0x3ffffff
+ h3 += (binary.LittleEndian.Uint32(msg[9:]) >> 6) & 0x3ffffff
+ h4 += (binary.LittleEndian.Uint32(msg[12:]) >> 8) | (1 << 24)
+
+ // h *= r
+ d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1)
+ d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2)
+ d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3)
+ d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4)
+ d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0)
+
+ // h %= p
+ h0 = uint32(d0) & 0x3ffffff
+ h1 = uint32(d1) & 0x3ffffff
+ h2 = uint32(d2) & 0x3ffffff
+ h3 = uint32(d3) & 0x3ffffff
+ h4 = uint32(d4) & 0x3ffffff
+
+ h0 += uint32(d4>>26) * 5
+ h1 += h0 >> 26
+ h0 = h0 & 0x3ffffff
+
+ msg = msg[TagSize:]
+ }
+
+ if len(msg) > 0 {
+ var block [TagSize]byte
+ off := copy(block[:], msg)
+ block[off] = 0x01
+
+ // h += msg
+ h0 += binary.LittleEndian.Uint32(block[0:]) & 0x3ffffff
+ h1 += (binary.LittleEndian.Uint32(block[3:]) >> 2) & 0x3ffffff
+ h2 += (binary.LittleEndian.Uint32(block[6:]) >> 4) & 0x3ffffff
+ h3 += (binary.LittleEndian.Uint32(block[9:]) >> 6) & 0x3ffffff
+ h4 += (binary.LittleEndian.Uint32(block[12:]) >> 8)
+
+ // h *= r
+ d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1)
+ d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2)
+ d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3)
+ d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4)
+ d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0)
+
+ // h %= p
+ h0 = uint32(d0) & 0x3ffffff
+ h1 = uint32(d1) & 0x3ffffff
+ h2 = uint32(d2) & 0x3ffffff
+ h3 = uint32(d3) & 0x3ffffff
+ h4 = uint32(d4) & 0x3ffffff
+
+ h0 += uint32(d4>>26) * 5
+ h1 += h0 >> 26
+ h0 = h0 & 0x3ffffff
+ }
+
+ // h %= p reduction
+ h2 += h1 >> 26
+ h1 &= 0x3ffffff
+ h3 += h2 >> 26
+ h2 &= 0x3ffffff
+ h4 += h3 >> 26
+ h3 &= 0x3ffffff
+ h0 += 5 * (h4 >> 26)
+ h4 &= 0x3ffffff
+ h1 += h0 >> 26
+ h0 &= 0x3ffffff
+
+ // h - p
+ t0 := h0 + 5
+ t1 := h1 + (t0 >> 26)
+ t2 := h2 + (t1 >> 26)
+ t3 := h3 + (t2 >> 26)
+ t4 := h4 + (t3 >> 26) - (1 << 26)
+ t0 &= 0x3ffffff
+ t1 &= 0x3ffffff
+ t2 &= 0x3ffffff
+ t3 &= 0x3ffffff
+
+ // select h if h < p else h - p
+ t_mask := (t4 >> 31) - 1
+ h_mask := ^t_mask
+ h0 = (h0 & h_mask) | (t0 & t_mask)
+ h1 = (h1 & h_mask) | (t1 & t_mask)
+ h2 = (h2 & h_mask) | (t2 & t_mask)
+ h3 = (h3 & h_mask) | (t3 & t_mask)
+ h4 = (h4 & h_mask) | (t4 & t_mask)
+
+ // h %= 2^128
+ h0 |= h1 << 26
+ h1 = ((h1 >> 6) | (h2 << 20))
+ h2 = ((h2 >> 12) | (h3 << 14))
+ h3 = ((h3 >> 18) | (h4 << 8))
+
+ // s: the s part of the key
+ // tag = (h + s) % (2^128)
+ t := uint64(h0) + uint64(binary.LittleEndian.Uint32(key[16:]))
+ h0 = uint32(t)
+ t = uint64(h1) + uint64(binary.LittleEndian.Uint32(key[20:])) + (t >> 32)
+ h1 = uint32(t)
+ t = uint64(h2) + uint64(binary.LittleEndian.Uint32(key[24:])) + (t >> 32)
+ h2 = uint32(t)
+ t = uint64(h3) + uint64(binary.LittleEndian.Uint32(key[28:])) + (t >> 32)
+ h3 = uint32(t)
+
+ binary.LittleEndian.PutUint32(out[0:], h0)
+ binary.LittleEndian.PutUint32(out[4:], h1)
+ binary.LittleEndian.PutUint32(out[8:], h2)
+ binary.LittleEndian.PutUint32(out[12:], h3)
+}
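The same radix-2^26 key clamp, pulled out into a standalone sketch: the masks in sumGeneric above clear exactly the bits Poly1305 requires to be zero, folded into the overlapping 32-bit loads.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Poly1305 clamps r: the top four bits of key bytes 3, 7, 11 and 15
	// and the bottom two bits of bytes 4, 8 and 12 must be zero; the
	// masks below bake that into the limb extraction.
	key := make([]byte, 16)
	for i := range key {
		key[i] = 0xff // worst case: every clamped bit set before masking
	}
	r0 := binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff
	r1 := (binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03
	r2 := (binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff
	r3 := (binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff
	r4 := (binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff
	fmt.Printf("%07x %07x %07x %07x %07x\n", r0, r1, r2, r3, r4)
}
```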
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go
new file mode 100644
index 000000000..7a266cece
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go
@@ -0,0 +1,49 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build s390x,go1.11,!gccgo,!appengine
+
+package poly1305
+
+// hasVectorFacility reports whether the machine supports
+// the vector facility (vx).
+func hasVectorFacility() bool
+
+// hasVMSLFacility reports whether the machine supports
+// Vector Multiply Sum Logical (VMSL).
+func hasVMSLFacility() bool
+
+var hasVX = hasVectorFacility()
+var hasVMSL = hasVMSLFacility()
+
+// poly1305vx is an assembly implementation of Poly1305 that uses vector
+// instructions. It must only be called if the vector facility (vx) is
+// available.
+//go:noescape
+func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
+
+// poly1305vmsl is an assembly implementation of Poly1305 that uses vector
+// instructions, including VMSL. It must only be called if the vector facility (vx) is
+// available and if VMSL is supported.
+//go:noescape
+func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
+
+// Sum generates an authenticator for m using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[16]byte, m []byte, key *[32]byte) {
+ if hasVX {
+ var mPtr *byte
+ if len(m) > 0 {
+ mPtr = &m[0]
+ }
+ if hasVMSL && len(m) > 256 {
+ poly1305vmsl(out, mPtr, uint64(len(m)), key)
+ } else {
+ poly1305vx(out, mPtr, uint64(len(m)), key)
+ }
+ } else {
+ sumGeneric(out, m, key)
+ }
+}
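The Sum above follows a probe-once, dispatch-per-call pattern. A stripped-down sketch of the shape (detect, fast and generic are hypothetical stand-ins for the STFLE-based probes and assembly routines in this file):

```go
package main

import "fmt"

// hasFast is evaluated once at package init, like hasVX/hasVMSL above.
var hasFast = detect()

func detect() bool { return false } // a real probe would query CPU facilities

func fast(msg []byte) string    { return fmt.Sprintf("vector path: %d bytes", len(msg)) }
func generic(msg []byte) string { return fmt.Sprintf("generic path: %d bytes", len(msg)) }

func sum(msg []byte) string {
	// Branch per call, keeping the generic fallback always reachable.
	if hasFast {
		return fast(msg)
	}
	return generic(msg)
}

func main() { fmt.Println(sum([]byte("hi"))) }
```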
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s
new file mode 100644
index 000000000..356c07a6c
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s
@@ -0,0 +1,400 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build s390x,go1.11,!gccgo,!appengine
+
+#include "textflag.h"
+
+// Implementation of Poly1305 using the vector facility (vx).
+
+// constants
+#define MOD26 V0
+#define EX0 V1
+#define EX1 V2
+#define EX2 V3
+
+// temporaries
+#define T_0 V4
+#define T_1 V5
+#define T_2 V6
+#define T_3 V7
+#define T_4 V8
+
+// key (r)
+#define R_0 V9
+#define R_1 V10
+#define R_2 V11
+#define R_3 V12
+#define R_4 V13
+#define R5_1 V14
+#define R5_2 V15
+#define R5_3 V16
+#define R5_4 V17
+#define RSAVE_0 R5
+#define RSAVE_1 R6
+#define RSAVE_2 R7
+#define RSAVE_3 R8
+#define RSAVE_4 R9
+#define R5SAVE_1 V28
+#define R5SAVE_2 V29
+#define R5SAVE_3 V30
+#define R5SAVE_4 V31
+
+// message block
+#define F_0 V18
+#define F_1 V19
+#define F_2 V20
+#define F_3 V21
+#define F_4 V22
+
+// accumulator
+#define H_0 V23
+#define H_1 V24
+#define H_2 V25
+#define H_3 V26
+#define H_4 V27
+
+GLOBL ·keyMask<>(SB), RODATA, $16
+DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f
+DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f
+
+GLOBL ·bswapMask<>(SB), RODATA, $16
+DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908
+DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100
+
+GLOBL ·constants<>(SB), RODATA, $64
+// MOD26
+DATA ·constants<>+0(SB)/8, $0x3ffffff
+DATA ·constants<>+8(SB)/8, $0x3ffffff
+// EX0
+DATA ·constants<>+16(SB)/8, $0x0006050403020100
+DATA ·constants<>+24(SB)/8, $0x1016151413121110
+// EX1
+DATA ·constants<>+32(SB)/8, $0x060c0b0a09080706
+DATA ·constants<>+40(SB)/8, $0x161c1b1a19181716
+// EX2
+DATA ·constants<>+48(SB)/8, $0x0d0d0d0d0d0f0e0d
+DATA ·constants<>+56(SB)/8, $0x1d1d1d1d1d1f1e1d
+
+// h = (f*g) % (2**130-5) [partial reduction]
+#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \
+ VMLOF f0, g0, h0 \
+ VMLOF f0, g1, h1 \
+ VMLOF f0, g2, h2 \
+ VMLOF f0, g3, h3 \
+ VMLOF f0, g4, h4 \
+ VMLOF f1, g54, T_0 \
+ VMLOF f1, g0, T_1 \
+ VMLOF f1, g1, T_2 \
+ VMLOF f1, g2, T_3 \
+ VMLOF f1, g3, T_4 \
+ VMALOF f2, g53, h0, h0 \
+ VMALOF f2, g54, h1, h1 \
+ VMALOF f2, g0, h2, h2 \
+ VMALOF f2, g1, h3, h3 \
+ VMALOF f2, g2, h4, h4 \
+ VMALOF f3, g52, T_0, T_0 \
+ VMALOF f3, g53, T_1, T_1 \
+ VMALOF f3, g54, T_2, T_2 \
+ VMALOF f3, g0, T_3, T_3 \
+ VMALOF f3, g1, T_4, T_4 \
+ VMALOF f4, g51, h0, h0 \
+ VMALOF f4, g52, h1, h1 \
+ VMALOF f4, g53, h2, h2 \
+ VMALOF f4, g54, h3, h3 \
+ VMALOF f4, g0, h4, h4 \
+ VAG T_0, h0, h0 \
+ VAG T_1, h1, h1 \
+ VAG T_2, h2, h2 \
+ VAG T_3, h3, h3 \
+ VAG T_4, h4, h4
+
+// carry h0->h1 h3->h4, h1->h2 h4->h0, h0->h1 h2->h3, h3->h4
+#define REDUCE(h0, h1, h2, h3, h4) \
+ VESRLG $26, h0, T_0 \
+ VESRLG $26, h3, T_1 \
+ VN MOD26, h0, h0 \
+ VN MOD26, h3, h3 \
+ VAG T_0, h1, h1 \
+ VAG T_1, h4, h4 \
+ VESRLG $26, h1, T_2 \
+ VESRLG $26, h4, T_3 \
+ VN MOD26, h1, h1 \
+ VN MOD26, h4, h4 \
+ VESLG $2, T_3, T_4 \
+ VAG T_3, T_4, T_4 \
+ VAG T_2, h2, h2 \
+ VAG T_4, h0, h0 \
+ VESRLG $26, h2, T_0 \
+ VESRLG $26, h0, T_1 \
+ VN MOD26, h2, h2 \
+ VN MOD26, h0, h0 \
+ VAG T_0, h3, h3 \
+ VAG T_1, h1, h1 \
+ VESRLG $26, h3, T_2 \
+ VN MOD26, h3, h3 \
+ VAG T_2, h4, h4
+
+// expand in0 into d[0] and in1 into d[1]
+#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \
+ VGBM $0x0707, d1 \ // d1=tmp
+ VPERM in0, in1, EX2, d4 \
+ VPERM in0, in1, EX0, d0 \
+ VPERM in0, in1, EX1, d2 \
+ VN d1, d4, d4 \
+ VESRLG $26, d0, d1 \
+ VESRLG $30, d2, d3 \
+ VESRLG $4, d2, d2 \
+ VN MOD26, d0, d0 \
+ VN MOD26, d1, d1 \
+ VN MOD26, d2, d2 \
+ VN MOD26, d3, d3
+
+// pack h4:h0 into h1:h0 (no carry)
+#define PACK(h0, h1, h2, h3, h4) \
+ VESLG $26, h1, h1 \
+ VESLG $26, h3, h3 \
+ VO h0, h1, h0 \
+ VO h2, h3, h2 \
+ VESLG $4, h2, h2 \
+ VLEIB $7, $48, h1 \
+ VSLB h1, h2, h2 \
+ VO h0, h2, h0 \
+ VLEIB $7, $104, h1 \
+ VSLB h1, h4, h3 \
+ VO h3, h0, h0 \
+ VLEIB $7, $24, h1 \
+ VSRLB h1, h4, h1
+
+// if h > 2**130-5 then h -= 2**130-5
+#define MOD(h0, h1, t0, t1, t2) \
+ VZERO t0 \
+ VLEIG $1, $5, t0 \
+ VACCQ h0, t0, t1 \
+ VAQ h0, t0, t0 \
+ VONE t2 \
+ VLEIG $1, $-4, t2 \
+ VAQ t2, t1, t1 \
+ VACCQ h1, t1, t1 \
+ VONE t2 \
+ VAQ t2, t1, t1 \
+ VN h0, t1, t2 \
+ VNC t0, t1, t1 \
+ VO t1, t2, h0
+
+// func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
+TEXT ·poly1305vx(SB), $0-32
+ // This code processes up to 2 blocks (32 bytes) per iteration
+ // using the algorithm described in:
+ // NEON crypto, Daniel J. Bernstein & Peter Schwabe
+ // https://cryptojedi.org/papers/neoncrypto-20120320.pdf
+ LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key
+
+ // load MOD26, EX0, EX1 and EX2
+ MOVD $·constants<>(SB), R5
+ VLM (R5), MOD26, EX2
+
+ // setup r
+ VL (R4), T_0
+ MOVD $·keyMask<>(SB), R6
+ VL (R6), T_1
+ VN T_0, T_1, T_0
+ EXPAND(T_0, T_0, R_0, R_1, R_2, R_3, R_4)
+
+ // setup r*5
+ VLEIG $0, $5, T_0
+ VLEIG $1, $5, T_0
+
+ // store r (for final block)
+ VMLOF T_0, R_1, R5SAVE_1
+ VMLOF T_0, R_2, R5SAVE_2
+ VMLOF T_0, R_3, R5SAVE_3
+ VMLOF T_0, R_4, R5SAVE_4
+ VLGVG $0, R_0, RSAVE_0
+ VLGVG $0, R_1, RSAVE_1
+ VLGVG $0, R_2, RSAVE_2
+ VLGVG $0, R_3, RSAVE_3
+ VLGVG $0, R_4, RSAVE_4
+
+ // skip r**2 calculation
+ CMPBLE R3, $16, skip
+
+ // calculate r**2
+ MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5SAVE_1, R5SAVE_2, R5SAVE_3, R5SAVE_4, H_0, H_1, H_2, H_3, H_4)
+ REDUCE(H_0, H_1, H_2, H_3, H_4)
+ VLEIG $0, $5, T_0
+ VLEIG $1, $5, T_0
+ VMLOF T_0, H_1, R5_1
+ VMLOF T_0, H_2, R5_2
+ VMLOF T_0, H_3, R5_3
+ VMLOF T_0, H_4, R5_4
+ VLR H_0, R_0
+ VLR H_1, R_1
+ VLR H_2, R_2
+ VLR H_3, R_3
+ VLR H_4, R_4
+
+ // initialize h
+ VZERO H_0
+ VZERO H_1
+ VZERO H_2
+ VZERO H_3
+ VZERO H_4
+
+loop:
+ CMPBLE R3, $32, b2
+ VLM (R2), T_0, T_1
+ SUB $32, R3
+ MOVD $32(R2), R2
+ EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4)
+ VLEIB $4, $1, F_4
+ VLEIB $12, $1, F_4
+
+multiply:
+ VAG H_0, F_0, F_0
+ VAG H_1, F_1, F_1
+ VAG H_2, F_2, F_2
+ VAG H_3, F_3, F_3
+ VAG H_4, F_4, F_4
+ MULTIPLY(F_0, F_1, F_2, F_3, F_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)
+ REDUCE(H_0, H_1, H_2, H_3, H_4)
+ CMPBNE R3, $0, loop
+
+finish:
+ // sum vectors
+ VZERO T_0
+ VSUMQG H_0, T_0, H_0
+ VSUMQG H_1, T_0, H_1
+ VSUMQG H_2, T_0, H_2
+ VSUMQG H_3, T_0, H_3
+ VSUMQG H_4, T_0, H_4
+
+ // h may be >= 2*(2**130-5) so we need to reduce it again
+ REDUCE(H_0, H_1, H_2, H_3, H_4)
+
+ // carry h1->h4
+ VESRLG $26, H_1, T_1
+ VN MOD26, H_1, H_1
+ VAQ T_1, H_2, H_2
+ VESRLG $26, H_2, T_2
+ VN MOD26, H_2, H_2
+ VAQ T_2, H_3, H_3
+ VESRLG $26, H_3, T_3
+ VN MOD26, H_3, H_3
+ VAQ T_3, H_4, H_4
+
+ // h is now < 2*(2**130-5)
+ // pack h into h1 (hi) and h0 (lo)
+ PACK(H_0, H_1, H_2, H_3, H_4)
+
+ // if h > 2**130-5 then h -= 2**130-5
+ MOD(H_0, H_1, T_0, T_1, T_2)
+
+ // h += s
+ MOVD $·bswapMask<>(SB), R5
+ VL (R5), T_1
+ VL 16(R4), T_0
+ VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big)
+ VAQ T_0, H_0, H_0
+ VPERM H_0, H_0, T_1, H_0 // reverse bytes (to little)
+ VST H_0, (R1)
+
+ RET
+
+b2:
+ CMPBLE R3, $16, b1
+
+ // 2 blocks remaining
+ SUB $17, R3
+ VL (R2), T_0
+ VLL R3, 16(R2), T_1
+ ADD $1, R3
+ MOVBZ $1, R0
+ CMPBEQ R3, $16, 2(PC)
+ VLVGB R3, R0, T_1
+ EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4)
+ CMPBNE R3, $16, 2(PC)
+ VLEIB $12, $1, F_4
+ VLEIB $4, $1, F_4
+
+ // setup [r²,r]
+ VLVGG $1, RSAVE_0, R_0
+ VLVGG $1, RSAVE_1, R_1
+ VLVGG $1, RSAVE_2, R_2
+ VLVGG $1, RSAVE_3, R_3
+ VLVGG $1, RSAVE_4, R_4
+ VPDI $0, R5_1, R5SAVE_1, R5_1
+ VPDI $0, R5_2, R5SAVE_2, R5_2
+ VPDI $0, R5_3, R5SAVE_3, R5_3
+ VPDI $0, R5_4, R5SAVE_4, R5_4
+
+ MOVD $0, R3
+ BR multiply
+
+skip:
+ VZERO H_0
+ VZERO H_1
+ VZERO H_2
+ VZERO H_3
+ VZERO H_4
+
+ CMPBEQ R3, $0, finish
+
+b1:
+ // 1 block remaining
+ SUB $1, R3
+ VLL R3, (R2), T_0
+ ADD $1, R3
+ MOVBZ $1, R0
+ CMPBEQ R3, $16, 2(PC)
+ VLVGB R3, R0, T_0
+ VZERO T_1
+ EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4)
+ CMPBNE R3, $16, 2(PC)
+ VLEIB $4, $1, F_4
+ VLEIG $1, $1, R_0
+ VZERO R_1
+ VZERO R_2
+ VZERO R_3
+ VZERO R_4
+ VZERO R5_1
+ VZERO R5_2
+ VZERO R5_3
+ VZERO R5_4
+
+ // setup [r, 1]
+ VLVGG $0, RSAVE_0, R_0
+ VLVGG $0, RSAVE_1, R_1
+ VLVGG $0, RSAVE_2, R_2
+ VLVGG $0, RSAVE_3, R_3
+ VLVGG $0, RSAVE_4, R_4
+ VPDI $0, R5SAVE_1, R5_1, R5_1
+ VPDI $0, R5SAVE_2, R5_2, R5_2
+ VPDI $0, R5SAVE_3, R5_3, R5_3
+ VPDI $0, R5SAVE_4, R5_4, R5_4
+
+ MOVD $0, R3
+ BR multiply
+
+TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1
+ MOVD $x-24(SP), R1
+ XC $24, 0(R1), 0(R1) // clear the storage
+ MOVD $2, R0 // R0 is the number of double words stored -1
+ WORD $0xB2B01000 // STFLE 0(R1)
+ XOR R0, R0 // reset the value of R0
+ MOVBZ z-8(SP), R1
+ AND $0x40, R1
+ BEQ novector
+
+vectorinstalled:
+ // check if the vector instruction has been enabled
+ VLEIB $0, $0xF, V16
+ VLGVB $0, V16, R1
+ CMPBNE R1, $0xF, novector
+ MOVB $1, ret+0(FP) // have vx
+ RET
+
+novector:
+ MOVB $0, ret+0(FP) // no vx
+ RET
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s
new file mode 100644
index 000000000..e548020b1
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s
@@ -0,0 +1,931 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build s390x,go1.11,!gccgo,!appengine
+
+#include "textflag.h"
+
+// Implementation of Poly1305 using the vector facility (vx) and the VMSL instruction.
+
+// constants
+#define EX0 V1
+#define EX1 V2
+#define EX2 V3
+
+// temporaries
+#define T_0 V4
+#define T_1 V5
+#define T_2 V6
+#define T_3 V7
+#define T_4 V8
+#define T_5 V9
+#define T_6 V10
+#define T_7 V11
+#define T_8 V12
+#define T_9 V13
+#define T_10 V14
+
+// r**2 & r**4
+#define R_0 V15
+#define R_1 V16
+#define R_2 V17
+#define R5_1 V18
+#define R5_2 V19
+// key (r)
+#define RSAVE_0 R7
+#define RSAVE_1 R8
+#define RSAVE_2 R9
+#define R5SAVE_1 R10
+#define R5SAVE_2 R11
+
+// message block
+#define M0 V20
+#define M1 V21
+#define M2 V22
+#define M3 V23
+#define M4 V24
+#define M5 V25
+
+// accumulator
+#define H0_0 V26
+#define H1_0 V27
+#define H2_0 V28
+#define H0_1 V29
+#define H1_1 V30
+#define H2_1 V31
+
+GLOBL ·keyMask<>(SB), RODATA, $16
+DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f
+DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f
+
+GLOBL ·bswapMask<>(SB), RODATA, $16
+DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908
+DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100
+
+GLOBL ·constants<>(SB), RODATA, $48
+// EX0
+DATA ·constants<>+0(SB)/8, $0x18191a1b1c1d1e1f
+DATA ·constants<>+8(SB)/8, $0x0000050403020100
+// EX1
+DATA ·constants<>+16(SB)/8, $0x18191a1b1c1d1e1f
+DATA ·constants<>+24(SB)/8, $0x00000a0908070605
+// EX2
+DATA ·constants<>+32(SB)/8, $0x18191a1b1c1d1e1f
+DATA ·constants<>+40(SB)/8, $0x0000000f0e0d0c0b
+
+GLOBL ·c<>(SB), RODATA, $48
+// EX0
+DATA ·c<>+0(SB)/8, $0x0000050403020100
+DATA ·c<>+8(SB)/8, $0x0000151413121110
+// EX1
+DATA ·c<>+16(SB)/8, $0x00000a0908070605
+DATA ·c<>+24(SB)/8, $0x00001a1918171615
+// EX2
+DATA ·c<>+32(SB)/8, $0x0000000f0e0d0c0b
+DATA ·c<>+40(SB)/8, $0x0000001f1e1d1c1b
+
+GLOBL ·reduce<>(SB), RODATA, $32
+// 44 bit
+DATA ·reduce<>+0(SB)/8, $0x0
+DATA ·reduce<>+8(SB)/8, $0xfffffffffff
+// 42 bit
+DATA ·reduce<>+16(SB)/8, $0x0
+DATA ·reduce<>+24(SB)/8, $0x3ffffffffff
+
+// h = (f*g) % (2**130-5) [partial reduction]
+// uses T_0...T_9 temporary registers
+// input: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2
+// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
+// output: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2
+#define MULTIPLY(m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9) \
+ \ // Eliminate the dependency for the last 2 VMSLs
+ VMSLG m02_0, r_2, m4_2, m4_2 \
+ VMSLG m13_0, r_2, m5_2, m5_2 \ // 8 VMSLs pipelined
+ VMSLG m02_0, r_0, m4_0, m4_0 \
+ VMSLG m02_1, r5_2, V0, T_0 \
+ VMSLG m02_0, r_1, m4_1, m4_1 \
+ VMSLG m02_1, r_0, V0, T_1 \
+ VMSLG m02_1, r_1, V0, T_2 \
+ VMSLG m02_2, r5_1, V0, T_3 \
+ VMSLG m02_2, r5_2, V0, T_4 \
+ VMSLG m13_0, r_0, m5_0, m5_0 \
+ VMSLG m13_1, r5_2, V0, T_5 \
+ VMSLG m13_0, r_1, m5_1, m5_1 \
+ VMSLG m13_1, r_0, V0, T_6 \
+ VMSLG m13_1, r_1, V0, T_7 \
+ VMSLG m13_2, r5_1, V0, T_8 \
+ VMSLG m13_2, r5_2, V0, T_9 \
+ VMSLG m02_2, r_0, m4_2, m4_2 \
+ VMSLG m13_2, r_0, m5_2, m5_2 \
+ VAQ m4_0, T_0, m02_0 \
+ VAQ m4_1, T_1, m02_1 \
+ VAQ m5_0, T_5, m13_0 \
+ VAQ m5_1, T_6, m13_1 \
+ VAQ m02_0, T_3, m02_0 \
+ VAQ m02_1, T_4, m02_1 \
+ VAQ m13_0, T_8, m13_0 \
+ VAQ m13_1, T_9, m13_1 \
+ VAQ m4_2, T_2, m02_2 \
+ VAQ m5_2, T_7, m13_2 \
+
+// SQUARE uses three limbs of r and r_2*5 to output square of r
+// uses T_1, T_5 and T_7 temporary registers
+// input: r_0, r_1, r_2, r5_2
+// temp: TEMP0, TEMP1, TEMP2
+// output: p0, p1, p2
+#define SQUARE(r_0, r_1, r_2, r5_2, p0, p1, p2, TEMP0, TEMP1, TEMP2) \
+ VMSLG r_0, r_0, p0, p0 \
+ VMSLG r_1, r5_2, V0, TEMP0 \
+ VMSLG r_2, r5_2, p1, p1 \
+ VMSLG r_0, r_1, V0, TEMP1 \
+ VMSLG r_1, r_1, p2, p2 \
+ VMSLG r_0, r_2, V0, TEMP2 \
+ VAQ TEMP0, p0, p0 \
+ VAQ TEMP1, p1, p1 \
+ VAQ TEMP2, p2, p2 \
+ VAQ TEMP0, p0, p0 \
+ VAQ TEMP1, p1, p1 \
+ VAQ TEMP2, p2, p2 \
+
+// carry h0->h1->h2->h0 || h3->h4->h5->h3
+// uses T_2, T_4, T_5, T_7, T_8, T_9
+// t6, t7, t8, t9, t10, t11
+// input: h0, h1, h2, h3, h4, h5
+// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11
+// output: h0, h1, h2, h3, h4, h5
+#define REDUCE(h0, h1, h2, h3, h4, h5, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) \
+ VLM (R12), t6, t7 \ // 44 and 42 bit clear mask
+ VLEIB $7, $0x28, t10 \ // 5 byte shift mask
+ VREPIB $4, t8 \ // 4 bit shift mask
+ VREPIB $2, t11 \ // 2 bit shift mask
+ VSRLB t10, h0, t0 \ // h0 byte shift
+ VSRLB t10, h1, t1 \ // h1 byte shift
+ VSRLB t10, h2, t2 \ // h2 byte shift
+ VSRLB t10, h3, t3 \ // h3 byte shift
+ VSRLB t10, h4, t4 \ // h4 byte shift
+ VSRLB t10, h5, t5 \ // h5 byte shift
+ VSRL t8, t0, t0 \ // h0 bit shift
+ VSRL t8, t1, t1 \ // h1 bit shift
+ VSRL t11, t2, t2 \ // h2 bit shift
+ VSRL t8, t3, t3 \ // h3 bit shift
+ VSRL t8, t4, t4 \ // h4 bit shift
+ VESLG $2, t2, t9 \ // h2 carry x5
+ VSRL t11, t5, t5 \ // h5 bit shift
+ VN t6, h0, h0 \ // h0 clear carry
+ VAQ t2, t9, t2 \ // h2 carry x5
+ VESLG $2, t5, t9 \ // h5 carry x5
+ VN t6, h1, h1 \ // h1 clear carry
+ VN t7, h2, h2 \ // h2 clear carry
+ VAQ t5, t9, t5 \ // h5 carry x5
+ VN t6, h3, h3 \ // h3 clear carry
+ VN t6, h4, h4 \ // h4 clear carry
+ VN t7, h5, h5 \ // h5 clear carry
+ VAQ t0, h1, h1 \ // h0->h1
+ VAQ t3, h4, h4 \ // h3->h4
+ VAQ t1, h2, h2 \ // h1->h2
+ VAQ t4, h5, h5 \ // h4->h5
+ VAQ t2, h0, h0 \ // h2->h0
+ VAQ t5, h3, h3 \ // h5->h3
+ VREPG $1, t6, t6 \ // 44 and 42 bit masks across both halves
+ VREPG $1, t7, t7 \
+ VSLDB $8, h0, h0, h0 \ // set up [h0/1/2, h3/4/5]
+ VSLDB $8, h1, h1, h1 \
+ VSLDB $8, h2, h2, h2 \
+ VO h0, h3, h3 \
+ VO h1, h4, h4 \
+ VO h2, h5, h5 \
+ VESRLG $44, h3, t0 \ // 44 bit shift right
+ VESRLG $44, h4, t1 \
+ VESRLG $42, h5, t2 \
+ VN t6, h3, h3 \ // clear carry bits
+ VN t6, h4, h4 \
+ VN t7, h5, h5 \
+ VESLG $2, t2, t9 \ // multiply carry by 5
+ VAQ t9, t2, t2 \
+ VAQ t0, h4, h4 \
+ VAQ t1, h5, h5 \
+ VAQ t2, h3, h3 \
+
+// carry h0->h1->h2->h0
+// input: h0, h1, h2
+// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8
+// output: h0, h1, h2
+#define REDUCE2(h0, h1, h2, t0, t1, t2, t3, t4, t5, t6, t7, t8) \
+ VLEIB $7, $0x28, t3 \ // 5 byte shift mask
+ VREPIB $4, t4 \ // 4 bit shift mask
+ VREPIB $2, t7 \ // 2 bit shift mask
+ VGBM $0x003F, t5 \ // mask to clear carry bits
+ VSRLB t3, h0, t0 \
+ VSRLB t3, h1, t1 \
+ VSRLB t3, h2, t2 \
+ VESRLG $4, t5, t5 \ // 44 bit clear mask
+ VSRL t4, t0, t0 \
+ VSRL t4, t1, t1 \
+ VSRL t7, t2, t2 \
+ VESRLG $2, t5, t6 \ // 42 bit clear mask
+ VESLG $2, t2, t8 \
+ VAQ t8, t2, t2 \
+ VN t5, h0, h0 \
+ VN t5, h1, h1 \
+ VN t6, h2, h2 \
+ VAQ t0, h1, h1 \
+ VAQ t1, h2, h2 \
+ VAQ t2, h0, h0 \
+ VSRLB t3, h0, t0 \
+ VSRLB t3, h1, t1 \
+ VSRLB t3, h2, t2 \
+ VSRL t4, t0, t0 \
+ VSRL t4, t1, t1 \
+ VSRL t7, t2, t2 \
+ VN t5, h0, h0 \
+ VN t5, h1, h1 \
+ VESLG $2, t2, t8 \
+ VN t6, h2, h2 \
+ VAQ t0, h1, h1 \
+ VAQ t8, t2, t2 \
+ VAQ t1, h2, h2 \
+ VAQ t2, h0, h0 \
+
+// expands two message blocks into the lower halves of the d registers
+// moves the contents of the d registers into the upper halves
+// input: in1, in2, d0, d1, d2, d3, d4, d5
+// temp: TEMP0, TEMP1, TEMP2, TEMP3
+// output: d0, d1, d2, d3, d4, d5
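+// Each 16-byte block is split into 44-, 44- and 42-bit limbs using the
+// EX0/EX1/EX2 permutation tables and the AND masks built below; the
+// 2**128 pad bit is set separately with VLEIB at the call sites.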
+#define EXPACC(in1, in2, d0, d1, d2, d3, d4, d5, TEMP0, TEMP1, TEMP2, TEMP3) \
+ VGBM $0xff3f, TEMP0 \
+ VGBM $0xff1f, TEMP1 \
+ VESLG $4, d1, TEMP2 \
+ VESLG $4, d4, TEMP3 \
+ VESRLG $4, TEMP0, TEMP0 \
+ VPERM in1, d0, EX0, d0 \
+ VPERM in2, d3, EX0, d3 \
+ VPERM in1, d2, EX2, d2 \
+ VPERM in2, d5, EX2, d5 \
+ VPERM in1, TEMP2, EX1, d1 \
+ VPERM in2, TEMP3, EX1, d4 \
+ VN TEMP0, d0, d0 \
+ VN TEMP0, d3, d3 \
+ VESRLG $4, d1, d1 \
+ VESRLG $4, d4, d4 \
+ VN TEMP1, d2, d2 \
+ VN TEMP1, d5, d5 \
+ VN TEMP0, d1, d1 \
+ VN TEMP0, d4, d4 \
+
+// expands one message block into the lower halves of the d registers
+// moves the contents of the d registers into the upper halves
+// input: in, d0, d1, d2
+// temp: TEMP0, TEMP1, TEMP2
+// output: d0, d1, d2
+#define EXPACC2(in, d0, d1, d2, TEMP0, TEMP1, TEMP2) \
+ VGBM $0xff3f, TEMP0 \
+ VESLG $4, d1, TEMP2 \
+ VGBM $0xff1f, TEMP1 \
+ VPERM in, d0, EX0, d0 \
+ VESRLG $4, TEMP0, TEMP0 \
+ VPERM in, d2, EX2, d2 \
+ VPERM in, TEMP2, EX1, d1 \
+ VN TEMP0, d0, d0 \
+ VN TEMP1, d2, d2 \
+ VESRLG $4, d1, d1 \
+ VN TEMP0, d1, d1 \
+
+// pack h2:h0 into h1:h0 (no carry)
+// input: h0, h1, h2
+// output: h0, h1, h2
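+// In effect (sketch): h = h0 + h1*2**44 + h2*2**88 is recombined so
+// that h0 holds the low 128 bits of h and h1 holds its top two bits.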
+#define PACK(h0, h1, h2) \
+ VMRLG h1, h2, h2 \ // copy h1 to upper half h2
+ VESLG $44, h1, h1 \ // shift limb 1 44 bits, leaving 20
+ VO h0, h1, h0 \ // combine h0 with 20 bits from limb 1
+ VESRLG $20, h2, h1 \ // put top 24 bits of limb 1 into h1
+ VLEIG $1, $0, h1 \ // clear h2 stuff from lower half of h1
+ VO h0, h1, h0 \ // h0 now has 88 bits (limb 0 and 1)
+ VLEIG $0, $0, h2 \ // clear upper half of h2
+ VESRLG $40, h2, h1 \ // h1 now has upper two bits of result
+ VLEIB $7, $88, h1 \ // for byte shift (11 bytes)
+ VSLB h1, h2, h2 \ // shift h2 11 bytes to the left
+	VO h0, h2, h0 \ // combine h0 with the low bits of limb 2
+ VLEIG $0, $0, h1 \ // clear upper half of h1
+
+// if h >= 2**130-5 then h -= 2**130-5
+// input: h0, h1
+// temp: t0, t1, t2
+// output: h0
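+// Branchless select (sketch): let t = h + 5. If the add carries out of
+// bit 130 then h >= 2**130-5 and the low 128 bits of t are the result
+// (the +5 cancels the -5); otherwise h is already reduced. The
+// VACCQ/VAQ chain turns that carry into an all-ones/all-zeros mask,
+// and VN/VNC/VO select between h0 and t accordingly.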
+#define MOD(h0, h1, t0, t1, t2) \
+ VZERO t0 \
+ VLEIG $1, $5, t0 \
+ VACCQ h0, t0, t1 \
+ VAQ h0, t0, t0 \
+ VONE t2 \
+ VLEIG $1, $-4, t2 \
+ VAQ t2, t1, t1 \
+ VACCQ h1, t1, t1 \
+ VONE t2 \
+ VAQ t2, t1, t1 \
+ VN h0, t1, t2 \
+ VNC t0, t1, t1 \
+ VO t1, t2, h0 \
+
+// func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
+TEXT ·poly1305vmsl(SB), $0-32
+	// This code processes 6 blocks up front, then up to 4 blocks (64 bytes) per iteration
+ // using the algorithm described in:
+ // NEON crypto, Daniel J. Bernstein & Peter Schwabe
+ // https://cryptojedi.org/papers/neoncrypto-20120320.pdf
+	// and as modified for VMSL as described in:
+	// Accelerating Poly1305 Cryptographic Message Authentication on the z14
+	// O'Farrell et al., CASCON 2017, pp. 48-55
+ // https://ibm.ent.box.com/s/jf9gedj0e9d2vjctfyh186shaztavnht
+
+ LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key
+ VZERO V0 // c
+
+ // load EX0, EX1 and EX2
+ MOVD $·constants<>(SB), R5
+ VLM (R5), EX0, EX2 // c
+
+ // setup r
+ VL (R4), T_0
+ MOVD $·keyMask<>(SB), R6
+ VL (R6), T_1
+ VN T_0, T_1, T_0
+ VZERO T_2 // limbs for r
+ VZERO T_3
+ VZERO T_4
+ EXPACC2(T_0, T_2, T_3, T_4, T_1, T_5, T_7)
+
+ // T_2, T_3, T_4: [0, r]
+
+ // setup r*20
+ VLEIG $0, $0, T_0
+ VLEIG $1, $20, T_0 // T_0: [0, 20]
+ VZERO T_5
+ VZERO T_6
+ VMSLG T_0, T_3, T_5, T_5
+ VMSLG T_0, T_4, T_6, T_6
+
+ // store r for final block in GR
+ VLGVG $1, T_2, RSAVE_0 // c
+ VLGVG $1, T_3, RSAVE_1 // c
+ VLGVG $1, T_4, RSAVE_2 // c
+ VLGVG $1, T_5, R5SAVE_1 // c
+ VLGVG $1, T_6, R5SAVE_2 // c
+
+ // initialize h
+ VZERO H0_0
+ VZERO H1_0
+ VZERO H2_0
+ VZERO H0_1
+ VZERO H1_1
+ VZERO H2_1
+
+ // initialize pointer for reduce constants
+ MOVD $·reduce<>(SB), R12
+
+ // calculate r**2 and 20*(r**2)
+ VZERO R_0
+ VZERO R_1
+ VZERO R_2
+ SQUARE(T_2, T_3, T_4, T_6, R_0, R_1, R_2, T_1, T_5, T_7)
+ REDUCE2(R_0, R_1, R_2, M0, M1, M2, M3, M4, R5_1, R5_2, M5, T_1)
+ VZERO R5_1
+ VZERO R5_2
+ VMSLG T_0, R_1, R5_1, R5_1
+ VMSLG T_0, R_2, R5_2, R5_2
+
+ // skip r**4 calculation if 3 blocks or less
+ CMPBLE R3, $48, b4
+
+ // calculate r**4 and 20*(r**4)
+ VZERO T_8
+ VZERO T_9
+ VZERO T_10
+ SQUARE(R_0, R_1, R_2, R5_2, T_8, T_9, T_10, T_1, T_5, T_7)
+ REDUCE2(T_8, T_9, T_10, M0, M1, M2, M3, M4, T_2, T_3, M5, T_1)
+ VZERO T_2
+ VZERO T_3
+ VMSLG T_0, T_9, T_2, T_2
+ VMSLG T_0, T_10, T_3, T_3
+
+ // put r**2 to the right and r**4 to the left of R_0, R_1, R_2
+ VSLDB $8, T_8, T_8, T_8
+ VSLDB $8, T_9, T_9, T_9
+ VSLDB $8, T_10, T_10, T_10
+ VSLDB $8, T_2, T_2, T_2
+ VSLDB $8, T_3, T_3, T_3
+
+ VO T_8, R_0, R_0
+ VO T_9, R_1, R_1
+ VO T_10, R_2, R_2
+ VO T_2, R5_1, R5_1
+ VO T_3, R5_2, R5_2
+
+ CMPBLE R3, $80, load // less than or equal to 5 blocks in message
+
+	// 6 (or 5+1) blocks
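+	// A short final block is loaded with VLL and padded by inserting a
+	// 0x01 byte just past the message bytes (the standard Poly1305
+	// padding); the VLVGB is skipped when the block is a full 16 bytes.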
+ SUB $81, R3
+ VLM (R2), M0, M4
+ VLL R3, 80(R2), M5
+ ADD $1, R3
+ MOVBZ $1, R0
+ CMPBGE R3, $16, 2(PC)
+ VLVGB R3, R0, M5
+ MOVD $96(R2), R2
+ EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
+ EXPACC(M2, M3, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
+ VLEIB $2, $1, H2_0
+ VLEIB $2, $1, H2_1
+ VLEIB $10, $1, H2_0
+ VLEIB $10, $1, H2_1
+
+ VZERO M0
+ VZERO M1
+ VZERO M2
+ VZERO M3
+ VZERO T_4
+ VZERO T_10
+ EXPACC(M4, M5, M0, M1, M2, M3, T_4, T_10, T_0, T_1, T_2, T_3)
+ VLR T_4, M4
+ VLEIB $10, $1, M2
+ CMPBLT R3, $16, 2(PC)
+ VLEIB $10, $1, T_10
+ MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
+ REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9)
+ VMRHG V0, H0_1, H0_0
+ VMRHG V0, H1_1, H1_0
+ VMRHG V0, H2_1, H2_0
+ VMRLG V0, H0_1, H0_1
+ VMRLG V0, H1_1, H1_1
+ VMRLG V0, H2_1, H2_1
+
+ SUB $16, R3
+ CMPBLE R3, $0, square
+
+load:
+ // load EX0, EX1 and EX2
+ MOVD $·c<>(SB), R5
+ VLM (R5), EX0, EX2
+
+loop:
+	CMPBLE R3, $64, add // last 4 or fewer blocks left
+
+ // next 4 full blocks
+ VLM (R2), M2, M5
+ SUB $64, R3
+ MOVD $64(R2), R2
+ REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, T_0, T_1, T_3, T_4, T_5, T_2, T_7, T_8, T_9)
+
+	// EXPACC inlined to create [m2, m3] limbs
+ VGBM $0x3f3f, T_0 // 44 bit clear mask
+ VGBM $0x1f1f, T_1 // 40 bit clear mask
+ VPERM M2, M3, EX0, T_3
+ VESRLG $4, T_0, T_0 // 44 bit clear mask ready
+ VPERM M2, M3, EX1, T_4
+ VPERM M2, M3, EX2, T_5
+ VN T_0, T_3, T_3
+ VESRLG $4, T_4, T_4
+ VN T_1, T_5, T_5
+ VN T_0, T_4, T_4
+ VMRHG H0_1, T_3, H0_0
+ VMRHG H1_1, T_4, H1_0
+ VMRHG H2_1, T_5, H2_0
+ VMRLG H0_1, T_3, H0_1
+ VMRLG H1_1, T_4, H1_1
+ VMRLG H2_1, T_5, H2_1
+ VLEIB $10, $1, H2_0
+ VLEIB $10, $1, H2_1
+ VPERM M4, M5, EX0, T_3
+ VPERM M4, M5, EX1, T_4
+ VPERM M4, M5, EX2, T_5
+ VN T_0, T_3, T_3
+ VESRLG $4, T_4, T_4
+ VN T_1, T_5, T_5
+ VN T_0, T_4, T_4
+ VMRHG V0, T_3, M0
+ VMRHG V0, T_4, M1
+ VMRHG V0, T_5, M2
+ VMRLG V0, T_3, M3
+ VMRLG V0, T_4, M4
+ VMRLG V0, T_5, M5
+ VLEIB $10, $1, M2
+ VLEIB $10, $1, M5
+
+ MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
+ CMPBNE R3, $0, loop
+ REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
+ VMRHG V0, H0_1, H0_0
+ VMRHG V0, H1_1, H1_0
+ VMRHG V0, H2_1, H2_0
+ VMRLG V0, H0_1, H0_1
+ VMRLG V0, H1_1, H1_1
+ VMRLG V0, H2_1, H2_1
+
+ // load EX0, EX1, EX2
+ MOVD $·constants<>(SB), R5
+ VLM (R5), EX0, EX2
+
+ // sum vectors
+ VAQ H0_0, H0_1, H0_0
+ VAQ H1_0, H1_1, H1_0
+ VAQ H2_0, H2_1, H2_0
+
+ // h may be >= 2*(2**130-5) so we need to reduce it again
+ // M0...M4 are used as temps here
+ REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
+
+next: // carry h1->h2
+ VLEIB $7, $0x28, T_1
+ VREPIB $4, T_2
+ VGBM $0x003F, T_3
+	VESRLG $4, T_3, T_3
+
+ // byte shift
+ VSRLB T_1, H1_0, T_4
+
+ // bit shift
+ VSRL T_2, T_4, T_4
+
+ // clear h1 carry bits
+ VN T_3, H1_0, H1_0
+
+ // add carry
+ VAQ T_4, H2_0, H2_0
+
+ // h is now < 2*(2**130-5)
+ // pack h into h1 (hi) and h0 (lo)
+ PACK(H0_0, H1_0, H2_0)
+
+ // if h > 2**130-5 then h -= 2**130-5
+ MOD(H0_0, H1_0, T_0, T_1, T_2)
+
+ // h += s
+ MOVD $·bswapMask<>(SB), R5
+ VL (R5), T_1
+ VL 16(R4), T_0
+ VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big)
+ VAQ T_0, H0_0, H0_0
+ VPERM H0_0, H0_0, T_1, H0_0 // reverse bytes (to little)
+ VST H0_0, (R1)
+ RET
+
+add:
+ // load EX0, EX1, EX2
+ MOVD $·constants<>(SB), R5
+ VLM (R5), EX0, EX2
+
+ REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
+ VMRHG V0, H0_1, H0_0
+ VMRHG V0, H1_1, H1_0
+ VMRHG V0, H2_1, H2_0
+ VMRLG V0, H0_1, H0_1
+ VMRLG V0, H1_1, H1_1
+ VMRLG V0, H2_1, H2_1
+ CMPBLE R3, $64, b4
+
+b4:
+ CMPBLE R3, $48, b3 // 3 blocks or less
+
+	// 4 (or 3+1) blocks remaining
+ SUB $49, R3
+ VLM (R2), M0, M2
+ VLL R3, 48(R2), M3
+ ADD $1, R3
+ MOVBZ $1, R0
+ CMPBEQ R3, $16, 2(PC)
+ VLVGB R3, R0, M3
+ MOVD $64(R2), R2
+ EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
+ VLEIB $10, $1, H2_0
+ VLEIB $10, $1, H2_1
+ VZERO M0
+ VZERO M1
+ VZERO M4
+ VZERO M5
+ VZERO T_4
+ VZERO T_10
+ EXPACC(M2, M3, M0, M1, M4, M5, T_4, T_10, T_0, T_1, T_2, T_3)
+ VLR T_4, M2
+ VLEIB $10, $1, M4
+ CMPBNE R3, $16, 2(PC)
+ VLEIB $10, $1, T_10
+ MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M4, M5, M2, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
+ REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
+ VMRHG V0, H0_1, H0_0
+ VMRHG V0, H1_1, H1_0
+ VMRHG V0, H2_1, H2_0
+ VMRLG V0, H0_1, H0_1
+ VMRLG V0, H1_1, H1_1
+ VMRLG V0, H2_1, H2_1
+ SUB $16, R3
+ CMPBLE R3, $0, square // this condition must always hold true!
+
+b3:
+ CMPBLE R3, $32, b2
+
+ // 3 blocks remaining
+
+ // setup [r²,r]
+ VSLDB $8, R_0, R_0, R_0
+ VSLDB $8, R_1, R_1, R_1
+ VSLDB $8, R_2, R_2, R_2
+ VSLDB $8, R5_1, R5_1, R5_1
+ VSLDB $8, R5_2, R5_2, R5_2
+
+ VLVGG $1, RSAVE_0, R_0
+ VLVGG $1, RSAVE_1, R_1
+ VLVGG $1, RSAVE_2, R_2
+ VLVGG $1, R5SAVE_1, R5_1
+ VLVGG $1, R5SAVE_2, R5_2
+
+ // setup [h0, h1]
+ VSLDB $8, H0_0, H0_0, H0_0
+ VSLDB $8, H1_0, H1_0, H1_0
+ VSLDB $8, H2_0, H2_0, H2_0
+ VO H0_1, H0_0, H0_0
+ VO H1_1, H1_0, H1_0
+ VO H2_1, H2_0, H2_0
+ VZERO H0_1
+ VZERO H1_1
+ VZERO H2_1
+
+ VZERO M0
+ VZERO M1
+ VZERO M2
+ VZERO M3
+ VZERO M4
+ VZERO M5
+
+ // H*[r**2, r]
+ MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
+ REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, T_10, M5)
+
+ SUB $33, R3
+ VLM (R2), M0, M1
+ VLL R3, 32(R2), M2
+ ADD $1, R3
+ MOVBZ $1, R0
+ CMPBEQ R3, $16, 2(PC)
+ VLVGB R3, R0, M2
+
+ // H += m0
+ VZERO T_1
+ VZERO T_2
+ VZERO T_3
+ EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)
+ VLEIB $10, $1, T_3
+ VAG H0_0, T_1, H0_0
+ VAG H1_0, T_2, H1_0
+ VAG H2_0, T_3, H2_0
+
+ VZERO M0
+ VZERO M3
+ VZERO M4
+ VZERO M5
+ VZERO T_10
+
+ // (H+m0)*r
+ MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M3, M4, M5, V0, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
+ REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_10, H0_1, H1_1, H2_1, T_9)
+
+ // H += m1
+ VZERO V0
+ VZERO T_1
+ VZERO T_2
+ VZERO T_3
+ EXPACC2(M1, T_1, T_2, T_3, T_4, T_5, T_6)
+ VLEIB $10, $1, T_3
+ VAQ H0_0, T_1, H0_0
+ VAQ H1_0, T_2, H1_0
+ VAQ H2_0, T_3, H2_0
+ REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10)
+
+ // [H, m2] * [r**2, r]
+ EXPACC2(M2, H0_0, H1_0, H2_0, T_1, T_2, T_3)
+ CMPBNE R3, $16, 2(PC)
+ VLEIB $10, $1, H2_0
+ VZERO M0
+ VZERO M1
+ VZERO M2
+ VZERO M3
+ VZERO M4
+ VZERO M5
+ MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
+ REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, M5, T_10)
+ SUB $16, R3
+ CMPBLE R3, $0, next // this condition must always hold true!
+
+b2:
+ CMPBLE R3, $16, b1
+
+ // 2 blocks remaining
+
+ // setup [r²,r]
+ VSLDB $8, R_0, R_0, R_0
+ VSLDB $8, R_1, R_1, R_1
+ VSLDB $8, R_2, R_2, R_2
+ VSLDB $8, R5_1, R5_1, R5_1
+ VSLDB $8, R5_2, R5_2, R5_2
+
+ VLVGG $1, RSAVE_0, R_0
+ VLVGG $1, RSAVE_1, R_1
+ VLVGG $1, RSAVE_2, R_2
+ VLVGG $1, R5SAVE_1, R5_1
+ VLVGG $1, R5SAVE_2, R5_2
+
+ // setup [h0, h1]
+ VSLDB $8, H0_0, H0_0, H0_0
+ VSLDB $8, H1_0, H1_0, H1_0
+ VSLDB $8, H2_0, H2_0, H2_0
+ VO H0_1, H0_0, H0_0
+ VO H1_1, H1_0, H1_0
+ VO H2_1, H2_0, H2_0
+ VZERO H0_1
+ VZERO H1_1
+ VZERO H2_1
+
+ VZERO M0
+ VZERO M1
+ VZERO M2
+ VZERO M3
+ VZERO M4
+ VZERO M5
+
+ // H*[r**2, r]
+ MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
+ REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9)
+ VMRHG V0, H0_1, H0_0
+ VMRHG V0, H1_1, H1_0
+ VMRHG V0, H2_1, H2_0
+ VMRLG V0, H0_1, H0_1
+ VMRLG V0, H1_1, H1_1
+ VMRLG V0, H2_1, H2_1
+
+	// move h into the upper halves, zeros in the lower halves
+ VSLDB $8, H0_0, H0_0, H0_0
+ VSLDB $8, H1_0, H1_0, H1_0
+ VSLDB $8, H2_0, H2_0, H2_0
+
+	// get message blocks and append the 0x01 pad byte
+ SUB $17, R3
+ VL (R2), M0
+ VLL R3, 16(R2), M1
+ ADD $1, R3
+ MOVBZ $1, R0
+ CMPBEQ R3, $16, 2(PC)
+ VLVGB R3, R0, M1
+ VZERO T_6
+ VZERO T_7
+ VZERO T_8
+ EXPACC2(M0, T_6, T_7, T_8, T_1, T_2, T_3)
+ EXPACC2(M1, T_6, T_7, T_8, T_1, T_2, T_3)
+ VLEIB $2, $1, T_8
+ CMPBNE R3, $16, 2(PC)
+ VLEIB $10, $1, T_8
+
+ // add [m0, m1] to h
+ VAG H0_0, T_6, H0_0
+ VAG H1_0, T_7, H1_0
+ VAG H2_0, T_8, H2_0
+
+ VZERO M2
+ VZERO M3
+ VZERO M4
+ VZERO M5
+ VZERO T_10
+ VZERO M0
+
+ // at this point R_0 .. R5_2 look like [r**2, r]
+ MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M2, M3, M4, M5, T_10, M0, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
+ REDUCE2(H0_0, H1_0, H2_0, M2, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10)
+ SUB $16, R3, R3
+ CMPBLE R3, $0, next
+
+b1:
+ CMPBLE R3, $0, next
+
+ // 1 block remaining
+
+ // setup [r²,r]
+ VSLDB $8, R_0, R_0, R_0
+ VSLDB $8, R_1, R_1, R_1
+ VSLDB $8, R_2, R_2, R_2
+ VSLDB $8, R5_1, R5_1, R5_1
+ VSLDB $8, R5_2, R5_2, R5_2
+
+ VLVGG $1, RSAVE_0, R_0
+ VLVGG $1, RSAVE_1, R_1
+ VLVGG $1, RSAVE_2, R_2
+ VLVGG $1, R5SAVE_1, R5_1
+ VLVGG $1, R5SAVE_2, R5_2
+
+ // setup [h0, h1]
+ VSLDB $8, H0_0, H0_0, H0_0
+ VSLDB $8, H1_0, H1_0, H1_0
+ VSLDB $8, H2_0, H2_0, H2_0
+ VO H0_1, H0_0, H0_0
+ VO H1_1, H1_0, H1_0
+ VO H2_1, H2_0, H2_0
+ VZERO H0_1
+ VZERO H1_1
+ VZERO H2_1
+
+ VZERO M0
+ VZERO M1
+ VZERO M2
+ VZERO M3
+ VZERO M4
+ VZERO M5
+
+ // H*[r**2, r]
+ MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
+ REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
+
+ // set up [0, m0] limbs
+ SUB $1, R3
+ VLL R3, (R2), M0
+ ADD $1, R3
+ MOVBZ $1, R0
+ CMPBEQ R3, $16, 2(PC)
+ VLVGB R3, R0, M0
+ VZERO T_1
+ VZERO T_2
+ VZERO T_3
+	EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6) // limbs: [0, m]
+ CMPBNE R3, $16, 2(PC)
+ VLEIB $10, $1, T_3
+
+ // h+m0
+ VAQ H0_0, T_1, H0_0
+ VAQ H1_0, T_2, H1_0
+ VAQ H2_0, T_3, H2_0
+
+ VZERO M0
+ VZERO M1
+ VZERO M2
+ VZERO M3
+ VZERO M4
+ VZERO M5
+ MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
+ REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
+
+ BR next
+
+square:
+ // setup [r²,r]
+ VSLDB $8, R_0, R_0, R_0
+ VSLDB $8, R_1, R_1, R_1
+ VSLDB $8, R_2, R_2, R_2
+ VSLDB $8, R5_1, R5_1, R5_1
+ VSLDB $8, R5_2, R5_2, R5_2
+
+ VLVGG $1, RSAVE_0, R_0
+ VLVGG $1, RSAVE_1, R_1
+ VLVGG $1, RSAVE_2, R_2
+ VLVGG $1, R5SAVE_1, R5_1
+ VLVGG $1, R5SAVE_2, R5_2
+
+ // setup [h0, h1]
+ VSLDB $8, H0_0, H0_0, H0_0
+ VSLDB $8, H1_0, H1_0, H1_0
+ VSLDB $8, H2_0, H2_0, H2_0
+ VO H0_1, H0_0, H0_0
+ VO H1_1, H1_0, H1_0
+ VO H2_1, H2_0, H2_0
+ VZERO H0_1
+ VZERO H1_1
+ VZERO H2_1
+
+ VZERO M0
+ VZERO M1
+ VZERO M2
+ VZERO M3
+ VZERO M4
+ VZERO M5
+
+ // (h0*r**2) + (h1*r)
+ MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
+ REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
+ BR next
+
+TEXT ·hasVMSLFacility(SB), NOSPLIT, $24-1
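+	// Assumption (per the z/Architecture Principles of Operation): STFLE
+	// stores the facility-indication bits, and the MOVBZ/AND below test
+	// bit 135 (vector-enhancements facility 1), which provides VMSL.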
+ MOVD $x-24(SP), R1
+ XC $24, 0(R1), 0(R1) // clear the storage
+ MOVD $2, R0 // R0 is the number of double words stored -1
+ WORD $0xB2B01000 // STFLE 0(R1)
+ XOR R0, R0 // reset the value of R0
+ MOVBZ z-8(SP), R1
+ AND $0x01, R1
+ BEQ novmsl
+
+vectorinstalled:
+ // check if the vector instruction has been enabled
+ VLEIB $0, $0xF, V16
+ VLGVB $0, V16, R1
+ CMPBNE R1, $0xF, novmsl
+ MOVB $1, ret+0(FP) // have vx
+ RET
+
+novmsl:
+ MOVB $0, ret+0(FP) // no vx
+ RET
diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go
index 6c6e84236..fd97ba1b0 100644
--- a/vendor/golang.org/x/crypto/ripemd160/ripemd160.go
+++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go
@@ -5,7 +5,7 @@
// Package ripemd160 implements the RIPEMD-160 hash algorithm.
package ripemd160 // import "golang.org/x/crypto/ripemd160"
-// RIPEMD-160 is designed by by Hans Dobbertin, Antoon Bosselaers, and Bart
+// RIPEMD-160 is designed by Hans Dobbertin, Antoon Bosselaers, and Bart
// Preneel with specifications available at:
// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf.
diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go
index 7bc8e6c48..e0edc02f0 100644
--- a/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go
+++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go
@@ -8,6 +8,10 @@
package ripemd160
+import (
+ "math/bits"
+)
+
// work buffer indices and roll amounts for one line
var _n = [80]uint{
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
@@ -59,16 +63,16 @@ func _Block(md *digest, p []byte) int {
i := 0
for i < 16 {
alpha = a + (b ^ c ^ d) + x[_n[i]]
- s := _r[i]
- alpha = (alpha<>(32-s)) + e
- beta = c<<10 | c>>22
+ s := int(_r[i])
+ alpha = bits.RotateLeft32(alpha, s) + e
+ beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6
- s = r_[i]
- alpha = (alpha<>(32-s)) + ee
- beta = cc<<10 | cc>>22
+ s = int(r_[i])
+ alpha = bits.RotateLeft32(alpha, s) + ee
+ beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
@@ -77,16 +81,16 @@ func _Block(md *digest, p []byte) int {
// round 2
for i < 32 {
alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999
- s := _r[i]
- alpha = (alpha<>(32-s)) + e
- beta = c<<10 | c>>22
+ s := int(_r[i])
+ alpha = bits.RotateLeft32(alpha, s) + e
+ beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124
- s = r_[i]
- alpha = (alpha<>(32-s)) + ee
- beta = cc<<10 | cc>>22
+ s = int(r_[i])
+ alpha = bits.RotateLeft32(alpha, s) + ee
+ beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
@@ -95,16 +99,16 @@ func _Block(md *digest, p []byte) int {
// round 3
for i < 48 {
alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1
- s := _r[i]
- alpha = (alpha<>(32-s)) + e
- beta = c<<10 | c>>22
+ s := int(_r[i])
+ alpha = bits.RotateLeft32(alpha, s) + e
+ beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3
- s = r_[i]
- alpha = (alpha<>(32-s)) + ee
- beta = cc<<10 | cc>>22
+ s = int(r_[i])
+ alpha = bits.RotateLeft32(alpha, s) + ee
+ beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
@@ -113,16 +117,16 @@ func _Block(md *digest, p []byte) int {
// round 4
for i < 64 {
alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc
- s := _r[i]
- alpha = (alpha<>(32-s)) + e
- beta = c<<10 | c>>22
+ s := int(_r[i])
+ alpha = bits.RotateLeft32(alpha, s) + e
+ beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9
- s = r_[i]
- alpha = (alpha<>(32-s)) + ee
- beta = cc<<10 | cc>>22
+ s = int(r_[i])
+ alpha = bits.RotateLeft32(alpha, s) + ee
+ beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
@@ -131,16 +135,16 @@ func _Block(md *digest, p []byte) int {
// round 5
for i < 80 {
alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e
- s := _r[i]
- alpha = (alpha<>(32-s)) + e
- beta = c<<10 | c>>22
+ s := int(_r[i])
+ alpha = bits.RotateLeft32(alpha, s) + e
+ beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb ^ cc ^ dd) + x[n_[i]]
- s = r_[i]
- alpha = (alpha<>(32-s)) + ee
- beta = cc<<10 | cc>>22
+ s = int(r_[i])
+ alpha = bits.RotateLeft32(alpha, s) + ee
+ beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go
index ff28aaef6..3362afd11 100644
--- a/vendor/golang.org/x/crypto/scrypt/scrypt.go
+++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go
@@ -29,7 +29,7 @@ func blockXOR(dst, src []uint32, n int) {
}
// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in,
-// and puts the result into both both tmp and out.
+// and puts the result into both tmp and out.
func salsaXOR(tmp *[16]uint32, in, out []uint32) {
w0 := tmp[0] ^ in[0]
w1 := tmp[1] ^ in[1]
@@ -218,7 +218,7 @@ func smix(b []byte, r, N int, v, xy []uint32) {
// For example, you can get a derived key for e.g. AES-256 (which needs a
// 32-byte key) by doing:
//
-// dk, err := scrypt.Key([]byte("some password"), salt, 16384, 8, 1, 32)
+// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32)
//
// The recommended parameters for interactive logins as of 2017 are N=32768, r=8
// and p=1. The parameters N, r, and p should be increased as memory latency and
diff --git a/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go
similarity index 96%
rename from crypto/sha3/doc.go
rename to vendor/golang.org/x/crypto/sha3/doc.go
index 3dab530f8..c2fef30af 100644
--- a/crypto/sha3/doc.go
+++ b/vendor/golang.org/x/crypto/sha3/doc.go
@@ -43,7 +43,7 @@
// is then "full" and the permutation is applied to "empty" it. This process is
// repeated until all the input has been "absorbed". The input is then padded.
// The digest is "squeezed" from the sponge in the same way, except that output
-// output is copied out instead of input being XORed in.
+// is copied out instead of input being XORed in.
//
// A sponge is parameterized by its generic security strength, which is equal
// to half its capacity; capacity + rate is equal to the permutation's width.
@@ -63,4 +63,4 @@
// They produce output of the same length, with the same security strengths
// against all attacks. This means, in particular, that SHA3-256 only has
// 128-bit collision resistance, because its output length is 32 bytes.
-package sha3
+package sha3 // import "golang.org/x/crypto/sha3"
diff --git a/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go
similarity index 58%
rename from crypto/sha3/hashes.go
rename to vendor/golang.org/x/crypto/sha3/hashes.go
index fa0d7b436..0d8043fd2 100644
--- a/crypto/sha3/hashes.go
+++ b/vendor/golang.org/x/crypto/sha3/hashes.go
@@ -12,31 +12,57 @@ import (
"hash"
)
-// NewKeccak256 creates a new Keccak-256 hash.
-func NewKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
-
-// NewKeccak512 creates a new Keccak-512 hash.
-func NewKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }
-
// New224 creates a new SHA3-224 hash.
// Its generic security strength is 224 bits against preimage attacks,
// and 112 bits against collision attacks.
-func New224() hash.Hash { return &state{rate: 144, outputLen: 28, dsbyte: 0x06} }
+func New224() hash.Hash {
+ if h := new224Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 144, outputLen: 28, dsbyte: 0x06}
+}
// New256 creates a new SHA3-256 hash.
// Its generic security strength is 256 bits against preimage attacks,
// and 128 bits against collision attacks.
-func New256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x06} }
+func New256() hash.Hash {
+ if h := new256Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 136, outputLen: 32, dsbyte: 0x06}
+}
// New384 creates a new SHA3-384 hash.
// Its generic security strength is 384 bits against preimage attacks,
// and 192 bits against collision attacks.
-func New384() hash.Hash { return &state{rate: 104, outputLen: 48, dsbyte: 0x06} }
+func New384() hash.Hash {
+ if h := new384Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 104, outputLen: 48, dsbyte: 0x06}
+}
// New512 creates a new SHA3-512 hash.
// Its generic security strength is 512 bits against preimage attacks,
// and 256 bits against collision attacks.
-func New512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x06} }
+func New512() hash.Hash {
+ if h := new512Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 72, outputLen: 64, dsbyte: 0x06}
+}
+
+// NewLegacyKeccak256 creates a new Keccak-256 hash.
+//
+// Only use this function if you require compatibility with an existing cryptosystem
+// that uses non-standard padding. All other users should use New256 instead.
+func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
+
+// NewLegacyKeccak512 creates a new Keccak-512 hash.
+//
+// Only use this function if you require compatibility with an existing cryptosystem
+// that uses non-standard padding. All other users should use New512 instead.
+func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }
// Sum224 returns the SHA3-224 digest of the data.
func Sum224(data []byte) (digest [28]byte) {
diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go
new file mode 100644
index 000000000..c4ff3f6e6
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/hashes_generic.go
@@ -0,0 +1,27 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//+build gccgo appengine !s390x
+
+package sha3
+
+import (
+ "hash"
+)
+
+// new224Asm returns an assembly implementation of SHA3-224 if available,
+// otherwise it returns nil.
+func new224Asm() hash.Hash { return nil }
+
+// new256Asm returns an assembly implementation of SHA3-256 if available,
+// otherwise it returns nil.
+func new256Asm() hash.Hash { return nil }
+
+// new384Asm returns an assembly implementation of SHA3-384 if available,
+// otherwise it returns nil.
+func new384Asm() hash.Hash { return nil }
+
+// new512Asm returns an assembly implementation of SHA3-512 if available,
+// otherwise it returns nil.
+func new512Asm() hash.Hash { return nil }
diff --git a/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go
similarity index 100%
rename from crypto/sha3/keccakf.go
rename to vendor/golang.org/x/crypto/sha3/keccakf.go
diff --git a/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
similarity index 88%
rename from crypto/sha3/keccakf_amd64.go
rename to vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
index de035c550..788679585 100644
--- a/crypto/sha3/keccakf_amd64.go
+++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
@@ -10,4 +10,4 @@ package sha3
//go:noescape
-func keccakF1600(state *[25]uint64)
+func keccakF1600(a *[25]uint64)
diff --git a/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
similarity index 100%
rename from crypto/sha3/keccakf_amd64.s
rename to vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
diff --git a/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go
similarity index 100%
rename from crypto/sha3/register.go
rename to vendor/golang.org/x/crypto/sha3/register.go
diff --git a/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go
similarity index 100%
rename from crypto/sha3/sha3.go
rename to vendor/golang.org/x/crypto/sha3/sha3.go
diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go
new file mode 100644
index 000000000..f1fb79cc3
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go
@@ -0,0 +1,289 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//+build !gccgo,!appengine
+
+package sha3
+
+// This file contains code for using the 'compute intermediate
+// message digest' (KIMD) and 'compute last message digest' (KLMD)
+// instructions to compute SHA-3 and SHAKE hashes on IBM Z.
+
+import (
+ "hash"
+)
+
+// codes represent 7-bit KIMD/KLMD function codes as defined in
+// the Principles of Operation.
+type code uint64
+
+const (
+ // function codes for KIMD/KLMD
+ sha3_224 code = 32
+ sha3_256 = 33
+ sha3_384 = 34
+ sha3_512 = 35
+ shake_128 = 36
+ shake_256 = 37
+ nopad = 0x100
+)
+
+// hasMSA6 reports whether the machine supports the SHA-3 and SHAKE function
+// codes, as defined in message-security-assist extension 6.
+func hasMSA6() bool
+
+// hasAsm caches the result of hasMSA6 (which might be expensive to call).
+var hasAsm = hasMSA6()
+
+// kimd is a wrapper for the 'compute intermediate message digest' instruction.
+// src must be a multiple of the rate for the given function code.
+//go:noescape
+func kimd(function code, chain *[200]byte, src []byte)
+
+// klmd is a wrapper for the 'compute last message digest' instruction.
+// src padding is handled by the instruction.
+//go:noescape
+func klmd(function code, chain *[200]byte, dst, src []byte)
+
+type asmState struct {
+ a [200]byte // 1600 bit state
+ buf []byte // care must be taken to ensure cap(buf) is a multiple of rate
+ rate int // equivalent to block size
+ storage [3072]byte // underlying storage for buf
+ outputLen int // output length if fixed, 0 if not
+ function code // KIMD/KLMD function code
+ state spongeDirection // whether the sponge is absorbing or squeezing
+}
+
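+// The rates below follow from the sponge parameters: the rate is the
+// 200-byte permutation width minus the capacity, and for the SHA3-*
+// functions the capacity is twice the output length.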
+func newAsmState(function code) *asmState {
+ var s asmState
+ s.function = function
+ switch function {
+ case sha3_224:
+ s.rate = 144
+ s.outputLen = 28
+ case sha3_256:
+ s.rate = 136
+ s.outputLen = 32
+ case sha3_384:
+ s.rate = 104
+ s.outputLen = 48
+ case sha3_512:
+ s.rate = 72
+ s.outputLen = 64
+ case shake_128:
+ s.rate = 168
+ case shake_256:
+ s.rate = 136
+ default:
+ panic("sha3: unrecognized function code")
+ }
+
+ // limit s.buf size to a multiple of s.rate
+ s.resetBuf()
+ return &s
+}
+
+func (s *asmState) clone() *asmState {
+ c := *s
+ c.buf = c.storage[:len(s.buf):cap(s.buf)]
+ return &c
+}
+
+// copyIntoBuf copies b into buf. It will panic if there is not enough space to
+// store all of b.
+func (s *asmState) copyIntoBuf(b []byte) {
+ bufLen := len(s.buf)
+ s.buf = s.buf[:len(s.buf)+len(b)]
+ copy(s.buf[bufLen:], b)
+}
+
+// resetBuf points buf at storage, sets the length to 0 and sets cap to be a
+// multiple of the rate.
+func (s *asmState) resetBuf() {
+ max := (cap(s.storage) / s.rate) * s.rate
+ s.buf = s.storage[:0:max]
+}
+
+// Write (via the embedded io.Writer interface) adds more data to the running hash.
+// It never returns an error.
+func (s *asmState) Write(b []byte) (int, error) {
+ if s.state != spongeAbsorbing {
+ panic("sha3: write to sponge after read")
+ }
+ length := len(b)
+ for len(b) > 0 {
+ if len(s.buf) == 0 && len(b) >= cap(s.buf) {
+ // Hash the data directly and push any remaining bytes
+ // into the buffer.
+			remainder := len(b) % s.rate
+ kimd(s.function, &s.a, b[:len(b)-remainder])
+ if remainder != 0 {
+ s.copyIntoBuf(b[len(b)-remainder:])
+ }
+ return length, nil
+ }
+
+ if len(s.buf) == cap(s.buf) {
+ // flush the buffer
+ kimd(s.function, &s.a, s.buf)
+ s.buf = s.buf[:0]
+ }
+
+ // copy as much as we can into the buffer
+ n := len(b)
+ if len(b) > cap(s.buf)-len(s.buf) {
+ n = cap(s.buf) - len(s.buf)
+ }
+ s.copyIntoBuf(b[:n])
+ b = b[n:]
+ }
+ return length, nil
+}
+
+// Read squeezes an arbitrary number of bytes from the sponge.
+func (s *asmState) Read(out []byte) (n int, err error) {
+ n = len(out)
+
+ // need to pad if we were absorbing
+ if s.state == spongeAbsorbing {
+ s.state = spongeSqueezing
+
+ // write hash directly into out if possible
+ if len(out)%s.rate == 0 {
+ klmd(s.function, &s.a, out, s.buf) // len(out) may be 0
+ s.buf = s.buf[:0]
+ return
+ }
+
+ // write hash into buffer
+ max := cap(s.buf)
+ if max > len(out) {
+ max = (len(out)/s.rate)*s.rate + s.rate
+ }
+ klmd(s.function, &s.a, s.buf[:max], s.buf)
+ s.buf = s.buf[:max]
+ }
+
+ for len(out) > 0 {
+ // flush the buffer
+ if len(s.buf) != 0 {
+ c := copy(out, s.buf)
+ out = out[c:]
+ s.buf = s.buf[c:]
+ continue
+ }
+
+ // write hash directly into out if possible
+ if len(out)%s.rate == 0 {
+ klmd(s.function|nopad, &s.a, out, nil)
+ return
+ }
+
+ // write hash into buffer
+ s.resetBuf()
+ if cap(s.buf) > len(out) {
+ s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate]
+ }
+ klmd(s.function|nopad, &s.a, s.buf, nil)
+ }
+ return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (s *asmState) Sum(b []byte) []byte {
+ if s.outputLen == 0 {
+ panic("sha3: cannot call Sum on SHAKE functions")
+ }
+
+ // Copy the state to preserve the original.
+ a := s.a
+
+ // Hash the buffer. Note that we don't clear it because we
+ // aren't updating the state.
+ klmd(s.function, &a, nil, s.buf)
+ return append(b, a[:s.outputLen]...)
+}
+
+// Reset resets the Hash to its initial state.
+func (s *asmState) Reset() {
+ for i := range s.a {
+ s.a[i] = 0
+ }
+ s.resetBuf()
+ s.state = spongeAbsorbing
+}
+
+// Size returns the number of bytes Sum will return.
+func (s *asmState) Size() int {
+ return s.outputLen
+}
+
+// BlockSize returns the hash's underlying block size.
+// The Write method must be able to accept any amount
+// of data, but it may operate more efficiently if all writes
+// are a multiple of the block size.
+func (s *asmState) BlockSize() int {
+ return s.rate
+}
+
+// Clone returns a copy of the ShakeHash in its current state.
+func (s *asmState) Clone() ShakeHash {
+ return s.clone()
+}
+
+// new224Asm returns an assembly implementation of SHA3-224 if available,
+// otherwise it returns nil.
+func new224Asm() hash.Hash {
+ if hasAsm {
+ return newAsmState(sha3_224)
+ }
+ return nil
+}
+
+// new256Asm returns an assembly implementation of SHA3-256 if available,
+// otherwise it returns nil.
+func new256Asm() hash.Hash {
+ if hasAsm {
+ return newAsmState(sha3_256)
+ }
+ return nil
+}
+
+// new384Asm returns an assembly implementation of SHA3-384 if available,
+// otherwise it returns nil.
+func new384Asm() hash.Hash {
+ if hasAsm {
+ return newAsmState(sha3_384)
+ }
+ return nil
+}
+
+// new512Asm returns an assembly implementation of SHA3-512 if available,
+// otherwise it returns nil.
+func new512Asm() hash.Hash {
+ if hasAsm {
+ return newAsmState(sha3_512)
+ }
+ return nil
+}
+
+// newShake128Asm returns an assembly implementation of SHAKE-128 if available,
+// otherwise it returns nil.
+func newShake128Asm() ShakeHash {
+ if hasAsm {
+ return newAsmState(shake_128)
+ }
+ return nil
+}
+
+// newShake256Asm returns an assembly implementation of SHAKE-256 if available,
+// otherwise it returns nil.
+func newShake256Asm() ShakeHash {
+ if hasAsm {
+ return newAsmState(shake_256)
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s
new file mode 100644
index 000000000..20978fc71
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s
@@ -0,0 +1,49 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//+build !gccgo,!appengine
+
+#include "textflag.h"
+
+TEXT ·hasMSA6(SB), NOSPLIT, $16-1
+ MOVD $0, R0 // KIMD-Query function code
+ MOVD $tmp-16(SP), R1 // parameter block
+ XC $16, (R1), (R1) // clear the parameter block
+ WORD $0xB93E0002 // KIMD --, --
+ WORD $0x91FC1004 // TM 4(R1), 0xFC (test bits [32-37])
+ BVS yes
+
+no:
+ MOVB $0, ret+0(FP)
+ RET
+
+yes:
+ MOVB $1, ret+0(FP)
+ RET
+
+// func kimd(function code, params *[200]byte, src []byte)
+TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40
+ MOVD function+0(FP), R0
+ MOVD params+8(FP), R1
+ LMG src+16(FP), R2, R3 // R2=base, R3=len
+
+continue:
+ WORD $0xB93E0002 // KIMD --, R2
+ BVS continue // continue if interrupted
+ MOVD $0, R0 // reset R0 for pre-go1.8 compilers
+ RET
+
+// func klmd(function code, params *[200]byte, dst, src []byte)
+TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64
+ // TODO: SHAKE support
+ MOVD function+0(FP), R0
+ MOVD params+8(FP), R1
+ LMG dst+16(FP), R2, R3 // R2=base, R3=len
+ LMG src+40(FP), R4, R5 // R4=base, R5=len
+
+continue:
+ WORD $0xB93F0024 // KLMD R2, R4
+ BVS continue // continue if interrupted
+ MOVD $0, R0 // reset R0 for pre-go1.8 compilers
+ RET
diff --git a/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go
similarity index 83%
rename from crypto/sha3/shake.go
rename to vendor/golang.org/x/crypto/sha3/shake.go
index 841f9860f..97c9b0624 100644
--- a/crypto/sha3/shake.go
+++ b/vendor/golang.org/x/crypto/sha3/shake.go
@@ -38,12 +38,22 @@ func (d *state) Clone() ShakeHash {
// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
// Its generic security strength is 128 bits against all attacks if at
// least 32 bytes of its output are used.
-func NewShake128() ShakeHash { return &state{rate: 168, dsbyte: 0x1f} }
+func NewShake128() ShakeHash {
+ if h := newShake128Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 168, dsbyte: 0x1f}
+}
-// NewShake256 creates a new SHAKE128 variable-output-length ShakeHash.
+// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
// Its generic security strength is 256 bits against all attacks if
// at least 64 bytes of its output are used.
-func NewShake256() ShakeHash { return &state{rate: 136, dsbyte: 0x1f} }
+func NewShake256() ShakeHash {
+ if h := newShake256Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 136, dsbyte: 0x1f}
+}
// ShakeSum128 writes an arbitrary-length digest of data into hash.
func ShakeSum128(hash, data []byte) {
diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go
new file mode 100644
index 000000000..73d0c90bf
--- /dev/null
+++ b/vendor/golang.org/x/crypto/sha3/shake_generic.go
@@ -0,0 +1,19 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//+build gccgo appengine !s390x
+
+package sha3
+
+// newShake128Asm returns an assembly implementation of SHAKE-128 if available,
+// otherwise it returns nil.
+func newShake128Asm() ShakeHash {
+ return nil
+}
+
+// newShake256Asm returns an assembly implementation of SHAKE-256 if available,
+// otherwise it returns nil.
+func newShake256Asm() ShakeHash {
+ return nil
+}
diff --git a/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go
similarity index 100%
rename from crypto/sha3/xor.go
rename to vendor/golang.org/x/crypto/sha3/xor.go
diff --git a/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go
similarity index 100%
rename from crypto/sha3/xor_generic.go
rename to vendor/golang.org/x/crypto/sha3/xor_generic.go
diff --git a/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go
similarity index 100%
rename from crypto/sha3/xor_unaligned.go
rename to vendor/golang.org/x/crypto/sha3/xor_unaligned.go
diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go
index b1f022078..00ed9923e 100644
--- a/vendor/golang.org/x/crypto/ssh/certs.go
+++ b/vendor/golang.org/x/crypto/ssh/certs.go
@@ -44,7 +44,9 @@ type Signature struct {
const CertTimeInfinity = 1<<64 - 1
// A Certificate represents an OpenSSH certificate as defined in
-// [PROTOCOL.certkeys]?rev=1.8.
+// [PROTOCOL.certkeys]?rev=1.8. The Certificate type implements the
+// PublicKey interface, so it can be unmarshaled using
+// ParsePublicKey.
type Certificate struct {
Nonce []byte
Key PublicKey
@@ -220,6 +222,11 @@ type openSSHCertSigner struct {
signer Signer
}
+type algorithmOpenSSHCertSigner struct {
+ *openSSHCertSigner
+ algorithmSigner AlgorithmSigner
+}
+
// NewCertSigner returns a Signer that signs with the given Certificate, whose
// private key is held by signer. It returns an error if the public key in cert
// doesn't match the key used by signer.
@@ -228,7 +235,12 @@ func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) {
return nil, errors.New("ssh: signer and cert have different public key")
}
- return &openSSHCertSigner{cert, signer}, nil
+ if algorithmSigner, ok := signer.(AlgorithmSigner); ok {
+ return &algorithmOpenSSHCertSigner{
+ &openSSHCertSigner{cert, signer}, algorithmSigner}, nil
+ } else {
+ return &openSSHCertSigner{cert, signer}, nil
+ }
}
func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
@@ -239,6 +251,10 @@ func (s *openSSHCertSigner) PublicKey() PublicKey {
return s.pub
}
+func (s *algorithmOpenSSHCertSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
+ return s.algorithmSigner.SignWithAlgorithm(rand, data, algorithm)
+}
+
const sourceAddressCriticalOption = "source-address"
// CertChecker does the work of verifying a certificate. Its methods
@@ -340,10 +356,10 @@ func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permis
// the signature of the certificate.
func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
if c.IsRevoked != nil && c.IsRevoked(cert) {
- return fmt.Errorf("ssh: certicate serial %d revoked", cert.Serial)
+ return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial)
}
- for opt, _ := range cert.CriticalOptions {
+ for opt := range cert.CriticalOptions {
// sourceAddressCriticalOption will be enforced by
// serverAuthenticate
if opt == sourceAddressCriticalOption {
diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go
index 195530ea0..c0834c00d 100644
--- a/vendor/golang.org/x/crypto/ssh/channel.go
+++ b/vendor/golang.org/x/crypto/ssh/channel.go
@@ -205,32 +205,32 @@ type channel struct {
// writePacket sends a packet. If the packet is a channel close, it updates
// sentClose. This method takes the lock c.writeMu.
-func (c *channel) writePacket(packet []byte) error {
- c.writeMu.Lock()
- if c.sentClose {
- c.writeMu.Unlock()
+func (ch *channel) writePacket(packet []byte) error {
+ ch.writeMu.Lock()
+ if ch.sentClose {
+ ch.writeMu.Unlock()
return io.EOF
}
- c.sentClose = (packet[0] == msgChannelClose)
- err := c.mux.conn.writePacket(packet)
- c.writeMu.Unlock()
+ ch.sentClose = (packet[0] == msgChannelClose)
+ err := ch.mux.conn.writePacket(packet)
+ ch.writeMu.Unlock()
return err
}
-func (c *channel) sendMessage(msg interface{}) error {
+func (ch *channel) sendMessage(msg interface{}) error {
if debugMux {
- log.Printf("send(%d): %#v", c.mux.chanList.offset, msg)
+ log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg)
}
p := Marshal(msg)
- binary.BigEndian.PutUint32(p[1:], c.remoteId)
- return c.writePacket(p)
+ binary.BigEndian.PutUint32(p[1:], ch.remoteId)
+ return ch.writePacket(p)
}
// WriteExtended writes data to a specific extended stream. These streams are
// used, for example, for stderr.
-func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) {
- if c.sentEOF {
+func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) {
+ if ch.sentEOF {
return 0, io.EOF
}
// 1 byte message type, 4 bytes remoteId, 4 bytes data length
@@ -241,16 +241,16 @@ func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err er
opCode = msgChannelExtendedData
}
- c.writeMu.Lock()
- packet := c.packetPool[extendedCode]
+ ch.writeMu.Lock()
+ packet := ch.packetPool[extendedCode]
// We don't remove the buffer from packetPool, so
// WriteExtended calls from different goroutines will be
// flagged as errors by the race detector.
- c.writeMu.Unlock()
+ ch.writeMu.Unlock()
for len(data) > 0 {
- space := min(c.maxRemotePayload, len(data))
- if space, err = c.remoteWin.reserve(space); err != nil {
+ space := min(ch.maxRemotePayload, len(data))
+ if space, err = ch.remoteWin.reserve(space); err != nil {
return n, err
}
if want := headerLength + space; uint32(cap(packet)) < want {
@@ -262,13 +262,13 @@ func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err er
todo := data[:space]
packet[0] = opCode
- binary.BigEndian.PutUint32(packet[1:], c.remoteId)
+ binary.BigEndian.PutUint32(packet[1:], ch.remoteId)
if extendedCode > 0 {
binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode))
}
binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo)))
copy(packet[headerLength:], todo)
- if err = c.writePacket(packet); err != nil {
+ if err = ch.writePacket(packet); err != nil {
return n, err
}
@@ -276,14 +276,14 @@ func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err er
data = data[len(todo):]
}
- c.writeMu.Lock()
- c.packetPool[extendedCode] = packet
- c.writeMu.Unlock()
+ ch.writeMu.Lock()
+ ch.packetPool[extendedCode] = packet
+ ch.writeMu.Unlock()
return n, err
}
-func (c *channel) handleData(packet []byte) error {
+func (ch *channel) handleData(packet []byte) error {
headerLen := 9
isExtendedData := packet[0] == msgChannelExtendedData
if isExtendedData {
@@ -303,7 +303,7 @@ func (c *channel) handleData(packet []byte) error {
if length == 0 {
return nil
}
- if length > c.maxIncomingPayload {
+ if length > ch.maxIncomingPayload {
// TODO(hanwen): should send Disconnect?
return errors.New("ssh: incoming packet exceeds maximum payload size")
}
@@ -313,21 +313,21 @@ func (c *channel) handleData(packet []byte) error {
return errors.New("ssh: wrong packet length")
}
- c.windowMu.Lock()
- if c.myWindow < length {
- c.windowMu.Unlock()
+ ch.windowMu.Lock()
+ if ch.myWindow < length {
+ ch.windowMu.Unlock()
// TODO(hanwen): should send Disconnect with reason?
return errors.New("ssh: remote side wrote too much")
}
- c.myWindow -= length
- c.windowMu.Unlock()
+ ch.myWindow -= length
+ ch.windowMu.Unlock()
if extended == 1 {
- c.extPending.write(data)
+ ch.extPending.write(data)
} else if extended > 0 {
// discard other extended data.
} else {
- c.pending.write(data)
+ ch.pending.write(data)
}
return nil
}
@@ -384,31 +384,31 @@ func (c *channel) close() {
// responseMessageReceived is called when a success or failure message is
// received on a channel to check that such a message is reasonable for the
// given channel.
-func (c *channel) responseMessageReceived() error {
- if c.direction == channelInbound {
+func (ch *channel) responseMessageReceived() error {
+ if ch.direction == channelInbound {
return errors.New("ssh: channel response message received on inbound channel")
}
- if c.decided {
+ if ch.decided {
return errors.New("ssh: duplicate response received for channel")
}
- c.decided = true
+ ch.decided = true
return nil
}
-func (c *channel) handlePacket(packet []byte) error {
+func (ch *channel) handlePacket(packet []byte) error {
switch packet[0] {
case msgChannelData, msgChannelExtendedData:
- return c.handleData(packet)
+ return ch.handleData(packet)
case msgChannelClose:
- c.sendMessage(channelCloseMsg{PeersId: c.remoteId})
- c.mux.chanList.remove(c.localId)
- c.close()
+ ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId})
+ ch.mux.chanList.remove(ch.localId)
+ ch.close()
return nil
case msgChannelEOF:
// RFC 4254 is mute on how EOF affects dataExt messages but
// it is logical to signal EOF at the same time.
- c.extPending.eof()
- c.pending.eof()
+ ch.extPending.eof()
+ ch.pending.eof()
return nil
}
@@ -419,24 +419,24 @@ func (c *channel) handlePacket(packet []byte) error {
switch msg := decoded.(type) {
case *channelOpenFailureMsg:
- if err := c.responseMessageReceived(); err != nil {
+ if err := ch.responseMessageReceived(); err != nil {
return err
}
- c.mux.chanList.remove(msg.PeersId)
- c.msg <- msg
+ ch.mux.chanList.remove(msg.PeersID)
+ ch.msg <- msg
case *channelOpenConfirmMsg:
- if err := c.responseMessageReceived(); err != nil {
+ if err := ch.responseMessageReceived(); err != nil {
return err
}
if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize)
}
- c.remoteId = msg.MyId
- c.maxRemotePayload = msg.MaxPacketSize
- c.remoteWin.add(msg.MyWindow)
- c.msg <- msg
+ ch.remoteId = msg.MyID
+ ch.maxRemotePayload = msg.MaxPacketSize
+ ch.remoteWin.add(msg.MyWindow)
+ ch.msg <- msg
case *windowAdjustMsg:
- if !c.remoteWin.add(msg.AdditionalBytes) {
+ if !ch.remoteWin.add(msg.AdditionalBytes) {
return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes)
}
case *channelRequestMsg:
@@ -444,12 +444,12 @@ func (c *channel) handlePacket(packet []byte) error {
Type: msg.Request,
WantReply: msg.WantReply,
Payload: msg.RequestSpecificData,
- ch: c,
+ ch: ch,
}
- c.incomingRequests <- &req
+ ch.incomingRequests <- &req
default:
- c.msg <- msg
+ ch.msg <- msg
}
return nil
}
@@ -488,23 +488,23 @@ func (e *extChannel) Read(data []byte) (n int, err error) {
return e.ch.ReadExtended(data, e.code)
}
-func (c *channel) Accept() (Channel, <-chan *Request, error) {
- if c.decided {
+func (ch *channel) Accept() (Channel, <-chan *Request, error) {
+ if ch.decided {
return nil, nil, errDecidedAlready
}
- c.maxIncomingPayload = channelMaxPacket
+ ch.maxIncomingPayload = channelMaxPacket
confirm := channelOpenConfirmMsg{
- PeersId: c.remoteId,
- MyId: c.localId,
- MyWindow: c.myWindow,
- MaxPacketSize: c.maxIncomingPayload,
+ PeersID: ch.remoteId,
+ MyID: ch.localId,
+ MyWindow: ch.myWindow,
+ MaxPacketSize: ch.maxIncomingPayload,
}
- c.decided = true
- if err := c.sendMessage(confirm); err != nil {
+ ch.decided = true
+ if err := ch.sendMessage(confirm); err != nil {
return nil, nil, err
}
- return c, c.incomingRequests, nil
+ return ch, ch.incomingRequests, nil
}
func (ch *channel) Reject(reason RejectionReason, message string) error {
@@ -512,7 +512,7 @@ func (ch *channel) Reject(reason RejectionReason, message string) error {
return errDecidedAlready
}
reject := channelOpenFailureMsg{
- PeersId: ch.remoteId,
+ PeersID: ch.remoteId,
Reason: reason,
Message: message,
Language: "en",
@@ -541,7 +541,7 @@ func (ch *channel) CloseWrite() error {
}
ch.sentEOF = true
return ch.sendMessage(channelEOFMsg{
- PeersId: ch.remoteId})
+ PeersID: ch.remoteId})
}
func (ch *channel) Close() error {
@@ -550,7 +550,7 @@ func (ch *channel) Close() error {
}
return ch.sendMessage(channelCloseMsg{
- PeersId: ch.remoteId})
+ PeersID: ch.remoteId})
}
// Extended returns an io.ReadWriter that sends and receives data on the given,
@@ -577,7 +577,7 @@ func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (boo
}
msg := channelRequestMsg{
- PeersId: ch.remoteId,
+ PeersID: ch.remoteId,
Request: name,
WantReply: wantReply,
RequestSpecificData: payload,
@@ -614,11 +614,11 @@ func (ch *channel) ackRequest(ok bool) error {
var msg interface{}
if !ok {
msg = channelRequestFailureMsg{
- PeersId: ch.remoteId,
+ PeersID: ch.remoteId,
}
} else {
msg = channelRequestSuccessMsg{
- PeersId: ch.remoteId,
+ PeersID: ch.remoteId,
}
}
return ch.sendMessage(msg)
diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go
index aed2b1f01..67b012610 100644
--- a/vendor/golang.org/x/crypto/ssh/cipher.go
+++ b/vendor/golang.org/x/crypto/ssh/cipher.go
@@ -16,6 +16,10 @@ import (
"hash"
"io"
"io/ioutil"
+ "math/bits"
+
+ "golang.org/x/crypto/internal/chacha20"
+ "golang.org/x/crypto/poly1305"
)
const (
@@ -53,78 +57,78 @@ func newRC4(key, iv []byte) (cipher.Stream, error) {
return rc4.NewCipher(key)
}
-type streamCipherMode struct {
- keySize int
- ivSize int
- skip int
- createFunc func(key, iv []byte) (cipher.Stream, error)
+type cipherMode struct {
+ keySize int
+ ivSize int
+ create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error)
}
-func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) {
- if len(key) < c.keySize {
- panic("ssh: key length too small for cipher")
- }
- if len(iv) < c.ivSize {
- panic("ssh: iv too small for cipher")
- }
-
- stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize])
- if err != nil {
- return nil, err
- }
-
- var streamDump []byte
- if c.skip > 0 {
- streamDump = make([]byte, 512)
- }
-
- for remainingToDump := c.skip; remainingToDump > 0; {
- dumpThisTime := remainingToDump
- if dumpThisTime > len(streamDump) {
- dumpThisTime = len(streamDump)
+func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
+ return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
+ stream, err := createFunc(key, iv)
+ if err != nil {
+ return nil, err
}
- stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime])
- remainingToDump -= dumpThisTime
- }
- return stream, nil
+ var streamDump []byte
+ if skip > 0 {
+ streamDump = make([]byte, 512)
+ }
+
+ for remainingToDump := skip; remainingToDump > 0; {
+ dumpThisTime := remainingToDump
+ if dumpThisTime > len(streamDump) {
+ dumpThisTime = len(streamDump)
+ }
+ stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime])
+ remainingToDump -= dumpThisTime
+ }
+
+ mac := macModes[algs.MAC].new(macKey)
+ return &streamPacketCipher{
+ mac: mac,
+ etm: macModes[algs.MAC].etm,
+ macResult: make([]byte, mac.Size()),
+ cipher: stream,
+ }, nil
+ }
}
// cipherModes documents properties of supported ciphers. Ciphers not included
// are not supported and will not be negotiated, even if explicitly requested in
// ClientConfig.Crypto.Ciphers.
-var cipherModes = map[string]*streamCipherMode{
+var cipherModes = map[string]*cipherMode{
// Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms
// are defined in the order specified in the RFC.
- "aes128-ctr": {16, aes.BlockSize, 0, newAESCTR},
- "aes192-ctr": {24, aes.BlockSize, 0, newAESCTR},
- "aes256-ctr": {32, aes.BlockSize, 0, newAESCTR},
+ "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)},
+ "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)},
+ "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)},
// Ciphers from RFC4345, which introduces security-improved arcfour ciphers.
// They are defined in the order specified in the RFC.
- "arcfour128": {16, 0, 1536, newRC4},
- "arcfour256": {32, 0, 1536, newRC4},
+ "arcfour128": {16, 0, streamCipherMode(1536, newRC4)},
+ "arcfour256": {32, 0, streamCipherMode(1536, newRC4)},
// Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
// Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
// RC4) has problems with weak keys, and should be used with caution."
// RFC4345 introduces improved versions of Arcfour.
- "arcfour": {16, 0, 0, newRC4},
+ "arcfour": {16, 0, streamCipherMode(0, newRC4)},
- // AES-GCM is not a stream cipher, so it is constructed with a
- // special case. If we add any more non-stream ciphers, we
- // should invest a cleaner way to do this.
- gcmCipherID: {16, 12, 0, nil},
+ // AEAD ciphers
+ gcmCipherID: {16, 12, newGCMCipher},
+ chacha20Poly1305ID: {64, 0, newChaCha20Cipher},
// CBC mode is insecure and so is not included in the default config.
// (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely
// needed, it's possible to specify a custom Config to enable it.
// You should expect that an active attacker can recover plaintext if
// you do.
- aes128cbcID: {16, aes.BlockSize, 0, nil},
+ aes128cbcID: {16, aes.BlockSize, newAESCBCCipher},
- // 3des-cbc is insecure and is disabled by default.
- tripledescbcID: {24, des.BlockSize, 0, nil},
+ // 3des-cbc is insecure and is not included in the default
+ // config.
+ tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher},
}
// prefixLen is the length of the packet prefix that contains the packet length
@@ -304,7 +308,7 @@ type gcmCipher struct {
buf []byte
}
-func newGCMCipher(iv, key []byte) (packetCipher, error) {
+func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) {
c, err := aes.NewCipher(key)
if err != nil {
return nil, err
@@ -372,7 +376,7 @@ func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
}
length := binary.BigEndian.Uint32(c.prefix[:])
if length > maxPacket {
- return nil, errors.New("ssh: max packet length exceeded.")
+ return nil, errors.New("ssh: max packet length exceeded")
}
if cap(c.buf) < int(length+gcmTagSize) {
@@ -422,7 +426,7 @@ type cbcCipher struct {
oracleCamouflage uint32
}
-func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
+func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
cbc := &cbcCipher{
mac: macModes[algs.MAC].new(macKey),
decrypter: cipher.NewCBCDecrypter(c, iv),
@@ -436,13 +440,13 @@ func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorith
return cbc, nil
}
-func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
+func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
c, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
- cbc, err := newCBCCipher(c, iv, key, macKey, algs)
+ cbc, err := newCBCCipher(c, key, iv, macKey, algs)
if err != nil {
return nil, err
}
@@ -450,13 +454,13 @@ func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCi
return cbc, nil
}
-func newTripleDESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
+func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
c, err := des.NewTripleDESCipher(key)
if err != nil {
return nil, err
}
- cbc, err := newCBCCipher(c, iv, key, macKey, algs)
+ cbc, err := newCBCCipher(c, key, iv, macKey, algs)
if err != nil {
return nil, err
}
@@ -548,11 +552,11 @@ func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error)
c.packetData = c.packetData[:entirePacketSize]
}
- if n, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil {
+ n, err := io.ReadFull(r, c.packetData[firstBlockLength:])
+ if err != nil {
return nil, err
- } else {
- c.oracleCamouflage -= uint32(n)
}
+ c.oracleCamouflage -= uint32(n)
remainingCrypted := c.packetData[firstBlockLength:macStart]
c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted)
@@ -627,3 +631,140 @@ func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, pack
return nil
}
+
+const chacha20Poly1305ID = "chacha20-poly1305@openssh.com"
+
+// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com
+// AEAD, which is described here:
+//
+// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00
+//
+// The methods here also implement padding, which RFC 4253 Section 6
+// also requires of stream ciphers.
+type chacha20Poly1305Cipher struct {
+ lengthKey [8]uint32
+ contentKey [8]uint32
+ buf []byte
+}
+
+func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) {
+ if len(key) != 64 {
+ panic(len(key))
+ }
+
+ c := &chacha20Poly1305Cipher{
+ buf: make([]byte, 256),
+ }
+
+ for i := range c.contentKey {
+ c.contentKey[i] = binary.LittleEndian.Uint32(key[i*4 : (i+1)*4])
+ }
+ for i := range c.lengthKey {
+ c.lengthKey[i] = binary.LittleEndian.Uint32(key[(i+8)*4 : (i+9)*4])
+ }
+ return c, nil
+}
+
+func (c *chacha20Poly1305Cipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
+ nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)}
+ s := chacha20.New(c.contentKey, nonce)
+ var polyKey [32]byte
+ s.XORKeyStream(polyKey[:], polyKey[:])
+ s.Advance() // skip next 32 bytes
+
+ encryptedLength := c.buf[:4]
+ if _, err := io.ReadFull(r, encryptedLength); err != nil {
+ return nil, err
+ }
+
+ var lenBytes [4]byte
+ chacha20.New(c.lengthKey, nonce).XORKeyStream(lenBytes[:], encryptedLength)
+
+ length := binary.BigEndian.Uint32(lenBytes[:])
+ if length > maxPacket {
+ return nil, errors.New("ssh: invalid packet length, packet too large")
+ }
+
+ contentEnd := 4 + length
+ packetEnd := contentEnd + poly1305.TagSize
+ if uint32(cap(c.buf)) < packetEnd {
+ c.buf = make([]byte, packetEnd)
+ copy(c.buf[:], encryptedLength)
+ } else {
+ c.buf = c.buf[:packetEnd]
+ }
+
+ if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil {
+ return nil, err
+ }
+
+ var mac [poly1305.TagSize]byte
+ copy(mac[:], c.buf[contentEnd:packetEnd])
+ if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) {
+ return nil, errors.New("ssh: MAC failure")
+ }
+
+ plain := c.buf[4:contentEnd]
+ s.XORKeyStream(plain, plain)
+
+ padding := plain[0]
+ if padding < 4 {
+ // padding is a byte, so it automatically satisfies
+ // the maximum size, which is 255.
+ return nil, fmt.Errorf("ssh: illegal padding %d", padding)
+ }
+
+ if int(padding)+1 >= len(plain) {
+ return nil, fmt.Errorf("ssh: padding %d too large", padding)
+ }
+
+ plain = plain[1 : len(plain)-int(padding)]
+
+ return plain, nil
+}
+
+func (c *chacha20Poly1305Cipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error {
+ nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)}
+ s := chacha20.New(c.contentKey, nonce)
+ var polyKey [32]byte
+ s.XORKeyStream(polyKey[:], polyKey[:])
+ s.Advance() // skip next 32 bytes
+
+ // There is no blocksize, so fall back to multiple of 8 byte
+ // padding, as described in RFC 4253, Sec 6.
+ const packetSizeMultiple = 8
+
+ padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple
+ if padding < 4 {
+ padding += packetSizeMultiple
+ }
+
+ // size (4 bytes), padding (1), payload, padding, tag.
+ totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize
+ if cap(c.buf) < totalLength {
+ c.buf = make([]byte, totalLength)
+ } else {
+ c.buf = c.buf[:totalLength]
+ }
+
+ binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding))
+ chacha20.New(c.lengthKey, nonce).XORKeyStream(c.buf, c.buf[:4])
+ c.buf[4] = byte(padding)
+ copy(c.buf[5:], payload)
+ packetEnd := 5 + len(payload) + padding
+ if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil {
+ return err
+ }
+
+ s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd])
+
+ var mac [poly1305.TagSize]byte
+ poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey)
+
+ copy(c.buf[packetEnd:], mac[:])
+
+ if _, err := w.Write(c.buf); err != nil {
+ return err
+ }
+ return nil
+}
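
The padding rule in writePacket is easy to check by hand: the encrypted body
(the padding-length byte, the payload, and the padding itself) must be a
multiple of 8 bytes and carry at least 4 bytes of padding, so a 12-byte
payload gets 11 padding bytes (8-(13%8) = 3, bumped by 8), giving a 24-byte
body plus the 4-byte length field and the 16-byte Poly1305 tag: 44 bytes on
the wire. A minimal standalone sketch of just that rule (not part of this
patch; paddingFor is a hypothetical helper):

    package main

    import "fmt"

    // paddingFor mirrors the padding computation in writePacket above.
    func paddingFor(payloadLen int) int {
        const packetSizeMultiple = 8
        padding := packetSizeMultiple - (1+payloadLen)%packetSizeMultiple
        if padding < 4 {
            padding += packetSizeMultiple
        }
        return padding
    }

    func main() {
        for _, n := range []int{0, 12, 32} {
            pad := paddingFor(n)
            fmt.Printf("payload=%2d padding=%2d body=%2d\n", n, pad, 1+n+pad)
        }
    }
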
diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go
index a7e3263bc..7b00bff1c 100644
--- a/vendor/golang.org/x/crypto/ssh/client.go
+++ b/vendor/golang.org/x/crypto/ssh/client.go
@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"net"
+ "os"
"sync"
"time"
)
@@ -18,6 +19,8 @@ import (
type Client struct {
Conn
+ handleForwardsOnce sync.Once // guards calling (*Client).handleForwards
+
forwards forwardList // forwarded tcpip connections from the remote side
mu sync.Mutex
channelHandlers map[string]chan NewChannel
@@ -59,8 +62,6 @@ func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client {
conn.Wait()
conn.forwards.closeAll()
}()
- go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip"))
- go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-streamlocal@openssh.com"))
return conn
}
@@ -184,9 +185,13 @@ func Dial(network, addr string, config *ClientConfig) (*Client, error) {
// keys. A HostKeyCallback must return nil if the host key is OK, or
// an error to reject it. It receives the hostname as passed to Dial
// or NewClientConn. The remote address is the RemoteAddr of the
-// net.Conn underlying the the SSH connection.
+// net.Conn underlying the SSH connection.
type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
+// BannerCallback is the function type used to handle the banner sent by
+// the server. A BannerCallback receives the message sent by the remote server.
+type BannerCallback func(message string) error
+
// A ClientConfig structure is used to configure a Client. It must not be
// modified after having been passed to an SSH function.
type ClientConfig struct {
@@ -209,6 +214,12 @@ type ClientConfig struct {
// FixedHostKey can be used for simplistic host key checks.
HostKeyCallback HostKeyCallback
+ // BannerCallback is called during the SSH handshake to display a custom
+ // server message. The client configuration can supply this callback to
+ // handle it as desired. The function BannerDisplayStderr can be used for
+ // simplistic display on Stderr.
+ BannerCallback BannerCallback
+
// ClientVersion contains the version identification string that will
// be used for the connection. If empty, a reasonable default is used.
ClientVersion string
@@ -255,3 +266,13 @@ func FixedHostKey(key PublicKey) HostKeyCallback {
hk := &fixedHostKey{key}
return hk.check
}
+
+// BannerDisplayStderr returns a function that can be used for
+// ClientConfig.BannerCallback to display banners on os.Stderr.
+func BannerDisplayStderr() BannerCallback {
+ return func(banner string) error {
+ _, err := os.Stderr.WriteString(banner)
+
+ return err
+ }
+}
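
Together with the handshake change further down, this lets clients surface
pre-authentication banners. A hedged usage sketch (host, user and password
are placeholders, and the imports golang.org/x/crypto/ssh and log are
assumed; InsecureIgnoreHostKey is for illustration only, real code should
verify host keys):

    config := &ssh.ClientConfig{
        User:            "demo",
        Auth:            []ssh.AuthMethod{ssh.Password("secret")},
        HostKeyCallback: ssh.InsecureIgnoreHostKey(), // example only
        BannerCallback:  ssh.BannerDisplayStderr(),   // print banners to os.Stderr
    }
    client, err := ssh.Dial("tcp", "example.com:22", config)
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()
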
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go
index 3acd8d498..5f44b7740 100644
--- a/vendor/golang.org/x/crypto/ssh/client_auth.go
+++ b/vendor/golang.org/x/crypto/ssh/client_auth.go
@@ -11,6 +11,14 @@ import (
"io"
)
+type authResult int
+
+const (
+ authFailure authResult = iota
+ authPartialSuccess
+ authSuccess
+)
+
// clientAuthenticate authenticates with the remote server. See RFC 4252.
func (c *connection) clientAuthenticate(config *ClientConfig) error {
// initiate user auth session
@@ -37,11 +45,12 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
if err != nil {
return err
}
- if ok {
+ if ok == authSuccess {
// success
return nil
+ } else if ok == authFailure {
+ tried[auth.method()] = true
}
- tried[auth.method()] = true
if methods == nil {
methods = lastMethods
}
@@ -82,7 +91,7 @@ type AuthMethod interface {
// If authentication is not successful, a []string of alternative
// method names is returned. If the slice is nil, it will be ignored
// and the previous set of possible methods will be reused.
- auth(session []byte, user string, p packetConn, rand io.Reader) (bool, []string, error)
+ auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error)
// method returns the RFC 4252 method name.
method() string
@@ -91,13 +100,13 @@ type AuthMethod interface {
// "none" authentication, RFC 4252 section 5.2.
type noneAuth int
-func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
+func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
if err := c.writePacket(Marshal(&userAuthRequestMsg{
User: user,
Service: serviceSSH,
Method: "none",
})); err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
return handleAuthResponse(c)
@@ -111,7 +120,7 @@ func (n *noneAuth) method() string {
// a function call, e.g. by prompting the user.
type passwordCallback func() (password string, err error)
-func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
+func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
type passwordAuthMsg struct {
User string `sshtype:"50"`
Service string
@@ -125,7 +134,7 @@ func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand
// The program may only find out that the user doesn't have a password
// when prompting.
if err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
if err := c.writePacket(Marshal(&passwordAuthMsg{
@@ -135,7 +144,7 @@ func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand
Reply: false,
Password: pw,
})); err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
return handleAuthResponse(c)
@@ -178,7 +187,7 @@ func (cb publicKeyCallback) method() string {
return "publickey"
}
-func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
+func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
// Authentication is performed by sending an enquiry to test if a key is
// acceptable to the remote. If the key is acceptable, the client will
// attempt to authenticate with the valid key. If not the client will repeat
@@ -186,13 +195,13 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
signers, err := cb()
if err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
var methods []string
for _, signer := range signers {
ok, err := validateKey(signer.PublicKey(), user, c)
if err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
if !ok {
continue
@@ -206,7 +215,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
Method: cb.method(),
}, []byte(pub.Type()), pubKey))
if err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
// manually wrap the serialized signature in a string
@@ -224,24 +233,24 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
}
p := Marshal(&msg)
if err := c.writePacket(p); err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
- var success bool
+ var success authResult
success, methods, err = handleAuthResponse(c)
if err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
// If authentication succeeds or the list of available methods does not
// contain the "publickey" method, do not attempt to authenticate with any
// other keys. According to RFC 4252 Section 7, the latter can occur when
// additional authentication methods are required.
- if success || !containsMethod(methods, cb.method()) {
+ if success == authSuccess || !containsMethod(methods, cb.method()) {
return success, methods, err
}
}
- return false, methods, nil
+ return authFailure, methods, nil
}
func containsMethod(methods []string, method string) bool {
@@ -283,7 +292,9 @@ func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
}
switch packet[0] {
case msgUserAuthBanner:
- // TODO(gpaul): add callback to present the banner to the user
+ if err := handleBannerResponse(c, packet); err != nil {
+ return false, err
+ }
case msgUserAuthPubKeyOk:
var msg userAuthPubKeyOkMsg
if err := Unmarshal(packet, &msg); err != nil {
@@ -316,30 +327,53 @@ func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMet
// handleAuthResponse returns whether the preceding authentication request succeeded
// along with a list of remaining authentication methods to try next and
// an error if an unexpected response was received.
-func handleAuthResponse(c packetConn) (bool, []string, error) {
+func handleAuthResponse(c packetConn) (authResult, []string, error) {
for {
packet, err := c.readPacket()
if err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
switch packet[0] {
case msgUserAuthBanner:
- // TODO: add callback to present the banner to the user
+ if err := handleBannerResponse(c, packet); err != nil {
+ return authFailure, nil, err
+ }
case msgUserAuthFailure:
var msg userAuthFailureMsg
if err := Unmarshal(packet, &msg); err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
- return false, msg.Methods, nil
+ if msg.PartialSuccess {
+ return authPartialSuccess, msg.Methods, nil
+ }
+ return authFailure, msg.Methods, nil
case msgUserAuthSuccess:
- return true, nil, nil
+ return authSuccess, nil, nil
default:
- return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
+ return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
}
}
}
+func handleBannerResponse(c packetConn, packet []byte) error {
+ var msg userAuthBannerMsg
+ if err := Unmarshal(packet, &msg); err != nil {
+ return err
+ }
+
+ transport, ok := c.(*handshakeTransport)
+ if !ok {
+ return nil
+ }
+
+ if transport.bannerCallback != nil {
+ return transport.bannerCallback(msg.Message)
+ }
+
+ return nil
+}
+
// KeyboardInteractiveChallenge should print questions, optionally
// disabling echoing (e.g. for passwords), and return all the answers.
// Challenge may be called multiple times in a single session. After
@@ -359,7 +393,7 @@ func (cb KeyboardInteractiveChallenge) method() string {
return "keyboard-interactive"
}
-func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
+func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
type initiateMsg struct {
User string `sshtype:"50"`
Service string
@@ -373,37 +407,42 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
Service: serviceSSH,
Method: "keyboard-interactive",
})); err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
for {
packet, err := c.readPacket()
if err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
// Like handleAuthResponse, but with fewer options.
switch packet[0] {
case msgUserAuthBanner:
- // TODO: Print banners during userauth.
+ if err := handleBannerResponse(c, packet); err != nil {
+ return authFailure, nil, err
+ }
continue
case msgUserAuthInfoRequest:
// OK
case msgUserAuthFailure:
var msg userAuthFailureMsg
if err := Unmarshal(packet, &msg); err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
- return false, msg.Methods, nil
+ if msg.PartialSuccess {
+ return authPartialSuccess, msg.Methods, nil
+ }
+ return authFailure, msg.Methods, nil
case msgUserAuthSuccess:
- return true, nil, nil
+ return authSuccess, nil, nil
default:
- return false, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
+ return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
}
var msg userAuthInfoRequestMsg
if err := Unmarshal(packet, &msg); err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
// Manually unpack the prompt/echo pairs.
@@ -413,7 +452,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
for i := 0; i < int(msg.NumPrompts); i++ {
prompt, r, ok := parseString(rest)
if !ok || len(r) == 0 {
- return false, nil, errors.New("ssh: prompt format error")
+ return authFailure, nil, errors.New("ssh: prompt format error")
}
prompts = append(prompts, string(prompt))
echos = append(echos, r[0] != 0)
@@ -421,16 +460,16 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
}
if len(rest) != 0 {
- return false, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
+ return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
}
answers, err := cb(msg.User, msg.Instruction, prompts, echos)
if err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
if len(answers) != len(prompts) {
- return false, nil, errors.New("ssh: not enough answers from keyboard-interactive callback")
+ return authFailure, nil, errors.New("ssh: not enough answers from keyboard-interactive callback")
}
responseLength := 1 + 4
for _, a := range answers {
@@ -446,7 +485,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
}
if err := c.writePacket(serialized); err != nil {
- return false, nil, err
+ return authFailure, nil, err
}
}
}
@@ -456,10 +495,10 @@ type retryableAuthMethod struct {
maxTries int
}
-func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok bool, methods []string, err error) {
+func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) {
for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ {
ok, methods, err = r.authMethod.auth(session, user, c, rand)
- if ok || err != nil { // either success or error terminate
+ if ok != authFailure || err != nil { // either success, partial success or error terminate
return ok, methods, err
}
}
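
The tri-state result matters for servers that require several methods in
sequence: on a userAuthFailureMsg with PartialSuccess set, the method is no
longer marked as exhausted and RetryableAuthMethod stops retrying instead of
burning its attempts. A sketch of chaining methods for such a two-step server
(signer, challenge and hostKeyCallback are placeholders):

    auths := []ssh.AuthMethod{
        ssh.PublicKeys(signer), // may yield partial success on 2FA servers
        ssh.RetryableAuthMethod(ssh.KeyboardInteractive(challenge), 3),
    }
    config := &ssh.ClientConfig{
        User:            "demo",
        Auth:            auths,
        HostKeyCallback: hostKeyCallback,
    }
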
diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go
index dc39e4d23..04f3620b3 100644
--- a/vendor/golang.org/x/crypto/ssh/common.go
+++ b/vendor/golang.org/x/crypto/ssh/common.go
@@ -24,11 +24,21 @@ const (
serviceSSH = "ssh-connection"
)
-// supportedCiphers specifies the supported ciphers in preference order.
+// supportedCiphers lists ciphers we support but might not recommend.
var supportedCiphers = []string{
"aes128-ctr", "aes192-ctr", "aes256-ctr",
"aes128-gcm@openssh.com",
- "arcfour256", "arcfour128",
+ chacha20Poly1305ID,
+ "arcfour256", "arcfour128", "arcfour",
+ aes128cbcID,
+ tripledescbcID,
+}
+
+// preferredCiphers specifies the default preference for ciphers.
+var preferredCiphers = []string{
+ "aes128-gcm@openssh.com",
+ chacha20Poly1305ID,
+ "aes128-ctr", "aes192-ctr", "aes256-ctr",
}
// supportedKexAlgos specifies the supported key-exchange algorithms in
@@ -211,7 +221,7 @@ func (c *Config) SetDefaults() {
c.Rand = rand.Reader
}
if c.Ciphers == nil {
- c.Ciphers = supportedCiphers
+ c.Ciphers = preferredCiphers
}
var ciphers []string
for _, c := range c.Ciphers {
@@ -242,7 +252,7 @@ func (c *Config) SetDefaults() {
// buildDataSignedForAuth returns the data that is signed in order to prove
// possession of a private key. See RFC 4252, section 7.
-func buildDataSignedForAuth(sessionId []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte {
+func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte {
data := struct {
Session []byte
Type byte
@@ -253,7 +263,7 @@ func buildDataSignedForAuth(sessionId []byte, req userAuthRequestMsg, algo, pubK
Algo []byte
PubKey []byte
}{
- sessionId,
+ sessionID,
msgUserAuthRequest,
req.User,
req.Service,
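
With the supported/preferred split, legacy ciphers stay compiled in but are
no longer negotiated by default. A sketch of opting back in via an explicit
cipher list (the remaining ClientConfig fields are elided):

    config := &ssh.ClientConfig{
        Config: ssh.Config{
            // Preference order; the default list plus one legacy cipher.
            Ciphers: []string{
                "aes128-gcm@openssh.com", "chacha20-poly1305@openssh.com",
                "aes128-ctr", "aes192-ctr", "aes256-ctr",
                "aes128-cbc", // insecure; only for peers that offer nothing better
            },
        },
        // ... User, Auth, HostKeyCallback ...
    }
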
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
index 932ce8393..4f7912ecd 100644
--- a/vendor/golang.org/x/crypto/ssh/handshake.go
+++ b/vendor/golang.org/x/crypto/ssh/handshake.go
@@ -78,6 +78,11 @@ type handshakeTransport struct {
dialAddress string
remoteAddr net.Addr
+ // bannerCallback is non-empty if we are the client and it has been set in
+ // ClientConfig. In that case it is called during the user authentication
+ // dance to handle a custom server's message.
+ bannerCallback BannerCallback
+
// Algorithms agreed in the last key exchange.
algorithms *algorithms
@@ -120,6 +125,7 @@ func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byt
t.dialAddress = dialAddr
t.remoteAddr = addr
t.hostKeyCallback = config.HostKeyCallback
+ t.bannerCallback = config.BannerCallback
if config.HostKeyAlgorithms != nil {
t.hostKeyAlgorithms = config.HostKeyAlgorithms
} else {
diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go
index f91c2770e..f34bcc013 100644
--- a/vendor/golang.org/x/crypto/ssh/kex.go
+++ b/vendor/golang.org/x/crypto/ssh/kex.go
@@ -119,7 +119,7 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha
return nil, err
}
- kInt, err := group.diffieHellman(kexDHReply.Y, x)
+ ki, err := group.diffieHellman(kexDHReply.Y, x)
if err != nil {
return nil, err
}
@@ -129,8 +129,8 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha
writeString(h, kexDHReply.HostKey)
writeInt(h, X)
writeInt(h, kexDHReply.Y)
- K := make([]byte, intLength(kInt))
- marshalInt(K, kInt)
+ K := make([]byte, intLength(ki))
+ marshalInt(K, ki)
h.Write(K)
return &kexResult{
@@ -164,7 +164,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha
}
Y := new(big.Int).Exp(group.g, y, group.p)
- kInt, err := group.diffieHellman(kexDHInit.X, y)
+ ki, err := group.diffieHellman(kexDHInit.X, y)
if err != nil {
return nil, err
}
@@ -177,8 +177,8 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha
writeInt(h, kexDHInit.X)
writeInt(h, Y)
- K := make([]byte, intLength(kInt))
- marshalInt(K, kInt)
+ K := make([]byte, intLength(ki))
+ marshalInt(K, ki)
h.Write(K)
H := h.Sum(nil)
@@ -462,9 +462,9 @@ func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handsh
writeString(h, kp.pub[:])
writeString(h, reply.EphemeralPubKey)
- kInt := new(big.Int).SetBytes(secret[:])
- K := make([]byte, intLength(kInt))
- marshalInt(K, kInt)
+ ki := new(big.Int).SetBytes(secret[:])
+ K := make([]byte, intLength(ki))
+ marshalInt(K, ki)
h.Write(K)
return &kexResult{
@@ -510,9 +510,9 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh
writeString(h, kexInit.ClientPubKey)
writeString(h, kp.pub[:])
- kInt := new(big.Int).SetBytes(secret[:])
- K := make([]byte, intLength(kInt))
- marshalInt(K, kInt)
+ ki := new(big.Int).SetBytes(secret[:])
+ K := make([]byte, intLength(ki))
+ marshalInt(K, ki)
h.Write(K)
H := h.Sum(nil)
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
index b682c1741..969804794 100644
--- a/vendor/golang.org/x/crypto/ssh/keys.go
+++ b/vendor/golang.org/x/crypto/ssh/keys.go
@@ -38,6 +38,16 @@ const (
KeyAlgoED25519 = "ssh-ed25519"
)
+// These constants represent non-default signature algorithms that are supported
+// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See
+// [PROTOCOL.agent] section 4.5.1 and
+// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10
+const (
+ SigAlgoRSA = "ssh-rsa"
+ SigAlgoRSASHA2256 = "rsa-sha2-256"
+ SigAlgoRSASHA2512 = "rsa-sha2-512"
+)
+
// parsePubKey parses a public key of the given algorithm.
// Use ParsePublicKey for keys with prepended algorithm.
func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) {
@@ -276,7 +286,8 @@ type PublicKey interface {
Type() string
// Marshal returns the serialized key data in SSH wire format,
- // with the name prefix.
+ // with the name prefix. To unmarshal the returned data, use
+ // the ParsePublicKey function.
Marshal() []byte
// Verify that sig is a signature on the given data using this
@@ -300,6 +311,19 @@ type Signer interface {
Sign(rand io.Reader, data []byte) (*Signature, error)
}
+// An AlgorithmSigner is a Signer that also supports specifying a specific
+// algorithm to use for signing.
+type AlgorithmSigner interface {
+ Signer
+
+ // SignWithAlgorithm is like Signer.Sign, but allows specification of a
+ // non-default signing algorithm. See the SigAlgo* constants in this
+ // package for signature algorithms supported by this package. Callers may
+ // pass an empty string for the algorithm in which case the AlgorithmSigner
+ // will use its default algorithm.
+ SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error)
+}
+
type rsaPublicKey rsa.PublicKey
func (r *rsaPublicKey) Type() string {
@@ -348,13 +372,21 @@ func (r *rsaPublicKey) Marshal() []byte {
}
func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
- if sig.Format != r.Type() {
+ var hash crypto.Hash
+ switch sig.Format {
+ case SigAlgoRSA:
+ hash = crypto.SHA1
+ case SigAlgoRSASHA2256:
+ hash = crypto.SHA256
+ case SigAlgoRSASHA2512:
+ hash = crypto.SHA512
+ default:
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
}
- h := crypto.SHA1.New()
+ h := hash.New()
h.Write(data)
digest := h.Sum(nil)
- return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob)
+ return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, sig.Blob)
}
func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
@@ -363,7 +395,7 @@ func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
type dsaPublicKey dsa.PublicKey
-func (r *dsaPublicKey) Type() string {
+func (k *dsaPublicKey) Type() string {
return "ssh-dss"
}
@@ -458,6 +490,14 @@ func (k *dsaPrivateKey) PublicKey() PublicKey {
}
func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) {
+ return k.SignWithAlgorithm(rand, data, "")
+}
+
+func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
+ if algorithm != "" && algorithm != k.PublicKey().Type() {
+ return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
+ }
+
h := crypto.SHA1.New()
h.Write(data)
digest := h.Sum(nil)
@@ -481,12 +521,12 @@ func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) {
type ecdsaPublicKey ecdsa.PublicKey
-func (key *ecdsaPublicKey) Type() string {
- return "ecdsa-sha2-" + key.nistID()
+func (k *ecdsaPublicKey) Type() string {
+ return "ecdsa-sha2-" + k.nistID()
}
-func (key *ecdsaPublicKey) nistID() string {
- switch key.Params().BitSize {
+func (k *ecdsaPublicKey) nistID() string {
+ switch k.Params().BitSize {
case 256:
return "nistp256"
case 384:
@@ -499,7 +539,7 @@ func (key *ecdsaPublicKey) nistID() string {
type ed25519PublicKey ed25519.PublicKey
-func (key ed25519PublicKey) Type() string {
+func (k ed25519PublicKey) Type() string {
return KeyAlgoED25519
}
@@ -518,23 +558,23 @@ func parseED25519(in []byte) (out PublicKey, rest []byte, err error) {
return (ed25519PublicKey)(key), w.Rest, nil
}
-func (key ed25519PublicKey) Marshal() []byte {
+func (k ed25519PublicKey) Marshal() []byte {
w := struct {
Name string
KeyBytes []byte
}{
KeyAlgoED25519,
- []byte(key),
+ []byte(k),
}
return Marshal(&w)
}
-func (key ed25519PublicKey) Verify(b []byte, sig *Signature) error {
- if sig.Format != key.Type() {
- return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type())
+func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error {
+ if sig.Format != k.Type() {
+ return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
- edKey := (ed25519.PublicKey)(key)
+ edKey := (ed25519.PublicKey)(k)
if ok := ed25519.Verify(edKey, b, sig.Blob); !ok {
return errors.New("ssh: signature did not verify")
}
@@ -595,9 +635,9 @@ func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) {
return (*ecdsaPublicKey)(key), w.Rest, nil
}
-func (key *ecdsaPublicKey) Marshal() []byte {
+func (k *ecdsaPublicKey) Marshal() []byte {
// See RFC 5656, section 3.1.
- keyBytes := elliptic.Marshal(key.Curve, key.X, key.Y)
+ keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y)
// ECDSA publickey struct layout should match the struct used by
// parseECDSACert in the x/crypto/ssh/agent package.
w := struct {
@@ -605,20 +645,20 @@ func (key *ecdsaPublicKey) Marshal() []byte {
ID string
Key []byte
}{
- key.Type(),
- key.nistID(),
+ k.Type(),
+ k.nistID(),
keyBytes,
}
return Marshal(&w)
}
-func (key *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
- if sig.Format != key.Type() {
- return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type())
+func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
+ if sig.Format != k.Type() {
+ return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
- h := ecHash(key.Curve).New()
+ h := ecHash(k.Curve).New()
h.Write(data)
digest := h.Sum(nil)
@@ -635,7 +675,7 @@ func (key *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
return err
}
- if ecdsa.Verify((*ecdsa.PublicKey)(key), digest, ecSig.R, ecSig.S) {
+ if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) {
return nil
}
return errors.New("ssh: signature did not verify")
@@ -690,16 +730,42 @@ func (s *wrappedSigner) PublicKey() PublicKey {
}
func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
+ return s.SignWithAlgorithm(rand, data, "")
+}
+
+func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
var hashFunc crypto.Hash
- switch key := s.pubKey.(type) {
- case *rsaPublicKey, *dsaPublicKey:
- hashFunc = crypto.SHA1
- case *ecdsaPublicKey:
- hashFunc = ecHash(key.Curve)
- case ed25519PublicKey:
- default:
- return nil, fmt.Errorf("ssh: unsupported key type %T", key)
+ if _, ok := s.pubKey.(*rsaPublicKey); ok {
+ // RSA keys support a few hash functions determined by the requested signature algorithm
+ switch algorithm {
+ case "", SigAlgoRSA:
+ algorithm = SigAlgoRSA
+ hashFunc = crypto.SHA1
+ case SigAlgoRSASHA2256:
+ hashFunc = crypto.SHA256
+ case SigAlgoRSASHA2512:
+ hashFunc = crypto.SHA512
+ default:
+ return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
+ }
+ } else {
+ // The only supported algorithm for all other key types is the same as the type of the key
+ if algorithm == "" {
+ algorithm = s.pubKey.Type()
+ } else if algorithm != s.pubKey.Type() {
+ return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
+ }
+
+ switch key := s.pubKey.(type) {
+ case *dsaPublicKey:
+ hashFunc = crypto.SHA1
+ case *ecdsaPublicKey:
+ hashFunc = ecHash(key.Curve)
+ case ed25519PublicKey:
+ default:
+ return nil, fmt.Errorf("ssh: unsupported key type %T", key)
+ }
}
var digest []byte
@@ -744,7 +810,7 @@ func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
}
return &Signature{
- Format: s.pubKey.Type(),
+ Format: algorithm,
Blob: signature,
}, nil
}
@@ -758,7 +824,7 @@ func NewPublicKey(key interface{}) (PublicKey, error) {
return (*rsaPublicKey)(key), nil
case *ecdsa.PublicKey:
if !supportedEllipticCurve(key.Curve) {
- return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported.")
+ return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported")
}
return (*ecdsaPublicKey)(key), nil
case *dsa.PublicKey:
@@ -802,7 +868,7 @@ func encryptedBlock(block *pem.Block) bool {
}
// ParseRawPrivateKey returns a private key from a PEM encoded private key. It
-// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys.
+// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys.
func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
block, _ := pem.Decode(pemBytes)
if block == nil {
@@ -816,6 +882,9 @@ func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
switch block.Type {
case "RSA PRIVATE KEY":
return x509.ParsePKCS1PrivateKey(block.Bytes)
+ // RFC5208 - https://tools.ietf.org/html/rfc5208
+ case "PRIVATE KEY":
+ return x509.ParsePKCS8PrivateKey(block.Bytes)
case "EC PRIVATE KEY":
return x509.ParseECPrivateKey(block.Bytes)
case "DSA PRIVATE KEY":
@@ -899,8 +968,8 @@ func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
// Implemented based on the documentation at
// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) {
- magic := append([]byte("openssh-key-v1"), 0)
- if !bytes.Equal(magic, key[0:len(magic)]) {
+ const magic = "openssh-key-v1\x00"
+ if len(key) < len(magic) || string(key[:len(magic)]) != magic {
return nil, errors.New("ssh: invalid openssh private key format")
}
remaining := key[len(magic):]
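
Signers built by this package now implement AlgorithmSigner, so callers can
ask an RSA key for an rsa-sha2-* signature explicitly. A hedged sketch
(rsaKey and data are placeholders; rand is crypto/rand):

    signer, err := ssh.NewSignerFromKey(rsaKey) // rsaKey is a *rsa.PrivateKey
    if err != nil {
        log.Fatal(err)
    }
    as, ok := signer.(ssh.AlgorithmSigner)
    if !ok {
        log.Fatal("signer does not support algorithm selection")
    }
    sig, err := as.SignWithAlgorithm(rand.Reader, data, ssh.SigAlgoRSASHA2256)
    if err != nil {
        log.Fatal(err)
    }
    _ = sig // sig.Format is now "rsa-sha2-256"
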
diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go
index e6ecd3afa..08d281173 100644
--- a/vendor/golang.org/x/crypto/ssh/messages.go
+++ b/vendor/golang.org/x/crypto/ssh/messages.go
@@ -23,10 +23,6 @@ const (
msgUnimplemented = 3
msgDebug = 4
msgNewKeys = 21
-
- // Standard authentication messages
- msgUserAuthSuccess = 52
- msgUserAuthBanner = 53
)
// SSH messages:
@@ -137,6 +133,18 @@ type userAuthFailureMsg struct {
PartialSuccess bool
}
+// See RFC 4252, section 5.1
+const msgUserAuthSuccess = 52
+
+// See RFC 4252, section 5.4
+const msgUserAuthBanner = 53
+
+type userAuthBannerMsg struct {
+ Message string `sshtype:"53"`
+ // unused, but required to allow message parsing
+ Language string
+}
+
// See RFC 4256, section 3.2
const msgUserAuthInfoRequest = 60
const msgUserAuthInfoResponse = 61
@@ -154,7 +162,7 @@ const msgChannelOpen = 90
type channelOpenMsg struct {
ChanType string `sshtype:"90"`
- PeersId uint32
+ PeersID uint32
PeersWindow uint32
MaxPacketSize uint32
TypeSpecificData []byte `ssh:"rest"`
@@ -165,7 +173,7 @@ const msgChannelData = 94
// Used for debug print outs of packets.
type channelDataMsg struct {
- PeersId uint32 `sshtype:"94"`
+ PeersID uint32 `sshtype:"94"`
Length uint32
Rest []byte `ssh:"rest"`
}
@@ -174,8 +182,8 @@ type channelDataMsg struct {
const msgChannelOpenConfirm = 91
type channelOpenConfirmMsg struct {
- PeersId uint32 `sshtype:"91"`
- MyId uint32
+ PeersID uint32 `sshtype:"91"`
+ MyID uint32
MyWindow uint32
MaxPacketSize uint32
TypeSpecificData []byte `ssh:"rest"`
@@ -185,7 +193,7 @@ type channelOpenConfirmMsg struct {
const msgChannelOpenFailure = 92
type channelOpenFailureMsg struct {
- PeersId uint32 `sshtype:"92"`
+ PeersID uint32 `sshtype:"92"`
Reason RejectionReason
Message string
Language string
@@ -194,7 +202,7 @@ type channelOpenFailureMsg struct {
const msgChannelRequest = 98
type channelRequestMsg struct {
- PeersId uint32 `sshtype:"98"`
+ PeersID uint32 `sshtype:"98"`
Request string
WantReply bool
RequestSpecificData []byte `ssh:"rest"`
@@ -204,28 +212,28 @@ type channelRequestMsg struct {
const msgChannelSuccess = 99
type channelRequestSuccessMsg struct {
- PeersId uint32 `sshtype:"99"`
+ PeersID uint32 `sshtype:"99"`
}
// See RFC 4254, section 5.4.
const msgChannelFailure = 100
type channelRequestFailureMsg struct {
- PeersId uint32 `sshtype:"100"`
+ PeersID uint32 `sshtype:"100"`
}
// See RFC 4254, section 5.3
const msgChannelClose = 97
type channelCloseMsg struct {
- PeersId uint32 `sshtype:"97"`
+ PeersID uint32 `sshtype:"97"`
}
// See RFC 4254, section 5.3
const msgChannelEOF = 96
type channelEOFMsg struct {
- PeersId uint32 `sshtype:"96"`
+ PeersID uint32 `sshtype:"96"`
}
// See RFC 4254, section 4
@@ -255,7 +263,7 @@ type globalRequestFailureMsg struct {
const msgChannelWindowAdjust = 93
type windowAdjustMsg struct {
- PeersId uint32 `sshtype:"93"`
+ PeersID uint32 `sshtype:"93"`
AdditionalBytes uint32
}
diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go
index 27a527c10..f19016270 100644
--- a/vendor/golang.org/x/crypto/ssh/mux.go
+++ b/vendor/golang.org/x/crypto/ssh/mux.go
@@ -278,7 +278,7 @@ func (m *mux) handleChannelOpen(packet []byte) error {
if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
failMsg := channelOpenFailureMsg{
- PeersId: msg.PeersId,
+ PeersID: msg.PeersID,
Reason: ConnectionFailed,
Message: "invalid request",
Language: "en_US.UTF-8",
@@ -287,7 +287,7 @@ func (m *mux) handleChannelOpen(packet []byte) error {
}
c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData)
- c.remoteId = msg.PeersId
+ c.remoteId = msg.PeersID
c.maxRemotePayload = msg.MaxPacketSize
c.remoteWin.add(msg.PeersWindow)
m.incomingChannels <- c
@@ -313,7 +313,7 @@ func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) {
PeersWindow: ch.myWindow,
MaxPacketSize: ch.maxIncomingPayload,
TypeSpecificData: extra,
- PeersId: ch.localId,
+ PeersID: ch.localId,
}
if err := m.sendMessage(open); err != nil {
return nil, err
diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
index 8a78b7ca0..e86e89661 100644
--- a/vendor/golang.org/x/crypto/ssh/server.go
+++ b/vendor/golang.org/x/crypto/ssh/server.go
@@ -95,6 +95,10 @@ type ServerConfig struct {
// Note that RFC 4253 section 4.2 requires that this string start with
// "SSH-2.0-".
ServerVersion string
+
+ // BannerCallback, if present, is called and the returned string is sent to
+ // the client after key exchange has completed but before authentication.
+ BannerCallback func(conn ConnMetadata) string
}
// AddHostKey adds a private key as a host key. If an existing host
@@ -162,6 +166,9 @@ type ServerConn struct {
// unsuccessful, it closes the connection and returns an error. The
// Request and NewChannel channels must be serviced, or the connection
// will hang.
+//
+// The returned error may be of type *ServerAuthError for
+// authentication errors.
func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) {
fullConf := *config
fullConf.SetDefaults()
@@ -252,7 +259,7 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error)
func isAcceptableAlgo(algo string) bool {
switch algo {
case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519,
- CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01:
+ CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01:
return true
}
return false
@@ -288,12 +295,13 @@ func checkSourceAddress(addr net.Addr, sourceAddrs string) error {
return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr)
}
-// ServerAuthError implements the error interface. It appends any authentication
-// errors that may occur, and is returned if all of the authentication methods
-// provided by the user failed to authenticate.
+// ServerAuthError represents server authentication errors and is
+// sometimes returned by NewServerConn. It appends any authentication
+// errors that may occur, and is returned if all of the authentication
+// methods provided by the user failed to authenticate.
type ServerAuthError struct {
// Errors contains authentication errors returned by the authentication
- // callback methods.
+ // callback methods. The first entry is typically ErrNoAuth.
Errors []error
}
@@ -305,6 +313,13 @@ func (l ServerAuthError) Error() string {
return "[" + strings.Join(errs, ", ") + "]"
}
+// ErrNoAuth is the error value returned if no
+// authentication method has been passed yet. This happens as a normal
+// part of the authentication loop, since the client first tries
+// 'none' authentication to discover available methods.
+// It is returned in ServerAuthError.Errors from NewServerConn.
+var ErrNoAuth = errors.New("ssh: no auth passed yet")
+
func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) {
sessionID := s.transport.getSessionID()
var cache pubKeyCache
@@ -312,6 +327,7 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err
authFailures := 0
var authErrs []error
+ var displayedBanner bool
userAuthLoop:
for {
@@ -343,8 +359,22 @@ userAuthLoop:
}
s.user = userAuthReq.User
+
+ if !displayedBanner && config.BannerCallback != nil {
+ displayedBanner = true
+ msg := config.BannerCallback(s)
+ if msg != "" {
+ bannerMsg := &userAuthBannerMsg{
+ Message: msg,
+ }
+ if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil {
+ return nil, err
+ }
+ }
+ }
+
perms = nil
- authErr := errors.New("no auth passed yet")
+ authErr := ErrNoAuth
switch userAuthReq.Method {
case "none":
@@ -374,7 +404,7 @@ userAuthLoop:
perms, authErr = config.PasswordCallback(s, password)
case "keyboard-interactive":
if config.KeyboardInteractiveCallback == nil {
- authErr = errors.New("ssh: keyboard-interactive auth not configubred")
+ authErr = errors.New("ssh: keyboard-interactive auth not configured")
break
}
@@ -454,6 +484,7 @@ userAuthLoop:
// sig.Format. This is usually the same, but
// for certs, the names differ.
if !isAcceptableAlgo(sig.Format) {
+ authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format)
break
}
signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData)
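
On the server side the banner is written once, after key exchange but before
the first authentication attempt. A sketch of a per-connection banner (host
keys and auth callbacks are elided):

    serverConfig := &ssh.ServerConfig{
        BannerCallback: func(conn ssh.ConnMetadata) string {
            return fmt.Sprintf("Authorized users only. You are %q from %s.\n",
                conn.User(), conn.RemoteAddr())
        },
        // ... AddHostKey, PasswordCallback, etc. ...
    }
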
diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go
index cc06e03f5..d3321f6b7 100644
--- a/vendor/golang.org/x/crypto/ssh/session.go
+++ b/vendor/golang.org/x/crypto/ssh/session.go
@@ -406,7 +406,7 @@ func (s *Session) Wait() error {
s.stdinPipeWriter.Close()
}
var copyError error
- for _ = range s.copyFuncs {
+ for range s.copyFuncs {
if err := <-s.errors; err != nil && copyError == nil {
copyError = err
}
diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go
index a2dccc64c..b171b330b 100644
--- a/vendor/golang.org/x/crypto/ssh/streamlocal.go
+++ b/vendor/golang.org/x/crypto/ssh/streamlocal.go
@@ -32,6 +32,7 @@ type streamLocalChannelForwardMsg struct {
// ListenUnix is similar to ListenTCP but uses a Unix domain socket.
func (c *Client) ListenUnix(socketPath string) (net.Listener, error) {
+ c.handleForwardsOnce.Do(c.handleForwards)
m := streamLocalChannelForwardMsg{
socketPath,
}
diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go
index acf17175d..80d35f5ec 100644
--- a/vendor/golang.org/x/crypto/ssh/tcpip.go
+++ b/vendor/golang.org/x/crypto/ssh/tcpip.go
@@ -90,10 +90,19 @@ type channelForwardMsg struct {
rport uint32
}
+// handleForwards starts goroutines handling forwarded connections.
+// It's called on first use by (*Client).ListenTCP to not launch
+// goroutines until needed.
+func (c *Client) handleForwards() {
+ go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip"))
+ go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com"))
+}
+
// ListenTCP requests the remote peer open a listening socket
// on laddr. Incoming connections will be available by calling
// Accept on the returned net.Listener.
func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) {
+ c.handleForwardsOnce.Do(c.handleForwards)
if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) {
return c.autoPortListenWorkaround(laddr)
}
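
Nothing changes for callers: the first ListenTCP (or ListenUnix, above)
performs the one-time registration. A sketch of a remote forward on an
existing connection (client is a placeholder *ssh.Client):

    // Ask the server to listen on port 8080; the forwarding goroutines are
    // started lazily by handleForwardsOnce on this first call.
    ln, err := client.ListenTCP(&net.TCPAddr{IP: net.IPv4zero, Port: 8080})
    if err != nil {
        log.Fatal(err)
    }
    defer ln.Close()
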
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
index 18379a935..9a887598f 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
@@ -617,7 +617,7 @@ func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) {
if _, err = w.Write(crlf); err != nil {
return n, err
}
- n += 1
+ n++
buf = buf[1:]
}
}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go
index 02dad484e..391104084 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/util.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd
+// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd
// Package terminal provides support functions for dealing with terminals, as
// commonly found on UNIX systems.
@@ -25,7 +25,7 @@ type State struct {
termios unix.Termios
}
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
return err == nil
@@ -108,9 +108,7 @@ func ReadPassword(fd int) ([]byte, error) {
return nil, err
}
- defer func() {
- unix.IoctlSetTermios(fd, ioctlWriteTermios, termios)
- }()
+ defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios)
return readPasswordLine(passwordReader(fd))
}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go b/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go
new file mode 100644
index 000000000..dfcd62785
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go
@@ -0,0 +1,12 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix
+
+package terminal
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+const ioctlWriteTermios = unix.TCSETS
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
index 799f049f0..9317ac7ed 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
@@ -21,7 +21,7 @@ import (
type State struct{}
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
return false
}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
index a2e1b57dc..3d5f06a9f 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
@@ -14,10 +14,10 @@ import (
// State contains the state of a terminal.
type State struct {
- state *unix.Termios
+ termios unix.Termios
}
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
_, err := unix.IoctlGetTermio(fd, unix.TCGETA)
return err == nil
@@ -75,47 +75,43 @@ func ReadPassword(fd int) ([]byte, error) {
// restored.
// see http://cr.illumos.org/~webrev/andy_js/1060/
func MakeRaw(fd int) (*State, error) {
- oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+ termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
if err != nil {
return nil, err
}
- oldTermios := *oldTermiosPtr
- newTermios := oldTermios
- newTermios.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON
- newTermios.Oflag &^= syscall.OPOST
- newTermios.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN
- newTermios.Cflag &^= syscall.CSIZE | syscall.PARENB
- newTermios.Cflag |= syscall.CS8
- newTermios.Cc[unix.VMIN] = 1
- newTermios.Cc[unix.VTIME] = 0
+ oldState := State{termios: *termios}
- if err := unix.IoctlSetTermios(fd, unix.TCSETS, &newTermios); err != nil {
+ termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
+ termios.Oflag &^= unix.OPOST
+ termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
+ termios.Cflag &^= unix.CSIZE | unix.PARENB
+ termios.Cflag |= unix.CS8
+ termios.Cc[unix.VMIN] = 1
+ termios.Cc[unix.VTIME] = 0
+
+ if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil {
return nil, err
}
- return &State{
- state: oldTermiosPtr,
- }, nil
+ return &oldState, nil
}
// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, oldState *State) error {
- return unix.IoctlSetTermios(fd, unix.TCSETS, oldState.state)
+ return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios)
}
// GetState returns the current state of a terminal which may be useful to
// restore the terminal after a signal.
func GetState(fd int) (*State, error) {
- oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+ termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
if err != nil {
return nil, err
}
- return &State{
- state: oldTermiosPtr,
- }, nil
+ return &State{termios: *termios}, nil
}
// GetSize returns the dimensions of the given terminal.
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
index 60979ccd0..6cb8a9503 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
@@ -17,6 +17,8 @@
package terminal
import (
+ "os"
+
"golang.org/x/sys/windows"
)
@@ -24,7 +26,7 @@ type State struct {
mode uint32
}
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
var st uint32
err := windows.GetConsoleMode(windows.Handle(fd), &st)
@@ -71,13 +73,6 @@ func GetSize(fd int) (width, height int, err error) {
return int(info.Size.X), int(info.Size.Y), nil
}
-// passwordReader is an io.Reader that reads from a specific Windows HANDLE.
-type passwordReader int
-
-func (r passwordReader) Read(buf []byte) (int, error) {
- return windows.Read(windows.Handle(r), buf)
-}
-
// ReadPassword reads a line of input from a terminal without local echo. This
// is commonly used for inputting passwords and other sensitive data. The slice
// returned does not include the \n.
@@ -94,9 +89,15 @@ func ReadPassword(fd int) ([]byte, error) {
return nil, err
}
- defer func() {
- windows.SetConsoleMode(windows.Handle(fd), old)
- }()
+ defer windows.SetConsoleMode(windows.Handle(fd), old)
- return readPasswordLine(passwordReader(fd))
+ var h windows.Handle
+ p, _ := windows.GetCurrentProcess()
+ if err := windows.DuplicateHandle(p, windows.Handle(fd), p, &h, 0, false, windows.DUPLICATE_SAME_ACCESS); err != nil {
+ return nil, err
+ }
+
+ f := os.NewFile(uintptr(h), "stdin")
+ defer f.Close()
+ return readPasswordLine(f)
}
diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go
index ab2b88765..f6fae1db4 100644
--- a/vendor/golang.org/x/crypto/ssh/transport.go
+++ b/vendor/golang.org/x/crypto/ssh/transport.go
@@ -6,6 +6,7 @@ package ssh
import (
"bufio"
+ "bytes"
"errors"
"io"
"log"
@@ -76,17 +77,17 @@ type connectionState struct {
// both directions are triggered by reading and writing a msgNewKey packet
// respectively.
func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error {
- if ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult); err != nil {
+ ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult)
+ if err != nil {
return err
- } else {
- t.reader.pendingKeyChange <- ciph
}
+ t.reader.pendingKeyChange <- ciph
- if ciph, err := newPacketCipher(t.writer.dir, algs.w, kexResult); err != nil {
+ ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult)
+ if err != nil {
return err
- } else {
- t.writer.pendingKeyChange <- ciph
}
+ t.writer.pendingKeyChange <- ciph
return nil
}
@@ -139,7 +140,7 @@ func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
case cipher := <-s.pendingKeyChange:
s.packetCipher = cipher
default:
- return nil, errors.New("ssh: got bogus newkeys message.")
+ return nil, errors.New("ssh: got bogus newkeys message")
}
case msgDisconnect:
@@ -232,52 +233,22 @@ var (
clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}}
)
-// generateKeys generates key material for IV, MAC and encryption.
-func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) {
- cipherMode := cipherModes[algs.Cipher]
- macMode := macModes[algs.MAC]
-
- iv = make([]byte, cipherMode.ivSize)
- key = make([]byte, cipherMode.keySize)
- macKey = make([]byte, macMode.keySize)
-
- generateKeyMaterial(iv, d.ivTag, kex)
- generateKeyMaterial(key, d.keyTag, kex)
- generateKeyMaterial(macKey, d.macKeyTag, kex)
- return
-}
-
// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as
// described in RFC 4253, section 6.4. direction should either be serverKeys
// (to setup server->client keys) or clientKeys (for client->server keys).
func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
- iv, key, macKey := generateKeys(d, algs, kex)
+ cipherMode := cipherModes[algs.Cipher]
+ macMode := macModes[algs.MAC]
- if algs.Cipher == gcmCipherID {
- return newGCMCipher(iv, key)
- }
+ iv := make([]byte, cipherMode.ivSize)
+ key := make([]byte, cipherMode.keySize)
+ macKey := make([]byte, macMode.keySize)
- if algs.Cipher == aes128cbcID {
- return newAESCBCCipher(iv, key, macKey, algs)
- }
+ generateKeyMaterial(iv, d.ivTag, kex)
+ generateKeyMaterial(key, d.keyTag, kex)
+ generateKeyMaterial(macKey, d.macKeyTag, kex)
- if algs.Cipher == tripledescbcID {
- return newTripleDESCBCCipher(iv, key, macKey, algs)
- }
-
- c := &streamPacketCipher{
- mac: macModes[algs.MAC].new(macKey),
- etm: macModes[algs.MAC].etm,
- }
- c.macResult = make([]byte, c.mac.Size())
-
- var err error
- c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv)
- if err != nil {
- return nil, err
- }
-
- return c, nil
+ return cipherModes[algs.Cipher].create(key, iv, macKey, algs)
}
// generateKeyMaterial fills out with key material generated from tag, K, H
@@ -342,7 +313,7 @@ func readVersion(r io.Reader) ([]byte, error) {
var ok bool
var buf [1]byte
- for len(versionString) < maxVersionStringBytes {
+ for length := 0; length < maxVersionStringBytes; length++ {
_, err := io.ReadFull(r, buf[:])
if err != nil {
return nil, err
@@ -350,6 +321,13 @@ func readVersion(r io.Reader) ([]byte, error) {
// The RFC says that the version should be terminated with \r\n
// but several SSH servers actually only send a \n.
if buf[0] == '\n' {
+ if !bytes.HasPrefix(versionString, []byte("SSH-")) {
+ // RFC 4253 says we need to ignore all version string lines
+ // except the one containing the SSH version (provided that
+ // all the lines do not exceed 255 bytes in total).
+ versionString = versionString[:0]
+ continue
+ }
ok = true
break
}
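
Taken together, the two readVersion hunks bound the total number of bytes read (rather than the length of the current line) and skip any pre-version banner lines, per RFC 4253: only the line beginning with "SSH-" is the identification string. A standalone sketch of the resulting behaviour, using assumed names (readVersionLine, maxBannerBytes) rather than the vendored ones:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"strings"
)

const maxBannerBytes = 255 // budget for all lines combined, as in the diff

func readVersionLine(r *bufio.Reader) (string, error) {
	var line []byte
	for n := 0; n < maxBannerBytes; n++ {
		b, err := r.ReadByte()
		if err != nil {
			return "", err
		}
		if b == '\n' {
			// Servers may send informational lines first; discard
			// anything that does not start with "SSH-".
			if !bytes.HasPrefix(line, []byte("SSH-")) {
				line = line[:0]
				continue
			}
			return string(bytes.TrimRight(line, "\r")), nil
		}
		line = append(line, b)
	}
	return "", fmt.Errorf("ssh: overflow reading version string")
}

func main() {
	in := "welcome to example.org\r\nSSH-2.0-OpenSSH_7.9\r\n"
	v, err := readVersionLine(bufio.NewReader(strings.NewReader(in)))
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // SSH-2.0-OpenSSH_7.9
}
```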
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 1bfe09da7..fd150a374 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -267,10 +267,10 @@
"revisionTime": "2017-04-30T22:20:11Z"
},
{
- "checksumSHA1": "UpjhOUZ1+0zNt+iIvdtECSHXmTs=",
+ "checksumSHA1": "6XsjAARQFvlW6dS15al0ibTFPOQ=",
"path": "github.com/karalabe/hid",
- "revision": "f00545f9f3748e591590be3732d913c77525b10f",
- "revisionTime": "2017-08-21T10:38:37Z",
+ "revision": "d815e0c1a2e2082a287a2806bc90bc8fc7b276a9",
+ "revisionTime": "2018-11-28T19:21:57Z",
"tree": true
},
{
@@ -455,76 +455,76 @@
"revisionTime": "2017-07-05T02:17:15Z"
},
{
- "checksumSHA1": "k6zbR5hiI10hkWtiK91rIY5s5/E=",
+ "checksumSHA1": "LV0VMVON7xY1ttV+s2ph83ntmDQ=",
"path": "github.com/syndtr/goleveldb/leveldb",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
- "checksumSHA1": "EKIow7XkgNdWvR/982ffIZxKG8Y=",
+ "checksumSHA1": "mPNraL2edpk/2FYq26rSXfMHbJg=",
"path": "github.com/syndtr/goleveldb/leveldb/cache",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
- "checksumSHA1": "5KPgnvCPlR0ysDAqo6jApzRQ3tw=",
+ "checksumSHA1": "UA+PKDKWlDnE2OZblh23W6wZwbY=",
"path": "github.com/syndtr/goleveldb/leveldb/comparer",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "1DRAxdlWzS4U0xKN/yQ/fdNN7f0=",
"path": "github.com/syndtr/goleveldb/leveldb/errors",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "eqKeD6DS7eNCtxVYZEHHRKkyZrw=",
"path": "github.com/syndtr/goleveldb/leveldb/filter",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
- "checksumSHA1": "weSsccMav4BCerDpSLzh3mMxAYo=",
+ "checksumSHA1": "hPyFsMiqZ1OB7MX+6wIAA6nsdtc=",
"path": "github.com/syndtr/goleveldb/leveldb/iterator",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "gJY7bRpELtO0PJpZXgPQ2BYFJ88=",
"path": "github.com/syndtr/goleveldb/leveldb/journal",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "MtYY1b2234y/MlS+djL8tXVAcQs=",
"path": "github.com/syndtr/goleveldb/leveldb/memdb",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
- "checksumSHA1": "UmQeotV+m8/FduKEfLOhjdp18rs=",
+ "checksumSHA1": "o2TorI3z+vc+EBMJ8XeFoUmXBtU=",
"path": "github.com/syndtr/goleveldb/leveldb/opt",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "ZnyuciM+R19NG8L5YS3TIJdo1e8=",
"path": "github.com/syndtr/goleveldb/leveldb/storage",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "gWFPMz8OQeul0t54RM66yMTX49g=",
"path": "github.com/syndtr/goleveldb/leveldb/table",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "V/Dh7NV0/fy/5jX1KaAjmGcNbzI=",
"path": "github.com/syndtr/goleveldb/leveldb/util",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "nD6S4KB0S+YHxVMDDE+w3PyXaMk=",
@@ -631,92 +631,116 @@
{
"checksumSHA1": "TT1rac6kpQp2vz24m5yDGUNQ/QQ=",
"path": "golang.org/x/crypto/cast5",
- "revision": "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd",
- "revisionTime": "2017-09-25T11:22:06Z"
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
},
{
"checksumSHA1": "IQkUIOnvlf0tYloFx9mLaXSvXWQ=",
"path": "golang.org/x/crypto/curve25519",
- "revision": "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd",
- "revisionTime": "2017-09-25T11:22:06Z"
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
},
{
- "checksumSHA1": "1hwn8cgg4EVXhCpJIqmMbzqnUo0=",
+ "checksumSHA1": "2LpxYGSf068307b7bhAuVjvzLLc=",
"path": "golang.org/x/crypto/ed25519",
- "revision": "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd",
- "revisionTime": "2017-09-25T11:22:06Z"
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
},
{
- "checksumSHA1": "LXFcVx8I587SnWmKycSDEq9yvK8=",
+ "checksumSHA1": "0JTAFXPkankmWcZGQJGScLDiaN8=",
"path": "golang.org/x/crypto/ed25519/internal/edwards25519",
- "revision": "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd",
- "revisionTime": "2017-09-25T11:22:06Z"
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
},
{
- "checksumSHA1": "IIhFTrLlmlc6lEFSitqi4aw2lw0=",
+ "checksumSHA1": "fhxj9uzosD3dQefNF5JuGJzGZwg=",
+ "path": "golang.org/x/crypto/internal/chacha20",
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
+ },
+ {
+ "checksumSHA1": "/U7f2gaH6DnEmLguVLDbipU6kXU=",
+ "path": "golang.org/x/crypto/internal/subtle",
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
+ },
+ {
+ "checksumSHA1": "M7MQqB1xKzwQh5aEjckVsVCxpoY=",
"path": "golang.org/x/crypto/openpgp",
- "revision": "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd",
- "revisionTime": "2017-09-25T11:22:06Z"
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
},
{
"checksumSHA1": "olOKkhrdkYQHZ0lf1orrFQPQrv4=",
"path": "golang.org/x/crypto/openpgp/armor",
- "revision": "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd",
- "revisionTime": "2017-09-25T11:22:06Z"
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
},
{
"checksumSHA1": "eo/KtdjieJQXH7Qy+faXFcF70ME=",
"path": "golang.org/x/crypto/openpgp/elgamal",
- "revision": "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd",
- "revisionTime": "2017-09-25T11:22:06Z"
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
},
{
"checksumSHA1": "rlxVSaGgqdAgwblsErxTxIfuGfg=",
"path": "golang.org/x/crypto/openpgp/errors",
- "revision": "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd",
- "revisionTime": "2017-09-25T11:22:06Z"
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
},
{
- "checksumSHA1": "Pq88+Dgh04UdXWZN6P+bLgYnbRc=",
+ "checksumSHA1": "DwKua4mYaqKBGxCrwgLP2JqkPA0=",
"path": "golang.org/x/crypto/openpgp/packet",
- "revision": "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd",
- "revisionTime": "2017-09-25T11:22:06Z"
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
},
{
"checksumSHA1": "s2qT4UwvzBSkzXuiuMkowif1Olw=",
"path": "golang.org/x/crypto/openpgp/s2k",
- "revision": "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd",
- "revisionTime": "2017-09-25T11:22:06Z"
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
},
{
"checksumSHA1": "1MGpGDQqnUoRpv7VEcQrXOBydXE=",
"path": "golang.org/x/crypto/pbkdf2",
- "revision": "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd",
- "revisionTime": "2017-09-25T11:22:06Z"
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
},
{
- "checksumSHA1": "y/oIaxq2d3WPizRZfVjo8RCRYTU=",
+ "checksumSHA1": "vKbPb9fpjCdzuoOvajOJnYfHG2g=",
+ "path": "golang.org/x/crypto/poly1305",
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
+ },
+ {
+ "checksumSHA1": "GP0QdBhWPoH4hsHedU7935MjGWo=",
"path": "golang.org/x/crypto/ripemd160",
- "revision": "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd",
- "revisionTime": "2017-09-25T11:22:06Z"
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
},
{
- "checksumSHA1": "dHh6VeHcbNg11miGjGEl8LbPe7w=",
+ "checksumSHA1": "q+Rqy6Spw6qDSj75TGEZF7nzoFM=",
"path": "golang.org/x/crypto/scrypt",
- "revision": "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd",
- "revisionTime": "2017-09-25T11:22:06Z"
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
},
{
- "checksumSHA1": "Wi44TcpIOXdojyVWkvyOBnBKIS4=",
+ "checksumSHA1": "hUsBzxJ8KTL4v0vpPT/mqvdJ46s=",
+ "path": "golang.org/x/crypto/sha3",
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
+ },
+ {
+ "checksumSHA1": "eMiE+YWT0hJF4B9/hrKHaRp39aU=",
"path": "golang.org/x/crypto/ssh",
- "revision": "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd",
- "revisionTime": "2017-09-25T11:22:06Z"
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
},
{
- "checksumSHA1": "5Yb2z6UO+Arm/TEd+OEtdnwOt1A=",
+ "checksumSHA1": "BSPDVKviqHQaG2phOFN690zAKB8=",
"path": "golang.org/x/crypto/ssh/terminal",
- "revision": "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd",
- "revisionTime": "2017-09-25T11:22:06Z"
+ "revision": "ff983b9c42bc9fbf91556e191cc8efb585c16908",
+ "revisionTime": "2018-07-25T11:53:45Z"
},
{
"checksumSHA1": "Y+HGqEkYM15ir+J93MEaHdyFy0c=",
diff --git a/whisper/whisperv6/api_test.go b/whisper/whisperv6/api_test.go
index cdbc7fab5..6d7157f57 100644
--- a/whisper/whisperv6/api_test.go
+++ b/whisper/whisperv6/api_test.go
@@ -18,27 +18,12 @@ package whisperv6
import (
"bytes"
- "crypto/ecdsa"
"testing"
"time"
-
- mapset "github.com/deckarep/golang-set"
- "github.com/ethereum/go-ethereum/common"
)
func TestMultipleTopicCopyInNewMessageFilter(t *testing.T) {
- w := &Whisper{
- privateKeys: make(map[string]*ecdsa.PrivateKey),
- symKeys: make(map[string][]byte),
- envelopes: make(map[common.Hash]*Envelope),
- expirations: make(map[uint32]mapset.Set),
- peers: make(map[*Peer]struct{}),
- messageQueue: make(chan *Envelope, messageQueueLimit),
- p2pMsgQueue: make(chan *Envelope, messageQueueLimit),
- quit: make(chan struct{}),
- syncAllowance: DefaultSyncAllowance,
- }
- w.filters = NewFilters(w)
+ w := New(nil)
keyID, err := w.GenerateSymKey()
if err != nil {