swarm, cmd/swarm: Merge branch 'master' into multiple-ens-endpoints

commit a3a07350dc

.github/CONTRIBUTING.md (vendored, 2 changes)
@@ -2,7 +2,7 @@

Before you do a feature request please check and make sure that it isn't possible
through some other means. The JavaScript enabled console is a powerful feature
in the right hands. Please check our [Bitchin' tricks](https://github.com/ethereum/go-ethereum/wiki/bitchin-tricks) wiki page for more info
in the right hands. Please check our [Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info
and help.

## Contributing
.gitignore (vendored, 3 changes)

@@ -35,6 +35,7 @@ profile.cov
.idea

# dashboard
/dashboard/assets/flow-typed
/dashboard/assets/node_modules
/dashboard/assets/stats.json
/dashboard/assets/public/bundle.js
/dashboard/assets/bundle.js
@@ -40,6 +40,7 @@ matrix:
- os: osx
go: 1.9.x
script:
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
- brew update
- brew install caskroom/cask/brew-cask
- brew cask install osxfuse

@@ -87,13 +88,13 @@ matrix:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-arm-linux-gnueabihf libc6-dev-armhf-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross
- sudo ln -s /usr/include/asm-generic /usr/include/asm

- GOARM=5 CC=arm-linux-gnueabi-gcc go run build/ci.go install -arch arm
- GOARM=5 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc
- GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- GOARM=6 CC=arm-linux-gnueabi-gcc go run build/ci.go install -arch arm
- GOARM=6 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc
- GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- GOARM=7 CC=arm-linux-gnueabihf-gcc go run build/ci.go install -arch arm
- GOARM=7 go run build/ci.go install -arch arm -cc arm-linux-gnueabihf-gcc
- GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- CC=aarch64-linux-gnu-gcc go run build/ci.go install -arch arm64
- go run build/ci.go install -arch arm64 -cc aarch64-linux-gnu-gcc
- go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds

# This builder does the Linux Azure MIPS xgo uploads
Makefile (6 changes)

@@ -45,9 +45,13 @@ clean:

devtools:
env GOBIN= go get -u golang.org/x/tools/cmd/stringer
env GOBIN= go get -u github.com/jteeuwen/go-bindata/go-bindata
env GOBIN= go get -u github.com/kevinburke/go-bindata/go-bindata
env GOBIN= go get -u github.com/fjl/gencodec
env GOBIN= go get -u github.com/golang/protobuf/protoc-gen-go
env GOBIN= go install ./cmd/abigen
@type "npm" 2> /dev/null || echo 'Please install node.js and npm'
@type "solc" 2> /dev/null || echo 'Please install solc'
@type "protoc" 2> /dev/null || echo 'Please install protoc'

# Cross Compilation Targets (xgo)
@@ -17,6 +17,7 @@
package abi

import (
"bytes"
"encoding/json"
"fmt"
"io"

@@ -50,57 +51,52 @@ func JSON(reader io.Reader) (ABI, error) {
// methods string signature. (signature = baz(uint32,string32))
func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
// Fetch the ABI of the requested method
var method Method

if name == "" {
method = abi.Constructor
} else {
m, exist := abi.Methods[name]
if !exist {
return nil, fmt.Errorf("method '%s' not found", name)
// constructor
arguments, err := abi.Constructor.Inputs.Pack(args...)
if err != nil {
return nil, err
}
method = m
return arguments, nil

}
arguments, err := method.pack(args...)
method, exist := abi.Methods[name]
if !exist {
return nil, fmt.Errorf("method '%s' not found", name)
}

arguments, err := method.Inputs.Pack(args...)
if err != nil {
return nil, err
}
// Pack up the method ID too if not a constructor and return
if name == "" {
return arguments, nil
}
return append(method.Id(), arguments...), nil
}

// Unpack output in v according to the abi specification
func (abi ABI) Unpack(v interface{}, name string, output []byte) (err error) {
if err = bytesAreProper(output); err != nil {
return err
if len(output) == 0 {
return fmt.Errorf("abi: unmarshalling empty output")
}
// since there can't be naming collisions with contracts and events,
// we need to decide whether we're calling a method or an event
var unpack unpacker
if method, ok := abi.Methods[name]; ok {
unpack = method
if len(output)%32 != 0 {
return fmt.Errorf("abi: improperly formatted output")
}
return method.Outputs.Unpack(v, output)
} else if event, ok := abi.Events[name]; ok {
unpack = event
} else {
return fmt.Errorf("abi: could not locate named method or event.")
return event.Inputs.Unpack(v, output)
}

// requires a struct to unpack into for a tuple return...
if unpack.isTupleReturn() {
return unpack.tupleUnpack(v, output)
}
return unpack.singleUnpack(v, output)
return fmt.Errorf("abi: could not locate named method or event")
}

// UnmarshalJSON implements json.Unmarshaler interface
func (abi *ABI) UnmarshalJSON(data []byte) error {
var fields []struct {
Type string
Name string
Constant bool
Indexed bool
Anonymous bool
Inputs []Argument
Outputs []Argument

@@ -137,3 +133,14 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {

return nil
}

// MethodById looks up a method by the 4-byte id
// returns nil if none found
func (abi *ABI) MethodById(sigdata []byte) *Method {
for _, method := range abi.Methods {
if bytes.Equal(method.Id(), sigdata[:4]) {
return &method
}
}
return nil
}
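Note (not part of the diff above): a minimal, hypothetical sketch of how the reworked ABI.Pack path and the new MethodById helper are meant to fit together; the ABI JSON and values below are invented for illustration.

package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// A single-method ABI, mirroring the "send" entry used in the package tests.
	const def = `[{"type":"function","name":"send","constant":false,"inputs":[{"name":"amount","type":"uint256"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// Pack prepends the 4-byte method id to the packed arguments (constructors skip the id).
	calldata, err := parsed.Pack("send", big.NewInt(1))
	if err != nil {
		panic(err)
	}
	// MethodById recovers the method from the first four bytes of the calldata.
	if m := parsed.MethodById(calldata[:4]); m != nil {
		fmt.Println("dispatching to", m.Name)
	}
}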
@@ -18,13 +18,15 @@ package abi

import (
"bytes"
"encoding/hex"
"fmt"
"log"
"math/big"
"reflect"
"strings"
"testing"

"reflect"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)

@@ -74,9 +76,24 @@ func TestReader(t *testing.T) {
}

// deep equal fails for some reason
t.Skip()
if !reflect.DeepEqual(abi, exp) {
t.Errorf("\nabi: %v\ndoes not match exp: %v", abi, exp)
for name, expM := range exp.Methods {
gotM, exist := abi.Methods[name]
if !exist {
t.Errorf("Missing expected method %v", name)
}
if !reflect.DeepEqual(gotM, expM) {
t.Errorf("\nGot abi method: \n%v\ndoes not match expected method\n%v", gotM, expM)
}
}

for name, gotM := range abi.Methods {
expM, exist := exp.Methods[name]
if !exist {
t.Errorf("Found extra method %v", name)
}
if !reflect.DeepEqual(gotM, expM) {
t.Errorf("\nGot abi method: \n%v\ndoes not match expected method\n%v", gotM, expM)
}
}
}

@@ -348,6 +365,188 @@ func TestInputVariableInputLength(t *testing.T) {
}
}

func TestInputFixedArrayAndVariableInputLength(t *testing.T) {
const definition = `[
{ "type" : "function", "name" : "fixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] },
{ "type" : "function", "name" : "fixedArrBytes", "constant" : true, "inputs" : [ { "name" : "str", "type" : "bytes" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] },
{ "type" : "function", "name" : "mixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type": "uint256[2]" }, { "name" : "dynArr", "type": "uint256[]" } ] },
{ "type" : "function", "name" : "doubleFixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type": "uint256[2]" }, { "name" : "fixedArr2", "type": "uint256[3]" } ] },
{ "type" : "function", "name" : "multipleMixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type": "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] }
]`

abi, err := JSON(strings.NewReader(definition))
if err != nil {
t.Error(err)
}

// test string, fixed array uint256[2]
strin := "hello world"
arrin := [2]*big.Int{big.NewInt(1), big.NewInt(2)}
fixedArrStrPack, err := abi.Pack("fixedArrStr", strin, arrin)
if err != nil {
t.Error(err)
}

// generate expected output
offset := make([]byte, 32)
offset[31] = 96
length := make([]byte, 32)
length[31] = byte(len(strin))
strvalue := common.RightPadBytes([]byte(strin), 32)
arrinvalue1 := common.LeftPadBytes(arrin[0].Bytes(), 32)
arrinvalue2 := common.LeftPadBytes(arrin[1].Bytes(), 32)
exp := append(offset, arrinvalue1...)
exp = append(exp, arrinvalue2...)
exp = append(exp, append(length, strvalue...)...)

// ignore first 4 bytes of the output. This is the function identifier
fixedArrStrPack = fixedArrStrPack[4:]
if !bytes.Equal(fixedArrStrPack, exp) {
t.Errorf("expected %x, got %x\n", exp, fixedArrStrPack)
}

// test byte array, fixed array uint256[2]
bytesin := []byte(strin)
arrin = [2]*big.Int{big.NewInt(1), big.NewInt(2)}
fixedArrBytesPack, err := abi.Pack("fixedArrBytes", bytesin, arrin)
if err != nil {
t.Error(err)
}

// generate expected output
offset = make([]byte, 32)
offset[31] = 96
length = make([]byte, 32)
length[31] = byte(len(strin))
strvalue = common.RightPadBytes([]byte(strin), 32)
arrinvalue1 = common.LeftPadBytes(arrin[0].Bytes(), 32)
arrinvalue2 = common.LeftPadBytes(arrin[1].Bytes(), 32)
exp = append(offset, arrinvalue1...)
exp = append(exp, arrinvalue2...)
exp = append(exp, append(length, strvalue...)...)

// ignore first 4 bytes of the output. This is the function identifier
fixedArrBytesPack = fixedArrBytesPack[4:]
if !bytes.Equal(fixedArrBytesPack, exp) {
t.Errorf("expected %x, got %x\n", exp, fixedArrBytesPack)
}

// test string, fixed array uint256[2], dynamic array uint256[]
strin = "hello world"
fixedarrin := [2]*big.Int{big.NewInt(1), big.NewInt(2)}
dynarrin := []*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}
mixedArrStrPack, err := abi.Pack("mixedArrStr", strin, fixedarrin, dynarrin)
if err != nil {
t.Error(err)
}

// generate expected output
stroffset := make([]byte, 32)
stroffset[31] = 128
strlength := make([]byte, 32)
strlength[31] = byte(len(strin))
strvalue = common.RightPadBytes([]byte(strin), 32)
fixedarrinvalue1 := common.LeftPadBytes(fixedarrin[0].Bytes(), 32)
fixedarrinvalue2 := common.LeftPadBytes(fixedarrin[1].Bytes(), 32)
dynarroffset := make([]byte, 32)
dynarroffset[31] = byte(160 + ((len(strin)/32)+1)*32)
dynarrlength := make([]byte, 32)
dynarrlength[31] = byte(len(dynarrin))
dynarrinvalue1 := common.LeftPadBytes(dynarrin[0].Bytes(), 32)
dynarrinvalue2 := common.LeftPadBytes(dynarrin[1].Bytes(), 32)
dynarrinvalue3 := common.LeftPadBytes(dynarrin[2].Bytes(), 32)
exp = append(stroffset, fixedarrinvalue1...)
exp = append(exp, fixedarrinvalue2...)
exp = append(exp, dynarroffset...)
exp = append(exp, append(strlength, strvalue...)...)
dynarrarg := append(dynarrlength, dynarrinvalue1...)
dynarrarg = append(dynarrarg, dynarrinvalue2...)
dynarrarg = append(dynarrarg, dynarrinvalue3...)
exp = append(exp, dynarrarg...)

// ignore first 4 bytes of the output. This is the function identifier
mixedArrStrPack = mixedArrStrPack[4:]
if !bytes.Equal(mixedArrStrPack, exp) {
t.Errorf("expected %x, got %x\n", exp, mixedArrStrPack)
}

// test string, fixed array uint256[2], fixed array uint256[3]
strin = "hello world"
fixedarrin1 := [2]*big.Int{big.NewInt(1), big.NewInt(2)}
fixedarrin2 := [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}
doubleFixedArrStrPack, err := abi.Pack("doubleFixedArrStr", strin, fixedarrin1, fixedarrin2)
if err != nil {
t.Error(err)
}

// generate expected output
stroffset = make([]byte, 32)
stroffset[31] = 192
strlength = make([]byte, 32)
strlength[31] = byte(len(strin))
strvalue = common.RightPadBytes([]byte(strin), 32)
fixedarrin1value1 := common.LeftPadBytes(fixedarrin1[0].Bytes(), 32)
fixedarrin1value2 := common.LeftPadBytes(fixedarrin1[1].Bytes(), 32)
fixedarrin2value1 := common.LeftPadBytes(fixedarrin2[0].Bytes(), 32)
fixedarrin2value2 := common.LeftPadBytes(fixedarrin2[1].Bytes(), 32)
fixedarrin2value3 := common.LeftPadBytes(fixedarrin2[2].Bytes(), 32)
exp = append(stroffset, fixedarrin1value1...)
exp = append(exp, fixedarrin1value2...)
exp = append(exp, fixedarrin2value1...)
exp = append(exp, fixedarrin2value2...)
exp = append(exp, fixedarrin2value3...)
exp = append(exp, append(strlength, strvalue...)...)

// ignore first 4 bytes of the output. This is the function identifier
doubleFixedArrStrPack = doubleFixedArrStrPack[4:]
if !bytes.Equal(doubleFixedArrStrPack, exp) {
t.Errorf("expected %x, got %x\n", exp, doubleFixedArrStrPack)
}

// test string, fixed array uint256[2], dynamic array uint256[], fixed array uint256[3]
strin = "hello world"
fixedarrin1 = [2]*big.Int{big.NewInt(1), big.NewInt(2)}
dynarrin = []*big.Int{big.NewInt(1), big.NewInt(2)}
fixedarrin2 = [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}
multipleMixedArrStrPack, err := abi.Pack("multipleMixedArrStr", strin, fixedarrin1, dynarrin, fixedarrin2)
if err != nil {
t.Error(err)
}

// generate expected output
stroffset = make([]byte, 32)
stroffset[31] = 224
strlength = make([]byte, 32)
strlength[31] = byte(len(strin))
strvalue = common.RightPadBytes([]byte(strin), 32)
fixedarrin1value1 = common.LeftPadBytes(fixedarrin1[0].Bytes(), 32)
fixedarrin1value2 = common.LeftPadBytes(fixedarrin1[1].Bytes(), 32)
dynarroffset = U256(big.NewInt(int64(256 + ((len(strin)/32)+1)*32)))
dynarrlength = make([]byte, 32)
dynarrlength[31] = byte(len(dynarrin))
dynarrinvalue1 = common.LeftPadBytes(dynarrin[0].Bytes(), 32)
dynarrinvalue2 = common.LeftPadBytes(dynarrin[1].Bytes(), 32)
fixedarrin2value1 = common.LeftPadBytes(fixedarrin2[0].Bytes(), 32)
fixedarrin2value2 = common.LeftPadBytes(fixedarrin2[1].Bytes(), 32)
fixedarrin2value3 = common.LeftPadBytes(fixedarrin2[2].Bytes(), 32)
exp = append(stroffset, fixedarrin1value1...)
exp = append(exp, fixedarrin1value2...)
exp = append(exp, dynarroffset...)
exp = append(exp, fixedarrin2value1...)
exp = append(exp, fixedarrin2value2...)
exp = append(exp, fixedarrin2value3...)
exp = append(exp, append(strlength, strvalue...)...)
dynarrarg = append(dynarrlength, dynarrinvalue1...)
dynarrarg = append(dynarrarg, dynarrinvalue2...)
exp = append(exp, dynarrarg...)

// ignore first 4 bytes of the output. This is the function identifier
multipleMixedArrStrPack = multipleMixedArrStrPack[4:]
if !bytes.Equal(multipleMixedArrStrPack, exp) {
t.Errorf("expected %x, got %x\n", exp, multipleMixedArrStrPack)
}
}

func TestDefaultFunctionParsing(t *testing.T) {
const definition = `[{ "name" : "balance" }]`

@@ -418,3 +617,82 @@ func TestBareEvents(t *testing.T) {
}
}
}

// TestUnpackEvent is based on this contract:
// contract T {
// event received(address sender, uint amount, bytes memo);
// function receive(bytes memo) external payable {
// received(msg.sender, msg.value, memo);
// }
// }
// When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
func TestUnpackEvent(t *testing.T) {
const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
abi, err := JSON(strings.NewReader(abiJSON))
if err != nil {
t.Fatal(err)
}

const hexdata = `000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158`
data, err := hex.DecodeString(hexdata)
if err != nil {
t.Fatal(err)
}
if len(data)%32 == 0 {
t.Errorf("len(data) is %d, want a non-multiple of 32", len(data))
}

type ReceivedEvent struct {
Address common.Address
Amount *big.Int
Memo []byte
}
var ev ReceivedEvent

err = abi.Unpack(&ev, "received", data)
if err != nil {
t.Error(err)
} else {
t.Logf("len(data): %d; received event: %+v", len(data), ev)
}
}

func TestABI_MethodById(t *testing.T) {
const abiJSON = `[
{"type":"function","name":"receive","constant":false,"inputs":[{"name":"memo","type":"bytes"}],"outputs":[],"payable":true,"stateMutability":"payable"},
{"type":"event","name":"received","anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}]},
{"type":"function","name":"fixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr","type":"uint256[2]"}]},
{"type":"function","name":"fixedArrBytes","constant":true,"inputs":[{"name":"str","type":"bytes"},{"name":"fixedArr","type":"uint256[2]"}]},
{"type":"function","name":"mixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr","type":"uint256[2]"},{"name":"dynArr","type":"uint256[]"}]},
{"type":"function","name":"doubleFixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr1","type":"uint256[2]"},{"name":"fixedArr2","type":"uint256[3]"}]},
{"type":"function","name":"multipleMixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr1","type":"uint256[2]"},{"name":"dynArr","type":"uint256[]"},{"name":"fixedArr2","type":"uint256[3]"}]},
{"type":"function","name":"balance","constant":true},
{"type":"function","name":"send","constant":false,"inputs":[{"name":"amount","type":"uint256"}]},
{"type":"function","name":"test","constant":false,"inputs":[{"name":"number","type":"uint32"}]},
{"type":"function","name":"string","constant":false,"inputs":[{"name":"inputs","type":"string"}]},
{"type":"function","name":"bool","constant":false,"inputs":[{"name":"inputs","type":"bool"}]},
{"type":"function","name":"address","constant":false,"inputs":[{"name":"inputs","type":"address"}]},
{"type":"function","name":"uint64[2]","constant":false,"inputs":[{"name":"inputs","type":"uint64[2]"}]},
{"type":"function","name":"uint64[]","constant":false,"inputs":[{"name":"inputs","type":"uint64[]"}]},
{"type":"function","name":"foo","constant":false,"inputs":[{"name":"inputs","type":"uint32"}]},
{"type":"function","name":"bar","constant":false,"inputs":[{"name":"inputs","type":"uint32"},{"name":"string","type":"uint16"}]},
{"type":"function","name":"_slice","constant":false,"inputs":[{"name":"inputs","type":"uint32[2]"}]},
{"type":"function","name":"__slice256","constant":false,"inputs":[{"name":"inputs","type":"uint256[2]"}]},
{"type":"function","name":"sliceAddress","constant":false,"inputs":[{"name":"inputs","type":"address[]"}]},
{"type":"function","name":"sliceMultiAddress","constant":false,"inputs":[{"name":"a","type":"address[]"},{"name":"b","type":"address[]"}]}
]
`
abi, err := JSON(strings.NewReader(abiJSON))
if err != nil {
t.Fatal(err)
}
for name, m := range abi.Methods {
a := fmt.Sprintf("%v", m)
b := fmt.Sprintf("%v", abi.MethodById(m.Id()))
if a != b {
t.Errorf("Method %v (id %v) not 'findable' by id in ABI", name, common.ToHex(m.Id()))
}
}

}
@@ -19,6 +19,8 @@ package abi
import (
"encoding/json"
"fmt"
"reflect"
"strings"
)

// Argument holds the name of the argument and the corresponding type.

@@ -29,7 +31,10 @@ type Argument struct {
Indexed bool // indexed is only used by events
}

func (a *Argument) UnmarshalJSON(data []byte) error {
type Arguments []Argument

// UnmarshalJSON implements json.Unmarshaler interface
func (argument *Argument) UnmarshalJSON(data []byte) error {
var extarg struct {
Name string
Type string

@@ -40,12 +45,206 @@ func (a *Argument) UnmarshalJSON(data []byte) error {
return fmt.Errorf("argument json err: %v", err)
}

a.Type, err = NewType(extarg.Type)
argument.Type, err = NewType(extarg.Type)
if err != nil {
return err
}
a.Name = extarg.Name
a.Indexed = extarg.Indexed
argument.Name = extarg.Name
argument.Indexed = extarg.Indexed

return nil
}

// LengthNonIndexed returns the number of arguments when not counting 'indexed' ones. Only events
// can ever have 'indexed' arguments, it should always be false on arguments for method input/output
func (arguments Arguments) LengthNonIndexed() int {
out := 0
for _, arg := range arguments {
if !arg.Indexed {
out++
}
}
return out
}

// isTuple returns true for non-atomic constructs, like (uint,uint) or uint[]
func (arguments Arguments) isTuple() bool {
return len(arguments) > 1
}

// Unpack performs the operation hexdata -> Go format
func (arguments Arguments) Unpack(v interface{}, data []byte) error {
if arguments.isTuple() {
return arguments.unpackTuple(v, data)
}
return arguments.unpackAtomic(v, data)
}

func (arguments Arguments) unpackTuple(v interface{}, output []byte) error {
// make sure the passed value is arguments pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}

var (
value = valueOf.Elem()
typ = value.Type()
kind = value.Kind()
)

if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
return err
}
// If the output interface is a struct, make sure names don't collide
if kind == reflect.Struct {
exists := make(map[string]bool)
for _, arg := range arguments {
field := capitalise(arg.Name)
if field == "" {
return fmt.Errorf("abi: purely underscored output cannot unpack to struct")
}
if exists[field] {
return fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", field)
}
exists[field] = true
}
}
// `i` counts the nonindexed arguments.
// `j` counts the number of complex types.
// both `i` and `j` are used to to correctly compute `data` offset.

i, j := -1, 0
for _, arg := range arguments {

if arg.Indexed {
// can't read, continue
continue
}
i++
marshalledValue, err := toGoType((i+j)*32, arg.Type, output)
if err != nil {
return err
}

if arg.Type.T == ArrayTy {
// combined index ('i' + 'j') need to be adjusted only by size of array, thus
// we need to decrement 'j' because 'i' was incremented
j += arg.Type.Size - 1
}

reflectValue := reflect.ValueOf(marshalledValue)

switch kind {
case reflect.Struct:
name := capitalise(arg.Name)
for j := 0; j < typ.NumField(); j++ {
// TODO read tags: `abi:"fieldName"`
if typ.Field(j).Name == name {
if err := set(value.Field(j), reflectValue, arg); err != nil {
return err
}
}
}
case reflect.Slice, reflect.Array:
if value.Len() < i {
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(arguments), value.Len())
}
v := value.Index(i)
if err := requireAssignable(v, reflectValue); err != nil {
return err
}

if err := set(v.Elem(), reflectValue, arg); err != nil {
return err
}
default:
return fmt.Errorf("abi:[2] cannot unmarshal tuple in to %v", typ)
}
}
return nil
}

// unpackAtomic unpacks ( hexdata -> go ) a single value
func (arguments Arguments) unpackAtomic(v interface{}, output []byte) error {
// make sure the passed value is arguments pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}
arg := arguments[0]
if arg.Indexed {
return fmt.Errorf("abi: attempting to unpack indexed variable into element.")
}

value := valueOf.Elem()

marshalledValue, err := toGoType(0, arg.Type, output)
if err != nil {
return err
}
return set(value, reflect.ValueOf(marshalledValue), arg)
}

// Unpack performs the operation Go format -> Hexdata
func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
// Make sure arguments match up and pack them
abiArgs := arguments
if len(args) != len(abiArgs) {
return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(abiArgs))
}

// variable input is the output appended at the end of packed
// output. This is used for strings and bytes types input.
var variableInput []byte

// input offset is the bytes offset for packed output
inputOffset := 0
for _, abiArg := range abiArgs {
if abiArg.Type.T == ArrayTy {
inputOffset += 32 * abiArg.Type.Size
} else {
inputOffset += 32
}
}

var ret []byte
for i, a := range args {
input := abiArgs[i]
// pack the input
packed, err := input.Type.pack(reflect.ValueOf(a))
if err != nil {
return nil, err
}

// check for a slice type (string, bytes, slice)
if input.Type.requiresLengthPrefix() {
// calculate the offset
offset := inputOffset + len(variableInput)
// set the offset
ret = append(ret, packNum(reflect.ValueOf(offset))...)
// Append the packed output to the variable input. The variable input
// will be appended at the end of the input.
variableInput = append(variableInput, packed...)
} else {
// append the packed value to the input
ret = append(ret, packed...)
}
}
// append the variable input at the end of the packed input
ret = append(ret, variableInput...)

return ret, nil
}

// capitalise makes the first character of a string upper case, also removing any
// prefixing underscores from the variable names.
func capitalise(input string) string {
for len(input) > 0 && input[0] == '_' {
input = input[1:]
}
if len(input) == 0 {
return ""
}
return strings.ToUpper(input[:1]) + input[1:]
}
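Note (not part of the diff above): a sketch, under the Arguments API introduced in the hunk above, of how a two-value method return is unpacked into a struct; non-indexed outputs are matched to exported fields by their capitalised names. The ABI definition and return data below are made up for illustration.

package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Two outputs, so Arguments.isTuple() is true and the tuple path matches
	// "balance" -> Balance and "ok" -> Ok on the target struct.
	const def = `[{"type":"function","name":"query","constant":true,"inputs":[],"outputs":[{"name":"balance","type":"uint256"},{"name":"ok","type":"bool"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	var out struct {
		Balance *big.Int
		Ok      bool
	}
	// ret stands in for the raw return data of an eth_call: two 32-byte words.
	ret := make([]byte, 64)
	ret[31] = 42 // balance = 42
	ret[63] = 1  // ok = true
	if err := parsed.Unpack(&out, "query", ret); err != nil {
		panic(err)
	}
	fmt.Println(out.Balance, out.Ok)
}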
@@ -52,12 +52,6 @@ type ContractCaller interface {
CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error)
}

// DeployBackend wraps the operations needed by WaitMined and WaitDeployed.
type DeployBackend interface {
TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error)
CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error)
}

// PendingContractCaller defines methods to perform contract calls on the pending state.
// Call will try to discover this interface when access to the pending state is requested.
// If the backend does not support the pending state, Call returns ErrNoPendingState.

@@ -85,13 +79,34 @@ type ContractTransactor interface {
// There is no guarantee that this is the true gas limit requirement as other
// transactions may be added or removed by miners, but it should provide a basis
// for setting a reasonable default.
EstimateGas(ctx context.Context, call ethereum.CallMsg) (usedGas *big.Int, err error)
EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error)
// SendTransaction injects the transaction into the pending pool for execution.
SendTransaction(ctx context.Context, tx *types.Transaction) error
}

// ContractFilterer defines the methods needed to access log events using one-off
// queries or continuous event subscriptions.
type ContractFilterer interface {
// FilterLogs executes a log filter operation, blocking during execution and
// returning all the results in one batch.
//
// TODO(karalabe): Deprecate when the subscription one can return past data too.
FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error)

// SubscribeFilterLogs creates a background log filtering operation, returning
// a subscription immediately, which can be used to stream the found events.
SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error)
}

// DeployBackend wraps the operations needed by WaitMined and WaitDeployed.
type DeployBackend interface {
TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error)
CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error)
}

// ContractBackend defines the methods needed to work with contracts on a read-write basis.
type ContractBackend interface {
ContractCaller
ContractTransactor
ContractFilterer
}
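Note (not part of the diff above): with ContractFilterer folded into ContractBackend, a backend now has to provide call, transact and filter support. The compile-time assertion idiom mentioned in the simulated backend can make that explicit; a minimal sketch:

package main

import (
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
)

// Each assertion fails to compile if SimulatedBackend stops satisfying the interface.
var (
	_ bind.ContractCaller     = (*backends.SimulatedBackend)(nil)
	_ bind.ContractTransactor = (*backends.SimulatedBackend)(nil)
	_ bind.ContractFilterer   = (*backends.SimulatedBackend)(nil)
	_ bind.ContractBackend    = (*backends.SimulatedBackend)(nil)
)

func main() {}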
@@ -30,11 +30,15 @@ import (
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
)

// This nil assignment ensures compile time that SimulatedBackend implements bind.ContractBackend.

@@ -53,6 +57,8 @@ type SimulatedBackend struct {
pendingBlock *types.Block // Currently pending block that will be imported on request
pendingState *state.StateDB // Currently pending state that will be the active on on request

events *filters.EventSystem // Event system for filtering log events live

config *params.ChainConfig
}

@@ -62,8 +68,14 @@ func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
database, _ := ethdb.NewMemDatabase()
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, Alloc: alloc}
genesis.MustCommit(database)
blockchain, _ := core.NewBlockChain(database, genesis.Config, ethash.NewFaker(), vm.Config{})
backend := &SimulatedBackend{database: database, blockchain: blockchain, config: genesis.Config}
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{})

backend := &SimulatedBackend{
database: database,
blockchain: blockchain,
config: genesis.Config,
events: filters.NewEventSystem(new(event.TypeMux), &filterBackend{database, blockchain}, false),
}
backend.rollback()
return backend
}

@@ -89,9 +101,11 @@ func (b *SimulatedBackend) Rollback() {
}

func (b *SimulatedBackend) rollback() {
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), b.database, 1, func(int, *core.BlockGen) {})
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {})
statedb, _ := b.blockchain.State()

b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), state.NewDatabase(b.database))
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
}

// CodeAt returns the code associated with a certain account in the blockchain.

@@ -200,7 +214,7 @@ func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error

// EstimateGas executes the requested code against the currently pending block/state and
// returns the used amount of gas.
func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (*big.Int, error) {
func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) {
b.mu.Lock()
defer b.mu.Unlock()

@@ -210,16 +224,16 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
hi uint64
cap uint64
)
if call.Gas != nil && call.Gas.Uint64() >= params.TxGas {
hi = call.Gas.Uint64()
if call.Gas >= params.TxGas {
hi = call.Gas
} else {
hi = b.pendingBlock.GasLimit().Uint64()
hi = b.pendingBlock.GasLimit()
}
cap = hi

// Create a helper to check if a gas allowance results in an executable transaction
executable := func(gas uint64) bool {
call.Gas = new(big.Int).SetUint64(gas)
call.Gas = gas

snapshot := b.pendingState.Snapshot()
_, _, failed, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)

@@ -242,21 +256,21 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
// Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap {
if !executable(hi) {
return nil, errGasEstimationFailed
return 0, errGasEstimationFailed
}
}
return new(big.Int).SetUint64(hi), nil
return hi, nil
}

// callContract implemens common code between normal and pending contract calls.
// callContract implements common code between normal and pending contract calls.
// state is modified during execution, make sure to copy it if necessary.
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB) ([]byte, *big.Int, bool, error) {
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB) ([]byte, uint64, bool, error) {
// Ensure message is initialized properly.
if call.GasPrice == nil {
call.GasPrice = big.NewInt(1)
}
if call.Gas == nil || call.Gas.Sign() == 0 {
call.Gas = big.NewInt(50000000)
if call.Gas == 0 {
call.Gas = 50000000
}
if call.Value == nil {
call.Value = new(big.Int)

@@ -271,9 +285,9 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
// Create a new environment which holds all relevant information
// about the transaction and calling mechanisms.
vmenv := vm.NewEVM(evmContext, statedb, b.config, vm.Config{})
gaspool := new(core.GasPool).AddGas(math.MaxBig256)
ret, gasUsed, _, failed, err := core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
return ret, gasUsed, failed, err
gaspool := new(core.GasPool).AddGas(math.MaxUint64)

return core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
}

// SendTransaction updates the pending block to include the given transaction.

@@ -291,29 +305,95 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
panic(fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce))
}

blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), b.database, 1, func(number int, block *core.BlockGen) {
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
for _, tx := range b.pendingBlock.Transactions() {
block.AddTx(tx)
}
block.AddTx(tx)
})
statedb, _ := b.blockchain.State()

b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), state.NewDatabase(b.database))
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
return nil
}

// JumpTimeInSeconds adds skip seconds to the clock
// FilterLogs executes a log filter operation, blocking during execution and
// returning all the results in one batch.
//
// TODO(karalabe): Deprecate when the subscription one can return past data too.
func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) {
// Initialize unset filter boundaried to run from genesis to chain head
from := int64(0)
if query.FromBlock != nil {
from = query.FromBlock.Int64()
}
to := int64(-1)
if query.ToBlock != nil {
to = query.ToBlock.Int64()
}
// Construct and execute the filter
filter := filters.New(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)

logs, err := filter.Logs(ctx)
if err != nil {
return nil, err
}
res := make([]types.Log, len(logs))
for i, log := range logs {
res[i] = *log
}
return res, nil
}

// SubscribeFilterLogs creates a background log filtering operation, returning a
// subscription immediately, which can be used to stream the found events.
func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) {
// Subscribe to contract events
sink := make(chan []*types.Log)

sub, err := b.events.SubscribeLogs(query, sink)
if err != nil {
return nil, err
}
// Since we're getting logs in batches, we need to flatten them into a plain stream
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case logs := <-sink:
for _, log := range logs {
select {
case ch <- *log:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}

// AdjustTime adds a time shift to the simulated clock.
func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
b.mu.Lock()
defer b.mu.Unlock()
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), b.database, 1, func(number int, block *core.BlockGen) {
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
for _, tx := range b.pendingBlock.Transactions() {
block.AddTx(tx)
}
block.OffsetTime(int64(adjustment.Seconds()))
})
statedb, _ := b.blockchain.State()

b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), state.NewDatabase(b.database))
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())

return nil
}

@@ -328,6 +408,47 @@ func (m callmsg) Nonce() uint64 { return 0 }
func (m callmsg) CheckNonce() bool { return false }
func (m callmsg) To() *common.Address { return m.CallMsg.To }
func (m callmsg) GasPrice() *big.Int { return m.CallMsg.GasPrice }
func (m callmsg) Gas() *big.Int { return m.CallMsg.Gas }
func (m callmsg) Gas() uint64 { return m.CallMsg.Gas }
func (m callmsg) Value() *big.Int { return m.CallMsg.Value }
func (m callmsg) Data() []byte { return m.CallMsg.Data }

// filterBackend implements filters.Backend to support filtering for logs without
// taking bloom-bits acceleration structures into account.
type filterBackend struct {
db ethdb.Database
bc *core.BlockChain
}

func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db }
func (fb *filterBackend) EventMux() *event.TypeMux { panic("not supported") }

func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumber) (*types.Header, error) {
if block == rpc.LatestBlockNumber {
return fb.bc.CurrentHeader(), nil
}
return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
}
func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
return core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash)), nil
}

func (fb *filterBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
return event.NewSubscription(func(quit <-chan struct{}) error {
<-quit
return nil
})
}
func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
return fb.bc.SubscribeChainEvent(ch)
}
func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
return fb.bc.SubscribeRemovedLogsEvent(ch)
}
func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return fb.bc.SubscribeLogsEvent(ch)
}

func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 }
func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) {
panic("not supported")
}
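Note (not part of the diff above): a hypothetical usage sketch of the new FilterLogs support on the simulated backend; the address and allocation below are invented for illustration.

package main

import (
	"context"
	"fmt"
	"math/big"

	ethereum "github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	addr := common.HexToAddress("0x01")
	sim := backends.NewSimulatedBackend(core.GenesisAlloc{
		addr: {Balance: big.NewInt(1000000000)},
	})
	// Query every log emitted by `addr` from genesis to the current head.
	logs, err := sim.FilterLogs(context.Background(), ethereum.FilterQuery{
		Addresses: []common.Address{addr},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("logs found:", len(logs))
}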
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/event"
)

// SignerFn is a signer function callback when a contract requires a method to

@@ -50,11 +51,27 @@ type TransactOpts struct {

Value *big.Int // Funds to transfer along along the transaction (nil = 0 = no funds)
GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle)
GasLimit *big.Int // Gas limit to set for the transaction execution (nil = estimate + 10%)
GasLimit uint64 // Gas limit to set for the transaction execution (0 = estimate)

Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}

// FilterOpts is the collection of options to fine tune filtering for events
// within a bound contract.
type FilterOpts struct {
Start uint64 // Start of the queried range
End *uint64 // End of the range (nil = latest)

Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}

// WatchOpts is the collection of options to fine tune subscribing for events
// within a bound contract.
type WatchOpts struct {
Start *uint64 // Start of the queried range (nil = latest)
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}

// BoundContract is the base wrapper object that reflects a contract on the
// Ethereum network. It contains a collection of methods that are used by the
// higher level contract bindings to operate.

@@ -63,16 +80,18 @@ type BoundContract struct {
abi abi.ABI // Reflect based ABI to access the correct Ethereum methods
caller ContractCaller // Read interface to interact with the blockchain
transactor ContractTransactor // Write interface to interact with the blockchain
filterer ContractFilterer // Event filtering to interact with the blockchain
}

// NewBoundContract creates a low level contract interface through which calls
// and transactions may be made through.
func NewBoundContract(address common.Address, abi abi.ABI, caller ContractCaller, transactor ContractTransactor) *BoundContract {
func NewBoundContract(address common.Address, abi abi.ABI, caller ContractCaller, transactor ContractTransactor, filterer ContractFilterer) *BoundContract {
return &BoundContract{
address: address,
abi: abi,
caller: caller,
transactor: transactor,
filterer: filterer,
}
}

@@ -80,7 +99,7 @@ func NewBoundContract(address common.Address, abi abi.ABI, caller ContractCaller
// deployment address with a Go wrapper.
func DeployContract(opts *TransactOpts, abi abi.ABI, bytecode []byte, backend ContractBackend, params ...interface{}) (common.Address, *types.Transaction, *BoundContract, error) {
// Otherwise try to deploy the contract
c := NewBoundContract(common.Address{}, abi, backend, backend)
c := NewBoundContract(common.Address{}, abi, backend, backend, backend)

input, err := c.abi.Pack("", params...)
if err != nil {

@@ -189,7 +208,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
}
}
gasLimit := opts.GasLimit
if gasLimit == nil {
if gasLimit == 0 {
// Gas estimation cannot succeed without code for method invocations
if contract != nil {
if code, err := c.transactor.PendingCodeAt(ensureContext(opts.Context), c.address); err != nil {

@@ -225,6 +244,104 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
return signedTx, nil
}

// FilterLogs filters contract logs for past blocks, returning the necessary
// channels to construct a strongly typed bound iterator on top of them.
func (c *BoundContract) FilterLogs(opts *FilterOpts, name string, query ...[]interface{}) (chan types.Log, event.Subscription, error) {
// Don't crash on a lazy user
if opts == nil {
opts = new(FilterOpts)
}
// Append the event selector to the query parameters and construct the topic set
query = append([][]interface{}{{c.abi.Events[name].Id()}}, query...)

topics, err := makeTopics(query...)
if err != nil {
return nil, nil, err
}
// Start the background filtering
logs := make(chan types.Log, 128)

config := ethereum.FilterQuery{
Addresses: []common.Address{c.address},
Topics: topics,
FromBlock: new(big.Int).SetUint64(opts.Start),
}
if opts.End != nil {
config.ToBlock = new(big.Int).SetUint64(*opts.End)
}
/* TODO(karalabe): Replace the rest of the method below with this when supported
sub, err := c.filterer.SubscribeFilterLogs(ensureContext(opts.Context), config, logs)
*/
buff, err := c.filterer.FilterLogs(ensureContext(opts.Context), config)
if err != nil {
return nil, nil, err
}
sub, err := event.NewSubscription(func(quit <-chan struct{}) error {
for _, log := range buff {
select {
case logs <- log:
case <-quit:
return nil
}
}
return nil
}), nil

if err != nil {
return nil, nil, err
}
return logs, sub, nil
}

// WatchLogs filters subscribes to contract logs for future blocks, returning a
// subscription object that can be used to tear down the watcher.
func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]interface{}) (chan types.Log, event.Subscription, error) {
// Don't crash on a lazy user
if opts == nil {
opts = new(WatchOpts)
}
// Append the event selector to the query parameters and construct the topic set
query = append([][]interface{}{{c.abi.Events[name].Id()}}, query...)

topics, err := makeTopics(query...)
if err != nil {
return nil, nil, err
}
// Start the background filtering
logs := make(chan types.Log, 128)

config := ethereum.FilterQuery{
Addresses: []common.Address{c.address},
Topics: topics,
}
if opts.Start != nil {
config.FromBlock = new(big.Int).SetUint64(*opts.Start)
}
sub, err := c.filterer.SubscribeFilterLogs(ensureContext(opts.Context), config, logs)
if err != nil {
return nil, nil, err
}
return logs, sub, nil
}

// UnpackLog unpacks a retrieved log into the provided output structure.
func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error {
if len(log.Data) > 0 {
if err := c.abi.Unpack(out, event, log.Data); err != nil {
return err
}
}
var indexed abi.Arguments
for _, arg := range c.abi.Events[event].Inputs {
if arg.Indexed {
indexed = append(indexed, arg)
}
}
return parseTopics(out, indexed, log.Topics[1:])
}

// ensureContext is a helper method to ensure a context is not nil, even if the
// user specified it as such.
func ensureContext(ctx context.Context) context.Context {
if ctx == nil {
return context.TODO()
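Note (not part of the diff above): a sketch of how the new WatchLogs and UnpackLog helpers are meant to be driven from a hand-wired BoundContract; generated bindings from abigen would normally produce this plumbing. The event layout, names and ABI below are invented for illustration.

package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
)

// Received mirrors the non-indexed fields of the hypothetical `received` event.
type Received struct {
	Sender common.Address
	Amount *big.Int
	Memo   []byte
}

const receivedABI = `[{"type":"event","name":"received","anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}]}]`

// watchReceived subscribes to future `received` logs at `at` and decodes each one.
func watchReceived(backend bind.ContractBackend, at common.Address) error {
	parsed, err := abi.JSON(strings.NewReader(receivedABI))
	if err != nil {
		return err
	}
	contract := bind.NewBoundContract(at, parsed, backend, backend, backend)
	logs, sub, err := contract.WatchLogs(nil, "received")
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()
	for log := range logs {
		var ev Received
		if err := contract.UnpackLog(&ev, "received", log); err != nil {
			return err
		}
		fmt.Printf("got %v wei from %x\n", ev.Amount, ev.Sender)
	}
	return nil
}

func main() {
	// Wiring watchReceived to a live backend (for example a SimulatedBackend)
	// is left out of this sketch.
	fmt.Println("sketch only; see watchReceived")
}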
@ -63,10 +63,11 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
|
||||
return r
|
||||
}, abis[i])
|
||||
|
||||
// Extract the call and transact methods, and sort them alphabetically
|
||||
// Extract the call and transact methods; events; and sort them alphabetically
|
||||
var (
|
||||
calls = make(map[string]*tmplMethod)
|
||||
transacts = make(map[string]*tmplMethod)
|
||||
events = make(map[string]*tmplEvent)
|
||||
)
|
||||
for _, original := range evmABI.Methods {
|
||||
// Normalize the method for capital cases and non-anonymous inputs/outputs
|
||||
@ -89,11 +90,33 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
|
||||
}
|
||||
// Append the methods to the call or transact lists
|
||||
if original.Const {
|
||||
calls[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original)}
|
||||
calls[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)}
|
||||
} else {
|
||||
transacts[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original)}
|
||||
transacts[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)}
|
||||
}
|
||||
}
|
||||
for _, original := range evmABI.Events {
|
||||
// Skip anonymous events as they don't support explicit filtering
|
||||
if original.Anonymous {
|
||||
continue
|
||||
}
|
||||
// Normalize the event for capital cases and non-anonymous outputs
|
||||
normalized := original
|
||||
normalized.Name = methodNormalizer[lang](original.Name)
|
||||
|
||||
normalized.Inputs = make([]abi.Argument, len(original.Inputs))
|
||||
copy(normalized.Inputs, original.Inputs)
|
||||
for j, input := range normalized.Inputs {
|
||||
// Indexed fields are input, non-indexed ones are outputs
|
||||
if input.Indexed {
|
||||
if input.Name == "" {
|
||||
normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Append the event to the accumulator list
|
||||
events[original.Name] = &tmplEvent{Original: original, Normalized: normalized}
|
||||
}
|
||||
contracts[types[i]] = &tmplContract{
|
||||
Type: capitalise(types[i]),
|
||||
InputABI: strings.Replace(strippedABI, "\"", "\\\"", -1),
|
||||
@ -101,6 +124,7 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
|
||||
Constructor: evmABI.Constructor,
|
||||
Calls: calls,
|
||||
Transacts: transacts,
|
||||
Events: events,
|
||||
}
|
||||
}
|
||||
// Generate the contract template data content and render it
|
||||
@ -111,10 +135,11 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
|
||||
buffer := new(bytes.Buffer)
|
||||
|
||||
funcs := map[string]interface{}{
|
||||
"bindtype": bindType[lang],
|
||||
"namedtype": namedType[lang],
|
||||
"capitalise": capitalise,
|
||||
"decapitalise": decapitalise,
|
||||
"bindtype": bindType[lang],
|
||||
"bindtopictype": bindTopicType[lang],
|
||||
"namedtype": namedType[lang],
|
||||
"capitalise": capitalise,
|
||||
"decapitalise": decapitalise,
|
||||
}
|
||||
tmpl := template.Must(template.New("").Funcs(funcs).Parse(tmplSource[lang]))
|
||||
if err := tmpl.Execute(buffer, data); err != nil {
|
||||
@ -133,7 +158,7 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
|
||||
}
|
||||
|
||||
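Bind ends by rendering the collected template data, so a caller only ever sees one function: hand it matching slices of contract names, ABI blobs and (optionally) deploy bytecode, plus a package name and target language, and it returns the generated source as a string. A hedged sketch of driving it for a single contract; the contract name, ABI variable and output path are placeholders:

    package example

    import (
        "io/ioutil"

        "github.com/ethereum/go-ethereum/accounts/abi/bind"
    )

    // generate renders Go bindings for one ABI and writes them to disk.
    func generate(abiJSON string) error {
        code, err := bind.Bind(
            []string{"Token"}, // contract type names
            []string{abiJSON}, // matching ABI definitions
            []string{""},      // no deploy bytecode, bindings only
            "token",           // Go package name of the generated file
            bind.LangGo,       // target language
        )
        if err != nil {
            return err
        }
        return ioutil.WriteFile("token.go", []byte(code), 0600)
    }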
// bindType is a set of type binders that convert Solidity types to some supported
|
||||
// programming language.
|
||||
// programming language types.
|
||||
var bindType = map[Lang]func(kind abi.Type) string{
|
||||
LangGo: bindTypeGo,
|
||||
LangJava: bindTypeJava,
|
||||
@ -254,6 +279,33 @@ func bindTypeJava(kind abi.Type) string {
|
||||
}
|
||||
}
|
||||
|
||||
// bindTopicType is a set of type binders that convert Solidity types to some
|
||||
// supported programming language topic types.
|
||||
var bindTopicType = map[Lang]func(kind abi.Type) string{
|
||||
LangGo: bindTopicTypeGo,
|
||||
LangJava: bindTopicTypeJava,
|
||||
}
|
||||
|
||||
// bindTopicTypeGo converts a Solidity topic type to a Go one. It is almost the same
|
||||
// functionality as for simple types, but dynamic types get converted to hashes.
|
||||
func bindTopicTypeGo(kind abi.Type) string {
|
||||
bound := bindTypeGo(kind)
|
||||
if bound == "string" || bound == "[]byte" {
|
||||
bound = "common.Hash"
|
||||
}
|
||||
return bound
|
||||
}
|
||||
|
||||
// bindTopicTypeJava converts a Solidity topic type to a Java one. It is almost the same
|
||||
// functionality as for simple types, but dynamic types get converted to hashes.
|
||||
func bindTopicTypeJava(kind abi.Type) string {
|
||||
bound := bindTypeJava(kind)
|
||||
if bound == "String" || bound == "Bytes" {
|
||||
bound = "Hash"
|
||||
}
|
||||
return bound
|
||||
}
|
||||
|
||||
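The effect of the topic binders shows up directly in the generated event structs: a non-indexed string or bytes field keeps its natural Go type, while the same field marked indexed can only be recovered as its Keccak256 hash, so it binds to common.Hash. A hypothetical generated struct for an event mixing both flavours (the names are illustrative, not taken from this diff):

    package example

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/types"
    )

    // ExampleDynamic is roughly what the binder would emit for
    //   event Dynamic(string indexed idxStr, bytes indexed idxDat, string str, bytes dat)
    type ExampleDynamic struct {
        IdxStr common.Hash // indexed string: only its hash is stored on chain
        IdxDat common.Hash // indexed bytes: likewise reduced to a hash
        Str    string      // non-indexed string: full value decoded from log data
        Dat    []byte      // non-indexed bytes: full value decoded from log data
        Raw    types.Log   // blockchain metadata of the containing log
    }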
// namedType is a set of functions that transform language specific types to
|
||||
// named versions that may be used inside method names.
|
||||
var namedType = map[Lang]func(string, abi.Type) string{
|
||||
@ -304,8 +356,15 @@ var methodNormalizer = map[Lang]func(string) string{
|
||||
LangJava: decapitalise,
|
||||
}
|
||||
|
||||
// capitalise makes the first character of a string upper case.
|
||||
// capitalise makes the first character of a string upper case, also removing any
|
||||
// leading underscores from the variable names.
|
||||
func capitalise(input string) string {
|
||||
for len(input) > 0 && input[0] == '_' {
|
||||
input = input[1:]
|
||||
}
|
||||
if len(input) == 0 {
|
||||
return ""
|
||||
}
|
||||
return strings.ToUpper(input[:1]) + input[1:]
|
||||
}
|
||||
|
||||
@ -314,16 +373,25 @@ func decapitalise(input string) string {
|
||||
return strings.ToLower(input[:1]) + input[1:]
|
||||
}
|
||||
|
||||
// structured checks whether a method has enough information to return a proper
|
||||
// Go struct or if flat returns are needed.
|
||||
func structured(method abi.Method) bool {
|
||||
if len(method.Outputs) < 2 {
|
||||
// structured checks whether a list of ABI data types has enough information to
|
||||
// operate through a proper Go struct or if flat returns are needed.
|
||||
func structured(args abi.Arguments) bool {
|
||||
if len(args) < 2 {
|
||||
return false
|
||||
}
|
||||
for _, out := range method.Outputs {
|
||||
exists := make(map[string]bool)
|
||||
for _, out := range args {
|
||||
// If the name is anonymous, we can't organize into a struct
|
||||
if out.Name == "" {
|
||||
return false
|
||||
}
|
||||
// If the field name is empty when normalized or collides (var, Var, _var, _Var),
|
||||
// we can't organize into a struct
|
||||
field := capitalise(out.Name)
|
||||
if field == "" || exists[field] {
|
||||
return false
|
||||
}
|
||||
exists[field] = true
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
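Since capitalise both upper-cases the first rune and strips leading underscores, output names such as res, Res, _res and _Res all collapse onto the same struct field; structured rejects those sets and the binder falls back to flat return values, which is exactly what the collidingOutputs entry in the test ABI just below exercises. A small sketch, assuming it lives inside the bind package because structured is unexported; the ABI snippet is made up:

    package bind

    import (
        "strings"

        "github.com/ethereum/go-ethereum/accounts/abi"
    )

    const collidingABI = `[{"type":"function","name":"pair","constant":true,
      "inputs":[],
      "outputs":[{"name":"_res","type":"int256"},{"name":"res","type":"int256"}]}]`

    // demoStructured reports false: "_res" and "res" both capitalise to "Res",
    // so the generated method returns a flat (a, b, err) tuple, not a struct.
    func demoStructured() (bool, error) {
        parsed, err := abi.JSON(strings.NewReader(collidingABI))
        if err != nil {
            return false, err
        }
        return structured(parsed.Methods["pair"].Outputs), nil
    }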
@ -126,6 +126,7 @@ var bindTests = []struct {
|
||||
{"type":"function","name":"namedOutput","constant":true,"inputs":[],"outputs":[{"name":"str","type":"string"}]},
|
||||
{"type":"function","name":"anonOutput","constant":true,"inputs":[],"outputs":[{"name":"","type":"string"}]},
|
||||
{"type":"function","name":"namedOutputs","constant":true,"inputs":[],"outputs":[{"name":"str1","type":"string"},{"name":"str2","type":"string"}]},
|
||||
{"type":"function","name":"collidingOutputs","constant":true,"inputs":[],"outputs":[{"name":"str","type":"string"},{"name":"Str","type":"string"}]},
|
||||
{"type":"function","name":"anonOutputs","constant":true,"inputs":[],"outputs":[{"name":"","type":"string"},{"name":"","type":"string"}]},
|
||||
{"type":"function","name":"mixedOutputs","constant":true,"inputs":[],"outputs":[{"name":"","type":"string"},{"name":"str","type":"string"}]}
|
||||
]
|
||||
@ -140,12 +141,71 @@ var bindTests = []struct {
|
||||
str1, err = b.NamedOutput(nil)
|
||||
str1, err = b.AnonOutput(nil)
|
||||
res, _ := b.NamedOutputs(nil)
|
||||
str1, str2, err = b.CollidingOutputs(nil)
|
||||
str1, str2, err = b.AnonOutputs(nil)
|
||||
str1, str2, err = b.MixedOutputs(nil)
|
||||
|
||||
fmt.Println(str1, str2, res.Str1, res.Str2, err)
|
||||
}`,
|
||||
},
|
||||
// Tests that named, anonymous and indexed events are handled correctly
|
||||
{
|
||||
`EventChecker`, ``, ``,
|
||||
`
|
||||
[
|
||||
{"type":"event","name":"empty","inputs":[]},
|
||||
{"type":"event","name":"indexed","inputs":[{"name":"addr","type":"address","indexed":true},{"name":"num","type":"int256","indexed":true}]},
|
||||
{"type":"event","name":"mixed","inputs":[{"name":"addr","type":"address","indexed":true},{"name":"num","type":"int256"}]},
|
||||
{"type":"event","name":"anonymous","anonymous":true,"inputs":[]},
|
||||
{"type":"event","name":"dynamic","inputs":[{"name":"idxStr","type":"string","indexed":true},{"name":"idxDat","type":"bytes","indexed":true},{"name":"str","type":"string"},{"name":"dat","type":"bytes"}]}
|
||||
]
|
||||
`,
|
||||
`if e, err := NewEventChecker(common.Address{}, nil); e == nil || err != nil {
|
||||
t.Fatalf("binding (%v) nil or error (%v) not nil", e, nil)
|
||||
} else if false { // Don't run, just compile and test types
|
||||
var (
|
||||
err error
|
||||
res bool
|
||||
str string
|
||||
dat []byte
|
||||
hash common.Hash
|
||||
)
|
||||
_, err = e.FilterEmpty(nil)
|
||||
_, err = e.FilterIndexed(nil, []common.Address{}, []*big.Int{})
|
||||
|
||||
mit, err := e.FilterMixed(nil, []common.Address{})
|
||||
|
||||
res = mit.Next() // Make sure the iterator has a Next method
|
||||
err = mit.Error() // Make sure the iterator has an Error method
|
||||
err = mit.Close() // Make sure the iterator has a Close method
|
||||
|
||||
fmt.Println(mit.Event.Raw.BlockHash) // Make sure the raw log is contained within the results
|
||||
fmt.Println(mit.Event.Num) // Make sure the unpacked non-indexed fields are present
|
||||
fmt.Println(mit.Event.Addr) // Make sure the reconstructed indexed fields are present
|
||||
|
||||
dit, err := e.FilterDynamic(nil, []string{}, [][]byte{})
|
||||
|
||||
str = dit.Event.Str // Make sure non-indexed strings retain their type
|
||||
dat = dit.Event.Dat // Make sure non-indexed bytes retain their type
|
||||
hash = dit.Event.IdxStr // Make sure indexed strings turn into hashes
|
||||
hash = dit.Event.IdxDat // Make sure indexed bytes turn into hashes
|
||||
|
||||
sink := make(chan *EventCheckerMixed)
|
||||
sub, err := e.WatchMixed(nil, sink, []common.Address{})
|
||||
defer sub.Unsubscribe()
|
||||
|
||||
event := <-sink
|
||||
fmt.Println(event.Raw.BlockHash) // Make sure the raw log is contained within the results
|
||||
fmt.Println(event.Num) // Make sure the unpacked non-indexed fields are present
|
||||
fmt.Println(event.Addr) // Make sure the reconstructed indexed fields are present
|
||||
|
||||
fmt.Println(res, str, dat, hash, err)
|
||||
}
|
||||
// Run a tiny reflection test to ensure disallowed methods don't appear
|
||||
if _, ok := reflect.TypeOf(&EventChecker{}).MethodByName("FilterAnonymous"); ok {
|
||||
t.Errorf("binding has disallowed method (FilterAnonymous)")
|
||||
}`,
|
||||
},
|
||||
// Test that contract interactions (deploy, transact and call) generate working code
|
||||
{
|
||||
`Interactor`,
|
||||
@ -397,7 +457,6 @@ var bindTests = []struct {
|
||||
sim.Commit()
|
||||
|
||||
// Set the field with automatic estimation and check that it succeeds
|
||||
auth.GasLimit = nil
|
||||
if _, err := limiter.SetField(auth, "automatic"); err != nil {
|
||||
t.Fatalf("Failed to call automatically gased transaction: %v", err)
|
||||
}
|
||||
@ -447,6 +506,237 @@ var bindTests = []struct {
|
||||
}
|
||||
`,
|
||||
},
|
||||
{
|
||||
`Underscorer`,
|
||||
`
|
||||
contract Underscorer {
|
||||
function UnderscoredOutput() constant returns (int _int, string _string) {
|
||||
return (314, "pi");
|
||||
}
|
||||
function LowerLowerCollision() constant returns (int _res, int res) {
|
||||
return (1, 2);
|
||||
}
|
||||
function LowerUpperCollision() constant returns (int _res, int Res) {
|
||||
return (1, 2);
|
||||
}
|
||||
function UpperLowerCollision() constant returns (int _Res, int res) {
|
||||
return (1, 2);
|
||||
}
|
||||
function UpperUpperCollision() constant returns (int _Res, int Res) {
|
||||
return (1, 2);
|
||||
}
|
||||
function PurelyUnderscoredOutput() constant returns (int _, int res) {
|
||||
return (1, 2);
|
||||
}
|
||||
function AllPurelyUnderscoredOutput() constant returns (int _, int __) {
|
||||
return (1, 2);
|
||||
}
|
||||
}
|
||||
`, `6060604052341561000f57600080fd5b6103498061001e6000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806303a592131461008857806367e6633d146100b85780639df484851461014d578063af7486ab1461017d578063b564b34d146101ad578063e02ab24d146101dd578063e409ca451461020d575b600080fd5b341561009357600080fd5b61009b61023d565b604051808381526020018281526020019250505060405180910390f35b34156100c357600080fd5b6100cb610252565b6040518083815260200180602001828103825283818151815260200191508051906020019080838360005b838110156101115780820151818401526020810190506100f6565b50505050905090810190601f16801561013e5780820380516001836020036101000a031916815260200191505b50935050505060405180910390f35b341561015857600080fd5b6101606102a0565b604051808381526020018281526020019250505060405180910390f35b341561018857600080fd5b6101906102b5565b604051808381526020018281526020019250505060405180910390f35b34156101b857600080fd5b6101c06102ca565b604051808381526020018281526020019250505060405180910390f35b34156101e857600080fd5b6101f06102df565b604051808381526020018281526020019250505060405180910390f35b341561021857600080fd5b6102206102f4565b604051808381526020018281526020019250505060405180910390f35b60008060016002819150809050915091509091565b600061025c610309565b61013a8090506040805190810160405280600281526020017f7069000000000000000000000000000000000000000000000000000000000000815250915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b6020604051908101604052806000815250905600a165627a7a72305820c11dcfa136fc7d182ee4d34f0b12d988496228f7e2d02d2b5376d996ca1743d00029`,
|
||||
`[{"constant":true,"inputs":[],"name":"LowerUpperCollision","outputs":[{"name":"_res","type":"int256"},{"name":"Res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"UnderscoredOutput","outputs":[{"name":"_int","type":"int256"},{"name":"_string","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"PurelyUnderscoredOutput","outputs":[{"name":"_","type":"int256"},{"name":"res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"UpperLowerCollision","outputs":[{"name":"_Res","type":"int256"},{"name":"res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"AllPurelyUnderscoredOutput","outputs":[{"name":"_","type":"int256"},{"name":"__","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"UpperUpperCollision","outputs":[{"name":"_Res","type":"int256"},{"name":"Res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"LowerLowerCollision","outputs":[{"name":"_res","type":"int256"},{"name":"res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"}]`,
|
||||
`
|
||||
// Generate a new random account and a funded simulator
|
||||
key, _ := crypto.GenerateKey()
|
||||
auth := bind.NewKeyedTransactor(key)
|
||||
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
|
||||
|
||||
// Deploy an underscorer tester contract and execute a structured call on it
|
||||
_, _, underscorer, err := DeployUnderscorer(auth, sim)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to deploy underscorer contract: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
// Verify that underscored return values correctly parse into structs
|
||||
if res, err := underscorer.UnderscoredOutput(nil); err != nil {
|
||||
t.Errorf("Failed to call constant function: %v", err)
|
||||
} else if res.Int.Cmp(big.NewInt(314)) != 0 || res.String != "pi" {
|
||||
t.Errorf("Invalid result, want: {314, \"pi\"}, got: %+v", res)
|
||||
}
|
||||
// Verify that underscored and non-underscored name collisions force tuple outputs
|
||||
var a, b *big.Int
|
||||
|
||||
a, b, _ = underscorer.LowerLowerCollision(nil)
|
||||
a, b, _ = underscorer.LowerUpperCollision(nil)
|
||||
a, b, _ = underscorer.UpperLowerCollision(nil)
|
||||
a, b, _ = underscorer.UpperUpperCollision(nil)
|
||||
a, b, _ = underscorer.PurelyUnderscoredOutput(nil)
|
||||
a, b, _ = underscorer.AllPurelyUnderscoredOutput(nil)
|
||||
|
||||
fmt.Println(a, b, err)
|
||||
`,
|
||||
},
|
||||
// Tests that logs can be successfully filtered and decoded.
|
||||
{
|
||||
`Eventer`,
|
||||
`
|
||||
contract Eventer {
|
||||
event SimpleEvent (
|
||||
address indexed Addr,
|
||||
bytes32 indexed Id,
|
||||
bool indexed Flag,
|
||||
uint Value
|
||||
);
|
||||
function raiseSimpleEvent(address addr, bytes32 id, bool flag, uint value) {
|
||||
SimpleEvent(addr, id, flag, value);
|
||||
}
|
||||
|
||||
event NodataEvent (
|
||||
uint indexed Number,
|
||||
int16 indexed Short,
|
||||
uint32 indexed Long
|
||||
);
|
||||
function raiseNodataEvent(uint number, int16 short, uint32 long) {
|
||||
NodataEvent(number, short, long);
|
||||
}
|
||||
|
||||
event DynamicEvent (
|
||||
string indexed IndexedString,
|
||||
bytes indexed IndexedBytes,
|
||||
string NonIndexedString,
|
||||
bytes NonIndexedBytes
|
||||
);
|
||||
function raiseDynamicEvent(string str, bytes blob) {
|
||||
DynamicEvent(str, blob, str, blob);
|
||||
}
|
||||
}
|
||||
`,
|
||||
`6060604052341561000f57600080fd5b61042c8061001e6000396000f300606060405260043610610057576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063528300ff1461005c578063630c31e2146100fc578063c7d116dd14610156575b600080fd5b341561006757600080fd5b6100fa600480803590602001908201803590602001908080601f0160208091040260200160405190810160405280939291908181526020018383808284378201915050505050509190803590602001908201803590602001908080601f01602080910402602001604051908101604052809392919081815260200183838082843782019150505050505091905050610194565b005b341561010757600080fd5b610154600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035600019169060200190919080351515906020019091908035906020019091905050610367565b005b341561016157600080fd5b610192600480803590602001909190803560010b90602001909190803563ffffffff169060200190919050506103c3565b005b806040518082805190602001908083835b6020831015156101ca57805182526020820191506020810190506020830392506101a5565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040518091039020826040518082805190602001908083835b60208310151561022d5780518252602082019150602081019050602083039250610208565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405180910390207f3281fd4f5e152dd3385df49104a3f633706e21c9e80672e88d3bcddf33101f008484604051808060200180602001838103835285818151815260200191508051906020019080838360005b838110156102c15780820151818401526020810190506102a6565b50505050905090810190601f1680156102ee5780820380516001836020036101000a031916815260200191505b50838103825284818151815260200191508051906020019080838360005b8381101561032757808201518184015260208101905061030c565b50505050905090810190601f1680156103545780820380516001836020036101000a031916815260200191505b5094505050505060405180910390a35050565b81151583600019168573ffffffffffffffffffffffffffffffffffffffff167f1f097de4289df643bd9c11011cc61367aa12983405c021056e706eb5ba1250c8846040518082815260200191505060405180910390a450505050565b8063ffffffff168260010b847f3ca7f3a77e5e6e15e781850bc82e32adfa378a2a609370db24b4d0fae10da2c960405160405180910390a45050505600a165627a7a72305820d1f8a8bbddbc5bb29f285891d6ae1eef8420c52afdc05e1573f6114d8e1714710029`,
|
||||
`[{"constant":false,"inputs":[{"name":"str","type":"string"},{"name":"blob","type":"bytes"}],"name":"raiseDynamicEvent","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"addr","type":"address"},{"name":"id","type":"bytes32"},{"name":"flag","type":"bool"},{"name":"value","type":"uint256"}],"name":"raiseSimpleEvent","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"number","type":"uint256"},{"name":"short","type":"int16"},{"name":"long","type":"uint32"}],"name":"raiseNodataEvent","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"Addr","type":"address"},{"indexed":true,"name":"Id","type":"bytes32"},{"indexed":true,"name":"Flag","type":"bool"},{"indexed":false,"name":"Value","type":"uint256"}],"name":"SimpleEvent","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"Number","type":"uint256"},{"indexed":true,"name":"Short","type":"int16"},{"indexed":true,"name":"Long","type":"uint32"}],"name":"NodataEvent","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"IndexedString","type":"string"},{"indexed":true,"name":"IndexedBytes","type":"bytes"},{"indexed":false,"name":"NonIndexedString","type":"string"},{"indexed":false,"name":"NonIndexedBytes","type":"bytes"}],"name":"DynamicEvent","type":"event"}]`,
|
||||
`
|
||||
// Generate a new random account and a funded simulator
|
||||
key, _ := crypto.GenerateKey()
|
||||
auth := bind.NewKeyedTransactor(key)
|
||||
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
|
||||
|
||||
// Deploy an eventer contract
|
||||
_, _, eventer, err := DeployEventer(auth, sim)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to deploy eventer contract: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
// Inject a few events into the contract, gradually more in each block
|
||||
for i := 1; i <= 3; i++ {
|
||||
for j := 1; j <= i; j++ {
|
||||
if _, err := eventer.RaiseSimpleEvent(auth, common.Address{byte(j)}, [32]byte{byte(j)}, true, big.NewInt(int64(10*i+j))); err != nil {
|
||||
t.Fatalf("block %d, event %d: raise failed: %v", i, j, err)
|
||||
}
|
||||
}
|
||||
sim.Commit()
|
||||
}
|
||||
// Test filtering for certain events and ensure they can be found
|
||||
sit, err := eventer.FilterSimpleEvent(nil, []common.Address{common.Address{1}, common.Address{3}}, [][32]byte{{byte(1)}, {byte(2)}, {byte(3)}}, []bool{true})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to filter for simple events: %v", err)
|
||||
}
|
||||
defer sit.Close()
|
||||
|
||||
sit.Next()
|
||||
if sit.Event.Value.Uint64() != 11 || !sit.Event.Flag {
|
||||
t.Errorf("simple log content mismatch: have %v, want {11, true}", sit.Event)
|
||||
}
|
||||
sit.Next()
|
||||
if sit.Event.Value.Uint64() != 21 || !sit.Event.Flag {
|
||||
t.Errorf("simple log content mismatch: have %v, want {21, true}", sit.Event)
|
||||
}
|
||||
sit.Next()
|
||||
if sit.Event.Value.Uint64() != 31 || !sit.Event.Flag {
|
||||
t.Errorf("simple log content mismatch: have %v, want {31, true}", sit.Event)
|
||||
}
|
||||
sit.Next()
|
||||
if sit.Event.Value.Uint64() != 33 || !sit.Event.Flag {
|
||||
t.Errorf("simple log content mismatch: have %v, want {33, true}", sit.Event)
|
||||
}
|
||||
|
||||
if sit.Next() {
|
||||
t.Errorf("unexpected simple event found: %+v", sit.Event)
|
||||
}
|
||||
if err = sit.Error(); err != nil {
|
||||
t.Fatalf("simple event iteration failed: %v", err)
|
||||
}
|
||||
// Test raising and filtering for an event with no data component
|
||||
if _, err := eventer.RaiseNodataEvent(auth, big.NewInt(314), 141, 271); err != nil {
|
||||
t.Fatalf("failed to raise nodata event: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
nit, err := eventer.FilterNodataEvent(nil, []*big.Int{big.NewInt(314)}, []int16{140, 141, 142}, []uint32{271})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to filter for nodata events: %v", err)
|
||||
}
|
||||
defer nit.Close()
|
||||
|
||||
if !nit.Next() {
|
||||
t.Fatalf("nodata log not found: %v", nit.Error())
|
||||
}
|
||||
if nit.Event.Number.Uint64() != 314 {
|
||||
t.Errorf("nodata log content mismatch: have %v, want 314", nit.Event.Number)
|
||||
}
|
||||
if nit.Next() {
|
||||
t.Errorf("unexpected nodata event found: %+v", nit.Event)
|
||||
}
|
||||
if err = nit.Error(); err != nil {
|
||||
t.Fatalf("nodata event iteration failed: %v", err)
|
||||
}
|
||||
// Test raising and filtering for events with dynamic indexed components
|
||||
if _, err := eventer.RaiseDynamicEvent(auth, "Hello", []byte("World")); err != nil {
|
||||
t.Fatalf("failed to raise dynamic event: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
dit, err := eventer.FilterDynamicEvent(nil, []string{"Hi", "Hello", "Bye"}, [][]byte{[]byte("World")})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to filter for dynamic events: %v", err)
|
||||
}
|
||||
defer dit.Close()
|
||||
|
||||
if !dit.Next() {
|
||||
t.Fatalf("dynamic log not found: %v", dit.Error())
|
||||
}
|
||||
if dit.Event.NonIndexedString != "Hello" || string(dit.Event.NonIndexedBytes) != "World" || dit.Event.IndexedString != common.HexToHash("0x06b3dfaec148fb1bb2b066f10ec285e7c9bf402ab32aa78a5d38e34566810cd2") || dit.Event.IndexedBytes != common.HexToHash("0xf2208c967df089f60420785795c0a9ba8896b0f6f1867fa7f1f12ad6f79c1a18") {
|
||||
t.Errorf("dynamic log content mismatch: have %v, want {'0x06b3dfaec148fb1bb2b066f10ec285e7c9bf402ab32aa78a5d38e34566810cd2, '0xf2208c967df089f60420785795c0a9ba8896b0f6f1867fa7f1f12ad6f79c1a18', 'Hello', 'World'}", dit.Event)
|
||||
}
|
||||
if dit.Next() {
|
||||
t.Errorf("unexpected dynamic event found: %+v", dit.Event)
|
||||
}
|
||||
if err = dit.Error(); err != nil {
|
||||
t.Fatalf("dynamic event iteration failed: %v", err)
|
||||
}
|
||||
// Test subscribing to an event and raising it afterwards
|
||||
ch := make(chan *EventerSimpleEvent, 16)
|
||||
sub, err := eventer.WatchSimpleEvent(nil, ch, nil, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to subscribe to simple events: %v", err)
|
||||
}
|
||||
if _, err := eventer.RaiseSimpleEvent(auth, common.Address{255}, [32]byte{255}, true, big.NewInt(255)); err != nil {
|
||||
t.Fatalf("failed to raise subscribed simple event: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
select {
|
||||
case event := <-ch:
|
||||
if event.Value.Uint64() != 255 {
|
||||
t.Errorf("simple log content mismatch: have %v, want 255", event)
|
||||
}
|
||||
case <-time.After(250 * time.Millisecond):
|
||||
t.Fatalf("subscribed simple event didn't arrive")
|
||||
}
|
||||
// Unsubscribe from the event and make sure we're not delivered more
|
||||
sub.Unsubscribe()
|
||||
|
||||
if _, err := eventer.RaiseSimpleEvent(auth, common.Address{254}, [32]byte{254}, true, big.NewInt(254)); err != nil {
|
||||
t.Fatalf("failed to raise subscribed simple event: %v", err)
|
||||
}
|
||||
sim.Commit()
|
||||
|
||||
select {
|
||||
case event := <-ch:
|
||||
t.Fatalf("unsubscribed simple event arrived: %v", event)
|
||||
case <-time.After(250 * time.Millisecond):
|
||||
}
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
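All of the filter calls in these tests pass nil options, which makes the generated code scan from the genesis block onward. In real use the generated Filter* methods accept a *bind.FilterOpts that can pin the scan to a block window; a hedged sketch against the Eventer binding generated above, assuming it sits in the same package as that binding and that FilterOpts exposes Start/End block fields as in the bind package:

    // filterWindow scans only blocks [start, end] for SimpleEvent logs,
    // with no topic restrictions (nil slices mean wildcard rules).
    func filterWindow(eventer *Eventer, start, end uint64) error {
        opts := &bind.FilterOpts{Start: start, End: &end}
        it, err := eventer.FilterSimpleEvent(opts, nil, nil, nil)
        if err != nil {
            return err
        }
        defer it.Close()

        for it.Next() {
            _ = it.Event.Value // decoded fields and it.Event.Raw are available here
        }
        return it.Error()
    }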
// Tests that packages generated by the binder can be successfully compiled and
|
||||
@ -498,7 +788,7 @@ func TestBindings(t *testing.T) {
|
||||
}
|
||||
}
|
||||
// Test the entire package and report any failures
|
||||
cmd := exec.Command(gocmd, "test", "-v")
|
||||
cmd := exec.Command(gocmd, "test", "-v", "-count", "1")
|
||||
cmd.Dir = pkg
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("failed to run binding test: %v\n%s", err, out)
|
||||
|
@ -32,6 +32,7 @@ type tmplContract struct {
|
||||
Constructor abi.Method // Contract constructor for deploy parametrization
|
||||
Calls map[string]*tmplMethod // Contract calls that only read state data
|
||||
Transacts map[string]*tmplMethod // Contract calls that write state data
|
||||
Events map[string]*tmplEvent // Contract event accessors
|
||||
}
|
||||
|
||||
// tmplMethod is a wrapper around an abi.Method that contains a few preprocessed
|
||||
@ -39,7 +40,13 @@ type tmplContract struct {
|
||||
type tmplMethod struct {
|
||||
Original abi.Method // Original method as parsed by the abi package
|
||||
Normalized abi.Method // Normalized version of the parsed method (capitalized names, non-anonymous args/returns)
|
||||
Structured bool // Whether the returns should be accumulated into a contract
|
||||
Structured bool // Whether the returns should be accumulated into a struct
|
||||
}
|
||||
|
||||
// tmplEvent is a wrapper around an abi.Event that contains a few preprocessed and cached data fields.
|
||||
type tmplEvent struct {
|
||||
Original abi.Event // Original event as parsed by the abi package
|
||||
Normalized abi.Event // Normalized version of the parsed fields
|
||||
}
|
||||
|
||||
// tmplSource is language to template mapping containing all the supported
|
||||
@ -75,7 +82,7 @@ package {{.Package}}
|
||||
if err != nil {
|
||||
return common.Address{}, nil, nil, err
|
||||
}
|
||||
return address, tx, &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract} }, nil
|
||||
return address, tx, &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract}, {{.Type}}Filterer: {{.Type}}Filterer{contract: contract} }, nil
|
||||
}
|
||||
{{end}}
|
||||
|
||||
@ -83,6 +90,7 @@ package {{.Package}}
|
||||
type {{.Type}} struct {
|
||||
{{.Type}}Caller // Read-only binding to the contract
|
||||
{{.Type}}Transactor // Write-only binding to the contract
|
||||
{{.Type}}Filterer // Log filterer for contract events
|
||||
}
|
||||
|
||||
// {{.Type}}Caller is an auto generated read-only Go binding around an Ethereum contract.
|
||||
@ -95,6 +103,11 @@ package {{.Package}}
|
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
}
|
||||
|
||||
// {{.Type}}Filterer is an auto generated log filtering Go binding around Ethereum contract events.
|
||||
type {{.Type}}Filterer struct {
|
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
}
|
||||
|
||||
// {{.Type}}Session is an auto generated Go binding around an Ethereum contract,
|
||||
// with pre-set call and transact options.
|
||||
type {{.Type}}Session struct {
|
||||
@ -134,16 +147,16 @@ package {{.Package}}
|
||||
|
||||
// New{{.Type}} creates a new instance of {{.Type}}, bound to a specific deployed contract.
|
||||
func New{{.Type}}(address common.Address, backend bind.ContractBackend) (*{{.Type}}, error) {
|
||||
contract, err := bind{{.Type}}(address, backend, backend)
|
||||
contract, err := bind{{.Type}}(address, backend, backend, backend)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract} }, nil
|
||||
return &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract}, {{.Type}}Filterer: {{.Type}}Filterer{contract: contract} }, nil
|
||||
}
|
||||
|
||||
// New{{.Type}}Caller creates a new read-only instance of {{.Type}}, bound to a specific deployed contract.
|
||||
func New{{.Type}}Caller(address common.Address, caller bind.ContractCaller) (*{{.Type}}Caller, error) {
|
||||
contract, err := bind{{.Type}}(address, caller, nil)
|
||||
contract, err := bind{{.Type}}(address, caller, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -152,20 +165,29 @@ package {{.Package}}
|
||||
|
||||
// New{{.Type}}Transactor creates a new write-only instance of {{.Type}}, bound to a specific deployed contract.
|
||||
func New{{.Type}}Transactor(address common.Address, transactor bind.ContractTransactor) (*{{.Type}}Transactor, error) {
|
||||
contract, err := bind{{.Type}}(address, nil, transactor)
|
||||
contract, err := bind{{.Type}}(address, nil, transactor, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &{{.Type}}Transactor{contract: contract}, nil
|
||||
}
|
||||
|
||||
// New{{.Type}}Filterer creates a new log filterer instance of {{.Type}}, bound to a specific deployed contract.
|
||||
func New{{.Type}}Filterer(address common.Address, filterer bind.ContractFilterer) (*{{.Type}}Filterer, error) {
|
||||
contract, err := bind{{.Type}}(address, nil, nil, filterer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &{{.Type}}Filterer{contract: contract}, nil
|
||||
}
|
||||
|
||||
// bind{{.Type}} binds a generic wrapper to an already deployed contract.
|
||||
func bind{{.Type}}(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor) (*bind.BoundContract, error) {
|
||||
func bind{{.Type}}(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
|
||||
parsed, err := abi.JSON(strings.NewReader({{.Type}}ABI))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bind.NewBoundContract(address, parsed, caller, transactor), nil
|
||||
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
|
||||
}
|
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
@ -263,6 +285,137 @@ package {{.Package}}
|
||||
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
|
||||
}
|
||||
{{end}}
|
||||
|
||||
{{range .Events}}
|
||||
// {{$contract.Type}}{{.Normalized.Name}}Iterator is returned from Filter{{.Normalized.Name}} and is used to iterate over the raw logs and unpacked data for {{.Normalized.Name}} events raised by the {{$contract.Type}} contract.
|
||||
type {{$contract.Type}}{{.Normalized.Name}}Iterator struct {
|
||||
Event *{{$contract.Type}}{{.Normalized.Name}} // Event containing the contract specifics and raw log
|
||||
|
||||
contract *bind.BoundContract // Generic contract to use for unpacking event data
|
||||
event string // Event name to use for unpacking event data
|
||||
|
||||
logs chan types.Log // Log channel receiving the found contract events
|
||||
sub ethereum.Subscription // Subscription for errors, completion and termination
|
||||
done bool // Whether the subscription completed delivering logs
|
||||
fail error // Occurred error to stop iteration
|
||||
}
|
||||
// Next advances the iterator to the subsequent event, returning whether there
|
||||
// are any more events found. In case of a retrieval or parsing error, false is
|
||||
// returned and Error() can be queried for the exact failure.
|
||||
func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Next() bool {
|
||||
// If the iterator failed, stop iterating
|
||||
if (it.fail != nil) {
|
||||
return false
|
||||
}
|
||||
// If the iterator completed, deliver directly whatever's available
|
||||
if (it.done) {
|
||||
select {
|
||||
case log := <-it.logs:
|
||||
it.Event = new({{$contract.Type}}{{.Normalized.Name}})
|
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
|
||||
it.fail = err
|
||||
return false
|
||||
}
|
||||
it.Event.Raw = log
|
||||
return true
|
||||
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
// Iterator still in progress, wait for either a data or an error event
|
||||
select {
|
||||
case log := <-it.logs:
|
||||
it.Event = new({{$contract.Type}}{{.Normalized.Name}})
|
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
|
||||
it.fail = err
|
||||
return false
|
||||
}
|
||||
it.Event.Raw = log
|
||||
return true
|
||||
|
||||
case err := <-it.sub.Err():
|
||||
it.done = true
|
||||
it.fail = err
|
||||
return it.Next()
|
||||
}
|
||||
}
|
||||
// Error returns any retrieval or parsing error that occurred during filtering.
|
||||
func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Error() error {
|
||||
return it.fail
|
||||
}
|
||||
// Close terminates the iteration process, releasing any pending underlying
|
||||
// resources.
|
||||
func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Close() error {
|
||||
it.sub.Unsubscribe()
|
||||
return nil
|
||||
}
|
||||
|
||||
// {{$contract.Type}}{{.Normalized.Name}} represents a {{.Normalized.Name}} event raised by the {{$contract.Type}} contract.
|
||||
type {{$contract.Type}}{{.Normalized.Name}} struct { {{range .Normalized.Inputs}}
|
||||
{{capitalise .Name}} {{if .Indexed}}{{bindtopictype .Type}}{{else}}{{bindtype .Type}}{{end}}; {{end}}
|
||||
Raw types.Log // Blockchain specific contextual infos
|
||||
}
|
||||
|
||||
// Filter{{.Normalized.Name}} is a free log retrieval operation binding the contract event 0x{{printf "%x" .Original.Id}}.
|
||||
//
|
||||
// Solidity: {{.Original.String}}
|
||||
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Filter{{.Normalized.Name}}(opts *bind.FilterOpts{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type}}{{end}}{{end}}) (*{{$contract.Type}}{{.Normalized.Name}}Iterator, error) {
|
||||
{{range .Normalized.Inputs}}
|
||||
{{if .Indexed}}var {{.Name}}Rule []interface{}
|
||||
for _, {{.Name}}Item := range {{.Name}} {
|
||||
{{.Name}}Rule = append({{.Name}}Rule, {{.Name}}Item)
|
||||
}{{end}}{{end}}
|
||||
|
||||
logs, sub, err := _{{$contract.Type}}.contract.FilterLogs(opts, "{{.Original.Name}}"{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}}Rule{{end}}{{end}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &{{$contract.Type}}{{.Normalized.Name}}Iterator{contract: _{{$contract.Type}}.contract, event: "{{.Original.Name}}", logs: logs, sub: sub}, nil
|
||||
}
|
||||
|
||||
// Watch{{.Normalized.Name}} is a free log subscription operation binding the contract event 0x{{printf "%x" .Original.Id}}.
|
||||
//
|
||||
// Solidity: {{.Original.String}}
|
||||
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Watch{{.Normalized.Name}}(opts *bind.WatchOpts, sink chan<- *{{$contract.Type}}{{.Normalized.Name}}{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type}}{{end}}{{end}}) (event.Subscription, error) {
|
||||
{{range .Normalized.Inputs}}
|
||||
{{if .Indexed}}var {{.Name}}Rule []interface{}
|
||||
for _, {{.Name}}Item := range {{.Name}} {
|
||||
{{.Name}}Rule = append({{.Name}}Rule, {{.Name}}Item)
|
||||
}{{end}}{{end}}
|
||||
|
||||
logs, sub, err := _{{$contract.Type}}.contract.WatchLogs(opts, "{{.Original.Name}}"{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}}Rule{{end}}{{end}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return event.NewSubscription(func(quit <-chan struct{}) error {
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case log := <-logs:
|
||||
// New log arrived, parse the event and forward to the user
|
||||
event := new({{$contract.Type}}{{.Normalized.Name}})
|
||||
if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
|
||||
return err
|
||||
}
|
||||
event.Raw = log
|
||||
|
||||
select {
|
||||
case sink <- event:
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
case <-quit:
|
||||
return nil
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
return err
|
||||
case <-quit:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}), nil
|
||||
}
|
||||
{{end}}
|
||||
{{end}}
|
||||
`
|
||||
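Besides the combined wrapper, the template now also emits a standalone New{{.Type}}Filterer constructor, which is convenient when a caller only needs log access and its backend only implements the new ContractFilterer side. A hedged sketch for a hypothetical generated contract named Token, assuming ethclient.Client satisfies bind.ContractFilterer as the canonical ContractBackend does:

    package example

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/ethclient"
    )

    // newTokenFilterer dials a node and wires up only the log-filtering half of a
    // hypothetical generated Token binding; NewTokenFilterer and TokenFilterer
    // come from that generated code, not from this diff.
    func newTokenFilterer(rawurl string, addr common.Address) (*TokenFilterer, error) {
        client, err := ethclient.Dial(rawurl)
        if err != nil {
            return nil, err
        }
        return NewTokenFilterer(addr, client)
    }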
|
||||
|
189
accounts/abi/bind/topics.go
Normal file
189
accounts/abi/bind/topics.go
Normal file
@ -0,0 +1,189 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bind
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
// makeTopics converts a filter query argument list into a filter topic set.
|
||||
func makeTopics(query ...[]interface{}) ([][]common.Hash, error) {
|
||||
topics := make([][]common.Hash, len(query))
|
||||
for i, filter := range query {
|
||||
for _, rule := range filter {
|
||||
var topic common.Hash
|
||||
|
||||
// Try to generate the topic based on simple types
|
||||
switch rule := rule.(type) {
|
||||
case common.Hash:
|
||||
copy(topic[:], rule[:])
|
||||
case common.Address:
|
||||
copy(topic[common.HashLength-common.AddressLength:], rule[:])
|
||||
case *big.Int:
|
||||
blob := rule.Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case bool:
|
||||
if rule {
|
||||
topic[common.HashLength-1] = 1
|
||||
}
|
||||
case int8:
|
||||
blob := big.NewInt(int64(rule)).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case int16:
|
||||
blob := big.NewInt(int64(rule)).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case int32:
|
||||
blob := big.NewInt(int64(rule)).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case int64:
|
||||
blob := big.NewInt(rule).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case uint8:
|
||||
blob := new(big.Int).SetUint64(uint64(rule)).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case uint16:
|
||||
blob := new(big.Int).SetUint64(uint64(rule)).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case uint32:
|
||||
blob := new(big.Int).SetUint64(uint64(rule)).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case uint64:
|
||||
blob := new(big.Int).SetUint64(rule).Bytes()
|
||||
copy(topic[common.HashLength-len(blob):], blob)
|
||||
case string:
|
||||
hash := crypto.Keccak256Hash([]byte(rule))
|
||||
copy(topic[:], hash[:])
|
||||
case []byte:
|
||||
hash := crypto.Keccak256Hash(rule)
|
||||
copy(topic[:], hash[:])
|
||||
|
||||
default:
|
||||
// Attempt to generate the topic from funky types
|
||||
val := reflect.ValueOf(rule)
|
||||
|
||||
switch {
|
||||
case val.Kind() == reflect.Array && reflect.TypeOf(rule).Elem().Kind() == reflect.Uint8:
|
||||
reflect.Copy(reflect.ValueOf(topic[common.HashLength-val.Len():]), val)
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported indexed type: %T", rule)
|
||||
}
|
||||
}
|
||||
topics[i] = append(topics[i], topic)
|
||||
}
|
||||
}
|
||||
return topics, nil
|
||||
}
|
||||
|
||||
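makeTopics is what turns the per-argument rule slices passed to the generated Filter*/Watch* methods into the [][]common.Hash query fed to FilterLogs: position i of the outer slice is an OR-set of acceptable values for topic i, fixed-size values are left-padded into a 32-byte hash and dynamic values are reduced to their Keccak256 digest. A small sketch, assuming it lives inside the bind package because makeTopics is unexported:

    package bind

    import "github.com/ethereum/go-ethereum/common"

    // demoMakeTopics builds a two-topic filter: the first indexed argument may
    // match either of two addresses (left-padded to 32 bytes), the second must
    // match the Keccak256 hash of "usd", since that is all a dynamic indexed
    // value ever leaves on chain.
    func demoMakeTopics(a, b common.Address) ([][]common.Hash, error) {
        return makeTopics(
            []interface{}{a, b},  // OR-set for topic 1
            []interface{}{"usd"}, // string rule, hashed internally
        )
    }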
// Big batch of reflect types for topic reconstruction.
|
||||
var (
|
||||
reflectHash = reflect.TypeOf(common.Hash{})
|
||||
reflectAddress = reflect.TypeOf(common.Address{})
|
||||
reflectBigInt = reflect.TypeOf(new(big.Int))
|
||||
)
|
||||
|
||||
// parseTopics converts the indexed topic fields into actual log field values.
|
||||
//
|
||||
// Note, dynamic types cannot be reconstructed since they get mapped to Keccak256
|
||||
// hashes as the topic value!
|
||||
func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) error {
|
||||
// Sanity check that the fields and topics match up
|
||||
if len(fields) != len(topics) {
|
||||
return errors.New("topic/field count mismatch")
|
||||
}
|
||||
// Iterate over all the fields and reconstruct them from topics
|
||||
for _, arg := range fields {
|
||||
if !arg.Indexed {
|
||||
return errors.New("non-indexed field in topic reconstruction")
|
||||
}
|
||||
field := reflect.ValueOf(out).Elem().FieldByName(capitalise(arg.Name))
|
||||
|
||||
// Try to parse the topic back into the fields based on primitive types
|
||||
switch field.Kind() {
|
||||
case reflect.Bool:
|
||||
if topics[0][common.HashLength-1] == 1 {
|
||||
field.Set(reflect.ValueOf(true))
|
||||
}
|
||||
case reflect.Int8:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(int8(num.Int64())))
|
||||
|
||||
case reflect.Int16:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(int16(num.Int64())))
|
||||
|
||||
case reflect.Int32:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(int32(num.Int64())))
|
||||
|
||||
case reflect.Int64:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(num.Int64()))
|
||||
|
||||
case reflect.Uint8:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(uint8(num.Uint64())))
|
||||
|
||||
case reflect.Uint16:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(uint16(num.Uint64())))
|
||||
|
||||
case reflect.Uint32:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(uint32(num.Uint64())))
|
||||
|
||||
case reflect.Uint64:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(num.Uint64()))
|
||||
|
||||
default:
|
||||
// Ran out of plain primitive types, try custom types
|
||||
switch field.Type() {
|
||||
case reflectHash: // Also covers all dynamic types
|
||||
field.Set(reflect.ValueOf(topics[0]))
|
||||
|
||||
case reflectAddress:
|
||||
var addr common.Address
|
||||
copy(addr[:], topics[0][common.HashLength-common.AddressLength:])
|
||||
field.Set(reflect.ValueOf(addr))
|
||||
|
||||
case reflectBigInt:
|
||||
num := new(big.Int).SetBytes(topics[0][:])
|
||||
field.Set(reflect.ValueOf(num))
|
||||
|
||||
default:
|
||||
// Ran out of custom types, try the crazies
|
||||
switch {
|
||||
case arg.Type.T == abi.FixedBytesTy:
|
||||
reflect.Copy(field, reflect.ValueOf(topics[0][common.HashLength-arg.Type.Size:]))
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported indexed type: %v", arg.Type)
|
||||
}
|
||||
}
|
||||
}
|
||||
topics = topics[1:]
|
||||
}
|
||||
return nil
|
||||
}
|
@ -34,18 +34,18 @@ var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d
|
||||
|
||||
var waitDeployedTests = map[string]struct {
|
||||
code string
|
||||
gas *big.Int
|
||||
gas uint64
|
||||
wantAddress common.Address
|
||||
wantErr error
|
||||
}{
|
||||
"successful deploy": {
|
||||
code: `6060604052600a8060106000396000f360606040526008565b00`,
|
||||
gas: big.NewInt(3000000),
|
||||
gas: 3000000,
|
||||
wantAddress: common.HexToAddress("0x3a220f351252089d385b29beca14e27f204c296a"),
|
||||
},
|
||||
"empty code": {
|
||||
code: ``,
|
||||
gas: big.NewInt(300000),
|
||||
gas: 300000,
|
||||
wantErr: bind.ErrNoCodeAfterDeploy,
|
||||
wantAddress: common.HexToAddress("0x3a220f351252089d385b29beca14e27f204c296a"),
|
||||
},
|
||||
|
@ -18,7 +18,6 @@ package abi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -31,7 +30,18 @@ import (
|
||||
type Event struct {
|
||||
Name string
|
||||
Anonymous bool
|
||||
Inputs []Argument
|
||||
Inputs Arguments
|
||||
}
|
||||
|
||||
func (event Event) String() string {
|
||||
inputs := make([]string, len(event.Inputs))
|
||||
for i, input := range event.Inputs {
|
||||
inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
|
||||
if input.Indexed {
|
||||
inputs[i] = fmt.Sprintf("%v indexed %v", input.Name, input.Type)
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("event %v(%v)", event.Name, strings.Join(inputs, ", "))
|
||||
}
|
||||
|
||||
// Id returns the canonical representation of the event's signature used by the
|
||||
@ -45,93 +55,3 @@ func (e Event) Id() common.Hash {
|
||||
}
|
||||
return common.BytesToHash(crypto.Keccak256([]byte(fmt.Sprintf("%v(%v)", e.Name, strings.Join(types, ",")))))
|
||||
}
|
||||
|
||||
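Id hashes the canonical signature Name(type1,type2,…); that hash is what ends up in Topics[0] of every non-anonymous log and what the generated filtering code matches on. A quick stand-alone check using the familiar ERC20 Transfer signature (the expected constant is the well-known Transfer topic, quoted here for illustration):

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/crypto"
    )

    func main() {
        // Canonical form: event name plus the comma-joined input types.
        id := crypto.Keccak256Hash([]byte("Transfer(address,address,uint256)"))
        fmt.Println(id.Hex())
        // Expected: 0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef
    }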
// unpacks an event return tuple into a struct of corresponding go types
|
||||
//
|
||||
// Unpacking can be done into a struct or a slice/array.
|
||||
func (e Event) tupleUnpack(v interface{}, output []byte) error {
|
||||
// make sure the passed value is a pointer
|
||||
valueOf := reflect.ValueOf(v)
|
||||
if reflect.Ptr != valueOf.Kind() {
|
||||
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
|
||||
}
|
||||
|
||||
var (
|
||||
value = valueOf.Elem()
|
||||
typ = value.Type()
|
||||
)
|
||||
|
||||
if value.Kind() != reflect.Struct {
|
||||
return fmt.Errorf("abi: cannot unmarshal tuple in to %v", typ)
|
||||
}
|
||||
|
||||
j := 0
|
||||
for i := 0; i < len(e.Inputs); i++ {
|
||||
input := e.Inputs[i]
|
||||
if input.Indexed {
|
||||
// can't read, continue
|
||||
continue
|
||||
} else if input.Type.T == ArrayTy {
|
||||
// need to move this up because they read sequentially
|
||||
j += input.Type.Size
|
||||
}
|
||||
marshalledValue, err := toGoType((i+j)*32, input.Type, output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reflectValue := reflect.ValueOf(marshalledValue)
|
||||
|
||||
switch value.Kind() {
|
||||
case reflect.Struct:
|
||||
for j := 0; j < typ.NumField(); j++ {
|
||||
field := typ.Field(j)
|
||||
// TODO read tags: `abi:"fieldName"`
|
||||
if field.Name == strings.ToUpper(e.Inputs[i].Name[:1])+e.Inputs[i].Name[1:] {
|
||||
if err := set(value.Field(j), reflectValue, e.Inputs[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Slice, reflect.Array:
|
||||
if value.Len() < i {
|
||||
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(e.Inputs), value.Len())
|
||||
}
|
||||
v := value.Index(i)
|
||||
if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
|
||||
return fmt.Errorf("abi: cannot unmarshal %v in to %v", v.Type(), reflectValue.Type())
|
||||
}
|
||||
reflectValue := reflect.ValueOf(marshalledValue)
|
||||
if err := set(v.Elem(), reflectValue, e.Inputs[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("abi: cannot unmarshal tuple in to %v", typ)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e Event) isTupleReturn() bool { return len(e.Inputs) > 1 }
|
||||
|
||||
func (e Event) singleUnpack(v interface{}, output []byte) error {
|
||||
// make sure the passed value is a pointer
|
||||
valueOf := reflect.ValueOf(v)
|
||||
if reflect.Ptr != valueOf.Kind() {
|
||||
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
|
||||
}
|
||||
|
||||
if e.Inputs[0].Indexed {
|
||||
return fmt.Errorf("abi: attempting to unpack indexed variable into element.")
|
||||
}
|
||||
|
||||
value := valueOf.Elem()
|
||||
|
||||
marshalledValue, err := toGoType(0, e.Inputs[0].Type, output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := set(value, reflect.ValueOf(marshalledValue), e.Inputs[0]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -17,13 +17,53 @@
|
||||
package abi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var jsonEventTransfer = []byte(`{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{
|
||||
"indexed": true, "name": "from", "type": "address"
|
||||
}, {
|
||||
"indexed": true, "name": "to", "type": "address"
|
||||
}, {
|
||||
"indexed": false, "name": "value", "type": "uint256"
|
||||
}],
|
||||
"name": "Transfer",
|
||||
"type": "event"
|
||||
}`)
|
||||
|
||||
var jsonEventPledge = []byte(`{
|
||||
"anonymous": false,
|
||||
"inputs": [{
|
||||
"indexed": false, "name": "who", "type": "address"
|
||||
}, {
|
||||
"indexed": false, "name": "wad", "type": "uint128"
|
||||
}, {
|
||||
"indexed": false, "name": "currency", "type": "bytes3"
|
||||
}],
|
||||
"name": "Pledge",
|
||||
"type": "event"
|
||||
}`)
|
||||
|
||||
// 1000000
|
||||
var transferData1 = "00000000000000000000000000000000000000000000000000000000000f4240"
|
||||
|
||||
// "0x00Ce0d46d924CC8437c806721496599FC3FFA268", 2218516807680, "usd"
|
||||
var pledgeData1 = "00000000000000000000000000ce0d46d924cc8437c806721496599fc3ffa2680000000000000000000000000000000000000000000000000000020489e800007573640000000000000000000000000000000000000000000000000000000000"
|
||||
|
||||
func TestEventId(t *testing.T) {
|
||||
var table = []struct {
|
||||
definition string
|
||||
@ -54,3 +94,223 @@ func TestEventId(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestEventMultiValueWithArrayUnpack verifies that array fields will be counted after parsing array.
|
||||
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
|
||||
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
|
||||
type testStruct struct {
|
||||
Value1 [2]uint8
|
||||
Value2 uint8
|
||||
}
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
require.NoError(t, err)
|
||||
var b bytes.Buffer
|
||||
var i uint8 = 1
|
||||
for ; i <= 3; i++ {
|
||||
b.Write(packNum(reflect.ValueOf(i)))
|
||||
}
|
||||
var rst testStruct
|
||||
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
|
||||
require.Equal(t, [2]uint8{1, 2}, rst.Value1)
|
||||
require.Equal(t, uint8(3), rst.Value2)
|
||||
}
|
||||
|
||||
func TestEventTupleUnpack(t *testing.T) {
|
||||
|
||||
type EventTransfer struct {
|
||||
Value *big.Int
|
||||
}
|
||||
|
||||
type EventPledge struct {
|
||||
Who common.Address
|
||||
Wad *big.Int
|
||||
Currency [3]byte
|
||||
}
|
||||
|
||||
type BadEventPledge struct {
|
||||
Who string
|
||||
Wad int
|
||||
Currency [3]byte
|
||||
}
|
||||
|
||||
bigint := new(big.Int)
|
||||
bigintExpected := big.NewInt(1000000)
|
||||
bigintExpected2 := big.NewInt(2218516807680)
|
||||
addr := common.HexToAddress("0x00Ce0d46d924CC8437c806721496599FC3FFA268")
|
||||
var testCases = []struct {
|
||||
data string
|
||||
dest interface{}
|
||||
expected interface{}
|
||||
jsonLog []byte
|
||||
error string
|
||||
name string
|
||||
}{{
|
||||
transferData1,
|
||||
&EventTransfer{},
|
||||
&EventTransfer{Value: bigintExpected},
|
||||
jsonEventTransfer,
|
||||
"",
|
||||
"Can unpack ERC20 Transfer event into structure",
|
||||
}, {
|
||||
transferData1,
|
||||
&[]interface{}{&bigint},
|
||||
&[]interface{}{&bigintExpected},
|
||||
jsonEventTransfer,
|
||||
"",
|
||||
"Can unpack ERC20 Transfer event into slice",
|
||||
}, {
|
||||
pledgeData1,
|
||||
&EventPledge{},
|
||||
&EventPledge{
|
||||
addr,
|
||||
bigintExpected2,
|
||||
[3]byte{'u', 's', 'd'}},
|
||||
jsonEventPledge,
|
||||
"",
|
||||
"Can unpack Pledge event into structure",
|
||||
}, {
|
||||
pledgeData1,
|
||||
&[]interface{}{&common.Address{}, &bigint, &[3]byte{}},
|
||||
&[]interface{}{
|
||||
&addr,
|
||||
&bigintExpected2,
|
||||
&[3]byte{'u', 's', 'd'}},
|
||||
jsonEventPledge,
|
||||
"",
|
||||
"Can unpack Pledge event into slice",
|
||||
}, {
|
||||
pledgeData1,
|
||||
&[3]interface{}{&common.Address{}, &bigint, &[3]byte{}},
|
||||
&[3]interface{}{
|
||||
&addr,
|
||||
&bigintExpected2,
|
||||
&[3]byte{'u', 's', 'd'}},
|
||||
jsonEventPledge,
|
||||
"",
|
||||
"Can unpack Pledge event into an array",
|
||||
}, {
|
||||
pledgeData1,
|
||||
&[]interface{}{new(int), 0, 0},
|
||||
&[]interface{}{},
|
||||
jsonEventPledge,
|
||||
"abi: cannot unmarshal common.Address in to int",
|
||||
"Can not unpack Pledge event into slice with wrong types",
|
||||
}, {
|
||||
pledgeData1,
|
||||
&BadEventPledge{},
|
||||
&BadEventPledge{},
|
||||
jsonEventPledge,
|
||||
"abi: cannot unmarshal common.Address in to string",
|
||||
"Can not unpack Pledge event into struct with wrong filed types",
|
||||
}, {
|
||||
pledgeData1,
|
||||
&[]interface{}{common.Address{}, new(big.Int)},
|
||||
&[]interface{}{},
|
||||
jsonEventPledge,
|
||||
"abi: insufficient number of elements in the list/array for unpack, want 3, got 2",
|
||||
"Can not unpack Pledge event into too short slice",
|
||||
}, {
|
||||
pledgeData1,
|
||||
new(map[string]interface{}),
|
||||
&[]interface{}{},
|
||||
jsonEventPledge,
|
||||
"abi: cannot unmarshal tuple into map[string]interface {}",
|
||||
"Can not unpack Pledge event into map",
|
||||
}}
|
||||
|
||||
for _, tc := range testCases {
|
||||
assert := assert.New(t)
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := unpackTestEventData(tc.dest, tc.data, tc.jsonLog, assert)
|
||||
if tc.error == "" {
|
||||
assert.Nil(err, "Should be able to unpack event data.")
|
||||
assert.Equal(tc.expected, tc.dest, tc.name)
|
||||
} else {
|
||||
assert.EqualError(err, tc.error)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func unpackTestEventData(dest interface{}, hexData string, jsonEvent []byte, assert *assert.Assertions) error {
|
||||
data, err := hex.DecodeString(hexData)
|
||||
assert.NoError(err, "Hex data should be a correct hex-string")
|
||||
var e Event
|
||||
assert.NoError(json.Unmarshal(jsonEvent, &e), "Should be able to unmarshal event ABI")
|
||||
a := ABI{Events: map[string]Event{"e": e}}
|
||||
return a.Unpack(dest, "e", data)
|
||||
}
|
||||
|
||||
/*
|
||||
Taken from
|
||||
https://github.com/ethereum/go-ethereum/pull/15568
|
||||
*/
|
||||
|
||||
type testResult struct {
|
||||
Values [2]*big.Int
|
||||
Value1 *big.Int
|
||||
Value2 *big.Int
|
||||
}
|
||||
|
||||
type testCase struct {
|
||||
definition string
|
||||
want testResult
|
||||
}
|
||||
|
||||
func (tc testCase) encoded(intType, arrayType Type) []byte {
|
||||
var b bytes.Buffer
|
||||
if tc.want.Value1 != nil {
|
||||
val, _ := intType.pack(reflect.ValueOf(tc.want.Value1))
|
||||
b.Write(val)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(tc.want.Values, [2]*big.Int{nil, nil}) {
|
||||
val, _ := arrayType.pack(reflect.ValueOf(tc.want.Values))
|
||||
b.Write(val)
|
||||
}
|
||||
if tc.want.Value2 != nil {
|
||||
val, _ := intType.pack(reflect.ValueOf(tc.want.Value2))
|
||||
b.Write(val)
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
// TestEventUnpackIndexed verifies that indexed field will be skipped by event decoder.
|
||||
func TestEventUnpackIndexed(t *testing.T) {
|
||||
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
|
||||
type testStruct struct {
|
||||
Value1 uint8
|
||||
Value2 uint8
|
||||
}
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
require.NoError(t, err)
|
||||
var b bytes.Buffer
|
||||
b.Write(packNum(reflect.ValueOf(uint8(8))))
|
||||
var rst testStruct
|
||||
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
|
||||
require.Equal(t, uint8(0), rst.Value1)
|
||||
require.Equal(t, uint8(8), rst.Value2)
|
||||
}
|
||||
|
||||
// TestEventIndexedWithArrayUnpack verifies that the decoder will not overflow when a static array is an indexed input.
|
||||
func TestEventIndexedWithArrayUnpack(t *testing.T) {
|
||||
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"string"}]}]`
|
||||
type testStruct struct {
|
||||
Value1 [2]uint8
|
||||
Value2 string
|
||||
}
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
require.NoError(t, err)
|
||||
var b bytes.Buffer
|
||||
stringOut := "abc"
|
||||
// number of fields that will be encoded * 32
|
||||
b.Write(packNum(reflect.ValueOf(32)))
|
||||
b.Write(packNum(reflect.ValueOf(len(stringOut))))
|
||||
b.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
|
||||
var rst testStruct
|
||||
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
|
||||
require.Equal(t, [2]uint8{0, 0}, rst.Value1)
|
||||
require.Equal(t, stringOut, rst.Value2)
|
||||
}
|
||||
|
@ -18,13 +18,12 @@ package abi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
// Callable method given a `Name` and whether the method is a constant.
|
||||
// Method represents a callable given a `Name` and whether the method is a constant.
|
||||
// If the method is `Const` no transaction needs to be created for this
|
||||
// particular Method call. It can easily be simulated using a local VM.
|
||||
// For example a `Balance()` method only needs to retrieve something
|
||||
@ -35,125 +34,8 @@ import (
|
||||
type Method struct {
|
||||
Name string
|
||||
Const bool
|
||||
Inputs []Argument
|
||||
Outputs []Argument
|
||||
}
|
||||
|
||||
func (method Method) pack(args ...interface{}) ([]byte, error) {
|
||||
// Make sure arguments match up and pack them
|
||||
if len(args) != len(method.Inputs) {
|
||||
return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(method.Inputs))
|
||||
}
|
||||
// variable input is the output appended at the end of packed
|
||||
// output. This is used for strings and bytes types input.
|
||||
var variableInput []byte
|
||||
|
||||
var ret []byte
|
||||
for i, a := range args {
|
||||
input := method.Inputs[i]
|
||||
// pack the input
|
||||
packed, err := input.Type.pack(reflect.ValueOf(a))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("`%s` %v", method.Name, err)
|
||||
}
|
||||
|
||||
// check for a slice type (string, bytes, slice)
|
||||
if input.Type.requiresLengthPrefix() {
|
||||
// calculate the offset
|
||||
offset := len(method.Inputs)*32 + len(variableInput)
|
||||
// set the offset
|
||||
ret = append(ret, packNum(reflect.ValueOf(offset))...)
|
||||
// Append the packed output to the variable input. The variable input
|
||||
// will be appended at the end of the input.
|
||||
variableInput = append(variableInput, packed...)
|
||||
} else {
|
||||
// append the packed value to the input
|
||||
ret = append(ret, packed...)
|
||||
}
|
||||
}
|
||||
// append the variable input at the end of the packed input
|
||||
ret = append(ret, variableInput...)
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// unpacks a method return tuple into a struct of corresponding go types
|
||||
//
|
||||
// Unpacking can be done into a struct or a slice/array.
|
||||
func (method Method) tupleUnpack(v interface{}, output []byte) error {
|
||||
// make sure the passed value is a pointer
|
||||
valueOf := reflect.ValueOf(v)
|
||||
if reflect.Ptr != valueOf.Kind() {
|
||||
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
|
||||
}
|
||||
|
||||
var (
|
||||
value = valueOf.Elem()
|
||||
typ = value.Type()
|
||||
)
|
||||
|
||||
j := 0
|
||||
for i := 0; i < len(method.Outputs); i++ {
|
||||
toUnpack := method.Outputs[i]
|
||||
if toUnpack.Type.T == ArrayTy {
|
||||
// need to move this up because they read sequentially
|
||||
j += toUnpack.Type.Size
|
||||
}
|
||||
marshalledValue, err := toGoType((i+j)*32, toUnpack.Type, output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reflectValue := reflect.ValueOf(marshalledValue)
|
||||
|
||||
switch value.Kind() {
|
||||
case reflect.Struct:
|
||||
for j := 0; j < typ.NumField(); j++ {
|
||||
field := typ.Field(j)
|
||||
// TODO read tags: `abi:"fieldName"`
|
||||
if field.Name == strings.ToUpper(method.Outputs[i].Name[:1])+method.Outputs[i].Name[1:] {
|
||||
if err := set(value.Field(j), reflectValue, method.Outputs[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Slice, reflect.Array:
|
||||
if value.Len() < i {
|
||||
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(method.Outputs), value.Len())
|
||||
}
|
||||
v := value.Index(i)
|
||||
if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
|
||||
return fmt.Errorf("abi: cannot unmarshal %v in to %v", v.Type(), reflectValue.Type())
|
||||
}
|
||||
reflectValue := reflect.ValueOf(marshalledValue)
|
||||
if err := set(v.Elem(), reflectValue, method.Outputs[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("abi: cannot unmarshal tuple in to %v", typ)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (method Method) isTupleReturn() bool { return len(method.Outputs) > 1 }
|
||||
|
||||
func (method Method) singleUnpack(v interface{}, output []byte) error {
|
||||
// make sure the passed value is a pointer
|
||||
valueOf := reflect.ValueOf(v)
|
||||
if reflect.Ptr != valueOf.Kind() {
|
||||
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
|
||||
}
|
||||
|
||||
value := valueOf.Elem()
|
||||
|
||||
marshalledValue, err := toGoType(0, method.Outputs[0].Type, output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := set(value, reflect.ValueOf(marshalledValue), method.Outputs[0]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
Inputs Arguments
|
||||
Outputs Arguments
|
||||
}
|
||||
|
||||
// Sig returns the methods string signature according to the ABI spec.
|
||||
@ -163,35 +45,35 @@ func (method Method) singleUnpack(v interface{}, output []byte) error {
|
||||
// function foo(uint32 a, int b) = "foo(uint32,int256)"
|
||||
//
|
||||
// Please note that "int" is substitute for its canonical representation "int256"
|
||||
func (m Method) Sig() string {
|
||||
types := make([]string, len(m.Inputs))
|
||||
func (method Method) Sig() string {
|
||||
types := make([]string, len(method.Inputs))
|
||||
i := 0
|
||||
for _, input := range m.Inputs {
|
||||
for _, input := range method.Inputs {
|
||||
types[i] = input.Type.String()
|
||||
i++
|
||||
}
|
||||
return fmt.Sprintf("%v(%v)", m.Name, strings.Join(types, ","))
|
||||
return fmt.Sprintf("%v(%v)", method.Name, strings.Join(types, ","))
|
||||
}
|
||||
|
||||
func (m Method) String() string {
|
||||
inputs := make([]string, len(m.Inputs))
|
||||
for i, input := range m.Inputs {
|
||||
func (method Method) String() string {
|
||||
inputs := make([]string, len(method.Inputs))
|
||||
for i, input := range method.Inputs {
|
||||
inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
|
||||
}
|
||||
outputs := make([]string, len(m.Outputs))
|
||||
for i, output := range m.Outputs {
|
||||
outputs := make([]string, len(method.Outputs))
|
||||
for i, output := range method.Outputs {
|
||||
if len(output.Name) > 0 {
|
||||
outputs[i] = fmt.Sprintf("%v ", output.Name)
|
||||
}
|
||||
outputs[i] += output.Type.String()
|
||||
}
|
||||
constant := ""
|
||||
if m.Const {
|
||||
if method.Const {
|
||||
constant = "constant "
|
||||
}
|
||||
return fmt.Sprintf("function %v(%v) %sreturns(%v)", m.Name, strings.Join(inputs, ", "), constant, strings.Join(outputs, ", "))
|
||||
return fmt.Sprintf("function %v(%v) %sreturns(%v)", method.Name, strings.Join(inputs, ", "), constant, strings.Join(outputs, ", "))
|
||||
}
|
||||
|
||||
func (m Method) Id() []byte {
|
||||
return crypto.Keccak256([]byte(m.Sig()))[:4]
|
||||
func (method Method) Id() []byte {
|
||||
return crypto.Keccak256([]byte(method.Sig()))[:4]
|
||||
}
|
||||
|
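An aside, not part of this change: a minimal sketch of how the canonical signature produced by Sig feeds the 4-byte selector returned by Id, reusing the go-ethereum crypto package imported above. The `transfer(address,uint256)` signature is just an illustrative example.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

// selector mirrors Method.Id: keccak256 of the canonical signature, first 4 bytes.
func selector(canonicalSig string) []byte {
	return crypto.Keccak256([]byte(canonicalSig))[:4]
}

func main() {
	// "uint" and "int" must already be expanded to uint256/int256, as Sig does.
	fmt.Printf("%x\n", selector("transfer(address,uint256)")) // a9059cbb
}
```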
@ -48,9 +48,8 @@ func packElement(t Type, reflectValue reflect.Value) []byte {
|
||||
case BoolTy:
|
||||
if reflectValue.Bool() {
|
||||
return math.PaddedBigBytes(common.Big1, 32)
|
||||
} else {
|
||||
return math.PaddedBigBytes(common.Big0, 32)
|
||||
}
|
||||
return math.PaddedBigBytes(common.Big0, 32)
|
||||
case BytesTy:
|
||||
if reflectValue.Kind() == reflect.Array {
|
||||
reflectValue = mustArrayToByteSlice(reflectValue)
|
||||
|
@ -85,3 +85,28 @@ func set(dst, src reflect.Value, output Argument) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// requireAssignable ensures that `dst` is a pointer or an interface so the unpacked value can be assigned to it.
|
||||
func requireAssignable(dst, src reflect.Value) error {
|
||||
if dst.Kind() != reflect.Ptr && dst.Kind() != reflect.Interface {
|
||||
return fmt.Errorf("abi: cannot unmarshal %v into %v", src.Type(), dst.Type())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// requireUnpackKind verifies preconditions for unpacking `args` into `kind`
|
||||
func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
|
||||
args Arguments) error {
|
||||
|
||||
switch k {
|
||||
case reflect.Struct:
|
||||
case reflect.Slice, reflect.Array:
|
||||
if minLen := args.LengthNonIndexed(); v.Len() < minLen {
|
||||
return fmt.Errorf("abi: insufficient number of elements in the list/array for unpack, want %d, got %d",
|
||||
minLen, v.Len())
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("abi: cannot unmarshal tuple into %v", t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -24,6 +24,7 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Type enumerator
|
||||
const (
|
||||
IntTy byte = iota
|
||||
UintTy
|
||||
@ -100,69 +101,66 @@ func NewType(t string) (typ Type, err error) {
|
||||
return Type{}, fmt.Errorf("invalid formatting of array type")
|
||||
}
|
||||
return typ, err
|
||||
} else {
|
||||
// parse the type and size of the abi-type.
|
||||
parsedType := typeRegex.FindAllStringSubmatch(t, -1)[0]
|
||||
// varSize is the size of the variable
|
||||
var varSize int
|
||||
if len(parsedType[3]) > 0 {
|
||||
var err error
|
||||
varSize, err = strconv.Atoi(parsedType[2])
|
||||
if err != nil {
|
||||
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
|
||||
}
|
||||
} else {
|
||||
if parsedType[0] == "uint" || parsedType[0] == "int" {
|
||||
// this should fail because it means that there's something wrong with
|
||||
// the abi type (the compiler should always format it to the size...always)
|
||||
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
|
||||
}
|
||||
}
|
||||
// parse the type and size of the abi-type.
|
||||
parsedType := typeRegex.FindAllStringSubmatch(t, -1)[0]
|
||||
// varSize is the size of the variable
|
||||
var varSize int
|
||||
if len(parsedType[3]) > 0 {
|
||||
var err error
|
||||
varSize, err = strconv.Atoi(parsedType[2])
|
||||
if err != nil {
|
||||
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
|
||||
}
|
||||
// varType is the parsed abi type
|
||||
varType := parsedType[1]
|
||||
|
||||
switch varType {
|
||||
case "int":
|
||||
typ.Kind, typ.Type = reflectIntKindAndType(false, varSize)
|
||||
typ.Size = varSize
|
||||
typ.T = IntTy
|
||||
case "uint":
|
||||
typ.Kind, typ.Type = reflectIntKindAndType(true, varSize)
|
||||
typ.Size = varSize
|
||||
typ.T = UintTy
|
||||
case "bool":
|
||||
typ.Kind = reflect.Bool
|
||||
typ.T = BoolTy
|
||||
typ.Type = reflect.TypeOf(bool(false))
|
||||
case "address":
|
||||
typ.Kind = reflect.Array
|
||||
typ.Type = address_t
|
||||
typ.Size = 20
|
||||
typ.T = AddressTy
|
||||
case "string":
|
||||
typ.Kind = reflect.String
|
||||
typ.Type = reflect.TypeOf("")
|
||||
typ.T = StringTy
|
||||
case "bytes":
|
||||
if varSize == 0 {
|
||||
typ.T = BytesTy
|
||||
typ.Kind = reflect.Slice
|
||||
typ.Type = reflect.SliceOf(reflect.TypeOf(byte(0)))
|
||||
} else {
|
||||
typ.T = FixedBytesTy
|
||||
typ.Kind = reflect.Array
|
||||
typ.Size = varSize
|
||||
typ.Type = reflect.ArrayOf(varSize, reflect.TypeOf(byte(0)))
|
||||
}
|
||||
case "function":
|
||||
typ.Kind = reflect.Array
|
||||
typ.T = FunctionTy
|
||||
typ.Size = 24
|
||||
typ.Type = reflect.ArrayOf(24, reflect.TypeOf(byte(0)))
|
||||
default:
|
||||
} else {
|
||||
if parsedType[0] == "uint" || parsedType[0] == "int" {
|
||||
// this should fail because it means that there's something wrong with
|
||||
// the abi type (the compiler should always format it to the size...always)
|
||||
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
|
||||
}
|
||||
}
|
||||
// varType is the parsed abi type
|
||||
switch varType := parsedType[1]; varType {
|
||||
case "int":
|
||||
typ.Kind, typ.Type = reflectIntKindAndType(false, varSize)
|
||||
typ.Size = varSize
|
||||
typ.T = IntTy
|
||||
case "uint":
|
||||
typ.Kind, typ.Type = reflectIntKindAndType(true, varSize)
|
||||
typ.Size = varSize
|
||||
typ.T = UintTy
|
||||
case "bool":
|
||||
typ.Kind = reflect.Bool
|
||||
typ.T = BoolTy
|
||||
typ.Type = reflect.TypeOf(bool(false))
|
||||
case "address":
|
||||
typ.Kind = reflect.Array
|
||||
typ.Type = address_t
|
||||
typ.Size = 20
|
||||
typ.T = AddressTy
|
||||
case "string":
|
||||
typ.Kind = reflect.String
|
||||
typ.Type = reflect.TypeOf("")
|
||||
typ.T = StringTy
|
||||
case "bytes":
|
||||
if varSize == 0 {
|
||||
typ.T = BytesTy
|
||||
typ.Kind = reflect.Slice
|
||||
typ.Type = reflect.SliceOf(reflect.TypeOf(byte(0)))
|
||||
} else {
|
||||
typ.T = FixedBytesTy
|
||||
typ.Kind = reflect.Array
|
||||
typ.Size = varSize
|
||||
typ.Type = reflect.ArrayOf(varSize, reflect.TypeOf(byte(0)))
|
||||
}
|
||||
case "function":
|
||||
typ.Kind = reflect.Array
|
||||
typ.T = FunctionTy
|
||||
typ.Size = 24
|
||||
typ.Type = reflect.ArrayOf(24, reflect.TypeOf(byte(0)))
|
||||
default:
|
||||
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
@ -25,15 +25,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
// unpacker is a utility interface that enables us to have
|
||||
// abstraction between events and methods and also to properly
|
||||
// "unpack" them; e.g. events use Inputs, methods use Outputs.
|
||||
type unpacker interface {
|
||||
tupleUnpack(v interface{}, output []byte) error
|
||||
singleUnpack(v interface{}, output []byte) error
|
||||
isTupleReturn() bool
|
||||
}
|
||||
|
||||
// reads the integer based on its kind
|
||||
func readInteger(kind reflect.Kind, b []byte) interface{} {
|
||||
switch kind {
|
||||
@ -79,7 +70,7 @@ func readBool(word []byte) (bool, error) {
|
||||
// This enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes)
|
||||
func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
|
||||
if t.T != FunctionTy {
|
||||
return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array.")
|
||||
return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array")
|
||||
}
|
||||
if garbage := binary.BigEndian.Uint64(word[24:32]); garbage != 0 {
|
||||
err = fmt.Errorf("abi: got improperly encoded function type, got %v", word)
|
||||
@ -92,7 +83,7 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
|
||||
// through reflection, creates a fixed array to be read from
|
||||
func readFixedBytes(t Type, word []byte) (interface{}, error) {
|
||||
if t.T != FixedBytesTy {
|
||||
return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array.")
|
||||
return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array")
|
||||
}
|
||||
// convert
|
||||
array := reflect.New(t.Type).Elem()
|
||||
@ -203,14 +194,3 @@ func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err
|
||||
//fmt.Printf("LENGTH PREFIX INFO: \nsize: %v\noffset: %v\nstart: %v\n", length, offset, start)
|
||||
return
|
||||
}
|
||||
|
||||
// checks for proper formatting of byte output
|
||||
func bytesAreProper(output []byte) error {
|
||||
if len(output) == 0 {
|
||||
return fmt.Errorf("abi: unmarshalling empty output")
|
||||
} else if len(output)%32 != 0 {
|
||||
return fmt.Errorf("abi: improperly formatted output")
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
@ -22,10 +22,12 @@ import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type unpackTest struct {
|
||||
@ -257,82 +259,179 @@ var unpackTests = []unpackTest{
|
||||
enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
|
||||
want: [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)},
|
||||
},
|
||||
// struct outputs
|
||||
{
|
||||
def: `[{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
Int1 *big.Int
|
||||
Int2 *big.Int
|
||||
}{big.NewInt(1), big.NewInt(2)},
|
||||
},
|
||||
{
|
||||
def: `[{"name":"int","type":"int256"},{"name":"Int","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
Int1 *big.Int
|
||||
Int2 *big.Int
|
||||
}{},
|
||||
err: "abi: multiple outputs mapping to the same struct field 'Int'",
|
||||
},
|
||||
{
|
||||
def: `[{"name":"int","type":"int256"},{"name":"_int","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
Int1 *big.Int
|
||||
Int2 *big.Int
|
||||
}{},
|
||||
err: "abi: multiple outputs mapping to the same struct field 'Int'",
|
||||
},
|
||||
{
|
||||
def: `[{"name":"Int","type":"int256"},{"name":"_int","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
Int1 *big.Int
|
||||
Int2 *big.Int
|
||||
}{},
|
||||
err: "abi: multiple outputs mapping to the same struct field 'Int'",
|
||||
},
|
||||
{
|
||||
def: `[{"name":"Int","type":"int256"},{"name":"_","type":"int256"}]`,
|
||||
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
|
||||
want: struct {
|
||||
Int1 *big.Int
|
||||
Int2 *big.Int
|
||||
}{},
|
||||
err: "abi: purely underscored output cannot unpack to struct",
|
||||
},
|
||||
}
|
||||
|
||||
func TestUnpack(t *testing.T) {
|
||||
for i, test := range unpackTests {
|
||||
def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
||||
abi, err := JSON(strings.NewReader(def))
|
||||
if err != nil {
|
||||
t.Fatalf("invalid ABI definition %s: %v", def, err)
|
||||
}
|
||||
encb, err := hex.DecodeString(test.enc)
|
||||
if err != nil {
|
||||
t.Fatalf("invalid hex: %s" + test.enc)
|
||||
}
|
||||
outptr := reflect.New(reflect.TypeOf(test.want))
|
||||
err = abi.Unpack(outptr.Interface(), "method", encb)
|
||||
if err := test.checkError(err); err != nil {
|
||||
t.Errorf("test %d (%v) failed: %v", i, test.def, err)
|
||||
continue
|
||||
}
|
||||
out := outptr.Elem().Interface()
|
||||
if !reflect.DeepEqual(test.want, out) {
|
||||
t.Errorf("test %d (%v) failed: expected %v, got %v", i, test.def, test.want, out)
|
||||
}
|
||||
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
||||
def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
||||
abi, err := JSON(strings.NewReader(def))
|
||||
if err != nil {
|
||||
t.Fatalf("invalid ABI definition %s: %v", def, err)
|
||||
}
|
||||
encb, err := hex.DecodeString(test.enc)
|
||||
if err != nil {
|
||||
t.Fatalf("invalid hex: %s" + test.enc)
|
||||
}
|
||||
outptr := reflect.New(reflect.TypeOf(test.want))
|
||||
err = abi.Unpack(outptr.Interface(), "method", encb)
|
||||
if err := test.checkError(err); err != nil {
|
||||
t.Errorf("test %d (%v) failed: %v", i, test.def, err)
|
||||
return
|
||||
}
|
||||
out := outptr.Elem().Interface()
|
||||
if !reflect.DeepEqual(test.want, out) {
|
||||
t.Errorf("test %d (%v) failed: expected %v, got %v", i, test.def, test.want, out)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReturnWithStruct(t *testing.T) {
|
||||
type methodMultiOutput struct {
|
||||
Int *big.Int
|
||||
String string
|
||||
}
|
||||
|
||||
func methodMultiReturn(require *require.Assertions) (ABI, []byte, methodMultiOutput) {
|
||||
const definition = `[
|
||||
{ "name" : "multi", "constant" : false, "outputs": [ { "name": "Int", "type": "uint256" }, { "name": "String", "type": "string" } ] }]`
|
||||
var expected = methodMultiOutput{big.NewInt(1), "hello"}
|
||||
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
require.NoError(err)
|
||||
// using buff to make the code readable
|
||||
buff := new(bytes.Buffer)
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
stringOut := "hello"
|
||||
buff.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
buff.Write(common.RightPadBytes([]byte(expected.String), 32))
|
||||
return abi, buff.Bytes(), expected
|
||||
}
|
||||
|
||||
var inter struct {
|
||||
Int *big.Int
|
||||
String string
|
||||
}
|
||||
err = abi.Unpack(&inter, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if inter.Int == nil || inter.Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", inter.Int)
|
||||
}
|
||||
|
||||
if inter.String != stringOut {
|
||||
t.Error("expected String to be", stringOut, "got", inter.String)
|
||||
}
|
||||
|
||||
var reversed struct {
|
||||
func TestMethodMultiReturn(t *testing.T) {
|
||||
type reversed struct {
|
||||
String string
|
||||
Int *big.Int
|
||||
}
|
||||
|
||||
err = abi.Unpack(&reversed, "multi", buff.Bytes())
|
||||
abi, data, expected := methodMultiReturn(require.New(t))
|
||||
bigint := new(big.Int)
|
||||
var testCases = []struct {
|
||||
dest interface{}
|
||||
expected interface{}
|
||||
error string
|
||||
name string
|
||||
}{{
|
||||
&methodMultiOutput{},
|
||||
&expected,
|
||||
"",
|
||||
"Can unpack into structure",
|
||||
}, {
|
||||
&reversed{},
|
||||
&reversed{expected.String, expected.Int},
|
||||
"",
|
||||
"Can unpack into reversed structure",
|
||||
}, {
|
||||
&[]interface{}{&bigint, new(string)},
|
||||
&[]interface{}{&expected.Int, &expected.String},
|
||||
"",
|
||||
"Can unpack into a slice",
|
||||
}, {
|
||||
&[2]interface{}{&bigint, new(string)},
|
||||
&[2]interface{}{&expected.Int, &expected.String},
|
||||
"",
|
||||
"Can unpack into an array",
|
||||
}, {
|
||||
&[]interface{}{new(int), new(int)},
|
||||
&[]interface{}{&expected.Int, &expected.String},
|
||||
"abi: cannot unmarshal *big.Int in to int",
|
||||
"Can not unpack into a slice with wrong types",
|
||||
}, {
|
||||
&[]interface{}{new(int)},
|
||||
&[]interface{}{},
|
||||
"abi: insufficient number of elements in the list/array for unpack, want 2, got 1",
|
||||
"Can not unpack into a slice with wrong types",
|
||||
}}
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
require := require.New(t)
|
||||
err := abi.Unpack(tc.dest, "multi", data)
|
||||
if tc.error == "" {
|
||||
require.Nil(err, "Should be able to unpack method outputs.")
|
||||
require.Equal(tc.expected, tc.dest)
|
||||
} else {
|
||||
require.EqualError(err, tc.error)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiReturnWithArray(t *testing.T) {
|
||||
const definition = `[{"name" : "multi", "outputs": [{"type": "uint64[3]"}, {"type": "uint64"}]}]`
|
||||
abi, err := JSON(strings.NewReader(definition))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
buff := new(bytes.Buffer)
|
||||
buff.Write(common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000009"))
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000008"))
|
||||
|
||||
if reversed.Int == nil || reversed.Int.Cmp(big.NewInt(1)) != 0 {
|
||||
t.Error("expected Int to be 1 got", reversed.Int)
|
||||
ret1, ret1Exp := new([3]uint64), [3]uint64{9, 9, 9}
|
||||
ret2, ret2Exp := new(uint64), uint64(8)
|
||||
if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if reversed.String != stringOut {
|
||||
t.Error("expected String to be", stringOut, "got", reversed.String)
|
||||
if !reflect.DeepEqual(*ret1, ret1Exp) {
|
||||
t.Error("array result", *ret1, "!= Expected", ret1Exp)
|
||||
}
|
||||
if *ret2 != ret2Exp {
|
||||
t.Error("int result", *ret2, "!= Expected", ret2Exp)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -62,7 +62,7 @@ func NewAuthNeededError(needed string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Error implements the standard error interfacel.
|
||||
// Error implements the standard error interface.
|
||||
func (err *AuthNeededError) Error() string {
|
||||
return fmt.Sprintf("authentication needed: %s", err.Needed)
|
||||
}
|
||||
|
@ -58,6 +58,9 @@ func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error
|
||||
if err != nil {
|
||||
return nil, errors.New("invalid hex in encSeed")
|
||||
}
|
||||
if len(encSeedBytes) < 16 {
|
||||
return nil, errors.New("invalid encSeed, too short")
|
||||
}
|
||||
iv := encSeedBytes[:16]
|
||||
cipherText := encSeedBytes[16:]
|
||||
/*
|
||||
|
@ -2,6 +2,8 @@
|
||||
// https://github.com/trezor/trezor-common/blob/master/protob/messages.proto
|
||||
// dated 28.07.2017, commit dd8ec3231fb5f7992360aff9bdfe30bb58130f4b.
|
||||
|
||||
syntax = "proto2";
|
||||
|
||||
/**
|
||||
* Messages for TREZOR communication
|
||||
*/
|
||||
|
@ -18,7 +18,7 @@
|
||||
// wallets. The wire protocol spec can be found on the SatoshiLabs website:
|
||||
// https://doc.satoshilabs.com/trezor-tech/api-protobuf.html
|
||||
|
||||
//go:generate protoc --go_out=Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor,import_path=trezor:. types.proto messages.proto
|
||||
//go:generate protoc --go_out=import_path=trezor:. types.proto messages.proto
|
||||
|
||||
// Package trezor contains the wire protocol wrapper in Go.
|
||||
package trezor
|
||||
|
@ -2,6 +2,8 @@
|
||||
// https://github.com/trezor/trezor-common/blob/master/protob/types.proto
|
||||
// dated 28.07.2017, commit dd8ec3231fb5f7992360aff9bdfe30bb58130f4b.
|
||||
|
||||
syntax = "proto2";
|
||||
|
||||
/**
|
||||
* Types for TREZOR communication
|
||||
*
|
||||
|
@ -180,7 +180,7 @@ func (w *trezorDriver) trezorSign(derivationPath []uint32, tx *types.Transaction
|
||||
AddressN: derivationPath,
|
||||
Nonce: new(big.Int).SetUint64(tx.Nonce()).Bytes(),
|
||||
GasPrice: tx.GasPrice().Bytes(),
|
||||
GasLimit: tx.Gas().Bytes(),
|
||||
GasLimit: new(big.Int).SetUint64(tx.Gas()).Bytes(),
|
||||
Value: tx.Value().Bytes(),
|
||||
DataLength: &length,
|
||||
}
|
||||
|
@ -23,8 +23,8 @@ environment:
|
||||
install:
|
||||
- git submodule update --init
|
||||
- rmdir C:\go /s /q
|
||||
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.9.windows-%GETH_ARCH%.zip
|
||||
- 7z x go1.9.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.9.2.windows-%GETH_ARCH%.zip
|
||||
- 7z x go1.9.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||
- go version
|
||||
- gcc --version
|
||||
|
||||
|
35
build/ci.go
@ -23,7 +23,7 @@ Usage: go run build/ci.go <command> <command flags/arguments>
|
||||
|
||||
Available commands are:
|
||||
|
||||
install [ -arch architecture ] [ packages... ] -- builds packages and executables
|
||||
install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
|
||||
test [ -coverage ] [ packages... ] -- runs the tests
|
||||
lint -- runs certain pre-selected linters
|
||||
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artefacts
|
||||
@ -173,17 +173,24 @@ func main() {
|
||||
func doInstall(cmdline []string) {
|
||||
var (
|
||||
arch = flag.String("arch", "", "Architecture to cross build for")
|
||||
cc = flag.String("cc", "", "C compiler to cross build with")
|
||||
)
|
||||
flag.CommandLine.Parse(cmdline)
|
||||
env := build.Env()
|
||||
|
||||
// Check Go version. People regularly open issues about compilation
|
||||
// failure with outdated Go. This should save them the trouble.
|
||||
if runtime.Version() < "go1.7" && !strings.Contains(runtime.Version(), "devel") {
|
||||
log.Println("You have Go version", runtime.Version())
|
||||
log.Println("go-ethereum requires at least Go version 1.7 and cannot")
|
||||
log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
|
||||
os.Exit(1)
|
||||
if !strings.Contains(runtime.Version(), "devel") {
|
||||
// Figure out the minor version number since we can't textually compare (1.10 < 1.7)
|
||||
var minor int
|
||||
fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
|
||||
|
||||
if minor < 7 {
|
||||
log.Println("You have Go version", runtime.Version())
|
||||
log.Println("go-ethereum requires at least Go version 1.7 and cannot")
|
||||
log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
// Compile packages given as arguments, or everything if there are no arguments.
|
||||
packages := []string{"./..."}
|
||||
@ -207,7 +214,7 @@ func doInstall(cmdline []string) {
|
||||
}
|
||||
}
|
||||
// Seems we are cross compiling, work around forbidden GOBIN
|
||||
goinstall := goToolArch(*arch, "install", buildFlags(env)...)
|
||||
goinstall := goToolArch(*arch, *cc, "install", buildFlags(env)...)
|
||||
goinstall.Args = append(goinstall.Args, "-v")
|
||||
goinstall.Args = append(goinstall.Args, []string{"-buildmode", "archive"}...)
|
||||
goinstall.Args = append(goinstall.Args, packages...)
|
||||
@ -221,7 +228,7 @@ func doInstall(cmdline []string) {
|
||||
}
|
||||
for name := range pkgs {
|
||||
if name == "main" {
|
||||
gobuild := goToolArch(*arch, "build", buildFlags(env)...)
|
||||
gobuild := goToolArch(*arch, *cc, "build", buildFlags(env)...)
|
||||
gobuild.Args = append(gobuild.Args, "-v")
|
||||
gobuild.Args = append(gobuild.Args, []string{"-o", executablePath(cmd.Name())}...)
|
||||
gobuild.Args = append(gobuild.Args, "."+string(filepath.Separator)+filepath.Join("cmd", cmd.Name()))
|
||||
@ -249,15 +256,18 @@ func buildFlags(env build.Environment) (flags []string) {
|
||||
}
|
||||
|
||||
func goTool(subcmd string, args ...string) *exec.Cmd {
|
||||
return goToolArch(runtime.GOARCH, subcmd, args...)
|
||||
return goToolArch(runtime.GOARCH, os.Getenv("CC"), subcmd, args...)
|
||||
}
|
||||
|
||||
func goToolArch(arch string, subcmd string, args ...string) *exec.Cmd {
|
||||
func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd {
|
||||
cmd := build.GoTool(subcmd, args...)
|
||||
if subcmd == "build" || subcmd == "install" || subcmd == "test" {
|
||||
// Go CGO has a Windows linker error prior to 1.8 (https://github.com/golang/go/issues/8756).
|
||||
// Work around issue by allowing multiple definitions for <1.8 builds.
|
||||
if runtime.GOOS == "windows" && runtime.Version() < "go1.8" {
|
||||
var minor int
|
||||
fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
|
||||
|
||||
if runtime.GOOS == "windows" && minor < 8 {
|
||||
cmd.Args = append(cmd.Args, []string{"-ldflags", "-extldflags -Wl,--allow-multiple-definition"}...)
|
||||
}
|
||||
}
|
||||
@ -268,6 +278,9 @@ func goToolArch(arch string, subcmd string, args ...string) *exec.Cmd {
|
||||
cmd.Env = append(cmd.Env, "CGO_ENABLED=1")
|
||||
cmd.Env = append(cmd.Env, "GOARCH="+arch)
|
||||
}
|
||||
if cc != "" {
|
||||
cmd.Env = append(cmd.Env, "CC="+cc)
|
||||
}
|
||||
for _, e := range os.Environ() {
|
||||
if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
|
||||
continue
|
||||
|
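An aside on the Go version check added to doInstall above, not part of this change: a standalone sketch of the minor-version parsing, since a plain string comparison would order "go1.10" before "go1.7". The real check also skips "devel" toolchains before parsing.

```go
package main

import (
	"fmt"
	"runtime"
	"strings"
)

func main() {
	// Extract the minor number from e.g. "go1.9.2"; Sscanf stops at the first non-digit.
	var minor int
	fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
	fmt.Println("minor:", minor, "new enough:", minor >= 7)
}
```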
@ -21,6 +21,7 @@ import (
|
||||
"crypto/ecdsa"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
@ -96,12 +97,32 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
addr, err := net.ResolveUDPAddr("udp", *listenAddr)
|
||||
if err != nil {
|
||||
utils.Fatalf("-ResolveUDPAddr: %v", err)
|
||||
}
|
||||
conn, err := net.ListenUDP("udp", addr)
|
||||
if err != nil {
|
||||
utils.Fatalf("-ListenUDP: %v", err)
|
||||
}
|
||||
|
||||
realaddr := conn.LocalAddr().(*net.UDPAddr)
|
||||
if natm != nil {
|
||||
if !realaddr.IP.IsLoopback() {
|
||||
go nat.Map(natm, nil, "udp", realaddr.Port, realaddr.Port, "ethereum discovery")
|
||||
}
|
||||
// TODO: react to external IP changes over time.
|
||||
if ext, err := natm.ExternalIP(); err == nil {
|
||||
realaddr = &net.UDPAddr{IP: ext, Port: realaddr.Port}
|
||||
}
|
||||
}
|
||||
|
||||
if *runv5 {
|
||||
if _, err := discv5.ListenUDP(nodeKey, *listenAddr, natm, "", restrictList); err != nil {
|
||||
if _, err := discv5.ListenUDP(nodeKey, conn, realaddr, "", restrictList); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
} else {
|
||||
if _, err := discover.ListenUDP(nodeKey, *listenAddr, natm, "", restrictList); err != nil {
|
||||
if _, err := discover.ListenUDP(nodeKey, conn, realaddr, nil, "", restrictList); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
41
cmd/ethkey/README.md
Normal file
@ -0,0 +1,41 @@
|
||||
ethkey
|
||||
======
|
||||
|
||||
ethkey is a simple command-line tool for working with Ethereum keyfiles.
|
||||
|
||||
|
||||
# Usage
|
||||
|
||||
### `ethkey generate`
|
||||
|
||||
Generate a new keyfile.
|
||||
If you want to use an existing private key for the keyfile, it can be
|
||||
specified by setting `--privatekey` with the location of the file containing the
|
||||
private key.
|
||||
|
||||
|
||||
### `ethkey inspect <keyfile>`
|
||||
|
||||
Print various information about the keyfile.
|
||||
Private key information can be printed by using the `--private` flag;
|
||||
make sure to use this feature with great caution!
|
||||
|
||||
|
||||
### `ethkey sign <keyfile> <message/file>`
|
||||
|
||||
Sign the message with a keyfile.
|
||||
It is possible to refer to a file containing the message.
|
||||
|
||||
|
||||
### `ethkey verify <address> <signature> <message/file>`
|
||||
|
||||
Verify the signature of the message.
|
||||
It is possible to refer to a file containing the message.
|
||||
|
||||
|
||||
## Passphrases
|
||||
|
||||
For every command that uses a keyfile, you will be prompted to provide the
|
||||
passphrase for decrypting the keyfile. To avoid this prompt, it is possible
|
||||
to pass the passphrase by using the `--passwordfile` flag pointing to a file that
|
||||
contains the passphrase.
|
118
cmd/ethkey/generate.go
Normal file
@ -0,0 +1,118 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/pborman/uuid"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
type outputGenerate struct {
|
||||
Address string
|
||||
AddressEIP55 string
|
||||
}
|
||||
|
||||
var commandGenerate = cli.Command{
|
||||
Name: "generate",
|
||||
Usage: "generate new keyfile",
|
||||
ArgsUsage: "[ <keyfile> ]",
|
||||
Description: `
|
||||
Generate a new keyfile.
|
||||
|
||||
If you want to encrypt an existing private key, it can be specified by setting
|
||||
--privatekey with the location of the file containing the private key.
|
||||
`,
|
||||
Flags: []cli.Flag{
|
||||
passphraseFlag,
|
||||
jsonFlag,
|
||||
cli.StringFlag{
|
||||
Name: "privatekey",
|
||||
Usage: "file containing a raw private key to encrypt",
|
||||
},
|
||||
},
|
||||
Action: func(ctx *cli.Context) error {
|
||||
// Check if keyfile path given and make sure it doesn't already exist.
|
||||
keyfilepath := ctx.Args().First()
|
||||
if keyfilepath == "" {
|
||||
keyfilepath = defaultKeyfileName
|
||||
}
|
||||
if _, err := os.Stat(keyfilepath); err == nil {
|
||||
utils.Fatalf("Keyfile already exists at %s.", keyfilepath)
|
||||
} else if !os.IsNotExist(err) {
|
||||
utils.Fatalf("Error checking if keyfile exists: %v", err)
|
||||
}
|
||||
|
||||
var privateKey *ecdsa.PrivateKey
|
||||
var err error
|
||||
if file := ctx.String("privatekey"); file != "" {
|
||||
// Load private key from file.
|
||||
privateKey, err = crypto.LoadECDSA(file)
|
||||
if err != nil {
|
||||
utils.Fatalf("Can't load private key: %v", err)
|
||||
}
|
||||
} else {
|
||||
// If not loaded, generate random.
|
||||
privateKey, err = crypto.GenerateKey()
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to generate random private key: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create the keyfile object with a random UUID.
|
||||
id := uuid.NewRandom()
|
||||
key := &keystore.Key{
|
||||
Id: id,
|
||||
Address: crypto.PubkeyToAddress(privateKey.PublicKey),
|
||||
PrivateKey: privateKey,
|
||||
}
|
||||
|
||||
// Encrypt key with passphrase.
|
||||
passphrase := getPassPhrase(ctx, true)
|
||||
keyjson, err := keystore.EncryptKey(key, passphrase, keystore.StandardScryptN, keystore.StandardScryptP)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error encrypting key: %v", err)
|
||||
}
|
||||
|
||||
// Store the file to disk.
|
||||
if err := os.MkdirAll(filepath.Dir(keyfilepath), 0700); err != nil {
|
||||
utils.Fatalf("Could not create directory %s", filepath.Dir(keyfilepath))
|
||||
}
|
||||
if err := ioutil.WriteFile(keyfilepath, keyjson, 0600); err != nil {
|
||||
utils.Fatalf("Failed to write keyfile to %s: %v", keyfilepath, err)
|
||||
}
|
||||
|
||||
// Output some information.
|
||||
out := outputGenerate{
|
||||
Address: key.Address.Hex(),
|
||||
}
|
||||
if ctx.Bool(jsonFlag.Name) {
|
||||
mustPrintJSON(out)
|
||||
} else {
|
||||
fmt.Println("Address:", out.Address)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
75
cmd/ethkey/inspect.go
Normal file
@ -0,0 +1,75 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
type outputInspect struct {
|
||||
Address string
|
||||
PublicKey string
|
||||
PrivateKey string
|
||||
}
|
||||
|
||||
var commandInspect = cli.Command{
|
||||
Name: "inspect",
|
||||
Usage: "inspect a keyfile",
|
||||
ArgsUsage: "<keyfile>",
|
||||
Description: `
|
||||
Print various information about the keyfile.
|
||||
|
||||
Private key information can be printed by using the --private flag;
|
||||
make sure to use this feature with great caution!`,
|
||||
Flags: []cli.Flag{
|
||||
passphraseFlag,
|
||||
jsonFlag,
|
||||
cli.BoolFlag{
|
||||
Name: "private",
|
||||
Usage: "include the private key in the output",
|
||||
},
|
||||
},
|
||||
Action: func(ctx *cli.Context) error {
|
||||
keyfilepath := ctx.Args().First()
|
||||
|
||||
// Read key from file.
|
||||
keyjson, err := ioutil.ReadFile(keyfilepath)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read the keyfile at '%s': %v", keyfilepath, err)
|
||||
}
|
||||
|
||||
// Decrypt key with passphrase.
|
||||
passphrase := getPassPhrase(ctx, false)
|
||||
key, err := keystore.DecryptKey(keyjson, passphrase)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error decrypting key: %v", err)
|
||||
}
|
||||
|
||||
// Output all relevant information we can retrieve.
|
||||
showPrivate := ctx.Bool("private")
|
||||
out := outputInspect{
|
||||
Address: key.Address.Hex(),
|
||||
PublicKey: hex.EncodeToString(
|
||||
crypto.FromECDSAPub(&key.PrivateKey.PublicKey)),
|
||||
}
|
||||
if showPrivate {
|
||||
out.PrivateKey = hex.EncodeToString(crypto.FromECDSA(key.PrivateKey))
|
||||
}
|
||||
|
||||
if ctx.Bool(jsonFlag.Name) {
|
||||
mustPrintJSON(out)
|
||||
} else {
|
||||
fmt.Println("Address: ", out.Address)
|
||||
fmt.Println("Public key: ", out.PublicKey)
|
||||
if showPrivate {
|
||||
fmt.Println("Private key: ", out.PrivateKey)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
67
cmd/ethkey/main.go
Normal file
@ -0,0 +1,67 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultKeyfileName = "keyfile.json"
|
||||
)
|
||||
|
||||
// Git SHA1 commit hash of the release (set via linker flags)
|
||||
var gitCommit = ""
|
||||
|
||||
var app *cli.App
|
||||
|
||||
func init() {
|
||||
app = utils.NewApp(gitCommit, "an Ethereum key manager")
|
||||
app.Commands = []cli.Command{
|
||||
commandGenerate,
|
||||
commandInspect,
|
||||
commandSignMessage,
|
||||
commandVerifyMessage,
|
||||
}
|
||||
}
|
||||
|
||||
// Commonly used command line flags.
|
||||
var (
|
||||
passphraseFlag = cli.StringFlag{
|
||||
Name: "passwordfile",
|
||||
Usage: "the file that contains the passphrase for the keyfile",
|
||||
}
|
||||
jsonFlag = cli.BoolFlag{
|
||||
Name: "json",
|
||||
Usage: "output JSON instead of human-readable format",
|
||||
}
|
||||
messageFlag = cli.StringFlag{
|
||||
Name: "message",
|
||||
Usage: "the file that contains the message to sign/verify",
|
||||
}
|
||||
)
|
||||
|
||||
func main() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
159
cmd/ethkey/message.go
Normal file
@ -0,0 +1,159 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
type outputSign struct {
|
||||
Signature string
|
||||
}
|
||||
|
||||
var msgfileFlag = cli.StringFlag{
|
||||
Name: "msgfile",
|
||||
Usage: "file containing the message to sign/verify",
|
||||
}
|
||||
|
||||
var commandSignMessage = cli.Command{
|
||||
Name: "signmessage",
|
||||
Usage: "sign a message",
|
||||
ArgsUsage: "<keyfile> <message>",
|
||||
Description: `
|
||||
Sign the message with a keyfile.
|
||||
|
||||
To sign a message contained in a file, use the --msgfile flag.
|
||||
`,
|
||||
Flags: []cli.Flag{
|
||||
passphraseFlag,
|
||||
jsonFlag,
|
||||
msgfileFlag,
|
||||
},
|
||||
Action: func(ctx *cli.Context) error {
|
||||
message := getMessage(ctx, 1)
|
||||
|
||||
// Load the keyfile.
|
||||
keyfilepath := ctx.Args().First()
|
||||
keyjson, err := ioutil.ReadFile(keyfilepath)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read the keyfile at '%s': %v", keyfilepath, err)
|
||||
}
|
||||
|
||||
// Decrypt key with passphrase.
|
||||
passphrase := getPassPhrase(ctx, false)
|
||||
key, err := keystore.DecryptKey(keyjson, passphrase)
|
||||
if err != nil {
|
||||
utils.Fatalf("Error decrypting key: %v", err)
|
||||
}
|
||||
|
||||
signature, err := crypto.Sign(signHash(message), key.PrivateKey)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to sign message: %v", err)
|
||||
}
|
||||
out := outputSign{Signature: hex.EncodeToString(signature)}
|
||||
if ctx.Bool(jsonFlag.Name) {
|
||||
mustPrintJSON(out)
|
||||
} else {
|
||||
fmt.Println("Signature:", out.Signature)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
type outputVerify struct {
|
||||
Success bool
|
||||
RecoveredAddress string
|
||||
RecoveredPublicKey string
|
||||
}
|
||||
|
||||
var commandVerifyMessage = cli.Command{
|
||||
Name: "verifymessage",
|
||||
Usage: "verify the signature of a signed message",
|
||||
ArgsUsage: "<address> <signature> <message>",
|
||||
Description: `
|
||||
Verify the signature of the message.
|
||||
It is possible to refer to a file containing the message.`,
|
||||
Flags: []cli.Flag{
|
||||
jsonFlag,
|
||||
msgfileFlag,
|
||||
},
|
||||
Action: func(ctx *cli.Context) error {
|
||||
addressStr := ctx.Args().First()
|
||||
signatureHex := ctx.Args().Get(1)
|
||||
message := getMessage(ctx, 2)
|
||||
|
||||
if !common.IsHexAddress(addressStr) {
|
||||
utils.Fatalf("Invalid address: %s", addressStr)
|
||||
}
|
||||
address := common.HexToAddress(addressStr)
|
||||
signature, err := hex.DecodeString(signatureHex)
|
||||
if err != nil {
|
||||
utils.Fatalf("Signature encoding is not hexadecimal: %v", err)
|
||||
}
|
||||
|
||||
recoveredPubkey, err := crypto.SigToPub(signHash(message), signature)
|
||||
if err != nil || recoveredPubkey == nil {
|
||||
utils.Fatalf("Signature verification failed: %v", err)
|
||||
}
|
||||
recoveredPubkeyBytes := crypto.FromECDSAPub(recoveredPubkey)
|
||||
recoveredAddress := crypto.PubkeyToAddress(*recoveredPubkey)
|
||||
success := address == recoveredAddress
|
||||
|
||||
out := outputVerify{
|
||||
Success: success,
|
||||
RecoveredPublicKey: hex.EncodeToString(recoveredPubkeyBytes),
|
||||
RecoveredAddress: recoveredAddress.Hex(),
|
||||
}
|
||||
if ctx.Bool(jsonFlag.Name) {
|
||||
mustPrintJSON(out)
|
||||
} else {
|
||||
if out.Success {
|
||||
fmt.Println("Signature verification successful!")
|
||||
} else {
|
||||
fmt.Println("Signature verification failed!")
|
||||
}
|
||||
fmt.Println("Recovered public key:", out.RecoveredPublicKey)
|
||||
fmt.Println("Recovered address:", out.RecoveredAddress)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func getMessage(ctx *cli.Context, msgarg int) []byte {
|
||||
if file := ctx.String("msgfile"); file != "" {
|
||||
if len(ctx.Args()) > msgarg {
|
||||
utils.Fatalf("Can't use --msgfile and message argument at the same time.")
|
||||
}
|
||||
msg, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
utils.Fatalf("Can't read message file: %v", err)
|
||||
}
|
||||
return msg
|
||||
} else if len(ctx.Args()) == msgarg+1 {
|
||||
return []byte(ctx.Args().Get(msgarg))
|
||||
}
|
||||
utils.Fatalf("Invalid number of arguments: want %d, got %d", msgarg+1, len(ctx.Args()))
|
||||
return nil
|
||||
}
|
70
cmd/ethkey/message_test.go
Normal file
@ -0,0 +1,70 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMessageSignVerify(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "ethkey-test")
|
||||
if err != nil {
|
||||
t.Fatal("Can't create temporary directory:", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
keyfile := filepath.Join(tmpdir, "the-keyfile")
|
||||
message := "test message"
|
||||
|
||||
// Create the key.
|
||||
generate := runEthkey(t, "generate", keyfile)
|
||||
generate.Expect(`
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
Repeat passphrase: {{.InputLine "foobar"}}
|
||||
`)
|
||||
_, matches := generate.ExpectRegexp(`Address: (0x[0-9a-fA-F]{40})\n`)
|
||||
address := matches[1]
|
||||
generate.ExpectExit()
|
||||
|
||||
// Sign a message.
|
||||
sign := runEthkey(t, "signmessage", keyfile, message)
|
||||
sign.Expect(`
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
Passphrase: {{.InputLine "foobar"}}
|
||||
`)
|
||||
_, matches = sign.ExpectRegexp(`Signature: ([0-9a-f]+)\n`)
|
||||
signature := matches[1]
|
||||
sign.ExpectExit()
|
||||
|
||||
// Verify the message.
|
||||
verify := runEthkey(t, "verifymessage", address, signature, message)
|
||||
_, matches = verify.ExpectRegexp(`
|
||||
Signature verification successful!
|
||||
Recovered public key: [0-9a-f]+
|
||||
Recovered address: (0x[0-9a-fA-F]{40})
|
||||
`)
|
||||
recovered := matches[1]
|
||||
verify.ExpectExit()
|
||||
|
||||
if recovered != address {
|
||||
t.Error("recovered address doesn't match generated key")
|
||||
}
|
||||
}
|
54
cmd/ethkey/run_test.go
Normal file
@ -0,0 +1,54 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/ethereum/go-ethereum/internal/cmdtest"
|
||||
)
|
||||
|
||||
type testEthkey struct {
|
||||
*cmdtest.TestCmd
|
||||
}
|
||||
|
||||
// spawns ethkey with the given command line args.
|
||||
func runEthkey(t *testing.T, args ...string) *testEthkey {
|
||||
tt := new(testEthkey)
|
||||
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
|
||||
tt.Run("ethkey-test", args...)
|
||||
return tt
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// Run the app if we've been exec'd as "ethkey-test" in runEthkey.
|
||||
reexec.Register("ethkey-test", func() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(0)
|
||||
})
|
||||
// check if we have been reexec'd
|
||||
if reexec.Init() {
|
||||
return
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
83
cmd/ethkey/utils.go
Normal file
@ -0,0 +1,83 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/console"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
// getPassPhrase obtains a passphrase given by the user. It first checks the
|
||||
// --passwordfile command line flag and ultimately prompts the user for a
|
||||
// passphrase.
|
||||
func getPassPhrase(ctx *cli.Context, confirmation bool) string {
|
||||
// Look for the --passwordfile flag.
|
||||
passphraseFile := ctx.String(passphraseFlag.Name)
|
||||
if passphraseFile != "" {
|
||||
content, err := ioutil.ReadFile(passphraseFile)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read passphrase file '%s': %v",
|
||||
passphraseFile, err)
|
||||
}
|
||||
return strings.TrimRight(string(content), "\r\n")
|
||||
}
|
||||
|
||||
// Otherwise prompt the user for the passphrase.
|
||||
passphrase, err := console.Stdin.PromptPassword("Passphrase: ")
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read passphrase: %v", err)
|
||||
}
|
||||
if confirmation {
|
||||
confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read passphrase confirmation: %v", err)
|
||||
}
|
||||
if passphrase != confirm {
|
||||
utils.Fatalf("Passphrases do not match")
|
||||
}
|
||||
}
|
||||
return passphrase
|
||||
}
|
||||
|
||||
// signHash is a helper function that calculates a hash for the given message
|
||||
// that can be safely used to calculate a signature from.
|
||||
//
|
||||
// The hash is calulcated as
|
||||
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
|
||||
//
|
||||
// This gives context to the signed message and prevents signing of transactions.
|
||||
func signHash(data []byte) []byte {
|
||||
msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
|
||||
return crypto.Keccak256([]byte(msg))
|
||||
}
|
||||
|
||||
// mustPrintJSON prints the JSON encoding of the given object and
|
||||
// exits the program with an error message when the marshaling fails.
|
||||
func mustPrintJSON(jsonObject interface{}) {
|
||||
str, err := json.MarshalIndent(jsonObject, "", " ")
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to marshal JSON object: %v", err)
|
||||
}
|
||||
fmt.Println(string(str))
|
||||
}
|
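signHash above applies the "\x19Ethereum Signed Message:\n" prefix before hashing, so a signature over the result can never be replayed as a transaction signature. Below is a minimal sketch of a sign/recover round trip over that prefixed hash, using crypto primitives that already exist in this tree (crypto.GenerateKey, crypto.Sign, crypto.SigToPub, crypto.PubkeyToAddress); the message text and the freshly generated key are illustrative only.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

// signHash mirrors the helper above: hash the length-prefixed message.
func signHash(data []byte) []byte {
	msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
	return crypto.Keccak256([]byte(msg))
}

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	hash := signHash([]byte("hello ethkey"))

	// Sign the prefixed hash and recover the signer address from the signature.
	sig, err := crypto.Sign(hash, key)
	if err != nil {
		panic(err)
	}
	pub, err := crypto.SigToPub(hash, sig)
	if err != nil {
		panic(err)
	}
	fmt.Println("signer:", crypto.PubkeyToAddress(*pub).Hex())
	fmt.Println("expect:", crypto.PubkeyToAddress(key.PublicKey).Hex())
}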
@ -19,6 +19,7 @@ package main
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -35,6 +36,10 @@ func NewJSONLogger(cfg *vm.LogConfig, writer io.Writer) *JSONLogger {
|
||||
return &JSONLogger{json.NewEncoder(writer), cfg}
|
||||
}
|
||||
|
||||
func (l *JSONLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CaptureState outputs state information on the logger.
|
||||
func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
|
||||
log := vm.StructLog{
|
||||
@ -56,6 +61,11 @@ func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cos
|
||||
return l.encoder.Encode(log)
|
||||
}
|
||||
|
||||
// CaptureFault outputs state information on the logger.
|
||||
func (l *JSONLogger) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CaptureEnd is triggered at end of execution.
|
||||
func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error {
|
||||
type endLog struct {
|
||||
|
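The JSONLogger above implements the EVM tracer hooks (CaptureStart, CaptureState, CaptureFault, CaptureEnd) and streams one JSON object per executed opcode to its writer. A rough sketch of wiring it into an EVM run follows; it assumes vm.Config of this vintage exposes Debug and Tracer fields and that core/vm/runtime.Config has an EVMConfig field, which is how cmd/evm uses the logger. The bytecode is an arbitrary toy program.

package main

import (
	"os"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/core/vm/runtime"
)

func main() {
	// Stream structured trace output to stdout, one JSON object per step.
	logger := NewJSONLogger(&vm.LogConfig{}, os.Stdout)

	// PUSH1 0x01, PUSH1 0x02, ADD, STOP: a tiny illustrative program.
	code := []byte{0x60, 0x01, 0x60, 0x02, 0x01, 0x00}

	_, _, err := runtime.Execute(code, nil, &runtime.Config{
		EVMConfig: vm.Config{
			Debug:  true,
			Tracer: logger,
		},
	})
	if err != nil {
		panic(err)
	}
}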
@ -96,7 +96,9 @@ func runCmd(ctx *cli.Context) error {
|
||||
}
|
||||
if ctx.GlobalString(GenesisFlag.Name) != "" {
|
||||
gen := readGenesis(ctx.GlobalString(GenesisFlag.Name))
|
||||
_, statedb = gen.ToBlock()
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
genesis := gen.ToBlock(db)
|
||||
statedb, _ = state.New(genesis.Root(), state.NewDatabase(db))
|
||||
chainConfig = gen.Config
|
||||
} else {
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
|
@ -18,10 +18,10 @@
|
||||
package main
|
||||
|
||||
//go:generate go-bindata -nometadata -o website.go faucet.html
|
||||
//go:generate gofmt -w -s website.go
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/zlib"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
@ -223,7 +223,6 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
|
||||
NoDiscovery: true,
|
||||
DiscoveryV5: true,
|
||||
ListenAddr: fmt.Sprintf(":%d", port),
|
||||
DiscoveryV5Addr: fmt.Sprintf(":%d", port+1),
|
||||
MaxPeers: 25,
|
||||
BootstrapNodesV5: enodes,
|
||||
},
|
||||
@ -474,7 +473,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil))
|
||||
amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil))
|
||||
|
||||
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, big.NewInt(21000), f.price, nil)
|
||||
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil)
|
||||
signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainId)
|
||||
if err != nil {
|
||||
f.lock.Unlock()
|
||||
@ -698,11 +697,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
reader, err := zlib.NewReader(res.Body)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
body, err := ioutil.ReadAll(reader)
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", "", common.Address{}, err
|
||||
}
|
||||
|
@ -1,7 +1,6 @@
|
||||
// Code generated by go-bindata.
|
||||
// Code generated by go-bindata. DO NOT EDIT.
|
||||
// sources:
|
||||
// faucet.html
|
||||
// DO NOT EDIT!
|
||||
|
||||
package main
|
||||
|
||||
@ -92,8 +91,8 @@ func faucetHtml() (*asset, error) {
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func Asset(name string) ([]byte, error) {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[cannonicalName]; ok {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
|
||||
@ -118,8 +117,8 @@ func MustAsset(name string) []byte {
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func AssetInfo(name string) (os.FileInfo, error) {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[cannonicalName]; ok {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
|
||||
@ -159,8 +158,8 @@ var _bindata = map[string]func() (*asset, error){
|
||||
func AssetDir(name string) ([]string, error) {
|
||||
node := _bintree
|
||||
if len(name) != 0 {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
pathList := strings.Split(cannonicalName, "/")
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
pathList := strings.Split(canonicalName, "/")
|
||||
for _, p := range pathList {
|
||||
node = node.Children[p]
|
||||
if node == nil {
|
||||
@ -205,11 +204,7 @@ func RestoreAsset(dir, name string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
||||
}
|
||||
|
||||
// RestoreAssets restores an asset under the given directory recursively
|
||||
@ -230,6 +225,6 @@ func RestoreAssets(dir, name string) error {
|
||||
}
|
||||
|
||||
func _filePath(dir, name string) string {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
|
||||
}
|
||||
|
@ -202,7 +202,7 @@ func importChain(ctx *cli.Context) error {
|
||||
|
||||
if len(ctx.Args()) == 1 {
|
||||
if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
|
||||
utils.Fatalf("Import error: %v", err)
|
||||
log.Error("Import error", "err", err)
|
||||
}
|
||||
} else {
|
||||
for _, arg := range ctx.Args() {
|
||||
@ -211,7 +211,7 @@ func importChain(ctx *cli.Context) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
chain.Stop()
|
||||
fmt.Printf("Import done in %v.\n\n", time.Since(start))
|
||||
|
||||
// Output pre-compaction stats mostly to see the import thrashing
|
||||
|
@ -18,7 +18,6 @@ package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -29,7 +28,6 @@ import (
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/contracts/release"
|
||||
"github.com/ethereum/go-ethereum/dashboard"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
@ -158,7 +156,7 @@ func makeFullNode(ctx *cli.Context) *node.Node {
|
||||
utils.RegisterEthService(stack, &cfg.Eth)
|
||||
|
||||
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
|
||||
utils.RegisterDashboardService(stack, &cfg.Dashboard)
|
||||
utils.RegisterDashboardService(stack, &cfg.Dashboard, gitCommit)
|
||||
}
|
||||
// Whisper must be explicitly enabled by specifying at least 1 whisper flag or in dev mode
|
||||
shhEnabled := enableWhisper(ctx)
|
||||
@ -177,21 +175,6 @@ func makeFullNode(ctx *cli.Context) *node.Node {
|
||||
if cfg.Ethstats.URL != "" {
|
||||
utils.RegisterEthStatsService(stack, cfg.Ethstats.URL)
|
||||
}
|
||||
|
||||
// Add the release oracle service so it boots along with node.
|
||||
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
config := release.Config{
|
||||
Oracle: relOracle,
|
||||
Major: uint32(params.VersionMajor),
|
||||
Minor: uint32(params.VersionMinor),
|
||||
Patch: uint32(params.VersionPatch),
|
||||
}
|
||||
commit, _ := hex.DecodeString(gitCommit)
|
||||
copy(config.Commit[:], commit)
|
||||
return release.NewReleaseService(ctx, config)
|
||||
}); err != nil {
|
||||
utils.Fatalf("Failed to register the Geth release oracle service: %v", err)
|
||||
}
|
||||
return stack
|
||||
}
|
||||
|
||||
|
@ -120,8 +120,12 @@ func remoteConsole(ctx *cli.Context) error {
|
||||
if ctx.GlobalIsSet(utils.DataDirFlag.Name) {
|
||||
path = ctx.GlobalString(utils.DataDirFlag.Name)
|
||||
}
|
||||
if path != "" && ctx.GlobalBool(utils.TestnetFlag.Name) {
|
||||
path = filepath.Join(path, "testnet")
|
||||
if path != "" {
|
||||
if ctx.GlobalBool(utils.TestnetFlag.Name) {
|
||||
path = filepath.Join(path, "testnet")
|
||||
} else if ctx.GlobalBool(utils.RinkebyFlag.Name) {
|
||||
path = filepath.Join(path, "rinkeby")
|
||||
}
|
||||
}
|
||||
endpoint = fmt.Sprintf("%s/geth.ipc", path)
|
||||
}
|
||||
|
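The remote console now also maps --rinkeby onto its own data subdirectory when deriving the default IPC endpoint, alongside the existing --testnet case. Schematically, the derivation behaves like the small helper below; the data directory value is only an example.

package main

import (
	"fmt"
	"path/filepath"
)

// defaultIPC mirrors the endpoint derivation above.
func defaultIPC(datadir string, testnet, rinkeby bool) string {
	path := datadir
	if path != "" {
		if testnet {
			path = filepath.Join(path, "testnet")
		} else if rinkeby {
			path = filepath.Join(path, "rinkeby")
		}
	}
	return fmt.Sprintf("%s/geth.ipc", path)
}

func main() {
	fmt.Println(defaultIPC("/home/user/.ethereum", false, false)) // /home/user/.ethereum/geth.ipc
	fmt.Println(defaultIPC("/home/user/.ethereum", true, false))  // /home/user/.ethereum/testnet/geth.ipc
	fmt.Println(defaultIPC("/home/user/.ethereum", false, true))  // /home/user/.ethereum/rinkeby/geth.ipc
}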
@ -85,10 +85,13 @@ var (
|
||||
utils.FastSyncFlag,
|
||||
utils.LightModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.GCModeFlag,
|
||||
utils.LightServFlag,
|
||||
utils.LightPeersFlag,
|
||||
utils.LightKDFFlag,
|
||||
utils.CacheFlag,
|
||||
utils.CacheDatabaseFlag,
|
||||
utils.CacheGCFlag,
|
||||
utils.TrieCacheGenFlag,
|
||||
utils.ListenPortFlag,
|
||||
utils.MaxPeersFlag,
|
||||
@ -278,9 +281,12 @@ func startNode(ctx *cli.Context, stack *node.Node) {
|
||||
// Start auxiliary services if enabled
|
||||
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) {
|
||||
// Mining only makes sense if a full Ethereum node is running
|
||||
if ctx.GlobalBool(utils.LightModeFlag.Name) || ctx.GlobalString(utils.SyncModeFlag.Name) == "light" {
|
||||
utils.Fatalf("Light clients do not support mining")
|
||||
}
|
||||
var ethereum *eth.Ethereum
|
||||
if err := stack.Service(ðereum); err != nil {
|
||||
utils.Fatalf("ethereum service not running: %v", err)
|
||||
utils.Fatalf("Ethereum service not running: %v", err)
|
||||
}
|
||||
// Use a reduced number of threads if requested
|
||||
if threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name); threads > 0 {
|
||||
|
@ -134,7 +134,6 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with geth. If not, see <http://www.gnu.org/licenses/>.
|
||||
`)
|
||||
along with geth. If not, see <http://www.gnu.org/licenses/>.`)
|
||||
return nil
|
||||
}
|
||||
|
@ -22,10 +22,11 @@ import (
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/internal/debug"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// AppHelpTemplate is the text template for the default, global app help topic.
|
||||
@ -74,6 +75,7 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.GCModeFlag,
|
||||
utils.EthStatsURLFlag,
|
||||
utils.IdentityFlag,
|
||||
utils.LightServFlag,
|
||||
@ -127,6 +129,8 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
Name: "PERFORMANCE TUNING",
|
||||
Flags: []cli.Flag{
|
||||
utils.CacheFlag,
|
||||
utils.CacheDatabaseFlag,
|
||||
utils.CacheGCFlag,
|
||||
utils.TrieCacheGenFlag,
|
||||
},
|
||||
},
|
||||
|
@ -44,7 +44,7 @@ type cppEthereumGenesisSpec struct {
|
||||
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
|
||||
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
|
||||
MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
|
||||
GasLimitBoundDivisor *hexutil.Big `json:"gasLimitBoundDivisor"`
|
||||
GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
|
||||
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
|
||||
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
|
||||
DurationLimit *hexutil.Big `json:"durationLimit"`
|
||||
@ -107,11 +107,11 @@ func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEther
|
||||
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
|
||||
|
||||
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
|
||||
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit.Uint64())
|
||||
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
|
||||
spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxUint64)
|
||||
spec.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
|
||||
spec.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
|
||||
spec.Params.GasLimitBoundDivisor = (*hexutil.Big)(params.GasLimitBoundDivisor)
|
||||
spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
|
||||
spec.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
|
||||
spec.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
|
||||
|
||||
@ -168,26 +168,26 @@ type parityChainSpec struct {
|
||||
Engine struct {
|
||||
Ethash struct {
|
||||
Params struct {
|
||||
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
|
||||
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
|
||||
GasLimitBoundDivisor *hexutil.Big `json:"gasLimitBoundDivisor"`
|
||||
DurationLimit *hexutil.Big `json:"durationLimit"`
|
||||
BlockReward *hexutil.Big `json:"blockReward"`
|
||||
HomesteadTransition uint64 `json:"homesteadTransition"`
|
||||
EIP150Transition uint64 `json:"eip150Transition"`
|
||||
EIP160Transition uint64 `json:"eip160Transition"`
|
||||
EIP161abcTransition uint64 `json:"eip161abcTransition"`
|
||||
EIP161dTransition uint64 `json:"eip161dTransition"`
|
||||
EIP649Reward *hexutil.Big `json:"eip649Reward"`
|
||||
EIP100bTransition uint64 `json:"eip100bTransition"`
|
||||
EIP649Transition uint64 `json:"eip649Transition"`
|
||||
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
|
||||
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
|
||||
GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
|
||||
DurationLimit *hexutil.Big `json:"durationLimit"`
|
||||
BlockReward *hexutil.Big `json:"blockReward"`
|
||||
HomesteadTransition uint64 `json:"homesteadTransition"`
|
||||
EIP150Transition uint64 `json:"eip150Transition"`
|
||||
EIP160Transition uint64 `json:"eip160Transition"`
|
||||
EIP161abcTransition uint64 `json:"eip161abcTransition"`
|
||||
EIP161dTransition uint64 `json:"eip161dTransition"`
|
||||
EIP649Reward *hexutil.Big `json:"eip649Reward"`
|
||||
EIP100bTransition uint64 `json:"eip100bTransition"`
|
||||
EIP649Transition uint64 `json:"eip649Transition"`
|
||||
} `json:"params"`
|
||||
} `json:"Ethash"`
|
||||
} `json:"engine"`
|
||||
|
||||
Params struct {
|
||||
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
|
||||
MinGasLimit *hexutil.Big `json:"minGasLimit"`
|
||||
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
|
||||
NetworkID hexutil.Uint64 `json:"networkID"`
|
||||
MaxCodeSize uint64 `json:"maxCodeSize"`
|
||||
EIP155Transition uint64 `json:"eip155Transition"`
|
||||
@ -270,7 +270,7 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
|
||||
}
|
||||
spec.Engine.Ethash.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
|
||||
spec.Engine.Ethash.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
|
||||
spec.Engine.Ethash.Params.GasLimitBoundDivisor = (*hexutil.Big)(params.GasLimitBoundDivisor)
|
||||
spec.Engine.Ethash.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
|
||||
spec.Engine.Ethash.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
|
||||
spec.Engine.Ethash.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
|
||||
spec.Engine.Ethash.Params.HomesteadTransition = genesis.Config.HomesteadBlock.Uint64()
|
||||
@ -283,7 +283,7 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
|
||||
spec.Engine.Ethash.Params.EIP649Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
|
||||
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
|
||||
spec.Params.MinGasLimit = (*hexutil.Big)(params.MinGasLimit)
|
||||
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
|
||||
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
|
||||
spec.Params.MaxCodeSize = params.MaxCodeSize
|
||||
spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
|
||||
|
@ -39,6 +39,8 @@ ADD genesis.json /genesis.json
|
||||
ADD account.json /account.json
|
||||
ADD account.pass /account.pass
|
||||
|
||||
EXPOSE 8080 30303 30303/udp
|
||||
|
||||
ENTRYPOINT [ \
|
||||
"faucet", "--genesis", "/genesis.json", "--network", "{{.NetworkID}}", "--bootnodes", "{{.Bootnodes}}", "--ethstats", "{{.Ethstats}}", "--ethport", "{{.EthPort}}", \
|
||||
"--faucet.name", "{{.FaucetName}}", "--faucet.amount", "{{.FaucetAmount}}", "--faucet.minutes", "{{.FaucetMinutes}}", "--faucet.tiers", "{{.FaucetTiers}}", \
|
||||
|
@ -361,7 +361,7 @@ func validateEnsAPIs(s string) (err error) {
|
||||
func printConfig(config *bzzapi.Config) string {
|
||||
out, err := tomlSettings.Marshal(&config)
|
||||
if err != nil {
|
||||
return (fmt.Sprintf("Something is not right with the configuration: %v", err))
|
||||
return fmt.Sprintf("Something is not right with the configuration: %v", err)
|
||||
}
|
||||
return string(out)
|
||||
}
|
||||
|
@ -116,7 +116,6 @@ func ImportChain(chain *core.BlockChain, fn string) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
stream := rlp.NewStream(reader, 0)
|
||||
|
||||
// Run the actual import.
|
||||
@ -150,25 +149,34 @@ func ImportChain(chain *core.BlockChain, fn string) error {
|
||||
if checkInterrupt() {
|
||||
return fmt.Errorf("interrupted")
|
||||
}
|
||||
if hasAllBlocks(chain, blocks[:i]) {
|
||||
missing := missingBlocks(chain, blocks[:i])
|
||||
if len(missing) == 0 {
|
||||
log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := chain.InsertChain(blocks[:i]); err != nil {
|
||||
if _, err := chain.InsertChain(missing); err != nil {
|
||||
return fmt.Errorf("invalid block %d: %v", n, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {
|
||||
for _, b := range bs {
|
||||
if !chain.HasBlock(b.Hash(), b.NumberU64()) {
|
||||
return false
|
||||
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
|
||||
head := chain.CurrentBlock()
|
||||
for i, block := range blocks {
|
||||
// If we're behind the chain head, only check block, state is available at head
|
||||
if head.NumberU64() > block.NumberU64() {
|
||||
if !chain.HasBlock(block.Hash(), block.NumberU64()) {
|
||||
return blocks[i:]
|
||||
}
|
||||
continue
|
||||
}
|
||||
// If we're above the chain head, state availability is a must
|
||||
if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
|
||||
return blocks[i:]
|
||||
}
|
||||
}
|
||||
return true
|
||||
return nil
|
||||
}
|
||||
|
||||
func ExportChain(blockchain *core.BlockChain, fn string) error {
|
||||
|
@ -31,6 +31,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/fdlimit"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/consensus/clique"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
@ -96,7 +97,7 @@ func NewApp(gitCommit, usage string) *cli.App {
|
||||
//app.Authors = nil
|
||||
app.Email = ""
|
||||
app.Version = params.Version
|
||||
if gitCommit != "" {
|
||||
if len(gitCommit) >= 8 {
|
||||
app.Version += "-" + gitCommit[:8]
|
||||
}
|
||||
app.Usage = usage
|
||||
@ -169,7 +170,11 @@ var (
|
||||
Usage: `Blockchain sync mode ("fast", "full", or "light")`,
|
||||
Value: &defaultSyncMode,
|
||||
}
|
||||
|
||||
GCModeFlag = cli.StringFlag{
|
||||
Name: "gcmode",
|
||||
Usage: `Blockchain garbage collection mode ("full", "archive")`,
|
||||
Value: "full",
|
||||
}
|
||||
LightServFlag = cli.IntFlag{
|
||||
Name: "lightserv",
|
||||
Usage: "Maximum percentage of time allowed for serving LES requests (0-90)",
|
||||
@ -178,7 +183,7 @@ var (
|
||||
LightPeersFlag = cli.IntFlag{
|
||||
Name: "lightpeers",
|
||||
Usage: "Maximum number of LES client peers",
|
||||
Value: 20,
|
||||
Value: eth.DefaultConfig.LightPeers,
|
||||
}
|
||||
LightKDFFlag = cli.BoolFlag{
|
||||
Name: "lightkdf",
|
||||
@ -292,8 +297,18 @@ var (
|
||||
// Performance tuning settings
|
||||
CacheFlag = cli.IntFlag{
|
||||
Name: "cache",
|
||||
Usage: "Megabytes of memory allocated to internal caching (min 16MB / database forced)",
|
||||
Value: 128,
|
||||
Usage: "Megabytes of memory allocated to internal caching",
|
||||
Value: 1024,
|
||||
}
|
||||
CacheDatabaseFlag = cli.IntFlag{
|
||||
Name: "cache.database",
|
||||
Usage: "Percentage of cache memory allowance to use for database io",
|
||||
Value: 75,
|
||||
}
|
||||
CacheGCFlag = cli.IntFlag{
|
||||
Name: "cache.gc",
|
||||
Usage: "Percentage of cache memory allowance to use for trie pruning",
|
||||
Value: 25,
|
||||
}
|
||||
TrieCacheGenFlag = cli.IntFlag{
|
||||
Name: "trie-cache-gens",
|
||||
@ -313,7 +328,7 @@ var (
|
||||
TargetGasLimitFlag = cli.Uint64Flag{
|
||||
Name: "targetgaslimit",
|
||||
Usage: "Target gas limit sets the artificial target gas floor for the blocks to mine",
|
||||
Value: params.GenesisGasLimit.Uint64(),
|
||||
Value: params.GenesisGasLimit,
|
||||
}
|
||||
EtherbaseFlag = cli.StringFlag{
|
||||
Name: "etherbase",
|
||||
@ -611,7 +626,7 @@ func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) {
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
|
||||
}
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
urls = params.RinkebyV5Bootnodes
|
||||
urls = params.RinkebyBootnodes
|
||||
case cfg.BootstrapNodesV5 != nil:
|
||||
return // already set, don't apply defaults.
|
||||
}
|
||||
@ -635,14 +650,6 @@ func setListenAddress(ctx *cli.Context, cfg *p2p.Config) {
|
||||
}
|
||||
}
|
||||
|
||||
// setDiscoveryV5Address creates a UDP listening address string from set command
|
||||
// line flags for the V5 discovery protocol.
|
||||
func setDiscoveryV5Address(ctx *cli.Context, cfg *p2p.Config) {
|
||||
if ctx.GlobalIsSet(ListenPortFlag.Name) {
|
||||
cfg.DiscoveryV5Addr = fmt.Sprintf(":%d", ctx.GlobalInt(ListenPortFlag.Name)+1)
|
||||
}
|
||||
}
|
||||
|
||||
// setNAT creates a port mapper from command line flags.
|
||||
func setNAT(ctx *cli.Context, cfg *p2p.Config) {
|
||||
if ctx.GlobalIsSet(NATFlag.Name) {
|
||||
@ -721,13 +728,15 @@ func setIPC(ctx *cli.Context, cfg *node.Config) {
|
||||
// makeDatabaseHandles raises the number of allowed file handles per process
|
||||
// for Geth and returns half of the allowance to assign to the database.
|
||||
func makeDatabaseHandles() int {
|
||||
if err := raiseFdLimit(2048); err != nil {
|
||||
Fatalf("Failed to raise file descriptor allowance: %v", err)
|
||||
}
|
||||
limit, err := getFdLimit()
|
||||
limit, err := fdlimit.Current()
|
||||
if err != nil {
|
||||
Fatalf("Failed to retrieve file descriptor allowance: %v", err)
|
||||
}
|
||||
if limit < 2048 {
|
||||
if err := fdlimit.Raise(2048); err != nil {
|
||||
Fatalf("Failed to raise file descriptor allowance: %v", err)
|
||||
}
|
||||
}
|
||||
if limit > 2048 { // cap database file descriptors even if more is available
|
||||
limit = 2048
|
||||
}
|
||||
@ -793,24 +802,43 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
|
||||
setNodeKey(ctx, cfg)
|
||||
setNAT(ctx, cfg)
|
||||
setListenAddress(ctx, cfg)
|
||||
setDiscoveryV5Address(ctx, cfg)
|
||||
setBootstrapNodes(ctx, cfg)
|
||||
setBootstrapNodesV5(ctx, cfg)
|
||||
|
||||
lightClient := ctx.GlobalBool(LightModeFlag.Name) || ctx.GlobalString(SyncModeFlag.Name) == "light"
|
||||
lightServer := ctx.GlobalInt(LightServFlag.Name) != 0
|
||||
lightPeers := ctx.GlobalInt(LightPeersFlag.Name)
|
||||
|
||||
if ctx.GlobalIsSet(MaxPeersFlag.Name) {
|
||||
cfg.MaxPeers = ctx.GlobalInt(MaxPeersFlag.Name)
|
||||
} else {
|
||||
if lightServer {
|
||||
cfg.MaxPeers += lightPeers
|
||||
}
|
||||
if lightClient && ctx.GlobalIsSet(LightPeersFlag.Name) && cfg.MaxPeers < lightPeers {
|
||||
cfg.MaxPeers = lightPeers
|
||||
}
|
||||
}
|
||||
if !(lightClient || lightServer) {
|
||||
lightPeers = 0
|
||||
}
|
||||
ethPeers := cfg.MaxPeers - lightPeers
|
||||
if lightClient {
|
||||
ethPeers = 0
|
||||
}
|
||||
log.Info("Maximum peer count", "ETH", ethPeers, "LES", lightPeers, "total", cfg.MaxPeers)
|
||||
|
||||
if ctx.GlobalIsSet(MaxPendingPeersFlag.Name) {
|
||||
cfg.MaxPendingPeers = ctx.GlobalInt(MaxPendingPeersFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(NoDiscoverFlag.Name) || ctx.GlobalBool(LightModeFlag.Name) {
|
||||
if ctx.GlobalIsSet(NoDiscoverFlag.Name) || lightClient {
|
||||
cfg.NoDiscovery = true
|
||||
}
|
||||
|
||||
// If we're running a light client or server, force enable the v5 peer discovery
// unless it is explicitly disabled with --nodiscover. Note that explicitly specifying
// --v5disc overrides --nodiscover, in which case the latter only disables v4 discovery.
|
||||
forceV5Discovery := (ctx.GlobalBool(LightModeFlag.Name) || ctx.GlobalInt(LightServFlag.Name) > 0) && !ctx.GlobalBool(NoDiscoverFlag.Name)
|
||||
forceV5Discovery := (lightClient || lightServer) && !ctx.GlobalBool(NoDiscoverFlag.Name)
|
||||
if ctx.GlobalIsSet(DiscoveryV5Flag.Name) {
|
||||
cfg.DiscoveryV5 = ctx.GlobalBool(DiscoveryV5Flag.Name)
|
||||
} else if forceV5Discovery {
|
||||
@ -829,7 +857,6 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
|
||||
// --dev mode can't use p2p networking.
|
||||
cfg.MaxPeers = 0
|
||||
cfg.ListenAddr = ":0"
|
||||
cfg.DiscoveryV5Addr = ":0"
|
||||
cfg.NoDiscovery = true
|
||||
cfg.DiscoveryV5 = false
|
||||
}
|
||||
@ -1008,11 +1035,19 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
cfg.NetworkId = ctx.GlobalUint64(NetworkIdFlag.Name)
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(CacheFlag.Name) {
|
||||
cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name)
|
||||
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) {
|
||||
cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
|
||||
}
|
||||
cfg.DatabaseHandles = makeDatabaseHandles()
|
||||
|
||||
if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
|
||||
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
|
||||
}
|
||||
cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive"
|
||||
|
||||
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
|
||||
cfg.TrieCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
|
||||
}
|
||||
if ctx.GlobalIsSet(MinerThreadsFlag.Name) {
|
||||
cfg.MinerThreads = ctx.GlobalInt(MinerThreadsFlag.Name)
|
||||
}
|
||||
@ -1103,9 +1138,9 @@ func RegisterEthService(stack *node.Node, cfg *eth.Config) {
|
||||
}
|
||||
|
||||
// RegisterDashboardService adds a dashboard to the stack.
|
||||
func RegisterDashboardService(stack *node.Node, cfg *dashboard.Config) {
|
||||
func RegisterDashboardService(stack *node.Node, cfg *dashboard.Config, commit string) {
|
||||
stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
return dashboard.New(cfg)
|
||||
return dashboard.New(cfg, commit)
|
||||
})
|
||||
}
|
||||
|
||||
@ -1138,13 +1173,13 @@ func RegisterEthStatsService(stack *node.Node, url string) {
|
||||
// SetupNetwork configures the system for either the main net or some test network.
|
||||
func SetupNetwork(ctx *cli.Context) {
|
||||
// TODO(fjl): move target gas limit into config
|
||||
params.TargetGasLimit = new(big.Int).SetUint64(ctx.GlobalUint64(TargetGasLimitFlag.Name))
|
||||
params.TargetGasLimit = ctx.GlobalUint64(TargetGasLimitFlag.Name)
|
||||
}
|
||||
|
||||
// MakeChainDatabase opens a LevelDB using the flags passed to the client and will hard crash if it fails.
|
||||
func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
|
||||
var (
|
||||
cache = ctx.GlobalInt(CacheFlag.Name)
|
||||
cache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
|
||||
handles = makeDatabaseHandles()
|
||||
)
|
||||
name := "chaindata"
|
||||
@ -1196,8 +1231,19 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
|
||||
})
|
||||
}
|
||||
}
|
||||
if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
|
||||
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
|
||||
}
|
||||
cache := &core.CacheConfig{
|
||||
Disabled: ctx.GlobalString(GCModeFlag.Name) == "archive",
|
||||
TrieNodeLimit: eth.DefaultConfig.TrieCache,
|
||||
TrieTimeLimit: eth.DefaultConfig.TrieTimeout,
|
||||
}
|
||||
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
|
||||
cache.TrieNodeLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
|
||||
}
|
||||
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
|
||||
chain, err = core.NewBlockChain(chainDb, config, engine, vmcfg)
|
||||
chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg)
|
||||
if err != nil {
|
||||
Fatalf("Can't create BlockChain: %v", err)
|
||||
}
|
||||
|
@ -601,7 +601,7 @@ func requestExpiredMessagesLoop() {
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to save symmetric key for mail request: %s", err)
|
||||
}
|
||||
peerID = extractIdFromEnode(*argEnode)
|
||||
peerID = extractIDFromEnode(*argEnode)
|
||||
shh.AllowP2PMessagesFromPeer(peerID)
|
||||
|
||||
for {
|
||||
@ -652,7 +652,7 @@ func requestExpiredMessagesLoop() {
|
||||
}
|
||||
}
|
||||
|
||||
func extractIdFromEnode(s string) []byte {
|
||||
func extractIDFromEnode(s string) []byte {
|
||||
n, err := discover.ParseNode(s)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to parse enode: %s", err)
|
||||
|
@ -40,7 +40,7 @@ func fastXORBytes(dst, a, b []byte) int {
|
||||
dw[i] = aw[i] ^ bw[i]
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
for i := n - n%wordSize; i < n; i++ {
|
||||
dst[i] = a[i] ^ b[i]
|
||||
}
|
||||
return n
|
||||
@ -84,7 +84,7 @@ func fastANDBytes(dst, a, b []byte) int {
|
||||
dw[i] = aw[i] & bw[i]
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
for i := n - n%wordSize; i < n; i++ {
|
||||
dst[i] = a[i] & b[i]
|
||||
}
|
||||
return n
|
||||
@ -128,7 +128,7 @@ func fastORBytes(dst, a, b []byte) int {
|
||||
dw[i] = aw[i] | bw[i]
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
for i := n - n%wordSize; i < n; i++ {
|
||||
dst[i] = a[i] | b[i]
|
||||
}
|
||||
return n
|
||||
@ -168,7 +168,7 @@ func fastTestBytes(p []byte) bool {
|
||||
}
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
for i := n - n%wordSize; i < n; i++ {
|
||||
if p[i] != 0 {
|
||||
return true
|
||||
}
|
||||
|
@ -16,7 +16,7 @@
|
||||
|
||||
// +build freebsd
|
||||
|
||||
package utils
|
||||
package fdlimit
|
||||
|
||||
import "syscall"
|
||||
|
||||
@ -24,9 +24,9 @@ import "syscall"
|
||||
// but Rlimit fields have type int64 on FreeBSD so it needs
|
||||
// an extra conversion.
|
||||
|
||||
// raiseFdLimit tries to maximize the file descriptor allowance of this process
|
||||
// Raise tries to maximize the file descriptor allowance of this process
|
||||
// to the maximum hard-limit allowed by the OS.
|
||||
func raiseFdLimit(max uint64) error {
|
||||
func Raise(max uint64) error {
|
||||
// Get the current limit
|
||||
var limit syscall.Rlimit
|
||||
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
@ -43,12 +43,22 @@ func raiseFdLimit(max uint64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// getFdLimit retrieves the number of file descriptors allowed to be opened by this
|
||||
// Current retrieves the number of file descriptors allowed to be opened by this
|
||||
// process.
|
||||
func getFdLimit() (int, error) {
|
||||
func Current() (int, error) {
|
||||
var limit syscall.Rlimit
|
||||
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int(limit.Cur), nil
|
||||
}
|
||||
|
||||
// Maximum retrieves the maximum number of file descriptors this process is
|
||||
// allowed to request for itself.
|
||||
func Maximum() (int, error) {
|
||||
var limit syscall.Rlimit
|
||||
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int(limit.Max), nil
|
||||
}
|
@ -14,22 +14,32 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package utils
|
||||
package fdlimit
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestFileDescriptorLimits simply tests whether the file descriptor allowance
|
||||
// per this process can be retrieved.
|
||||
func TestFileDescriptorLimits(t *testing.T) {
|
||||
target := 4096
|
||||
hardlimit, err := Maximum()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hardlimit < target {
|
||||
t.Skip(fmt.Sprintf("system limit is less than desired test target: %d < %d", hardlimit, target))
|
||||
}
|
||||
|
||||
if limit, err := getFdLimit(); err != nil || limit <= 0 {
|
||||
if limit, err := Current(); err != nil || limit <= 0 {
|
||||
t.Fatalf("failed to retrieve file descriptor limit (%d): %v", limit, err)
|
||||
}
|
||||
if err := raiseFdLimit(uint64(target)); err != nil {
|
||||
if err := Raise(uint64(target)); err != nil {
|
||||
t.Fatalf("failed to raise file allowance")
|
||||
}
|
||||
if limit, err := getFdLimit(); err != nil || limit < target {
|
||||
if limit, err := Current(); err != nil || limit < target {
|
||||
t.Fatalf("failed to retrieve raised descriptor limit (have %v, want %v): %v", limit, target, err)
|
||||
}
|
||||
}
|
@ -16,13 +16,13 @@
|
||||
|
||||
// +build linux darwin netbsd openbsd solaris
|
||||
|
||||
package utils
|
||||
package fdlimit
|
||||
|
||||
import "syscall"
|
||||
|
||||
// raiseFdLimit tries to maximize the file descriptor allowance of this process
|
||||
// Raise tries to maximize the file descriptor allowance of this process
|
||||
// to the maximum hard-limit allowed by the OS.
|
||||
func raiseFdLimit(max uint64) error {
|
||||
func Raise(max uint64) error {
|
||||
// Get the current limit
|
||||
var limit syscall.Rlimit
|
||||
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
@ -39,12 +39,22 @@ func raiseFdLimit(max uint64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// getFdLimit retrieves the number of file descriptors allowed to be opened by this
|
||||
// Current retrieves the number of file descriptors allowed to be opened by this
|
||||
// process.
|
||||
func getFdLimit() (int, error) {
|
||||
func Current() (int, error) {
|
||||
var limit syscall.Rlimit
|
||||
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int(limit.Cur), nil
|
||||
}
|
||||
|
||||
// Maximum retrieves the maximum number of file descriptors this process is
|
||||
// allowed to request for itself.
|
||||
func Maximum() (int, error) {
|
||||
var limit syscall.Rlimit
|
||||
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int(limit.Max), nil
|
||||
}
|
@ -14,13 +14,13 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package utils
|
||||
package fdlimit
|
||||
|
||||
import "errors"
|
||||
|
||||
// raiseFdLimit tries to maximize the file descriptor allowance of this process
|
||||
// Raise tries to maximize the file descriptor allowance of this process
|
||||
// to the maximum hard-limit allowed by the OS.
|
||||
func raiseFdLimit(max uint64) error {
|
||||
func Raise(max uint64) error {
|
||||
// This method is NOP by design:
|
||||
// * Linux/Darwin counterparts need to manually increase per process limits
|
||||
// * On Windows Go uses the CreateFile API, which is limited to 16K files, non
|
||||
@ -33,9 +33,15 @@ func raiseFdLimit(max uint64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// getFdLimit retrieves the number of file descriptors allowed to be opened by this
|
||||
// Current retrieves the number of file descriptors allowed to be opened by this
|
||||
// process.
|
||||
func getFdLimit() (int, error) {
|
||||
// Please see raiseFdLimit for the reason why we use hard coded 16K as the limit
|
||||
func Current() (int, error) {
|
||||
// Please see Raise for the reason why we use hard coded 16K as the limit
|
||||
return 16384, nil
|
||||
}
|
||||
|
||||
// Maximum retrieves the maximum number of file descriptors this process is
|
||||
// allowed to request for itself.
|
||||
func Maximum() (int, error) {
|
||||
return Current()
|
||||
}
|
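The raiseFdLimit/getFdLimit helpers now live in a dedicated fdlimit package with exported Raise, Current and Maximum functions. A minimal sketch of the usage pattern that makeDatabaseHandles follows, assuming the package path common/fdlimit as in the import added earlier in this diff:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common/fdlimit"
)

func main() {
	limit, err := fdlimit.Current()
	if err != nil {
		log.Fatalf("cannot read fd limit: %v", err)
	}
	// Only ask the OS for more descriptors if the soft limit is too low.
	if limit < 2048 {
		if err := fdlimit.Raise(2048); err != nil {
			log.Fatalf("cannot raise fd limit: %v", err)
		}
	}
	max, _ := fdlimit.Maximum()
	fmt.Println("soft limit:", limit, "hard limit:", max)
}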
@ -20,18 +20,29 @@ import (
	"fmt"
)

// StorageSize is a wrapper around a float value that supports user friendly
// formatting.
type StorageSize float64

func (self StorageSize) String() string {
	if self > 1000000 {
		return fmt.Sprintf("%.2f mB", self/1000000)
	} else if self > 1000 {
		return fmt.Sprintf("%.2f kB", self/1000)
// String implements the stringer interface.
func (s StorageSize) String() string {
	if s > 1000000 {
		return fmt.Sprintf("%.2f mB", s/1000000)
	} else if s > 1000 {
		return fmt.Sprintf("%.2f kB", s/1000)
	} else {
		return fmt.Sprintf("%.2f B", self)
		return fmt.Sprintf("%.2f B", s)
	}
}

func (self StorageSize) Int64() int64 {
	return int64(self)
// TerminalString implements log.TerminalStringer, formatting a string for console
// output during logging.
func (s StorageSize) TerminalString() string {
	if s > 1000000 {
		return fmt.Sprintf("%.2fmB", s/1000000)
	} else if s > 1000 {
		return fmt.Sprintf("%.2fkB", s/1000)
	} else {
		return fmt.Sprintf("%.2fB", s)
	}
}
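StorageSize loses its Int64 helper and gains TerminalString, which drops the space before the unit for compact log output. A quick illustrative use of both formatters (the values are arbitrary):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	size := common.StorageSize(1234567)
	fmt.Println(size.String())                    // "1.23 mB"
	fmt.Println(size.TerminalString())            // "1.23mB"
	fmt.Println(common.StorageSize(512).String()) // "512.00 B"
}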
@ -510,7 +510,6 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro
|
||||
header.Nonce = types.BlockNonce{}
|
||||
|
||||
number := header.Number.Uint64()
|
||||
|
||||
// Assemble the voting snapshot to check which votes make sense
|
||||
snap, err := c.snapshot(chain, number-1, header.ParentHash, nil)
|
||||
if err != nil {
|
||||
@ -538,10 +537,8 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro
|
||||
c.lock.RUnlock()
|
||||
}
|
||||
// Set the correct difficulty
|
||||
header.Difficulty = diffNoTurn
|
||||
if snap.inturn(header.Number.Uint64(), c.signer) {
|
||||
header.Difficulty = diffInTurn
|
||||
}
|
||||
header.Difficulty = CalcDifficulty(snap, c.signer)
|
||||
|
||||
// Ensure the extra data has all its components
|
||||
if len(header.Extra) < extraVanity {
|
||||
header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, extraVanity-len(header.Extra))...)
|
||||
@ -655,6 +652,27 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, stop <-ch
|
||||
return block.WithSeal(header), nil
|
||||
}
|
||||
|
||||
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
|
||||
// that a new block should have based on the previous blocks in the chain and the
|
||||
// current signer.
|
||||
func (c *Clique) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
|
||||
snap, err := c.snapshot(chain, parent.Number.Uint64(), parent.Hash(), nil)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return CalcDifficulty(snap, c.signer)
|
||||
}
|
||||
|
||||
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
|
||||
// that a new block should have based on the previous blocks in the chain and the
|
||||
// current signer.
|
||||
func CalcDifficulty(snap *Snapshot, signer common.Address) *big.Int {
|
||||
if snap.inturn(snap.Number+1, signer) {
|
||||
return new(big.Int).Set(diffInTurn)
|
||||
}
|
||||
return new(big.Int).Set(diffNoTurn)
|
||||
}
|
||||
|
||||
// APIs implements consensus.Engine, returning the user facing RPC API to allow
|
||||
// controlling the signer voting.
|
||||
func (c *Clique) APIs(chain consensus.ChainReader) []rpc.API {
|
||||
|
@ -23,6 +23,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// ChainReader defines a small collection of methods needed to access the local
|
||||
@ -88,6 +89,10 @@ type Engine interface {
|
||||
// seal place on top.
|
||||
Seal(chain ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error)
|
||||
|
||||
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
|
||||
// that a new block should have.
|
||||
CalcDifficulty(chain ChainReader, time uint64, parent *types.Header) *big.Int
|
||||
|
||||
// APIs returns the RPC APIs this consensus engine provides.
|
||||
APIs(chain ChainReader) []rpc.API
|
||||
}
|
||||
|
@ -23,6 +23,10 @@ var (
|
||||
// that is unknown.
|
||||
ErrUnknownAncestor = errors.New("unknown ancestor")
|
||||
|
||||
// ErrPrunedAncestor is returned when validating a block requires an ancestor
|
||||
// that is known, but the state of which is not available.
|
||||
ErrPrunedAncestor = errors.New("pruned ancestor")
|
||||
|
||||
// ErrFutureBlock is returned when a block's timestamp is in the future according
|
||||
// to the current node.
|
||||
ErrFutureBlock = errors.New("block in the future")
|
||||
|
@ -355,9 +355,11 @@ func hashimotoFull(dataset []uint32, hash []byte, nonce uint64) ([]byte, []byte)
|
||||
return hashimoto(hash, nonce, uint64(len(dataset))*4, lookup)
|
||||
}
|
||||
|
||||
const maxEpoch = 2048
|
||||
|
||||
// datasetSizes is a lookup table for the ethash dataset size for the first 2048
|
||||
// epochs (i.e. 61440000 blocks).
|
||||
var datasetSizes = []uint64{
|
||||
var datasetSizes = [maxEpoch]uint64{
|
||||
1073739904, 1082130304, 1090514816, 1098906752, 1107293056,
|
||||
1115684224, 1124070016, 1132461952, 1140849536, 1149232768,
|
||||
1157627776, 1166013824, 1174404736, 1182786944, 1191180416,
|
||||
@ -771,7 +773,7 @@ var datasetSizes = []uint64{
|
||||
|
||||
// cacheSizes is a lookup table for the ethash verification cache size for the
|
||||
// first 2048 epochs (i.e. 61440000 blocks).
|
||||
var cacheSizes = []uint64{
|
||||
var cacheSizes = [maxEpoch]uint64{
|
||||
16776896, 16907456, 17039296, 17170112, 17301056, 17432512, 17563072,
|
||||
17693888, 17824192, 17955904, 18087488, 18218176, 18349504, 18481088,
|
||||
18611392, 18742336, 18874304, 19004224, 19135936, 19267264, 19398208,
|
||||
|
@ -25,7 +25,7 @@ package ethash
|
||||
func cacheSize(block uint64) uint64 {
|
||||
// If we have a pre-generated value, use that
|
||||
epoch := int(block / epochLength)
|
||||
if epoch < len(cacheSizes) {
|
||||
if epoch < maxEpoch {
|
||||
return cacheSizes[epoch]
|
||||
}
|
||||
// We don't have a way to verify primes fast before Go 1.8
|
||||
@ -39,7 +39,7 @@ func cacheSize(block uint64) uint64 {
|
||||
func datasetSize(block uint64) uint64 {
|
||||
// If we have a pre-generated value, use that
|
||||
epoch := int(block / epochLength)
|
||||
if epoch < len(datasetSizes) {
|
||||
if epoch < maxEpoch {
|
||||
return datasetSizes[epoch]
|
||||
}
|
||||
// We don't have a way to verify primes fast before Go 1.8
|
||||
|
@ -20,17 +20,20 @@ package ethash
|
||||
|
||||
import "math/big"
|
||||
|
||||
// cacheSize calculates and returns the size of the ethash verification cache that
|
||||
// belongs to a certain block number. The cache size grows linearly, however, we
|
||||
// always take the highest prime below the linearly growing threshold in order to
|
||||
// reduce the risk of accidental regularities leading to cyclic behavior.
|
||||
// cacheSize returns the size of the ethash verification cache that belongs to a certain
|
||||
// block number.
|
||||
func cacheSize(block uint64) uint64 {
|
||||
// If we have a pre-generated value, use that
|
||||
epoch := int(block / epochLength)
|
||||
if epoch < len(cacheSizes) {
|
||||
if epoch < maxEpoch {
|
||||
return cacheSizes[epoch]
|
||||
}
|
||||
// No known cache size, calculate manually (sanity branch only)
|
||||
return calcCacheSize(epoch)
|
||||
}
|
||||
|
||||
// calcCacheSize calculates the cache size for epoch. The cache size grows linearly,
|
||||
// however, we always take the highest prime below the linearly growing threshold in order
|
||||
// to reduce the risk of accidental regularities leading to cyclic behavior.
|
||||
func calcCacheSize(epoch int) uint64 {
|
||||
size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
|
||||
for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
|
||||
size -= 2 * hashBytes
|
||||
@ -38,17 +41,20 @@ func cacheSize(block uint64) uint64 {
|
||||
return size
|
||||
}
|
||||
|
||||
// datasetSize calculates and returns the size of the ethash mining dataset that
|
||||
// belongs to a certain block number. The dataset size grows linearly, however, we
|
||||
// always take the highest prime below the linearly growing threshold in order to
|
||||
// reduce the risk of accidental regularities leading to cyclic behavior.
|
||||
// datasetSize returns the size of the ethash mining dataset that belongs to a certain
|
||||
// block number.
|
||||
func datasetSize(block uint64) uint64 {
|
||||
// If we have a pre-generated value, use that
|
||||
epoch := int(block / epochLength)
|
||||
if epoch < len(datasetSizes) {
|
||||
if epoch < maxEpoch {
|
||||
return datasetSizes[epoch]
|
||||
}
|
||||
// No known dataset size, calculate manually (sanity branch only)
|
||||
return calcDatasetSize(epoch)
|
||||
}
|
||||
|
||||
// calcDatasetSize calculates the dataset size for epoch. The dataset size grows linearly,
|
||||
// however, we always take the highest prime below the linearly growing threshold in order
|
||||
// to reduce the risk of accidental regularities leading to cyclic behavior.
|
||||
func calcDatasetSize(epoch int) uint64 {
|
||||
size := datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes
|
||||
for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
|
||||
size -= 2 * mixBytes
|
||||
|
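The size tables are now fixed-length arrays indexed by epoch, and the calc* helpers remain as the fallback for epochs beyond the table. The cache size grows linearly with the epoch and is then rounded down to the largest value whose hash count is prime, to avoid cyclic access patterns. A standalone sketch with the ethash constants assumed from the spec rather than from this diff (2^24 initial bytes, 2^17 growth per epoch, 64-byte hashes); for epoch 0 it should land on 16776896, the first entry of the cacheSizes table above:

package main

import (
	"fmt"
	"math/big"
)

// Assumed ethash constants (from the ethash spec, not shown in this diff).
const (
	cacheInitBytes   = 1 << 24 // bytes in the cache at epoch 0
	cacheGrowthBytes = 1 << 17 // cache growth per epoch
	hashBytes        = 64      // size of one hash
)

// calcCacheSize mirrors the helper above: take the largest size below the
// linear growth threshold whose hash count is prime.
func calcCacheSize(epoch int) uint64 {
	size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
	for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // accurate for n < 2^64
		size -= 2 * hashBytes
	}
	return size
}

func main() {
	fmt.Println(calcCacheSize(0)) // expected: 16776896, matching cacheSizes[0]
}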
@ -23,24 +23,15 @@ import "testing"
|
||||
// Tests whether the dataset size calculator works correctly by cross checking the
|
||||
// hard coded lookup table with the value generated by it.
|
||||
func TestSizeCalculations(t *testing.T) {
|
||||
var tests []uint64
|
||||
|
||||
// Verify all the cache sizes from the lookup table
|
||||
defer func(sizes []uint64) { cacheSizes = sizes }(cacheSizes)
|
||||
tests, cacheSizes = cacheSizes, []uint64{}
|
||||
|
||||
for i, test := range tests {
|
||||
if size := cacheSize(uint64(i*epochLength) + 1); size != test {
|
||||
t.Errorf("cache %d: cache size mismatch: have %d, want %d", i, size, test)
|
||||
// Verify all the cache and dataset sizes from the lookup table.
|
||||
for epoch, want := range cacheSizes {
|
||||
if size := calcCacheSize(epoch); size != want {
|
||||
t.Errorf("cache %d: cache size mismatch: have %d, want %d", epoch, size, want)
|
||||
}
|
||||
}
|
||||
// Verify all the dataset sizes from the lookup table
|
||||
defer func(sizes []uint64) { datasetSizes = sizes }(datasetSizes)
|
||||
tests, datasetSizes = datasetSizes, []uint64{}
|
||||
|
||||
for i, test := range tests {
|
||||
if size := datasetSize(uint64(i*epochLength) + 1); size != test {
|
||||
t.Errorf("dataset %d: dataset size mismatch: have %d, want %d", i, size, test)
|
||||
for epoch, want := range datasetSizes {
|
||||
if size := calcDatasetSize(epoch); size != want {
|
||||
t.Errorf("dataset %d: dataset size mismatch: have %d, want %d", epoch, size, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -688,8 +688,8 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
|
||||
TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
|
||||
ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
|
||||
Difficulty: big.NewInt(167925187834220),
|
||||
GasLimit: big.NewInt(4015682),
|
||||
GasUsed: big.NewInt(0),
|
||||
GasLimit: 4015682,
|
||||
GasUsed: 0,
|
||||
Time: big.NewInt(1488928920),
|
||||
Extra: []byte("www.bw.com"),
|
||||
MixDigest: common.HexToHash("0x3e140b0784516af5e5ec6730f2fb20cca22f32be399b9e4ad77d32541f798cd0"),
|
||||
|
@ -36,9 +36,10 @@ import (
|
||||
|
||||
// Ethash proof-of-work protocol constants.
|
||||
var (
|
||||
FrontierBlockReward *big.Int = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
|
||||
ByzantiumBlockReward *big.Int = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium
|
||||
maxUncles = 2 // Maximum number of uncles allowed in a single block
|
||||
FrontierBlockReward *big.Int = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
|
||||
ByzantiumBlockReward *big.Int = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium
|
||||
maxUncles = 2 // Maximum number of uncles allowed in a single block
|
||||
allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks
|
||||
)
|
||||
|
||||
// Various error messages to mark blocks invalid. These should be private to
|
||||
@ -231,7 +232,7 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
|
||||
return errLargeBlockTime
|
||||
}
|
||||
} else {
|
||||
if header.Time.Cmp(big.NewInt(time.Now().Unix())) > 0 {
|
||||
if header.Time.Cmp(big.NewInt(time.Now().Add(allowedFutureBlockTime).Unix())) > 0 {
|
||||
return consensus.ErrFutureBlock
|
||||
}
|
||||
}
|
||||
@ -239,29 +240,30 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
|
||||
return errZeroBlockTime
|
||||
}
|
||||
// Verify the block's difficulty based in it's timestamp and parent's difficulty
|
||||
expected := CalcDifficulty(chain.Config(), header.Time.Uint64(), parent)
|
||||
expected := ethash.CalcDifficulty(chain, header.Time.Uint64(), parent)
|
||||
|
||||
if expected.Cmp(header.Difficulty) != 0 {
|
||||
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
|
||||
}
|
||||
// Verify that the gas limit is <= 2^63-1
|
||||
if header.GasLimit.Cmp(math.MaxBig63) > 0 {
|
||||
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, math.MaxBig63)
|
||||
cap := uint64(0x7fffffffffffffff)
|
||||
if header.GasLimit > cap {
|
||||
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap)
|
||||
}
|
||||
// Verify that the gasUsed is <= gasLimit
|
||||
if header.GasUsed.Cmp(header.GasLimit) > 0 {
|
||||
return fmt.Errorf("invalid gasUsed: have %v, gasLimit %v", header.GasUsed, header.GasLimit)
|
||||
if header.GasUsed > header.GasLimit {
|
||||
return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit)
|
||||
}
|
||||
|
||||
// Verify that the gas limit remains within allowed bounds
|
||||
diff := new(big.Int).Set(parent.GasLimit)
|
||||
diff = diff.Sub(diff, header.GasLimit)
|
||||
diff.Abs(diff)
|
||||
diff := int64(parent.GasLimit) - int64(header.GasLimit)
|
||||
if diff < 0 {
|
||||
diff *= -1
|
||||
}
|
||||
limit := parent.GasLimit / params.GasLimitBoundDivisor
|
||||
|
||||
limit := new(big.Int).Set(parent.GasLimit)
|
||||
limit = limit.Div(limit, params.GasLimitBoundDivisor)
|
||||
|
||||
if diff.Cmp(limit) >= 0 || header.GasLimit.Cmp(params.MinGasLimit) < 0 {
|
||||
return fmt.Errorf("invalid gas limit: have %v, want %v += %v", header.GasLimit, parent.GasLimit, limit)
|
||||
if uint64(diff) >= limit || header.GasLimit < params.MinGasLimit {
|
||||
return fmt.Errorf("invalid gas limit: have %d, want %d += %d", header.GasLimit, parent.GasLimit, limit)
|
||||
}
|
||||
// Verify that the block number is parent's +1
|
||||
if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(big.NewInt(1)) != 0 {
|
||||
@ -286,7 +288,13 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
|
||||
// CalcDifficulty is the difficulty adjustment algorithm. It returns
|
||||
// the difficulty that a new block should have when created at time
|
||||
// given the parent block's time and difficulty.
|
||||
// TODO (karalabe): Move the chain maker into this package and make this private!
|
||||
func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
|
||||
return CalcDifficulty(chain.Config(), time, parent)
|
||||
}
|
||||
|
||||
// CalcDifficulty is the difficulty adjustment algorithm. It returns
|
||||
// the difficulty that a new block should have when created at time
|
||||
// given the parent block's time and difficulty.
|
||||
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
|
||||
next := new(big.Int).Add(parent.Number, big1)
|
||||
switch {
|
||||
@ -339,7 +347,7 @@ func calcDifficultyByzantium(time uint64, parent *types.Header) *big.Int {
|
||||
if x.Cmp(bigMinus99) < 0 {
|
||||
x.Set(bigMinus99)
|
||||
}
|
||||
// (parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
|
||||
// parent_diff + (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99))
|
||||
y.Div(parent.Difficulty, params.DifficultyBoundDivisor)
|
||||
x.Mul(y, x)
|
||||
x.Add(parent.Difficulty, x)
@@ -373,7 +381,7 @@ func calcDifficultyByzantium(time uint64, parent *types.Header) *big.Int {
// the difficulty that a new block should have when created at time given the
// parent block's time and difficulty. The calculation uses the Homestead rules.
func calcDifficultyHomestead(time uint64, parent *types.Header) *big.Int {
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.mediawiki
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.md
// algorithm:
// diff = (parent_diff +
// (parent_diff / 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))

@@ -468,7 +476,7 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head
}
// Sanity check that the block number is below the lookup table size (60M blocks)
number := header.Number.Uint64()
if number/epochLength >= uint64(len(cacheSizes)) {
if number/epochLength >= maxEpoch {
// Go < 1.7 cannot calculate new cache/dataset sizes (no fast prime check)
return errNonceOutOfRange
}

@@ -476,14 +484,18 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head
if header.Difficulty.Sign() <= 0 {
return errInvalidDifficulty
}

// Recompute the digest and PoW value and verify against the header
cache := ethash.cache(number)

size := datasetSize(number)
if ethash.config.PowMode == ModeTest {
size = 32 * 1024
}
digest, result := hashimotoLight(size, cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
digest, result := hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
// Caches are unmapped in a finalizer. Ensure that the cache stays live
// until after the call to hashimotoLight so it's not unmapped while being used.
runtime.KeepAlive(cache)

if !bytes.Equal(header.MixDigest[:], digest) {
return errInvalidMixDigest
}
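Because cache() now returns a *cache whose memory map is released by a finalizer (see the finalizer hunk further down), the verification path must keep the value reachable until hashimotoLight has finished reading the mapped data; that is what the runtime.KeepAlive call above does. A stand-alone sketch of the hazard, with a hypothetical resource type standing in for the ethash cache:

package keepalive

import "runtime"

// resource stands in for the ethash cache: a finalizer releases the memory
// backing data once the value itself becomes unreachable.
type resource struct {
	data []uint32
}

func sum(r *resource) uint32 {
	s := uint32(0)
	for _, v := range r.data {
		s += v
	}
	// Without this, the collector may treat r as unreachable (and run its
	// finalizer) as soon as r.data has been loaded, even though the loop
	// above is still reading the memory the finalizer would release.
	runtime.KeepAlive(r)
	return s
}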
@@ -501,8 +513,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header)
if parent == nil {
return consensus.ErrUnknownAncestor
}
header.Difficulty = CalcDifficulty(chain.Config(), header.Time.Uint64(), parent)

header.Difficulty = ethash.CalcDifficulty(chain, header.Time.Uint64(), parent)
return nil
}

@@ -510,7 +521,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header)
// setting the final state and assembling the block.
func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
// Accumulate any block and uncle rewards and commit the final state root
AccumulateRewards(chain.Config(), state, header, uncles)
accumulateRewards(chain.Config(), state, header, uncles)
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))

// Header seems complete, assemble into a block and return

@@ -526,8 +537,7 @@ var (
// AccumulateRewards credits the coinbase of the given block with the mining
// reward. The total reward consists of the static block reward and rewards for
// included uncles. The coinbase of each uncle block is also rewarded.
// TODO (karalabe): Move the chain maker into this package and make this private!
func AccumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) {
func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) {
// Select the correct block reward based on chain progression
blockReward := FrontierBlockReward
if config.IsByzantium(header.Number) {

@@ -71,6 +71,7 @@ func TestCalcDifficulty(t *testing.T) {
}

config := &params.ChainConfig{HomesteadBlock: big.NewInt(1150000)}

for name, test := range tests {
number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1))
diff := CalcDifficulty(config, test.CurrentTimestamp, &types.Header{
@@ -26,6 +26,7 @@ import (
"os"
"path/filepath"
"reflect"
"runtime"
"strconv"
"sync"
"time"

@@ -35,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/hashicorp/golang-lru/simplelru"
metrics "github.com/rcrowley/go-metrics"
)

@@ -142,32 +144,82 @@ func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint
return memoryMap(path)
}

// lru tracks caches or datasets by their last use time, keeping at most N of them.
type lru struct {
what string
new func(epoch uint64) interface{}
mu sync.Mutex
// Items are kept in a LRU cache, but there is a special case:
// We always keep an item for (highest seen epoch) + 1 as the 'future item'.
cache *simplelru.LRU
future uint64
futureItem interface{}
}

// newlru creates a new least-recently-used cache for either the verification caches
// or the mining datasets.
func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru {
if maxItems <= 0 {
maxItems = 1
}
cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) {
log.Trace("Evicted ethash "+what, "epoch", key)
})
return &lru{what: what, new: new, cache: cache}
}

// get retrieves or creates an item for the given epoch. The first return value is always
// non-nil. The second return value is non-nil if lru thinks that an item will be useful in
// the near future.
func (lru *lru) get(epoch uint64) (item, future interface{}) {
lru.mu.Lock()
defer lru.mu.Unlock()

// Get or create the item for the requested epoch.
item, ok := lru.cache.Get(epoch)
if !ok {
if lru.future > 0 && lru.future == epoch {
item = lru.futureItem
} else {
log.Trace("Requiring new ethash "+lru.what, "epoch", epoch)
item = lru.new(epoch)
}
lru.cache.Add(epoch, item)
}
// Update the 'future item' if epoch is larger than previously seen.
if epoch < maxEpoch-1 && lru.future < epoch+1 {
log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1)
future = lru.new(epoch + 1)
lru.future = epoch + 1
lru.futureItem = future
}
return item, future
}
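The struct and get method above combine a bounded LRU with a single pre-built item for the next epoch, so an epoch rollover never has to start generation from scratch. A trimmed-down, self-contained sketch of that "future item" idea under those assumptions (a plain map stands in for simplelru.LRU and eviction is omitted; all names here are illustrative, not part of the diff):

package futurelru

type item struct{ epoch uint64 }

// futureLRU keeps items keyed by epoch plus one pre-built item for
// (highest epoch seen)+1, mirroring the lru above in miniature.
type futureLRU struct {
	items      map[uint64]*item
	future     uint64
	futureItem *item
}

func newFutureLRU() *futureLRU {
	return &futureLRU{items: make(map[uint64]*item)}
}

func (l *futureLRU) get(epoch uint64) (current, future *item) {
	current = l.items[epoch]
	if current == nil {
		if l.future == epoch && l.futureItem != nil {
			current = l.futureItem // promote the pre-generated future item
		} else {
			current = &item{epoch: epoch}
		}
		l.items[epoch] = current
	}
	if l.future < epoch+1 {
		future = &item{epoch: epoch + 1}
		l.future, l.futureItem = epoch+1, future
	}
	return current, future
}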
// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
epoch uint64 // Epoch for which this cache is relevant
epoch uint64 // Epoch for which this cache is relevant
dump *os.File // File descriptor of the memory mapped cache
mmap mmap.MMap // Memory map itself to unmap before releasing
cache []uint32 // The actual cache data content (may be memory mapped)
once sync.Once // Ensures the cache is generated only once
}

dump *os.File // File descriptor of the memory mapped cache
mmap mmap.MMap // Memory map itself to unmap before releasing

cache []uint32 // The actual cache data content (may be memory mapped)
used time.Time // Timestamp of the last use for smarter eviction
once sync.Once // Ensures the cache is generated only once
lock sync.Mutex // Ensures thread safety for updating the usage time
// newCache creates a new ethash verification cache and returns it as a plain Go
// interface to be usable in an LRU cache.
func newCache(epoch uint64) interface{} {
return &cache{epoch: epoch}
}

// generate ensures that the cache content is generated before use.
func (c *cache) generate(dir string, limit int, test bool) {
c.once.Do(func() {
// If we have a testing cache, generate and return
if test {
c.cache = make([]uint32, 1024/4)
generateCache(c.cache, c.epoch, seedHash(c.epoch*epochLength+1))
return
}
// If we don't store anything on disk, generate and return
size := cacheSize(c.epoch*epochLength + 1)
seed := seedHash(c.epoch*epochLength + 1)

if test {
size = 1024
}
// If we don't store anything on disk, generate and return.
if dir == "" {
c.cache = make([]uint32, size/4)
generateCache(c.cache, c.epoch, seed)

@@ -181,6 +233,10 @@ func (c *cache) generate(dir string, limit int, test bool) {
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
logger := log.New("epoch", c.epoch)

// We're about to mmap the file, ensure that the mapping is cleaned up when the
// cache becomes unused.
runtime.SetFinalizer(c, (*cache).finalizer)

// Try to load the file from disk and memory map it
var err error
c.dump, c.mmap, c.cache, err = memoryMap(path)

@@ -207,49 +263,41 @@ func (c *cache) generate(dir string, limit int, test bool) {
})
}

// release closes any file handlers and memory maps open.
func (c *cache) release() {
// finalizer unmaps the memory and closes the file.
func (c *cache) finalizer() {
if c.mmap != nil {
c.mmap.Unmap()
c.mmap = nil
}
if c.dump != nil {
c.dump.Close()
c.dump = nil
c.mmap, c.dump = nil, nil
}
}
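The explicit release() call is gone: the mapping and file handle are now reclaimed by a finalizer registered with runtime.SetFinalizer just before the file is mmapped (see the generate hunk above), which is why consumers of the data pair it with runtime.KeepAlive. A minimal sketch of that registration pattern using only the standard library, with the mmap step left out; the type and function names are assumptions for illustration:

package finalize

import (
	"os"
	"runtime"
)

// mapped stands in for the ethash cache/dataset: a file plus a memory view
// that must be torn down together once the value becomes unreachable.
type mapped struct {
	dump *os.File
	data []byte
}

func open(path string) (*mapped, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	m := &mapped{dump: f}
	// Registered before the value is handed out, mirroring the diff: the
	// finalizer runs once the GC finds m unreachable, so any late use of
	// m.data must be followed by runtime.KeepAlive(m).
	runtime.SetFinalizer(m, (*mapped).finalizer)
	return m, nil
}

func (m *mapped) finalizer() {
	if m.dump != nil {
		m.dump.Close()
		m.dump = nil
	}
}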
|
||||
|
||||
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
|
||||
type dataset struct {
|
||||
epoch uint64 // Epoch for which this cache is relevant
|
||||
epoch uint64 // Epoch for which this cache is relevant
|
||||
dump *os.File // File descriptor of the memory mapped cache
|
||||
mmap mmap.MMap // Memory map itself to unmap before releasing
|
||||
dataset []uint32 // The actual cache data content
|
||||
once sync.Once // Ensures the cache is generated only once
|
||||
}
|
||||
|
||||
dump *os.File // File descriptor of the memory mapped cache
|
||||
mmap mmap.MMap // Memory map itself to unmap before releasing
|
||||
|
||||
dataset []uint32 // The actual cache data content
|
||||
used time.Time // Timestamp of the last use for smarter eviction
|
||||
once sync.Once // Ensures the cache is generated only once
|
||||
lock sync.Mutex // Ensures thread safety for updating the usage time
|
||||
// newDataset creates a new ethash mining dataset and returns it as a plain Go
|
||||
// interface to be usable in an LRU cache.
|
||||
func newDataset(epoch uint64) interface{} {
|
||||
return &dataset{epoch: epoch}
|
||||
}
|
||||
|
||||
// generate ensures that the dataset content is generated before use.
|
||||
func (d *dataset) generate(dir string, limit int, test bool) {
|
||||
d.once.Do(func() {
|
||||
// If we have a testing dataset, generate and return
|
||||
if test {
|
||||
cache := make([]uint32, 1024/4)
|
||||
generateCache(cache, d.epoch, seedHash(d.epoch*epochLength+1))
|
||||
|
||||
d.dataset = make([]uint32, 32*1024/4)
|
||||
generateDataset(d.dataset, d.epoch, cache)
|
||||
|
||||
return
|
||||
}
|
||||
// If we don't store anything on disk, generate and return
|
||||
csize := cacheSize(d.epoch*epochLength + 1)
|
||||
dsize := datasetSize(d.epoch*epochLength + 1)
|
||||
seed := seedHash(d.epoch*epochLength + 1)
|
||||
|
||||
if test {
|
||||
csize = 1024
|
||||
dsize = 32 * 1024
|
||||
}
|
||||
// If we don't store anything on disk, generate and return
|
||||
if dir == "" {
|
||||
cache := make([]uint32, csize/4)
|
||||
generateCache(cache, d.epoch, seed)
|
||||
@ -265,6 +313,10 @@ func (d *dataset) generate(dir string, limit int, test bool) {
|
||||
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
|
||||
logger := log.New("epoch", d.epoch)
|
||||
|
||||
// We're about to mmap the file, ensure that the mapping is cleaned up when the
|
||||
// cache becomes unused.
|
||||
runtime.SetFinalizer(d, (*dataset).finalizer)
|
||||
|
||||
// Try to load the file from disk and memory map it
|
||||
var err error
|
||||
d.dump, d.mmap, d.dataset, err = memoryMap(path)
|
||||
@ -294,15 +346,12 @@ func (d *dataset) generate(dir string, limit int, test bool) {
|
||||
})
|
||||
}
|
||||
|
||||
// release closes any file handlers and memory maps open.
|
||||
func (d *dataset) release() {
|
||||
// finalizer closes any file handlers and memory maps open.
|
||||
func (d *dataset) finalizer() {
|
||||
if d.mmap != nil {
|
||||
d.mmap.Unmap()
|
||||
d.mmap = nil
|
||||
}
|
||||
if d.dump != nil {
|
||||
d.dump.Close()
|
||||
d.dump = nil
|
||||
d.mmap, d.dump = nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
@ -310,14 +359,12 @@ func (d *dataset) release() {
|
||||
func MakeCache(block uint64, dir string) {
|
||||
c := cache{epoch: block / epochLength}
|
||||
c.generate(dir, math.MaxInt32, false)
|
||||
c.release()
|
||||
}
|
||||
|
||||
// MakeDataset generates a new ethash dataset and optionally stores it to disk.
|
||||
func MakeDataset(block uint64, dir string) {
|
||||
d := dataset{epoch: block / epochLength}
|
||||
d.generate(dir, math.MaxInt32, false)
|
||||
d.release()
|
||||
}
|
||||
|
||||
// Mode defines the type and amount of PoW verification an ethash engine makes.
|
||||
@ -347,10 +394,8 @@ type Config struct {
|
||||
type Ethash struct {
|
||||
config Config
|
||||
|
||||
caches map[uint64]*cache // In memory caches to avoid regenerating too often
|
||||
fcache *cache // Pre-generated cache for the estimated future epoch
|
||||
datasets map[uint64]*dataset // In memory datasets to avoid regenerating too often
|
||||
fdataset *dataset // Pre-generated dataset for the estimated future epoch
|
||||
caches *lru // In memory caches to avoid regenerating too often
|
||||
datasets *lru // In memory datasets to avoid regenerating too often
|
||||
|
||||
// Mining related fields
|
||||
rand *rand.Rand // Properly seeded random source for nonces
|
||||
@ -380,8 +425,8 @@ func New(config Config) *Ethash {
|
||||
}
|
||||
return &Ethash{
|
||||
config: config,
|
||||
caches: make(map[uint64]*cache),
|
||||
datasets: make(map[uint64]*dataset),
|
||||
caches: newlru("cache", config.CachesInMem, newCache),
|
||||
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
|
||||
update: make(chan struct{}),
|
||||
hashrate: metrics.NewMeter(),
|
||||
}
|
||||
@ -390,16 +435,7 @@ func New(config Config) *Ethash {
|
||||
// NewTester creates a small sized ethash PoW scheme useful only for testing
|
||||
// purposes.
|
||||
func NewTester() *Ethash {
|
||||
return &Ethash{
|
||||
config: Config{
|
||||
CachesInMem: 1,
|
||||
PowMode: ModeTest,
|
||||
},
|
||||
caches: make(map[uint64]*cache),
|
||||
datasets: make(map[uint64]*dataset),
|
||||
update: make(chan struct{}),
|
||||
hashrate: metrics.NewMeter(),
|
||||
}
|
||||
return New(Config{CachesInMem: 1, PowMode: ModeTest})
|
||||
}
|
||||
|
||||
// NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts
|
||||
@ -456,126 +492,40 @@ func NewShared() *Ethash {
|
||||
// cache tries to retrieve a verification cache for the specified block number
|
||||
// by first checking against a list of in-memory caches, then against caches
|
||||
// stored on disk, and finally generating one if none can be found.
|
||||
func (ethash *Ethash) cache(block uint64) []uint32 {
|
||||
func (ethash *Ethash) cache(block uint64) *cache {
|
||||
epoch := block / epochLength
|
||||
currentI, futureI := ethash.caches.get(epoch)
|
||||
current := currentI.(*cache)
|
||||
|
||||
// If we have a PoW for that epoch, use that
|
||||
ethash.lock.Lock()
|
||||
|
||||
current, future := ethash.caches[epoch], (*cache)(nil)
|
||||
if current == nil {
|
||||
// No in-memory cache, evict the oldest if the cache limit was reached
|
||||
for len(ethash.caches) > 0 && len(ethash.caches) >= ethash.config.CachesInMem {
|
||||
var evict *cache
|
||||
for _, cache := range ethash.caches {
|
||||
if evict == nil || evict.used.After(cache.used) {
|
||||
evict = cache
|
||||
}
|
||||
}
|
||||
delete(ethash.caches, evict.epoch)
|
||||
evict.release()
|
||||
|
||||
log.Trace("Evicted ethash cache", "epoch", evict.epoch, "used", evict.used)
|
||||
}
|
||||
// If we have the new cache pre-generated, use that, otherwise create a new one
|
||||
if ethash.fcache != nil && ethash.fcache.epoch == epoch {
|
||||
log.Trace("Using pre-generated cache", "epoch", epoch)
|
||||
current, ethash.fcache = ethash.fcache, nil
|
||||
} else {
|
||||
log.Trace("Requiring new ethash cache", "epoch", epoch)
|
||||
current = &cache{epoch: epoch}
|
||||
}
|
||||
ethash.caches[epoch] = current
|
||||
|
||||
// If we just used up the future cache, or need a refresh, regenerate
|
||||
if ethash.fcache == nil || ethash.fcache.epoch <= epoch {
|
||||
if ethash.fcache != nil {
|
||||
ethash.fcache.release()
|
||||
}
|
||||
log.Trace("Requiring new future ethash cache", "epoch", epoch+1)
|
||||
future = &cache{epoch: epoch + 1}
|
||||
ethash.fcache = future
|
||||
}
|
||||
// New current cache, set its initial timestamp
|
||||
current.used = time.Now()
|
||||
}
|
||||
ethash.lock.Unlock()
|
||||
|
||||
// Wait for generation finish, bump the timestamp and finalize the cache
|
||||
// Wait for generation finish.
|
||||
current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
|
||||
|
||||
current.lock.Lock()
|
||||
current.used = time.Now()
|
||||
current.lock.Unlock()
|
||||
|
||||
// If we exhausted the future cache, now's a good time to regenerate it
|
||||
if future != nil {
|
||||
// If we need a new future cache, now's a good time to regenerate it.
|
||||
if futureI != nil {
|
||||
future := futureI.(*cache)
|
||||
go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
|
||||
}
|
||||
return current.cache
|
||||
return current
|
||||
}
|
||||
|
||||
// dataset tries to retrieve a mining dataset for the specified block number
|
||||
// by first checking against a list of in-memory datasets, then against DAGs
|
||||
// stored on disk, and finally generating one if none can be found.
|
||||
func (ethash *Ethash) dataset(block uint64) []uint32 {
|
||||
func (ethash *Ethash) dataset(block uint64) *dataset {
|
||||
epoch := block / epochLength
|
||||
currentI, futureI := ethash.datasets.get(epoch)
|
||||
current := currentI.(*dataset)
|
||||
|
||||
// If we have a PoW for that epoch, use that
|
||||
ethash.lock.Lock()
|
||||
|
||||
current, future := ethash.datasets[epoch], (*dataset)(nil)
|
||||
if current == nil {
|
||||
// No in-memory dataset, evict the oldest if the dataset limit was reached
|
||||
for len(ethash.datasets) > 0 && len(ethash.datasets) >= ethash.config.DatasetsInMem {
|
||||
var evict *dataset
|
||||
for _, dataset := range ethash.datasets {
|
||||
if evict == nil || evict.used.After(dataset.used) {
|
||||
evict = dataset
|
||||
}
|
||||
}
|
||||
delete(ethash.datasets, evict.epoch)
|
||||
evict.release()
|
||||
|
||||
log.Trace("Evicted ethash dataset", "epoch", evict.epoch, "used", evict.used)
|
||||
}
|
||||
// If we have the new cache pre-generated, use that, otherwise create a new one
|
||||
if ethash.fdataset != nil && ethash.fdataset.epoch == epoch {
|
||||
log.Trace("Using pre-generated dataset", "epoch", epoch)
|
||||
current = &dataset{epoch: ethash.fdataset.epoch} // Reload from disk
|
||||
ethash.fdataset = nil
|
||||
} else {
|
||||
log.Trace("Requiring new ethash dataset", "epoch", epoch)
|
||||
current = &dataset{epoch: epoch}
|
||||
}
|
||||
ethash.datasets[epoch] = current
|
||||
|
||||
// If we just used up the future dataset, or need a refresh, regenerate
|
||||
if ethash.fdataset == nil || ethash.fdataset.epoch <= epoch {
|
||||
if ethash.fdataset != nil {
|
||||
ethash.fdataset.release()
|
||||
}
|
||||
log.Trace("Requiring new future ethash dataset", "epoch", epoch+1)
|
||||
future = &dataset{epoch: epoch + 1}
|
||||
ethash.fdataset = future
|
||||
}
|
||||
// New current dataset, set its initial timestamp
|
||||
current.used = time.Now()
|
||||
}
|
||||
ethash.lock.Unlock()
|
||||
|
||||
// Wait for generation finish, bump the timestamp and finalize the cache
|
||||
// Wait for generation finish.
|
||||
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
|
||||
|
||||
current.lock.Lock()
|
||||
current.used = time.Now()
|
||||
current.lock.Unlock()
|
||||
|
||||
// If we exhausted the future dataset, now's a good time to regenerate it
|
||||
if future != nil {
|
||||
// If we need a new future dataset, now's a good time to regenerate it.
|
||||
if futureI != nil {
|
||||
future := futureI.(*dataset)
|
||||
go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
|
||||
}
|
||||
return current.dataset
|
||||
|
||||
return current
|
||||
}
// Threads returns the number of mining threads currently enabled. This doesn't

@@ -17,7 +17,11 @@
package ethash

import (
"io/ioutil"
"math/big"
"math/rand"
"os"
"sync"
"testing"

"github.com/ethereum/go-ethereum/core/types"

@@ -38,3 +42,38 @@ func TestTestMode(t *testing.T) {
t.Fatalf("unexpected verification error: %v", err)
}
}

// This test checks that cache lru logic doesn't crash under load.
// It reproduces https://github.com/ethereum/go-ethereum/issues/14943
func TestCacheFileEvict(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "ethash-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
e := New(Config{CachesInMem: 3, CachesOnDisk: 10, CacheDir: tmpdir, PowMode: ModeTest})

workers := 8
epochs := 100
var wg sync.WaitGroup
wg.Add(workers)
for i := 0; i < workers; i++ {
go verifyTest(&wg, e, i, epochs)
}
wg.Wait()
}

func verifyTest(wg *sync.WaitGroup, e *Ethash, workerIndex, epochs int) {
defer wg.Done()

const wiggle = 4 * epochLength
r := rand.New(rand.NewSource(int64(workerIndex)))
for epoch := 0; epoch < epochs; epoch++ {
block := int64(epoch)*epochLength - wiggle/2 + r.Int63n(wiggle)
if block < 0 {
block = 0
}
head := &types.Header{Number: big.NewInt(block), Difficulty: big.NewInt(100)}
e.VerifySeal(nil, head)
}
}
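The new test hammers the cache lru from several goroutines to reproduce issue 14943. Regressions in this kind of lifecycle code tend to surface fastest under the race detector; a typical invocation would look like the following (the package path is the conventional location of this file, not something stated in the diff):

go test -race -run TestCacheFileEvict ./consensus/ethash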
@@ -97,10 +97,9 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop
func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan struct{}, found chan *types.Block) {
// Extract some data from the header
var (
header = block.Header()
hash = header.HashNoNonce().Bytes()
target = new(big.Int).Div(maxUint256, header.Difficulty)

header = block.Header()
hash = header.HashNoNonce().Bytes()
target = new(big.Int).Div(maxUint256, header.Difficulty)
number = header.Number.Uint64()
dataset = ethash.dataset(number)
)

@@ -111,13 +110,14 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
)
logger := log.New("miner", id)
logger.Trace("Started ethash search for new nonces", "seed", seed)
search:
for {
select {
case <-abort:
// Mining terminated, update stats and abort
logger.Trace("Ethash nonce search aborted", "attempts", nonce-seed)
ethash.hashrate.Mark(attempts)
return
break search

default:
// We don't have to update hash rate on every nonce, so update after 2^X nonces

@@ -127,7 +127,7 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
attempts = 0
}
// Compute the PoW value of this nonce
digest, result := hashimotoFull(dataset, hash, nonce)
digest, result := hashimotoFull(dataset.dataset, hash, nonce)
if new(big.Int).SetBytes(result).Cmp(target) <= 0 {
// Correct nonce found, create a new header with it
header = types.CopyHeader(header)

@@ -141,9 +141,12 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
case <-abort:
logger.Trace("Ethash nonce found but discarded", "attempts", nonce-seed, "nonce", nonce)
}
return
break search
}
nonce++
}
}
// Datasets are unmapped in a finalizer. Ensure that the dataset stays live
// during sealing so it's not unmapped while being read.
runtime.KeepAlive(dataset)
}
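Both early `return` statements in the mining loop become `break search` so that every exit path falls through to the runtime.KeepAlive(dataset) call at the end of mine, keeping the memory-mapped DAG alive while it is still being read. A small sketch of that labelled-break-to-tail-cleanup shape (the names are illustrative, not from the diff):

package sealer

import "runtime"

// searchLoop shows only the control-flow shape: exits use `break loop`
// rather than `return`, so the trailing KeepAlive always executes.
func searchLoop(abort <-chan struct{}, step func() (done bool), mapped interface{}) {
loop:
	for {
		select {
		case <-abort:
			break loop
		default:
			if step() {
				break loop
			}
		}
	}
	runtime.KeepAlive(mapped)
}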
|
||||
|
@ -92,6 +92,9 @@ func New(config Config) (*Console, error) {
|
||||
printer: config.Printer,
|
||||
histPath: filepath.Join(config.DataDir, HistoryFile),
|
||||
}
|
||||
if err := os.MkdirAll(config.DataDir, 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := console.init(config.Preload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -423,7 +426,7 @@ func (c *Console) Execute(path string) error {
|
||||
return c.jsre.Exec(path)
|
||||
}
|
||||
|
||||
// Stop cleans up the console and terminates the runtime envorinment.
|
||||
// Stop cleans up the console and terminates the runtime environment.
|
||||
func (c *Console) Stop(graceful bool) error {
|
||||
if err := ioutil.WriteFile(c.histPath, []byte(strings.Join(c.history, "\n")), 0600); err != nil {
|
||||
return err
|
||||
|
@ -155,8 +155,7 @@ func (p *terminalPrompter) SetHistory(history []string) {
|
||||
p.State.ReadHistory(strings.NewReader(strings.Join(history, "\n")))
|
||||
}
|
||||
|
||||
// AppendHistory appends an entry to the scrollback history. It should be called
|
||||
// if and only if the prompt to append was a valid command.
|
||||
// AppendHistory appends an entry to the scrollback history.
|
||||
func (p *terminalPrompter) AppendHistory(command string) {
|
||||
p.State.AppendHistory(command)
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
FROM alpine:3.5
|
||||
FROM alpine:3.7
|
||||
|
||||
RUN \
|
||||
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
|
||||
|
@ -1,12 +1,14 @@
|
||||
FROM ubuntu:xenial
|
||||
|
||||
ENV PATH=/usr/lib/go-1.9/bin:$PATH
|
||||
|
||||
RUN \
|
||||
apt-get update && apt-get upgrade -q -y && \
|
||||
apt-get install -y --no-install-recommends golang git make gcc libc-dev ca-certificates && \
|
||||
apt-get install -y --no-install-recommends golang-1.9 git make gcc libc-dev ca-certificates && \
|
||||
git clone --depth 1 https://github.com/ethereum/go-ethereum && \
|
||||
(cd go-ethereum && make geth) && \
|
||||
cp go-ethereum/build/bin/geth /geth && \
|
||||
apt-get remove -y golang git make gcc libc-dev && apt autoremove -y && apt-get clean && \
|
||||
apt-get remove -y golang-1.9 git make gcc libc-dev && apt autoremove -y && apt-get clean && \
|
||||
rm -rf /go-ethereum
|
||||
|
||||
EXPOSE 8545
|
||||
|
@ -1,4 +1,4 @@
|
||||
FROM alpine:3.5
|
||||
FROM alpine:3.7
|
||||
|
||||
RUN \
|
||||
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
|
||||
|
@ -1,12 +1,14 @@
|
||||
FROM ubuntu:xenial
|
||||
|
||||
ENV PATH=/usr/lib/go-1.9/bin:$PATH
|
||||
|
||||
RUN \
|
||||
apt-get update && apt-get upgrade -q -y && \
|
||||
apt-get install -y --no-install-recommends golang git make gcc libc-dev ca-certificates && \
|
||||
apt-get install -y --no-install-recommends golang-1.9 git make gcc libc-dev ca-certificates && \
|
||||
git clone --depth 1 --branch release/1.7 https://github.com/ethereum/go-ethereum && \
|
||||
(cd go-ethereum && make geth) && \
|
||||
cp go-ethereum/build/bin/geth /geth && \
|
||||
apt-get remove -y golang git make gcc libc-dev && apt autoremove -y && apt-get clean && \
|
||||
apt-get remove -y golang-1.9 git make gcc libc-dev && apt autoremove -y && apt-get clean && \
|
||||
rm -rf /go-ethereum
|
||||
|
||||
EXPOSE 8545
|
||||
|
@ -21,7 +21,7 @@
|
||||
// as well as (auto)depositing ether to the chequebook contract.
|
||||
package chequebook
|
||||
|
||||
//go:generate abigen --sol contract/chequebook.sol --pkg contract --out contract/chequebook.go
|
||||
//go:generate abigen --sol contract/chequebook.sol --exc contract/mortal.sol:mortal,contract/owned.sol:owned --pkg contract --out contract/chequebook.go
|
||||
//go:generate go run ./gencode.go
|
||||
|
||||
import (
|
||||
@ -56,8 +56,8 @@ import (
|
||||
// * watching incoming ether
|
||||
|
||||
var (
|
||||
gasToCash = big.NewInt(2000000) // gas cost of a cash transaction using chequebook
|
||||
// gasToDeploy = big.NewInt(3000000)
|
||||
gasToCash = uint64(2000000) // gas cost of a cash transaction using chequebook
|
||||
// gasToDeploy = uint64(3000000)
|
||||
)
|
||||
|
||||
// Backend wraps all methods required for chequebook operation.
|
||||
|
@ -1,5 +1,5 @@
|
||||
// This file is an automatically generated Go binding. Do not modify as any
|
||||
// change will likely be lost upon the next re-generation!
|
||||
// Code generated - DO NOT EDIT.
|
||||
// This file is a generated binding and any manual changes will be lost.
|
||||
|
||||
package contract
|
||||
|
||||
@ -7,17 +7,19 @@ import (
|
||||
"math/big"
|
||||
"strings"
|
||||
|
||||
ethereum "github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
)
|
||||
|
||||
// ChequebookABI is the input ABI used to generate the binding from.
|
||||
const ChequebookABI = `[{"constant":false,"inputs":[],"name":"kill","outputs":[],"type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"sent","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":false,"inputs":[{"name":"beneficiary","type":"address"},{"name":"amount","type":"uint256"},{"name":"sig_v","type":"uint8"},{"name":"sig_r","type":"bytes32"},{"name":"sig_s","type":"bytes32"}],"name":"cash","outputs":[],"type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"deadbeat","type":"address"}],"name":"Overdraft","type":"event"}]`
|
||||
const ChequebookABI = "[{\"constant\":false,\"inputs\":[],\"name\":\"kill\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"\",\"type\":\"address\"}],\"name\":\"sent\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"beneficiary\",\"type\":\"address\"},{\"name\":\"amount\",\"type\":\"uint256\"},{\"name\":\"sig_v\",\"type\":\"uint8\"},{\"name\":\"sig_r\",\"type\":\"bytes32\"},{\"name\":\"sig_s\",\"type\":\"bytes32\"}],\"name\":\"cash\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"payable\":true,\"stateMutability\":\"payable\",\"type\":\"fallback\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"deadbeat\",\"type\":\"address\"}],\"name\":\"Overdraft\",\"type\":\"event\"}]"
|
||||
|
||||
// ChequebookBin is the compiled bytecode used for deploying new contracts.
|
||||
const ChequebookBin = `0x606060405260008054600160a060020a031916331790556101ff806100246000396000f3606060405260e060020a600035046341c0e1b581146100315780637bf786f814610059578063fbf788d614610071575b005b61002f60005433600160a060020a03908116911614156100bd57600054600160a060020a0316ff5b6100ab60043560016020526000908152604090205481565b61002f600435602435604435606435608435600160a060020a03851660009081526001602052604081205485116100bf575b505050505050565b60408051918252519081900360200190f35b565b50604080516c0100000000000000000000000030600160a060020a0390811682028352881602601482015260288101869052815190819003604801812080825260ff861660208381019190915282840186905260608301859052925190926001926080818101939182900301816000866161da5a03f11561000257505060405151600054600160a060020a0390811691161461015a576100a3565b600160a060020a038681166000908152600160205260409020543090911631908603106101b357604060008181208790559051600160a060020a0388169190819081818181818881f1935050505015156100a357610002565b60005460408051600160a060020a03929092168252517f2250e2993c15843b32621c89447cc589ee7a9f049c026986e545d3c2c0c6f9789181900360200190a185600160a060020a0316ff`
|
||||
const ChequebookBin = `0x606060405260008054600160a060020a033316600160a060020a03199091161790556102ec806100306000396000f3006060604052600436106100565763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166341c0e1b581146100585780637bf786f81461006b578063fbf788d61461009c575b005b341561006357600080fd5b6100566100ca565b341561007657600080fd5b61008a600160a060020a03600435166100f1565b60405190815260200160405180910390f35b34156100a757600080fd5b610056600160a060020a036004351660243560ff60443516606435608435610103565b60005433600160a060020a03908116911614156100ef57600054600160a060020a0316ff5b565b60016020526000908152604090205481565b600160a060020a0385166000908152600160205260408120548190861161012957600080fd5b3087876040516c01000000000000000000000000600160a060020a03948516810282529290931690910260148301526028820152604801604051809103902091506001828686866040516000815260200160405260006040516020015260405193845260ff90921660208085019190915260408085019290925260608401929092526080909201915160208103908084039060008661646e5a03f115156101cf57600080fd5b505060206040510351600054600160a060020a039081169116146101f257600080fd5b50600160a060020a03808716600090815260016020526040902054860390301631811161026257600160a060020a0387166000818152600160205260409081902088905582156108fc0290839051600060405180830381858888f19350505050151561025d57600080fd5b6102b7565b6000547f2250e2993c15843b32621c89447cc589ee7a9f049c026986e545d3c2c0c6f97890600160a060020a0316604051600160a060020a03909116815260200160405180910390a186600160a060020a0316ff5b505050505050505600a165627a7a72305820533e856fc37e3d64d1706bcc7dfb6b1d490c8d566ea498d9d01ec08965a896ca0029`
|
||||
|
||||
// DeployChequebook deploys a new Ethereum contract, binding an instance of Chequebook to it.
|
||||
func DeployChequebook(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Chequebook, error) {
|
||||
@ -29,13 +31,14 @@ func DeployChequebook(auth *bind.TransactOpts, backend bind.ContractBackend) (co
|
||||
if err != nil {
|
||||
return common.Address{}, nil, nil, err
|
||||
}
|
||||
return address, tx, &Chequebook{ChequebookCaller: ChequebookCaller{contract: contract}, ChequebookTransactor: ChequebookTransactor{contract: contract}}, nil
|
||||
return address, tx, &Chequebook{ChequebookCaller: ChequebookCaller{contract: contract}, ChequebookTransactor: ChequebookTransactor{contract: contract}, ChequebookFilterer: ChequebookFilterer{contract: contract}}, nil
|
||||
}
|
||||
|
||||
// Chequebook is an auto generated Go binding around an Ethereum contract.
|
||||
type Chequebook struct {
|
||||
ChequebookCaller // Read-only binding to the contract
|
||||
ChequebookTransactor // Write-only binding to the contract
|
||||
ChequebookFilterer // Log filterer for contract events
|
||||
}
|
||||
|
||||
// ChequebookCaller is an auto generated read-only Go binding around an Ethereum contract.
|
||||
@ -48,6 +51,11 @@ type ChequebookTransactor struct {
|
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
}
|
||||
|
||||
// ChequebookFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
|
||||
type ChequebookFilterer struct {
|
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
}
|
||||
|
||||
// ChequebookSession is an auto generated Go binding around an Ethereum contract,
|
||||
// with pre-set call and transact options.
|
||||
type ChequebookSession struct {
|
||||
@ -87,16 +95,16 @@ type ChequebookTransactorRaw struct {
|
||||
|
||||
// NewChequebook creates a new instance of Chequebook, bound to a specific deployed contract.
|
||||
func NewChequebook(address common.Address, backend bind.ContractBackend) (*Chequebook, error) {
|
||||
contract, err := bindChequebook(address, backend, backend)
|
||||
contract, err := bindChequebook(address, backend, backend, backend)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Chequebook{ChequebookCaller: ChequebookCaller{contract: contract}, ChequebookTransactor: ChequebookTransactor{contract: contract}}, nil
|
||||
return &Chequebook{ChequebookCaller: ChequebookCaller{contract: contract}, ChequebookTransactor: ChequebookTransactor{contract: contract}, ChequebookFilterer: ChequebookFilterer{contract: contract}}, nil
|
||||
}
|
||||
|
||||
// NewChequebookCaller creates a new read-only instance of Chequebook, bound to a specific deployed contract.
|
||||
func NewChequebookCaller(address common.Address, caller bind.ContractCaller) (*ChequebookCaller, error) {
|
||||
contract, err := bindChequebook(address, caller, nil)
|
||||
contract, err := bindChequebook(address, caller, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -105,20 +113,29 @@ func NewChequebookCaller(address common.Address, caller bind.ContractCaller) (*C
|
||||
|
||||
// NewChequebookTransactor creates a new write-only instance of Chequebook, bound to a specific deployed contract.
|
||||
func NewChequebookTransactor(address common.Address, transactor bind.ContractTransactor) (*ChequebookTransactor, error) {
|
||||
contract, err := bindChequebook(address, nil, transactor)
|
||||
contract, err := bindChequebook(address, nil, transactor, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ChequebookTransactor{contract: contract}, nil
|
||||
}
|
||||
|
||||
// NewChequebookFilterer creates a new log filterer instance of Chequebook, bound to a specific deployed contract.
|
||||
func NewChequebookFilterer(address common.Address, filterer bind.ContractFilterer) (*ChequebookFilterer, error) {
|
||||
contract, err := bindChequebook(address, nil, nil, filterer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ChequebookFilterer{contract: contract}, nil
|
||||
}
|
||||
|
||||
// bindChequebook binds a generic wrapper to an already deployed contract.
|
||||
func bindChequebook(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor) (*bind.BoundContract, error) {
|
||||
func bindChequebook(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
|
||||
parsed, err := abi.JSON(strings.NewReader(ChequebookABI))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bind.NewBoundContract(address, parsed, caller, transactor), nil
|
||||
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
|
||||
}
|
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
@ -227,315 +244,124 @@ func (_Chequebook *ChequebookTransactorSession) Kill() (*types.Transaction, erro
|
||||
return _Chequebook.Contract.Kill(&_Chequebook.TransactOpts)
|
||||
}
|
||||
|
||||
// MortalABI is the input ABI used to generate the binding from.
|
||||
const MortalABI = `[{"constant":false,"inputs":[],"name":"kill","outputs":[],"type":"function"}]`
|
||||
// ChequebookOverdraftIterator is returned from FilterOverdraft and is used to iterate over the raw logs and unpacked data for Overdraft events raised by the Chequebook contract.
|
||||
type ChequebookOverdraftIterator struct {
|
||||
Event *ChequebookOverdraft // Event containing the contract specifics and raw log
|
||||
|
||||
// MortalBin is the compiled bytecode used for deploying new contracts.
|
||||
const MortalBin = `0x606060405260008054600160a060020a03191633179055605c8060226000396000f3606060405260e060020a600035046341c0e1b58114601a575b005b60186000543373ffffffffffffffffffffffffffffffffffffffff90811691161415605a5760005473ffffffffffffffffffffffffffffffffffffffff16ff5b56`
|
||||
contract *bind.BoundContract // Generic contract to use for unpacking event data
|
||||
event string // Event name to use for unpacking event data
|
||||
|
||||
// DeployMortal deploys a new Ethereum contract, binding an instance of Mortal to it.
|
||||
func DeployMortal(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Mortal, error) {
|
||||
parsed, err := abi.JSON(strings.NewReader(MortalABI))
|
||||
if err != nil {
|
||||
return common.Address{}, nil, nil, err
|
||||
logs chan types.Log // Log channel receiving the found contract events
|
||||
sub ethereum.Subscription // Subscription for errors, completion and termination
|
||||
done bool // Whether the subscription completed delivering logs
|
||||
fail error // Occurred error to stop iteration
|
||||
}
|
||||
|
||||
// Next advances the iterator to the subsequent event, returning whether there
|
||||
// are any more events found. In case of a retrieval or parsing error, false is
|
||||
// returned and Error() can be queried for the exact failure.
|
||||
func (it *ChequebookOverdraftIterator) Next() bool {
|
||||
// If the iterator failed, stop iterating
|
||||
if it.fail != nil {
|
||||
return false
|
||||
}
|
||||
address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(MortalBin), backend)
|
||||
if err != nil {
|
||||
return common.Address{}, nil, nil, err
|
||||
// If the iterator completed, deliver directly whatever's available
|
||||
if it.done {
|
||||
select {
|
||||
case log := <-it.logs:
|
||||
it.Event = new(ChequebookOverdraft)
|
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
|
||||
it.fail = err
|
||||
return false
|
||||
}
|
||||
it.Event.Raw = log
|
||||
return true
|
||||
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
return address, tx, &Mortal{MortalCaller: MortalCaller{contract: contract}, MortalTransactor: MortalTransactor{contract: contract}}, nil
|
||||
}
|
||||
// Iterator still in progress, wait for either a data or an error event
|
||||
select {
|
||||
case log := <-it.logs:
|
||||
it.Event = new(ChequebookOverdraft)
|
||||
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
|
||||
it.fail = err
|
||||
return false
|
||||
}
|
||||
it.Event.Raw = log
|
||||
return true
|
||||
|
||||
// Mortal is an auto generated Go binding around an Ethereum contract.
|
||||
type Mortal struct {
|
||||
MortalCaller // Read-only binding to the contract
|
||||
MortalTransactor // Write-only binding to the contract
|
||||
}
|
||||
|
||||
// MortalCaller is an auto generated read-only Go binding around an Ethereum contract.
|
||||
type MortalCaller struct {
|
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
}
|
||||
|
||||
// MortalTransactor is an auto generated write-only Go binding around an Ethereum contract.
|
||||
type MortalTransactor struct {
|
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
}
|
||||
|
||||
// MortalSession is an auto generated Go binding around an Ethereum contract,
|
||||
// with pre-set call and transact options.
|
||||
type MortalSession struct {
|
||||
Contract *Mortal // Generic contract binding to set the session for
|
||||
CallOpts bind.CallOpts // Call options to use throughout this session
|
||||
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
|
||||
}
|
||||
|
||||
// MortalCallerSession is an auto generated read-only Go binding around an Ethereum contract,
|
||||
// with pre-set call options.
|
||||
type MortalCallerSession struct {
|
||||
Contract *MortalCaller // Generic contract caller binding to set the session for
|
||||
CallOpts bind.CallOpts // Call options to use throughout this session
|
||||
}
|
||||
|
||||
// MortalTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
|
||||
// with pre-set transact options.
|
||||
type MortalTransactorSession struct {
|
||||
Contract *MortalTransactor // Generic contract transactor binding to set the session for
|
||||
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
|
||||
}
|
||||
|
||||
// MortalRaw is an auto generated low-level Go binding around an Ethereum contract.
|
||||
type MortalRaw struct {
|
||||
Contract *Mortal // Generic contract binding to access the raw methods on
|
||||
}
|
||||
|
||||
// MortalCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
|
||||
type MortalCallerRaw struct {
|
||||
Contract *MortalCaller // Generic read-only contract binding to access the raw methods on
|
||||
}
|
||||
|
||||
// MortalTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
|
||||
type MortalTransactorRaw struct {
|
||||
Contract *MortalTransactor // Generic write-only contract binding to access the raw methods on
|
||||
}
|
||||
|
||||
// NewMortal creates a new instance of Mortal, bound to a specific deployed contract.
|
||||
func NewMortal(address common.Address, backend bind.ContractBackend) (*Mortal, error) {
|
||||
contract, err := bindMortal(address, backend, backend)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
case err := <-it.sub.Err():
|
||||
it.done = true
|
||||
it.fail = err
|
||||
return it.Next()
|
||||
}
|
||||
return &Mortal{MortalCaller: MortalCaller{contract: contract}, MortalTransactor: MortalTransactor{contract: contract}}, nil
|
||||
}
|
||||
|
||||
// NewMortalCaller creates a new read-only instance of Mortal, bound to a specific deployed contract.
|
||||
func NewMortalCaller(address common.Address, caller bind.ContractCaller) (*MortalCaller, error) {
|
||||
contract, err := bindMortal(address, caller, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &MortalCaller{contract: contract}, nil
|
||||
// Error returns any retrieval or parsing error occurred during filtering.
|
||||
func (it *ChequebookOverdraftIterator) Error() error {
|
||||
return it.fail
|
||||
}
|
||||
|
||||
// NewMortalTransactor creates a new write-only instance of Mortal, bound to a specific deployed contract.
|
||||
func NewMortalTransactor(address common.Address, transactor bind.ContractTransactor) (*MortalTransactor, error) {
|
||||
contract, err := bindMortal(address, nil, transactor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &MortalTransactor{contract: contract}, nil
|
||||
// Close terminates the iteration process, releasing any pending underlying
|
||||
// resources.
|
||||
func (it *ChequebookOverdraftIterator) Close() error {
|
||||
it.sub.Unsubscribe()
|
||||
return nil
|
||||
}
|
||||
|
||||
// bindMortal binds a generic wrapper to an already deployed contract.
|
||||
func bindMortal(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor) (*bind.BoundContract, error) {
|
||||
parsed, err := abi.JSON(strings.NewReader(MortalABI))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bind.NewBoundContract(address, parsed, caller, transactor), nil
|
||||
// ChequebookOverdraft represents a Overdraft event raised by the Chequebook contract.
|
||||
type ChequebookOverdraft struct {
|
||||
Deadbeat common.Address
|
||||
Raw types.Log // Blockchain specific contextual infos
|
||||
}
|
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_Mortal *MortalRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {
|
||||
return _Mortal.Contract.MortalCaller.contract.Call(opts, result, method, params...)
|
||||
}
|
||||
|
||||
// Transfer initiates a plain transaction to move funds to the contract, calling
|
||||
// its default method if one is available.
|
||||
func (_Mortal *MortalRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
|
||||
return _Mortal.Contract.MortalTransactor.contract.Transfer(opts)
|
||||
}
|
||||
|
||||
// Transact invokes the (paid) contract method with params as input values.
|
||||
func (_Mortal *MortalRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
|
||||
return _Mortal.Contract.MortalTransactor.contract.Transact(opts, method, params...)
|
||||
}
|
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_Mortal *MortalCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {
|
||||
return _Mortal.Contract.contract.Call(opts, result, method, params...)
|
||||
}
|
||||
|
||||
// Transfer initiates a plain transaction to move funds to the contract, calling
|
||||
// its default method if one is available.
|
||||
func (_Mortal *MortalTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
|
||||
return _Mortal.Contract.contract.Transfer(opts)
|
||||
}
|
||||
|
||||
// Transact invokes the (paid) contract method with params as input values.
|
||||
func (_Mortal *MortalTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
|
||||
return _Mortal.Contract.contract.Transact(opts, method, params...)
|
||||
}
|
||||
|
||||
// Kill is a paid mutator transaction binding the contract method 0x41c0e1b5.
|
||||
// FilterOverdraft is a free log retrieval operation binding the contract event 0x2250e2993c15843b32621c89447cc589ee7a9f049c026986e545d3c2c0c6f978.
|
||||
//
|
||||
// Solidity: function kill() returns()
|
||||
func (_Mortal *MortalTransactor) Kill(opts *bind.TransactOpts) (*types.Transaction, error) {
|
||||
return _Mortal.contract.Transact(opts, "kill")
|
||||
// Solidity: event Overdraft(deadbeat address)
|
||||
func (_Chequebook *ChequebookFilterer) FilterOverdraft(opts *bind.FilterOpts) (*ChequebookOverdraftIterator, error) {
|
||||
|
||||
logs, sub, err := _Chequebook.contract.FilterLogs(opts, "Overdraft")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ChequebookOverdraftIterator{contract: _Chequebook.contract, event: "Overdraft", logs: logs, sub: sub}, nil
|
||||
}
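The regenerated binding now includes a ChequebookFilterer, and FilterOverdraft above returns an iterator over historical Overdraft logs. A hedged caller-side sketch, assuming the usual fmt and accounts/abi/bind imports and a *contract.Chequebook already bound via NewChequebook; the helper, its name and the zero start block are assumptions, not part of the generated file:

// printOverdrafts is illustrative only.
func printOverdrafts(book *contract.Chequebook) error {
	it, err := book.FilterOverdraft(&bind.FilterOpts{Start: 0})
	if err != nil {
		return err
	}
	defer it.Close()
	for it.Next() {
		fmt.Println("overdraft by", it.Event.Deadbeat.Hex())
	}
	return it.Error()
}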
|
||||
|
||||
// Kill is a paid mutator transaction binding the contract method 0x41c0e1b5.
|
||||
// WatchOverdraft is a free log subscription operation binding the contract event 0x2250e2993c15843b32621c89447cc589ee7a9f049c026986e545d3c2c0c6f978.
|
||||
//
|
||||
// Solidity: function kill() returns()
|
||||
func (_Mortal *MortalSession) Kill() (*types.Transaction, error) {
|
||||
return _Mortal.Contract.Kill(&_Mortal.TransactOpts)
|
||||
}
|
||||
// Solidity: event Overdraft(deadbeat address)
|
||||
func (_Chequebook *ChequebookFilterer) WatchOverdraft(opts *bind.WatchOpts, sink chan<- *ChequebookOverdraft) (event.Subscription, error) {
|
||||
|
||||
// Kill is a paid mutator transaction binding the contract method 0x41c0e1b5.
|
||||
//
|
||||
// Solidity: function kill() returns()
|
||||
func (_Mortal *MortalTransactorSession) Kill() (*types.Transaction, error) {
|
||||
return _Mortal.Contract.Kill(&_Mortal.TransactOpts)
|
||||
}
|
||||
|
||||
// OwnedABI is the input ABI used to generate the binding from.
|
||||
const OwnedABI = `[{"inputs":[],"type":"constructor"}]`
|
||||
|
||||
// OwnedBin is the compiled bytecode used for deploying new contracts.
|
||||
const OwnedBin = `0x606060405260008054600160a060020a0319163317905560068060226000396000f3606060405200`
|
||||
|
||||
// DeployOwned deploys a new Ethereum contract, binding an instance of Owned to it.
|
||||
func DeployOwned(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Owned, error) {
|
||||
parsed, err := abi.JSON(strings.NewReader(OwnedABI))
|
||||
if err != nil {
|
||||
return common.Address{}, nil, nil, err
|
||||
}
|
||||
address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(OwnedBin), backend)
|
||||
if err != nil {
|
||||
return common.Address{}, nil, nil, err
|
||||
}
|
||||
return address, tx, &Owned{OwnedCaller: OwnedCaller{contract: contract}, OwnedTransactor: OwnedTransactor{contract: contract}}, nil
|
||||
}
|
||||
|
||||
// Owned is an auto generated Go binding around an Ethereum contract.
|
||||
type Owned struct {
|
||||
OwnedCaller // Read-only binding to the contract
|
||||
OwnedTransactor // Write-only binding to the contract
|
||||
}
|
||||
|
||||
// OwnedCaller is an auto generated read-only Go binding around an Ethereum contract.
|
||||
type OwnedCaller struct {
|
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
}
|
||||
|
||||
// OwnedTransactor is an auto generated write-only Go binding around an Ethereum contract.
|
||||
type OwnedTransactor struct {
|
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
}
|
||||
|
||||
// OwnedSession is an auto generated Go binding around an Ethereum contract,
|
||||
// with pre-set call and transact options.
|
||||
type OwnedSession struct {
|
||||
Contract *Owned // Generic contract binding to set the session for
|
||||
CallOpts bind.CallOpts // Call options to use throughout this session
|
||||
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
|
||||
}
|
||||
|
||||
// OwnedCallerSession is an auto generated read-only Go binding around an Ethereum contract,
|
||||
// with pre-set call options.
|
||||
type OwnedCallerSession struct {
|
||||
Contract *OwnedCaller // Generic contract caller binding to set the session for
|
||||
CallOpts bind.CallOpts // Call options to use throughout this session
|
||||
}
|
||||
|
||||
// OwnedTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
|
||||
// with pre-set transact options.
|
||||
type OwnedTransactorSession struct {
|
||||
Contract *OwnedTransactor // Generic contract transactor binding to set the session for
|
||||
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
|
||||
}
|
||||
|
||||
// OwnedRaw is an auto generated low-level Go binding around an Ethereum contract.
|
||||
type OwnedRaw struct {
|
||||
Contract *Owned // Generic contract binding to access the raw methods on
|
||||
}
|
||||
|
||||
// OwnedCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
|
||||
type OwnedCallerRaw struct {
|
||||
Contract *OwnedCaller // Generic read-only contract binding to access the raw methods on
|
||||
}
|
||||
|
||||
// OwnedTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
|
||||
type OwnedTransactorRaw struct {
|
||||
Contract *OwnedTransactor // Generic write-only contract binding to access the raw methods on
|
||||
}
|
||||
|
||||
// NewOwned creates a new instance of Owned, bound to a specific deployed contract.
|
||||
func NewOwned(address common.Address, backend bind.ContractBackend) (*Owned, error) {
|
||||
contract, err := bindOwned(address, backend, backend)
|
||||
logs, sub, err := _Chequebook.contract.WatchLogs(opts, "Overdraft")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Owned{OwnedCaller: OwnedCaller{contract: contract}, OwnedTransactor: OwnedTransactor{contract: contract}}, nil
|
||||
}
|
||||
return event.NewSubscription(func(quit <-chan struct{}) error {
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case log := <-logs:
|
||||
// New log arrived, parse the event and forward to the user
|
||||
event := new(ChequebookOverdraft)
|
||||
if err := _Chequebook.contract.UnpackLog(event, "Overdraft", log); err != nil {
|
||||
return err
|
||||
}
|
||||
event.Raw = log
|
||||
|
||||
// NewOwnedCaller creates a new read-only instance of Owned, bound to a specific deployed contract.
|
||||
func NewOwnedCaller(address common.Address, caller bind.ContractCaller) (*OwnedCaller, error) {
|
||||
contract, err := bindOwned(address, caller, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &OwnedCaller{contract: contract}, nil
|
||||
}
|
||||
|
||||
// NewOwnedTransactor creates a new write-only instance of Owned, bound to a specific deployed contract.
|
||||
func NewOwnedTransactor(address common.Address, transactor bind.ContractTransactor) (*OwnedTransactor, error) {
|
||||
contract, err := bindOwned(address, nil, transactor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &OwnedTransactor{contract: contract}, nil
|
||||
}
|
||||
|
||||
// bindOwned binds a generic wrapper to an already deployed contract.
|
||||
func bindOwned(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor) (*bind.BoundContract, error) {
|
||||
parsed, err := abi.JSON(strings.NewReader(OwnedABI))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bind.NewBoundContract(address, parsed, caller, transactor), nil
|
||||
}
|
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_Owned *OwnedRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {
|
||||
return _Owned.Contract.OwnedCaller.contract.Call(opts, result, method, params...)
|
||||
}
|
||||
|
||||
// Transfer initiates a plain transaction to move funds to the contract, calling
|
||||
// its default method if one is available.
|
||||
func (_Owned *OwnedRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
|
||||
return _Owned.Contract.OwnedTransactor.contract.Transfer(opts)
|
||||
}
|
||||
|
||||
// Transact invokes the (paid) contract method with params as input values.
|
||||
func (_Owned *OwnedRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
|
||||
return _Owned.Contract.OwnedTransactor.contract.Transact(opts, method, params...)
|
||||
}
|
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_Owned *OwnedCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {
|
||||
return _Owned.Contract.contract.Call(opts, result, method, params...)
|
||||
}
|
||||
|
||||
// Transfer initiates a plain transaction to move funds to the contract, calling
|
||||
// its default method if one is available.
|
||||
func (_Owned *OwnedTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
|
||||
return _Owned.Contract.contract.Transfer(opts)
|
||||
}
|
||||
|
||||
// Transact invokes the (paid) contract method with params as input values.
|
||||
func (_Owned *OwnedTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
|
||||
return _Owned.Contract.contract.Transact(opts, method, params...)
|
||||
|
||||
}
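
For orientation, a rough usage sketch of these generated Owned bindings; the RPC endpoint and contract address below are placeholders, and any bind.ContractBackend (here an ethclient connection) can be passed in.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/contracts/chequebook/contract"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Any bind.ContractBackend works; a local node URL is assumed here.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder address of an already deployed contract using the owned base.
	addr := common.HexToAddress("0x1234567890123456789012345678901234567890")

	// NewOwned wires the same backend in as both caller and transactor.
	owned, err := contract.NewOwned(addr, client)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("bound Owned contract at %x: %T\n", addr, owned)
}

The same pattern applies to the other bindings generated into this package.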
|
||||
|
@ -1,6 +1,6 @@
|
||||
pragma solidity ^0.4.18;
|
||||
|
||||
import "https://github.com/ethereum/solidity/std/mortal.sol";
|
||||
import "./mortal.sol";
|
||||
|
||||
/// @title Chequebook for Ethereum micropayments
|
||||
/// @author Daniel A. Nagy <daniel@ethereum.org>
|
||||
@ -11,6 +11,9 @@ contract chequebook is mortal {
|
||||
/// @notice Overdraft event
|
||||
event Overdraft(address deadbeat);
|
||||
|
||||
// Allow sending ether to the chequebook.
|
||||
function() public payable { }
|
||||
|
||||
/// @notice Cash cheque
|
||||
///
|
||||
/// @param beneficiary beneficiary address
|
||||
@ -19,8 +22,7 @@ contract chequebook is mortal {
|
||||
/// @param sig_r signature parameter r
|
||||
/// @param sig_s signature parameter s
|
||||
/// The digital signature is calculated on the concatenated triplet of contract address, beneficiary address and cumulative amount
|
||||
function cash(address beneficiary, uint256 amount,
|
||||
uint8 sig_v, bytes32 sig_r, bytes32 sig_s) {
|
||||
function cash(address beneficiary, uint256 amount, uint8 sig_v, bytes32 sig_r, bytes32 sig_s) public {
|
||||
// Check if the cheque is old.
|
||||
// Only cheques that are more recent than the last cashed one are considered.
|
||||
require(amount > sent[beneficiary]);
|
||||
@ -31,7 +33,7 @@ contract chequebook is mortal {
|
||||
// and the cumulative amount on the last cashed cheque to beneficiary.
|
||||
uint256 diff = amount - sent[beneficiary];
|
||||
if (diff <= this.balance) {
|
||||
// update the cumulative amount before sending
|
||||
// update the cumulative amount before sending
|
||||
sent[beneficiary] = amount;
|
||||
beneficiary.transfer(diff);
|
||||
} else {
|
||||
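
To make the signature layout above concrete, here is a rough off-chain sketch of forming and signing a cheque digest. The exact byte packing must mirror the contract, so treat the ordering and padding here as an assumption rather than the package's canonical helper; the key and addresses are placeholders.

package main

import (
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Throwaway key; in practice this is the chequebook owner's key.
	key, err := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder addresses for the deployed chequebook and the payee.
	contractAddr := common.HexToAddress("0x1000000000000000000000000000000000000001")
	beneficiary := common.HexToAddress("0x2000000000000000000000000000000000000002")
	amount := big.NewInt(42) // cumulative amount owed so far

	// Digest over the concatenated triplet: contract address, beneficiary
	// address, cumulative amount left-padded to 32 bytes (assumed packing).
	digest := crypto.Keccak256(
		contractAddr.Bytes(),
		beneficiary.Bytes(),
		common.BigToHash(amount).Bytes(),
	)
	sig, err := crypto.Sign(digest, key)
	if err != nil {
		log.Fatal(err)
	}
	// Split into the (v, r, s) form cash() expects; v is offset by 27 on-chain.
	fmt.Printf("r=%x s=%x v=%d\n", sig[:32], sig[32:64], sig[64]+27)
}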
|
@ -2,4 +2,4 @@ package contract
|
||||
|
||||
// ContractDeployedCode is used to detect suicides. This constant needs to be
|
||||
// updated when the contract code is changed.
|
||||
const ContractDeployedCode = "0x606060405260e060020a600035046341c0e1b581146100315780637bf786f814610059578063fbf788d614610071575b005b61002f60005433600160a060020a03908116911614156100bd57600054600160a060020a0316ff5b6100ab60043560016020526000908152604090205481565b61002f600435602435604435606435608435600160a060020a03851660009081526001602052604081205485116100bf575b505050505050565b60408051918252519081900360200190f35b565b50604080516c0100000000000000000000000030600160a060020a0390811682028352881602601482015260288101869052815190819003604801812080825260ff861660208381019190915282840186905260608301859052925190926001926080818101939182900301816000866161da5a03f11561000257505060405151600054600160a060020a0390811691161461015a576100a3565b600160a060020a038681166000908152600160205260409020543090911631908603106101b357604060008181208790559051600160a060020a0388169190819081818181818881f1935050505015156100a357610002565b60005460408051600160a060020a03929092168252517f2250e2993c15843b32621c89447cc589ee7a9f049c026986e545d3c2c0c6f9789181900360200190a185600160a060020a0316ff"
|
||||
const ContractDeployedCode = "0x6060604052600436106100565763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166341c0e1b581146100585780637bf786f81461006b578063fbf788d61461009c575b005b341561006357600080fd5b6100566100ca565b341561007657600080fd5b61008a600160a060020a03600435166100f1565b60405190815260200160405180910390f35b34156100a757600080fd5b610056600160a060020a036004351660243560ff60443516606435608435610103565b60005433600160a060020a03908116911614156100ef57600054600160a060020a0316ff5b565b60016020526000908152604090205481565b600160a060020a0385166000908152600160205260408120548190861161012957600080fd5b3087876040516c01000000000000000000000000600160a060020a03948516810282529290931690910260148301526028820152604801604051809103902091506001828686866040516000815260200160405260006040516020015260405193845260ff90921660208085019190915260408085019290925260608401929092526080909201915160208103908084039060008661646e5a03f115156101cf57600080fd5b505060206040510351600054600160a060020a039081169116146101f257600080fd5b50600160a060020a03808716600090815260016020526040902054860390301631811161026257600160a060020a0387166000818152600160205260409081902088905582156108fc0290839051600060405180830381858888f19350505050151561025d57600080fd5b6102b7565b6000547f2250e2993c15843b32621c89447cc589ee7a9f049c026986e545d3c2c0c6f97890600160a060020a0316604051600160a060020a03909116815260200160405180910390a186600160a060020a0316ff5b505050505050505600a165627a7a72305820533e856fc37e3d64d1706bcc7dfb6b1d490c8d566ea498d9d01ec08965a896ca0029"
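
As a hedged illustration of how such a constant can be used, the sketch below compares the code currently stored at a placeholder chequebook address against ContractDeployedCode; an empty result or mismatch suggests an outdated binary or a self-destructed contract. Endpoint and address are assumptions.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/contracts/chequebook/contract"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Endpoint and chequebook address are placeholders for illustration.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	addr := common.HexToAddress("0x1234567890123456789012345678901234567890")

	// Fetch the code at the latest block and compare against the constant.
	code, err := client.CodeAt(context.Background(), addr, nil)
	if err != nil {
		log.Fatal(err)
	}
	if hexutil.Encode(code) != contract.ContractDeployedCode {
		fmt.Println("chequebook code mismatch: outdated binary or self-destructed contract")
	} else {
		fmt.Println("deployed chequebook code matches the expected binary")
	}
}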
|
||||
|
10
contracts/chequebook/contract/mortal.sol
Normal file
10
contracts/chequebook/contract/mortal.sol
Normal file
@ -0,0 +1,10 @@
|
||||
pragma solidity ^0.4.0;
|
||||
|
||||
import "./owned.sol";
|
||||
|
||||
contract mortal is owned {
|
||||
function kill() public {
|
||||
if (msg.sender == owner)
|
||||
selfdestruct(owner);
|
||||
}
|
||||
}
|
15
contracts/chequebook/contract/owned.sol
Normal file
15
contracts/chequebook/contract/owned.sol
Normal file
@ -0,0 +1,15 @@
|
||||
pragma solidity ^0.4.0;
|
||||
|
||||
contract owned {
|
||||
address owner;
|
||||
|
||||
modifier onlyowner() {
|
||||
if (msg.sender == owner) {
|
||||
_;
|
||||
}
|
||||
}
|
||||
|
||||
function owned() public {
|
||||
owner = msg.sender;
|
||||
}
|
||||
}
|
@ -33,15 +33,14 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
testAccount = core.GenesisAccount{
|
||||
Address: crypto.PubkeyToAddress(testKey.PublicKey),
|
||||
Balance: big.NewInt(500000000000),
|
||||
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
testAlloc = core.GenesisAlloc{
|
||||
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(500000000000)},
|
||||
}
|
||||
)
|
||||
|
||||
func main() {
|
||||
backend := backends.NewSimulatedBackend(testAccount)
|
||||
backend := backends.NewSimulatedBackend(testAlloc)
|
||||
auth := bind.NewKeyedTransactor(testKey)
|
||||
|
||||
// Deploy the contract, get the code.
|
||||
|
23
contracts/ens/contract/AbstractENS.sol
Normal file
23
contracts/ens/contract/AbstractENS.sol
Normal file
@ -0,0 +1,23 @@
|
||||
pragma solidity ^0.4.0;
|
||||
|
||||
contract AbstractENS {
|
||||
function owner(bytes32 node) constant returns(address);
|
||||
function resolver(bytes32 node) constant returns(address);
|
||||
function ttl(bytes32 node) constant returns(uint64);
|
||||
function setOwner(bytes32 node, address owner);
|
||||
function setSubnodeOwner(bytes32 node, bytes32 label, address owner);
|
||||
function setResolver(bytes32 node, address resolver);
|
||||
function setTTL(bytes32 node, uint64 ttl);
|
||||
|
||||
// Logged when the owner of a node assigns a new owner to a subnode.
|
||||
event NewOwner(bytes32 indexed node, bytes32 indexed label, address owner);
|
||||
|
||||
// Logged when the owner of a node transfers ownership to a new account.
|
||||
event Transfer(bytes32 indexed node, address owner);
|
||||
|
||||
// Logged when the resolver for a node changes.
|
||||
event NewResolver(bytes32 indexed node, address resolver);
|
||||
|
||||
// Logged when the TTL of a node changes
|
||||
event NewTTL(bytes32 indexed node, uint64 ttl);
|
||||
}
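
A Go binding for this interface is generated from ENS.sol (AbstractENS itself is excluded via --exc in the go:generate directives later in this change). Reading a record only needs the caller side; a rough sketch, with a placeholder endpoint and registry address:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/contracts/ens/contract"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Read-only access only needs a bind.ContractCaller such as an ethclient.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder registry address; substitute the deployment you care about.
	registry, err := contract.NewENSCaller(common.HexToAddress("0x1234567890123456789012345678901234567890"), client)
	if err != nil {
		log.Fatal(err)
	}
	// Query the owner of the root node (all zero bytes); nil uses default call options.
	owner, err := registry.Owner(nil, [32]byte{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("root node owner: %x\n", owner)
}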
|
94
contracts/ens/contract/ENS.sol
Normal file
94
contracts/ens/contract/ENS.sol
Normal file
@ -0,0 +1,94 @@
|
||||
pragma solidity ^0.4.0;
|
||||
|
||||
import './AbstractENS.sol';
|
||||
|
||||
/**
|
||||
* The ENS registry contract.
|
||||
*/
|
||||
contract ENS is AbstractENS {
|
||||
struct Record {
|
||||
address owner;
|
||||
address resolver;
|
||||
uint64 ttl;
|
||||
}
|
||||
|
||||
mapping(bytes32=>Record) records;
|
||||
|
||||
// Permits modifications only by the owner of the specified node.
|
||||
modifier only_owner(bytes32 node) {
|
||||
if (records[node].owner != msg.sender) throw;
|
||||
_;
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a new ENS registrar.
|
||||
*/
|
||||
function ENS() {
|
||||
records[0].owner = msg.sender;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the address that owns the specified node.
|
||||
*/
|
||||
function owner(bytes32 node) constant returns (address) {
|
||||
return records[node].owner;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the address of the resolver for the specified node.
|
||||
*/
|
||||
function resolver(bytes32 node) constant returns (address) {
|
||||
return records[node].resolver;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the TTL of a node, and any records associated with it.
|
||||
*/
|
||||
function ttl(bytes32 node) constant returns (uint64) {
|
||||
return records[node].ttl;
|
||||
}
|
||||
|
||||
/**
|
||||
* Transfers ownership of a node to a new address. May only be called by the current
|
||||
* owner of the node.
|
||||
* @param node The node to transfer ownership of.
|
||||
* @param owner The address of the new owner.
|
||||
*/
|
||||
function setOwner(bytes32 node, address owner) only_owner(node) {
|
||||
Transfer(node, owner);
|
||||
records[node].owner = owner;
|
||||
}
|
||||
|
||||
/**
|
||||
* Transfers ownership of a subnode sha3(node, label) to a new address. May only be
|
||||
* called by the owner of the parent node.
|
||||
* @param node The parent node.
|
||||
* @param label The hash of the label specifying the subnode.
|
||||
* @param owner The address of the new owner.
|
||||
*/
|
||||
function setSubnodeOwner(bytes32 node, bytes32 label, address owner) only_owner(node) {
|
||||
var subnode = sha3(node, label);
|
||||
NewOwner(node, label, owner);
|
||||
records[subnode].owner = owner;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the resolver address for the specified node.
|
||||
* @param node The node to update.
|
||||
* @param resolver The address of the resolver.
|
||||
*/
|
||||
function setResolver(bytes32 node, address resolver) only_owner(node) {
|
||||
NewResolver(node, resolver);
|
||||
records[node].resolver = resolver;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the TTL for the specified node.
|
||||
* @param node The node to update.
|
||||
* @param ttl The TTL in seconds.
|
||||
*/
|
||||
function setTTL(bytes32 node, uint64 ttl) only_owner(node) {
|
||||
NewTTL(node, ttl);
|
||||
records[node].ttl = ttl;
|
||||
}
|
||||
}
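
Since the registry keys every subnode as sha3(node, label), the full name-to-node mapping is the recursive namehash sketched below. This is an illustrative re-implementation in Go, not a helper exported by this package.

package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// nameHash mirrors the subnode derivation used by the registry: every label
// extends its parent as keccak256(parentNode, keccak256(label)), starting
// from the all-zero root node.
func nameHash(name string) common.Hash {
	node := common.Hash{}
	if name == "" {
		return node
	}
	labels := strings.Split(name, ".")
	for i := len(labels) - 1; i >= 0; i-- {
		labelHash := crypto.Keccak256([]byte(labels[i]))
		node = common.BytesToHash(crypto.Keccak256(node.Bytes(), labelHash))
	}
	return node
}

func main() {
	fmt.Printf("namehash(\"swarm.eth\") = %x\n", nameHash("swarm.eth"))
}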
|
39
contracts/ens/contract/FIFSRegistrar.sol
Normal file
39
contracts/ens/contract/FIFSRegistrar.sol
Normal file
@ -0,0 +1,39 @@
|
||||
pragma solidity ^0.4.0;
|
||||
|
||||
import './AbstractENS.sol';
|
||||
|
||||
/**
|
||||
* A registrar that allocates subdomains to the first person to claim them.
|
||||
*/
|
||||
contract FIFSRegistrar {
|
||||
AbstractENS ens;
|
||||
bytes32 rootNode;
|
||||
|
||||
modifier only_owner(bytes32 subnode) {
|
||||
var node = sha3(rootNode, subnode);
|
||||
var currentOwner = ens.owner(node);
|
||||
|
||||
if (currentOwner != 0 && currentOwner != msg.sender) throw;
|
||||
|
||||
_;
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param ensAddr The address of the ENS registry.
|
||||
* @param node The node that this registrar administers.
|
||||
*/
|
||||
function FIFSRegistrar(AbstractENS ensAddr, bytes32 node) {
|
||||
ens = ensAddr;
|
||||
rootNode = node;
|
||||
}
|
||||
|
||||
/**
|
||||
* Register a name, or change the owner of an existing registration.
|
||||
* @param subnode The hash of the label to register.
|
||||
* @param owner The address of the new owner.
|
||||
*/
|
||||
function register(bytes32 subnode, address owner) only_owner(subnode) {
|
||||
ens.setSubnodeOwner(rootNode, subnode, owner);
|
||||
}
|
||||
}
|
212
contracts/ens/contract/PublicResolver.sol
Normal file
212
contracts/ens/contract/PublicResolver.sol
Normal file
@ -0,0 +1,212 @@
|
||||
pragma solidity ^0.4.0;
|
||||
|
||||
import './AbstractENS.sol';
|
||||
|
||||
/**
|
||||
* A simple resolver anyone can use; only allows the owner of a node to set its
|
||||
* address.
|
||||
*/
|
||||
contract PublicResolver {
|
||||
bytes4 constant INTERFACE_META_ID = 0x01ffc9a7;
|
||||
bytes4 constant ADDR_INTERFACE_ID = 0x3b3b57de;
|
||||
bytes4 constant CONTENT_INTERFACE_ID = 0xd8389dc5;
|
||||
bytes4 constant NAME_INTERFACE_ID = 0x691f3431;
|
||||
bytes4 constant ABI_INTERFACE_ID = 0x2203ab56;
|
||||
bytes4 constant PUBKEY_INTERFACE_ID = 0xc8690233;
|
||||
bytes4 constant TEXT_INTERFACE_ID = 0x59d1d43c;
|
||||
|
||||
event AddrChanged(bytes32 indexed node, address a);
|
||||
event ContentChanged(bytes32 indexed node, bytes32 hash);
|
||||
event NameChanged(bytes32 indexed node, string name);
|
||||
event ABIChanged(bytes32 indexed node, uint256 indexed contentType);
|
||||
event PubkeyChanged(bytes32 indexed node, bytes32 x, bytes32 y);
|
||||
event TextChanged(bytes32 indexed node, string indexed indexedKey, string key);
|
||||
|
||||
struct PublicKey {
|
||||
bytes32 x;
|
||||
bytes32 y;
|
||||
}
|
||||
|
||||
struct Record {
|
||||
address addr;
|
||||
bytes32 content;
|
||||
string name;
|
||||
PublicKey pubkey;
|
||||
mapping(string=>string) text;
|
||||
mapping(uint256=>bytes) abis;
|
||||
}
|
||||
|
||||
AbstractENS ens;
|
||||
mapping(bytes32=>Record) records;
|
||||
|
||||
modifier only_owner(bytes32 node) {
|
||||
if (ens.owner(node) != msg.sender) throw;
|
||||
_;
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param ensAddr The ENS registrar contract.
|
||||
*/
|
||||
function PublicResolver(AbstractENS ensAddr) {
|
||||
ens = ensAddr;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the resolver implements the interface specified by the provided hash.
|
||||
* @param interfaceID The ID of the interface to check for.
|
||||
* @return True if the contract implements the requested interface.
|
||||
*/
|
||||
function supportsInterface(bytes4 interfaceID) constant returns (bool) {
|
||||
return interfaceID == ADDR_INTERFACE_ID ||
|
||||
interfaceID == CONTENT_INTERFACE_ID ||
|
||||
interfaceID == NAME_INTERFACE_ID ||
|
||||
interfaceID == ABI_INTERFACE_ID ||
|
||||
interfaceID == PUBKEY_INTERFACE_ID ||
|
||||
interfaceID == TEXT_INTERFACE_ID ||
|
||||
interfaceID == INTERFACE_META_ID;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the address associated with an ENS node.
|
||||
* @param node The ENS node to query.
|
||||
* @return The associated address.
|
||||
*/
|
||||
function addr(bytes32 node) constant returns (address ret) {
|
||||
ret = records[node].addr;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the address associated with an ENS node.
|
||||
* May only be called by the owner of that node in the ENS registry.
|
||||
* @param node The node to update.
|
||||
* @param addr The address to set.
|
||||
*/
|
||||
function setAddr(bytes32 node, address addr) only_owner(node) {
|
||||
records[node].addr = addr;
|
||||
AddrChanged(node, addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the content hash associated with an ENS node.
|
||||
* Note that this resource type is not standardized, and will likely change
|
||||
* in future to a resource type based on multihash.
|
||||
* @param node The ENS node to query.
|
||||
* @return The associated content hash.
|
||||
*/
|
||||
function content(bytes32 node) constant returns (bytes32 ret) {
|
||||
ret = records[node].content;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the content hash associated with an ENS node.
|
||||
* May only be called by the owner of that node in the ENS registry.
|
||||
* Note that this resource type is not standardized, and will likely change
|
||||
* in future to a resource type based on multihash.
|
||||
* @param node The node to update.
|
||||
* @param hash The content hash to set
|
||||
*/
|
||||
function setContent(bytes32 node, bytes32 hash) only_owner(node) {
|
||||
records[node].content = hash;
|
||||
ContentChanged(node, hash);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the name associated with an ENS node, for reverse records.
|
||||
* Defined in EIP181.
|
||||
* @param node The ENS node to query.
|
||||
* @return The associated name.
|
||||
*/
|
||||
function name(bytes32 node) constant returns (string ret) {
|
||||
ret = records[node].name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the name associated with an ENS node, for reverse records.
|
||||
* May only be called by the owner of that node in the ENS registry.
|
||||
* @param node The node to update.
|
||||
* @param name The name to set.
|
||||
*/
|
||||
function setName(bytes32 node, string name) only_owner(node) {
|
||||
records[node].name = name;
|
||||
NameChanged(node, name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the ABI associated with an ENS node.
|
||||
* Defined in EIP205.
|
||||
* @param node The ENS node to query
|
||||
* @param contentTypes A bitwise OR of the ABI formats accepted by the caller.
|
||||
* @return contentType The content type of the return value
|
||||
* @return data The ABI data
|
||||
*/
|
||||
function ABI(bytes32 node, uint256 contentTypes) constant returns (uint256 contentType, bytes data) {
|
||||
var record = records[node];
|
||||
for(contentType = 1; contentType <= contentTypes; contentType <<= 1) {
|
||||
if ((contentType & contentTypes) != 0 && record.abis[contentType].length > 0) {
|
||||
data = record.abis[contentType];
|
||||
return;
|
||||
}
|
||||
}
|
||||
contentType = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the ABI associated with an ENS node.
|
||||
* Nodes may have one ABI of each content type. To remove an ABI, set it to
|
||||
* the empty string.
|
||||
* @param node The node to update.
|
||||
* @param contentType The content type of the ABI
|
||||
* @param data The ABI data.
|
||||
*/
|
||||
function setABI(bytes32 node, uint256 contentType, bytes data) only_owner(node) {
|
||||
// Content types must be powers of 2
|
||||
if (((contentType - 1) & contentType) != 0) throw;
|
||||
|
||||
records[node].abis[contentType] = data;
|
||||
ABIChanged(node, contentType);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the SECP256k1 public key associated with an ENS node.
|
||||
* Defined in EIP 619.
|
||||
* @param node The ENS node to query
|
||||
* @return x, y the X and Y coordinates of the curve point for the public key.
|
||||
*/
|
||||
function pubkey(bytes32 node) constant returns (bytes32 x, bytes32 y) {
|
||||
return (records[node].pubkey.x, records[node].pubkey.y);
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the SECP256k1 public key associated with an ENS node.
|
||||
* @param node The ENS node to query
|
||||
* @param x the X coordinate of the curve point for the public key.
|
||||
* @param y the Y coordinate of the curve point for the public key.
|
||||
*/
|
||||
function setPubkey(bytes32 node, bytes32 x, bytes32 y) only_owner(node) {
|
||||
records[node].pubkey = PublicKey(x, y);
|
||||
PubkeyChanged(node, x, y);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the text data associated with an ENS node and key.
|
||||
* @param node The ENS node to query.
|
||||
* @param key The text data key to query.
|
||||
* @return The associated text data.
|
||||
*/
|
||||
function text(bytes32 node, string key) constant returns (string ret) {
|
||||
ret = records[node].text[key];
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the text data associated with an ENS node and key.
|
||||
* May only be called by the owner of that node in the ENS registry.
|
||||
* @param node The node to update.
|
||||
* @param key The key to set.
|
||||
* @param value The text data value to set.
|
||||
*/
|
||||
function setText(bytes32 node, string key, string value) only_owner(node) {
|
||||
records[node].text[key] = value;
|
||||
TextChanged(node, key, key);
|
||||
}
|
||||
}
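
The ABI record lookup above walks content types as a power-of-two bitmask and returns the first requested type that is stored. The short Go sketch below reproduces that selection rule against a hypothetical in-memory record, purely to illustrate the loop.

package main

import "fmt"

// pickABI mirrors the resolver's selection rule: walk content types in
// ascending power-of-two order and return the first one that is both
// requested in the caller's bitmask and present on the record.
func pickABI(requested uint64, stored map[uint64][]byte) (uint64, []byte) {
	for contentType := uint64(1); contentType <= requested; contentType <<= 1 {
		if requested&contentType != 0 && len(stored[contentType]) > 0 {
			return contentType, stored[contentType]
		}
	}
	return 0, nil // nothing matched, as in the contract's fallthrough
}

func main() {
	stored := map[uint64][]byte{4: []byte("example ABI payload")} // hypothetical record
	ct, data := pickABI(1|2|4, stored)
	fmt.Println(ct, string(data)) // 4 "example ABI payload"
}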
|
File diff suppressed because it is too large
@ -1,226 +0,0 @@
|
||||
// Ethereum Name Service contracts by Nick Johnson <nick@ethereum.org>
|
||||
//
|
||||
// To the extent possible under law, the person who associated CC0 with
|
||||
// ENS contracts has waived all copyright and related or neighboring rights
|
||||
// to ENS.
|
||||
//
|
||||
// You should have received a copy of the CC0 legalcode along with this
|
||||
// work. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
|
||||
|
||||
/**
|
||||
* The ENS registry contract.
|
||||
*/
|
||||
contract ENS {
|
||||
struct Record {
|
||||
address owner;
|
||||
address resolver;
|
||||
}
|
||||
|
||||
mapping(bytes32=>Record) records;
|
||||
|
||||
// Logged when the owner of a node assigns a new owner to a subnode.
|
||||
event NewOwner(bytes32 indexed node, bytes32 indexed label, address owner);
|
||||
|
||||
// Logged when the owner of a node transfers ownership to a new account.
|
||||
event Transfer(bytes32 indexed node, address owner);
|
||||
|
||||
// Logged when the owner of a node changes the resolver for that node.
|
||||
event NewResolver(bytes32 indexed node, address resolver);
|
||||
|
||||
// Permits modifications only by the owner of the specified node.
|
||||
modifier only_owner(bytes32 node) {
|
||||
if(records[node].owner != msg.sender) throw;
|
||||
_
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a new ENS registrar, with the provided address as the owner of the root node.
|
||||
*/
|
||||
function ENS(address owner) {
|
||||
records[0].owner = owner;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the address that owns the specified node.
|
||||
*/
|
||||
function owner(bytes32 node) constant returns (address) {
|
||||
return records[node].owner;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the address of the resolver for the specified node.
|
||||
*/
|
||||
function resolver(bytes32 node) constant returns (address) {
|
||||
return records[node].resolver;
|
||||
}
|
||||
|
||||
/**
|
||||
* Transfers ownership of a node to a new address. May only be called by the current
|
||||
* owner of the node.
|
||||
* @param node The node to transfer ownership of.
|
||||
* @param owner The address of the new owner.
|
||||
*/
|
||||
function setOwner(bytes32 node, address owner) only_owner(node) {
|
||||
Transfer(node, owner);
|
||||
records[node].owner = owner;
|
||||
}
|
||||
|
||||
/**
|
||||
* Transfers ownership of a subnode sha3(node, label) to a new address. May only be
|
||||
* called by the owner of the parent node.
|
||||
* @param node The parent node.
|
||||
* @param label The hash of the label specifying the subnode.
|
||||
* @param owner The address of the new owner.
|
||||
*/
|
||||
function setSubnodeOwner(bytes32 node, bytes32 label, address owner) only_owner(node) {
|
||||
var subnode = sha3(node, label);
|
||||
NewOwner(node, label, owner);
|
||||
records[subnode].owner = owner;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the resolver address for the specified node.
|
||||
* @param node The node to update.
|
||||
* @param resolver The address of the resolver.
|
||||
*/
|
||||
function setResolver(bytes32 node, address resolver) only_owner(node) {
|
||||
NewResolver(node, resolver);
|
||||
records[node].resolver = resolver;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A registrar that allocates subdomains to the first person to claim them. It also deploys
|
||||
* a simple resolver contract and sets that as the default resolver on new names for
|
||||
* convenience.
|
||||
*/
|
||||
contract FIFSRegistrar {
|
||||
ENS ens;
|
||||
PublicResolver defaultResolver;
|
||||
bytes32 rootNode;
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param ensAddr The address of the ENS registry.
|
||||
* @param node The node that this registrar administers.
|
||||
*/
|
||||
function FIFSRegistrar(address ensAddr, bytes32 node) {
|
||||
ens = ENS(ensAddr);
|
||||
defaultResolver = new PublicResolver(ensAddr);
|
||||
rootNode = node;
|
||||
}
|
||||
|
||||
/**
|
||||
* Register a name, or change the owner of an existing registration.
|
||||
* @param subnode The hash of the label to register.
|
||||
* @param owner The address of the new owner.
|
||||
*/
|
||||
function register(bytes32 subnode, address owner) {
|
||||
var node = sha3(rootNode, subnode);
|
||||
var currentOwner = ens.owner(node);
|
||||
if(currentOwner != 0 && currentOwner != msg.sender)
|
||||
throw;
|
||||
|
||||
// Temporarily set ourselves as the owner
|
||||
ens.setSubnodeOwner(rootNode, subnode, this);
|
||||
// Set up the default resolver
|
||||
ens.setResolver(node, defaultResolver);
|
||||
// Set the owner to the real owner
|
||||
ens.setOwner(node, owner);
|
||||
}
|
||||
}
|
||||
|
||||
contract Resolver {
|
||||
event AddrChanged(bytes32 indexed node, address a);
|
||||
event ContentChanged(bytes32 indexed node, bytes32 hash);
|
||||
|
||||
function has(bytes32 node, bytes32 kind) returns (bool);
|
||||
function addr(bytes32 node) constant returns (address ret);
|
||||
function content(bytes32 node) constant returns (bytes32 ret);
|
||||
}
|
||||
|
||||
/**
|
||||
* A simple resolver anyone can use; only allows the owner of a node to set its
|
||||
* address.
|
||||
*/
|
||||
contract PublicResolver is Resolver {
|
||||
ENS ens;
|
||||
mapping(bytes32=>address) addresses;
|
||||
mapping(bytes32=>bytes32) contents;
|
||||
|
||||
modifier only_owner(bytes32 node) {
|
||||
if(ens.owner(node) != msg.sender) throw;
|
||||
_
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param ensAddr The ENS registrar contract.
|
||||
*/
|
||||
function PublicResolver(address ensAddr) {
|
||||
ens = ENS(ensAddr);
|
||||
}
|
||||
|
||||
/**
|
||||
* Fallback function.
|
||||
*/
|
||||
function() {
|
||||
throw;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the specified node has the specified record type.
|
||||
* @param node The ENS node to query.
|
||||
* @param kind The record type name, as specified in EIP137.
|
||||
* @return True if this resolver has a record of the provided type on the
|
||||
* provided node.
|
||||
*/
|
||||
function has(bytes32 node, bytes32 kind) returns (bool) {
|
||||
return (kind == "addr" && addresses[node] != 0) ||
|
||||
(kind == "content" && contents[node] != 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the address associated with an ENS node.
|
||||
* @param node The ENS node to query.
|
||||
* @return The associated address.
|
||||
*/
|
||||
function addr(bytes32 node) constant returns (address ret) {
|
||||
ret = addresses[node];
|
||||
if(ret == 0)
|
||||
throw;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the content hash associated with an ENS node.
|
||||
* @param node The ENS node to query.
|
||||
* @return The associated content hash.
|
||||
*/
|
||||
function content(bytes32 node) constant returns (bytes32 ret) {
|
||||
ret = contents[node];
|
||||
if(ret == 0)
|
||||
throw;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the address associated with an ENS node.
|
||||
* May only be called by the owner of that node in the ENS registry.
|
||||
* @param node The node to update.
|
||||
* @param addr The address to set.
|
||||
*/
|
||||
function setAddr(bytes32 node, address addr) only_owner(node) {
|
||||
addresses[node] = addr;
|
||||
AddrChanged(node, addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the content hash associated with an ENS node.
|
||||
* May only be called by the owner of that node in the ENS registry.
|
||||
* @param node The node to update.
|
||||
* @param hash The content hash to set.
|
||||
*/
|
||||
function setContent(bytes32 node, bytes32 hash) only_owner(node) {
|
||||
contents[node] = hash;
|
||||
ContentChanged(node, hash);
|
||||
}
|
||||
}
|
195
contracts/ens/contract/fifsregistrar.go
Normal file
195
contracts/ens/contract/fifsregistrar.go
Normal file
@ -0,0 +1,195 @@
|
||||
// Code generated - DO NOT EDIT.
|
||||
// This file is a generated binding and any manual changes will be lost.
|
||||
|
||||
package contract
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// FIFSRegistrarABI is the input ABI used to generate the binding from.
|
||||
const FIFSRegistrarABI = "[{\"constant\":false,\"inputs\":[{\"name\":\"subnode\",\"type\":\"bytes32\"},{\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"register\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"name\":\"ensAddr\",\"type\":\"address\"},{\"name\":\"node\",\"type\":\"bytes32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"}]"
|
||||
|
||||
// FIFSRegistrarBin is the compiled bytecode used for deploying new contracts.
|
||||
const FIFSRegistrarBin = `0x6060604052341561000f57600080fd5b604051604080610224833981016040528080519190602001805160008054600160a060020a03909516600160a060020a03199095169490941790935550506001556101c58061005f6000396000f3006060604052600436106100275763ffffffff60e060020a600035041663d22057a9811461002c575b600080fd5b341561003757600080fd5b61004e600435600160a060020a0360243516610050565b005b816000806001548360405191825260208201526040908101905190819003902060008054919350600160a060020a03909116906302571be39084906040516020015260405160e060020a63ffffffff84160281526004810191909152602401602060405180830381600087803b15156100c857600080fd5b6102c65a03f115156100d957600080fd5b5050506040518051915050600160a060020a0381161580159061010e575033600160a060020a031681600160a060020a031614155b1561011857600080fd5b600054600154600160a060020a03909116906306ab592390878760405160e060020a63ffffffff861602815260048101939093526024830191909152600160a060020a03166044820152606401600060405180830381600087803b151561017e57600080fd5b6102c65a03f1151561018f57600080fd5b50505050505050505600a165627a7a723058206fb963cb168d5e3a51af12cd6bb23e324dbd32dd4954f43653ba27e66b68ea650029`
|
||||
|
||||
// DeployFIFSRegistrar deploys a new Ethereum contract, binding an instance of FIFSRegistrar to it.
|
||||
func DeployFIFSRegistrar(auth *bind.TransactOpts, backend bind.ContractBackend, ensAddr common.Address, node [32]byte) (common.Address, *types.Transaction, *FIFSRegistrar, error) {
|
||||
parsed, err := abi.JSON(strings.NewReader(FIFSRegistrarABI))
|
||||
if err != nil {
|
||||
return common.Address{}, nil, nil, err
|
||||
}
|
||||
address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(FIFSRegistrarBin), backend, ensAddr, node)
|
||||
if err != nil {
|
||||
return common.Address{}, nil, nil, err
|
||||
}
|
||||
return address, tx, &FIFSRegistrar{FIFSRegistrarCaller: FIFSRegistrarCaller{contract: contract}, FIFSRegistrarTransactor: FIFSRegistrarTransactor{contract: contract}, FIFSRegistrarFilterer: FIFSRegistrarFilterer{contract: contract}}, nil
|
||||
}
|
||||
|
||||
// FIFSRegistrar is an auto generated Go binding around an Ethereum contract.
|
||||
type FIFSRegistrar struct {
|
||||
FIFSRegistrarCaller // Read-only binding to the contract
|
||||
FIFSRegistrarTransactor // Write-only binding to the contract
|
||||
FIFSRegistrarFilterer // Log filterer for contract events
|
||||
}
|
||||
|
||||
// FIFSRegistrarCaller is an auto generated read-only Go binding around an Ethereum contract.
|
||||
type FIFSRegistrarCaller struct {
|
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
}
|
||||
|
||||
// FIFSRegistrarTransactor is an auto generated write-only Go binding around an Ethereum contract.
|
||||
type FIFSRegistrarTransactor struct {
|
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
}
|
||||
|
||||
// FIFSRegistrarFilterer is an auto generated log filtering Go binding around Ethereum contract events.
|
||||
type FIFSRegistrarFilterer struct {
|
||||
contract *bind.BoundContract // Generic contract wrapper for the low level calls
|
||||
}
|
||||
|
||||
// FIFSRegistrarSession is an auto generated Go binding around an Ethereum contract,
|
||||
// with pre-set call and transact options.
|
||||
type FIFSRegistrarSession struct {
|
||||
Contract *FIFSRegistrar // Generic contract binding to set the session for
|
||||
CallOpts bind.CallOpts // Call options to use throughout this session
|
||||
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
|
||||
}
|
||||
|
||||
// FIFSRegistrarCallerSession is an auto generated read-only Go binding around an Ethereum contract,
|
||||
// with pre-set call options.
|
||||
type FIFSRegistrarCallerSession struct {
|
||||
Contract *FIFSRegistrarCaller // Generic contract caller binding to set the session for
|
||||
CallOpts bind.CallOpts // Call options to use throughout this session
|
||||
}
|
||||
|
||||
// FIFSRegistrarTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
|
||||
// with pre-set transact options.
|
||||
type FIFSRegistrarTransactorSession struct {
|
||||
Contract *FIFSRegistrarTransactor // Generic contract transactor binding to set the session for
|
||||
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
|
||||
}
|
||||
|
||||
// FIFSRegistrarRaw is an auto generated low-level Go binding around an Ethereum contract.
|
||||
type FIFSRegistrarRaw struct {
|
||||
Contract *FIFSRegistrar // Generic contract binding to access the raw methods on
|
||||
}
|
||||
|
||||
// FIFSRegistrarCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
|
||||
type FIFSRegistrarCallerRaw struct {
|
||||
Contract *FIFSRegistrarCaller // Generic read-only contract binding to access the raw methods on
|
||||
}
|
||||
|
||||
// FIFSRegistrarTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
|
||||
type FIFSRegistrarTransactorRaw struct {
|
||||
Contract *FIFSRegistrarTransactor // Generic write-only contract binding to access the raw methods on
|
||||
}
|
||||
|
||||
// NewFIFSRegistrar creates a new instance of FIFSRegistrar, bound to a specific deployed contract.
|
||||
func NewFIFSRegistrar(address common.Address, backend bind.ContractBackend) (*FIFSRegistrar, error) {
|
||||
contract, err := bindFIFSRegistrar(address, backend, backend, backend)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FIFSRegistrar{FIFSRegistrarCaller: FIFSRegistrarCaller{contract: contract}, FIFSRegistrarTransactor: FIFSRegistrarTransactor{contract: contract}, FIFSRegistrarFilterer: FIFSRegistrarFilterer{contract: contract}}, nil
|
||||
}
|
||||
|
||||
// NewFIFSRegistrarCaller creates a new read-only instance of FIFSRegistrar, bound to a specific deployed contract.
|
||||
func NewFIFSRegistrarCaller(address common.Address, caller bind.ContractCaller) (*FIFSRegistrarCaller, error) {
|
||||
contract, err := bindFIFSRegistrar(address, caller, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FIFSRegistrarCaller{contract: contract}, nil
|
||||
}
|
||||
|
||||
// NewFIFSRegistrarTransactor creates a new write-only instance of FIFSRegistrar, bound to a specific deployed contract.
|
||||
func NewFIFSRegistrarTransactor(address common.Address, transactor bind.ContractTransactor) (*FIFSRegistrarTransactor, error) {
|
||||
contract, err := bindFIFSRegistrar(address, nil, transactor, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FIFSRegistrarTransactor{contract: contract}, nil
|
||||
}
|
||||
|
||||
// NewFIFSRegistrarFilterer creates a new log filterer instance of FIFSRegistrar, bound to a specific deployed contract.
|
||||
func NewFIFSRegistrarFilterer(address common.Address, filterer bind.ContractFilterer) (*FIFSRegistrarFilterer, error) {
|
||||
contract, err := bindFIFSRegistrar(address, nil, nil, filterer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FIFSRegistrarFilterer{contract: contract}, nil
|
||||
}
|
||||
|
||||
// bindFIFSRegistrar binds a generic wrapper to an already deployed contract.
|
||||
func bindFIFSRegistrar(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
|
||||
parsed, err := abi.JSON(strings.NewReader(FIFSRegistrarABI))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
|
||||
}
|
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_FIFSRegistrar *FIFSRegistrarRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {
|
||||
return _FIFSRegistrar.Contract.FIFSRegistrarCaller.contract.Call(opts, result, method, params...)
|
||||
}
|
||||
|
||||
// Transfer initiates a plain transaction to move funds to the contract, calling
|
||||
// its default method if one is available.
|
||||
func (_FIFSRegistrar *FIFSRegistrarRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
|
||||
return _FIFSRegistrar.Contract.FIFSRegistrarTransactor.contract.Transfer(opts)
|
||||
}
|
||||
|
||||
// Transact invokes the (paid) contract method with params as input values.
|
||||
func (_FIFSRegistrar *FIFSRegistrarRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
|
||||
return _FIFSRegistrar.Contract.FIFSRegistrarTransactor.contract.Transact(opts, method, params...)
|
||||
}
|
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (_FIFSRegistrar *FIFSRegistrarCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error {
|
||||
return _FIFSRegistrar.Contract.contract.Call(opts, result, method, params...)
|
||||
}
|
||||
|
||||
// Transfer initiates a plain transaction to move funds to the contract, calling
|
||||
// its default method if one is available.
|
||||
func (_FIFSRegistrar *FIFSRegistrarTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
|
||||
return _FIFSRegistrar.Contract.contract.Transfer(opts)
|
||||
}
|
||||
|
||||
// Transact invokes the (paid) contract method with params as input values.
|
||||
func (_FIFSRegistrar *FIFSRegistrarTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
|
||||
return _FIFSRegistrar.Contract.contract.Transact(opts, method, params...)
|
||||
}
|
||||
|
||||
// Register is a paid mutator transaction binding the contract method 0xd22057a9.
|
||||
//
|
||||
// Solidity: function register(subnode bytes32, owner address) returns()
|
||||
func (_FIFSRegistrar *FIFSRegistrarTransactor) Register(opts *bind.TransactOpts, subnode [32]byte, owner common.Address) (*types.Transaction, error) {
|
||||
return _FIFSRegistrar.contract.Transact(opts, "register", subnode, owner)
|
||||
}
|
||||
|
||||
// Register is a paid mutator transaction binding the contract method 0xd22057a9.
|
||||
//
|
||||
// Solidity: function register(subnode bytes32, owner address) returns()
|
||||
func (_FIFSRegistrar *FIFSRegistrarSession) Register(subnode [32]byte, owner common.Address) (*types.Transaction, error) {
|
||||
return _FIFSRegistrar.Contract.Register(&_FIFSRegistrar.TransactOpts, subnode, owner)
|
||||
}
|
||||
|
||||
// Register is a paid mutator transaction binding the contract method 0xd22057a9.
|
||||
//
|
||||
// Solidity: function register(subnode bytes32, owner address) returns()
|
||||
func (_FIFSRegistrar *FIFSRegistrarTransactorSession) Register(subnode [32]byte, owner common.Address) (*types.Transaction, error) {
|
||||
return _FIFSRegistrar.Contract.Register(&_FIFSRegistrar.TransactOpts, subnode, owner)
|
||||
}
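
A rough end-to-end sketch of driving these generated bindings against the simulated backend, mirroring the pattern used by the ENS tests in this change. The key, balances and label are arbitrary, and the one-argument NewSimulatedBackend signature is assumed to match this revision.

package main

import (
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/contracts/ens/contract"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Fund a throwaway key on a simulated chain, as the ENS tests do.
	key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	auth := bind.NewKeyedTransactor(key)
	backend := backends.NewSimulatedBackend(core.GenesisAlloc{
		crypto.PubkeyToAddress(key.PublicKey): {Balance: big.NewInt(1000000000)},
	})

	// Deploy the registry and a FIFS registrar rooted at the zero node.
	ensAddr, _, registry, err := contract.DeployENS(auth, backend)
	if err != nil {
		log.Fatal(err)
	}
	regAddr, _, registrar, err := contract.DeployFIFSRegistrar(auth, backend, ensAddr, [32]byte{})
	if err != nil {
		log.Fatal(err)
	}
	backend.Commit()

	// Hand the root node to the registrar so register() passes its only_owner check.
	if _, err := registry.SetOwner(auth, [32]byte{}, regAddr); err != nil {
		log.Fatal(err)
	}
	backend.Commit()

	// First-come, first-served claim of the label "swarm" under the root node.
	var label [32]byte
	copy(label[:], crypto.Keccak256([]byte("swarm")))
	if _, err := registrar.Register(auth, label, auth.From); err != nil {
		log.Fatal(err)
	}
	backend.Commit()
	fmt.Println("registered subnode under the root registrar")
}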
|
1321
contracts/ens/contract/publicresolver.go
Normal file
1321
contracts/ens/contract/publicresolver.go
Normal file
File diff suppressed because one or more lines are too long
@ -16,10 +16,11 @@
|
||||
|
||||
package ens
|
||||
|
||||
//go:generate abigen --sol contract/ens.sol --pkg contract --out contract/ens.go
|
||||
//go:generate abigen --sol contract/ENS.sol --exc contract/AbstractENS.sol:AbstractENS --pkg contract --out contract/ens.go
|
||||
//go:generate abigen --sol contract/FIFSRegistrar.sol --exc contract/AbstractENS.sol:AbstractENS --pkg contract --out contract/fifsregistrar.go
|
||||
//go:generate abigen --sol contract/PublicResolver.sol --exc contract/AbstractENS.sol:AbstractENS --pkg contract --out contract/publicresolver.go
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
@ -58,31 +59,29 @@ func NewENS(transactOpts *bind.TransactOpts, contractAddr common.Address, contra
|
||||
}
|
||||
|
||||
// DeployENS deploys an instance of the ENS nameservice, with a 'first-in, first-served' root registrar.
|
||||
func DeployENS(transactOpts *bind.TransactOpts, contractBackend bind.ContractBackend) (*ENS, error) {
|
||||
// Deploy the ENS registry
|
||||
ensAddr, _, _, err := contract.DeployENS(transactOpts, contractBackend, transactOpts.From)
|
||||
func DeployENS(transactOpts *bind.TransactOpts, contractBackend bind.ContractBackend) (common.Address, *ENS, error) {
|
||||
// Deploy the ENS registry.
|
||||
ensAddr, _, _, err := contract.DeployENS(transactOpts, contractBackend)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return ensAddr, nil, err
|
||||
}
|
||||
|
||||
ens, err := NewENS(transactOpts, ensAddr, contractBackend)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return ensAddr, nil, err
|
||||
}
|
||||
|
||||
// Deploy the registrar
|
||||
// Deploy the registrar.
|
||||
regAddr, _, _, err := contract.DeployFIFSRegistrar(transactOpts, contractBackend, ensAddr, [32]byte{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return ensAddr, nil, err
|
||||
}
|
||||
// Set the registrar as owner of the ENS root.
|
||||
if _, err = ens.SetOwner([32]byte{}, regAddr); err != nil {
|
||||
return ensAddr, nil, err
|
||||
}
|
||||
|
||||
// Set the registrar as owner of the ENS root
|
||||
_, err = ens.SetOwner([32]byte{}, regAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ens, nil
|
||||
return ensAddr, ens, nil
|
||||
}
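
With the new signature the registry address is returned alongside the handle. A minimal sketch of calling it on a simulated backend, assuming the test key and allocation style used elsewhere in this change:

package main

import (
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/contracts/ens"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	auth := bind.NewKeyedTransactor(key)
	backend := backends.NewSimulatedBackend(core.GenesisAlloc{
		crypto.PubkeyToAddress(key.PublicKey): {Balance: big.NewInt(1000000000)},
	})

	// Registry plus FIFS root registrar in one call; the registry address is
	// now returned so callers can deploy resolvers against it themselves.
	ensAddr, registry, err := ens.DeployENS(auth, backend)
	if err != nil {
		log.Fatal(err)
	}
	backend.Commit()
	fmt.Printf("ENS registry at %x, handle %T\n", ensAddr, registry)
}

Register, SetContentHash and Resolve are then available on the returned handle, and ensAddr can be handed to resolver deployments, as the updated ENS test later in this diff does.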
|
||||
|
||||
func ensParentNode(name string) (common.Hash, common.Hash) {
|
||||
@ -156,15 +155,11 @@ func (self *ENS) Resolve(name string) (common.Hash, error) {
|
||||
// Only works if the registrar for the parent domain implements the FIFS registrar protocol.
|
||||
func (self *ENS) Register(name string) (*types.Transaction, error) {
|
||||
parentNode, label := ensParentNode(name)
|
||||
|
||||
registrar, err := self.getRegistrar(parentNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opts := self.TransactOpts
|
||||
opts.GasLimit = big.NewInt(200000)
|
||||
return registrar.Contract.Register(&opts, label, self.TransactOpts.From)
|
||||
return registrar.Contract.Register(&self.TransactOpts, label, self.TransactOpts.From)
|
||||
}
|
||||
|
||||
// SetContentHash sets the content hash associated with a name. Only works if the caller
|
||||
@ -178,6 +173,6 @@ func (self *ENS) SetContentHash(name string, hash common.Hash) (*types.Transacti
|
||||
}
|
||||
|
||||
opts := self.TransactOpts
|
||||
opts.GasLimit = big.NewInt(200000)
|
||||
opts.GasLimit = 200000
|
||||
return resolver.Contract.SetContent(&opts, node, hash)
|
||||
}
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
|
||||
"github.com/ethereum/go-ethereum/contracts/ens/contract"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
)
|
||||
@ -36,27 +37,36 @@ var (
|
||||
func TestENS(t *testing.T) {
|
||||
contractBackend := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}})
|
||||
transactOpts := bind.NewKeyedTransactor(key)
|
||||
// Workaround for bug estimating gas in the call to Register
|
||||
transactOpts.GasLimit = big.NewInt(1000000)
|
||||
|
||||
ens, err := DeployENS(transactOpts, contractBackend)
|
||||
ensAddr, ens, err := DeployENS(transactOpts, contractBackend)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
t.Fatalf("can't deploy root registry: %v", err)
|
||||
}
|
||||
contractBackend.Commit()
|
||||
|
||||
_, err = ens.Register(name)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
// Set ourself as the owner of the name.
|
||||
if _, err := ens.Register(name); err != nil {
|
||||
t.Fatalf("can't register: %v", err)
|
||||
}
|
||||
contractBackend.Commit()
|
||||
|
||||
_, err = ens.SetContentHash(name, hash)
|
||||
// Deploy a resolver and make it responsible for the name.
|
||||
resolverAddr, _, _, err := contract.DeployPublicResolver(transactOpts, contractBackend, ensAddr)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
t.Fatalf("can't deploy resolver: %v", err)
|
||||
}
|
||||
if _, err := ens.SetResolver(ensNode(name), resolverAddr); err != nil {
|
||||
t.Fatalf("can't set resolver: %v", err)
|
||||
}
|
||||
contractBackend.Commit()
|
||||
|
||||
// Set the content hash for the name.
|
||||
if _, err = ens.SetContentHash(name, hash); err != nil {
|
||||
t.Fatalf("can't set content hash: %v", err)
|
||||
}
|
||||
contractBackend.Commit()
|
||||
|
||||
// Try to resolve the name.
|
||||
vhost, err := ens.Resolve(name)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
|
File diff suppressed because one or more lines are too long
@ -1,249 +0,0 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// ReleaseOracle is an Ethereum contract to store the current and previous
|
||||
// versions of the go-ethereum implementation. Its goal is to allow Geth to
|
||||
// check for new releases automatically without the need to consult a central
|
||||
// repository.
|
||||
//
|
||||
// The contract takes a vote based approach on both assigning authorised signers
|
||||
// as well as signing off on new Geth releases.
|
||||
//
|
||||
// Note, when a signer is demoted, the currently pending release is auto-nuked.
|
||||
// The reason is to prevent surprises where a demotion actually tilts the votes
|
||||
// in favor of one voter party and pushes out a new release as a consequence of
|
||||
// a simple demotion.
|
||||
contract ReleaseOracle {
|
||||
// Votes is an internal data structure to count votes on a specific proposal
|
||||
struct Votes {
|
||||
address[] pass; // List of signers voting to pass a proposal
|
||||
address[] fail; // List of signers voting to fail a proposal
|
||||
}
|
||||
|
||||
// Version is the version details of a particular Geth release
|
||||
struct Version {
|
||||
uint32 major; // Major version component of the release
|
||||
uint32 minor; // Minor version component of the release
|
||||
uint32 patch; // Patch version component of the release
|
||||
bytes20 commit; // Git SHA1 commit hash of the release
|
||||
|
||||
uint64 time; // Timestamp of the release approval
|
||||
Votes votes; // Votes that passed this release
|
||||
}
|
||||
|
||||
// Oracle authorization details
|
||||
mapping(address => bool) authorised; // Set of accounts allowed to vote on updating the contract
|
||||
address[] voters; // List of addresses currently accepted as signers
|
||||
|
||||
// Various proposals being voted on
|
||||
mapping(address => Votes) authProps; // Currently running user authorization proposals
|
||||
address[] authPend; // List of addresses being voted on (map indexes)
|
||||
|
||||
Version verProp; // Currently proposed release being voted on
|
||||
Version[] releases; // All the positively voted releases
|
||||
|
||||
// isSigner is a modifier to authorize contract transactions.
|
||||
modifier isSigner() {
|
||||
if (authorised[msg.sender]) {
|
||||
_
|
||||
}
|
||||
}
|
||||
|
||||
// Constructor to assign the initial set of signers.
|
||||
function ReleaseOracle(address[] signers) {
|
||||
// If no signers were specified, assign the creator as the sole signer
|
||||
if (signers.length == 0) {
|
||||
authorised[msg.sender] = true;
|
||||
voters.push(msg.sender);
|
||||
return;
|
||||
}
|
||||
// Otherwise assign the individual signers one by one
|
||||
for (uint i = 0; i < signers.length; i++) {
|
||||
authorised[signers[i]] = true;
|
||||
voters.push(signers[i]);
|
||||
}
|
||||
}
|
||||
|
||||
// signers is an accessor method to retrieve all the signers (public accessor
|
||||
// generates an indexed one, not a retrieve-all version).
|
||||
function signers() constant returns(address[]) {
|
||||
return voters;
|
||||
}
|
||||
|
||||
// authProposals retrieves the list of addresses that authorization proposals
|
||||
// are currently being voted on.
|
||||
function authProposals() constant returns(address[]) {
|
||||
return authPend;
|
||||
}
|
||||
|
||||
// authVotes retrieves the current authorization votes for a particular user
|
||||
// to promote him into the list of signers, or demote him from there.
|
||||
function authVotes(address user) constant returns(address[] promote, address[] demote) {
|
||||
return (authProps[user].pass, authProps[user].fail);
|
||||
}
|
||||
|
||||
// currentVersion retrieves the semantic version, commit hash and release time
|
||||
// of the currently voted active release.
|
||||
function currentVersion() constant returns (uint32 major, uint32 minor, uint32 patch, bytes20 commit, uint time) {
|
||||
if (releases.length == 0) {
|
||||
return (0, 0, 0, 0, 0);
|
||||
}
|
||||
var release = releases[releases.length - 1];
|
||||
|
||||
return (release.major, release.minor, release.patch, release.commit, release.time);
|
||||
}
|
||||
|
||||
// proposedVersion retrieves the semantic version, commit hash and the current
|
||||
// votes for the next proposed release.
|
||||
function proposedVersion() constant returns (uint32 major, uint32 minor, uint32 patch, bytes20 commit, address[] pass, address[] fail) {
|
||||
return (verProp.major, verProp.minor, verProp.patch, verProp.commit, verProp.votes.pass, verProp.votes.fail);
|
||||
}
|
||||
|
||||
// promote pitches in on a voting campaign to promote a new user to a signer
|
||||
// position.
|
||||
function promote(address user) {
|
||||
updateSigner(user, true);
|
||||
}
|
||||
|
||||
// demote pitches in on a voting campaign to demote an authorised user from
|
||||
// its signer position.
|
||||
function demote(address user) {
|
||||
updateSigner(user, false);
|
||||
}
|
||||
|
||||
// release votes for a particular version to be included as the next release.
|
||||
function release(uint32 major, uint32 minor, uint32 patch, bytes20 commit) {
|
||||
updateRelease(major, minor, patch, commit, true);
|
||||
}
|
||||
|
||||
// nuke votes for the currently proposed version to not be included as the next
|
||||
// release. Nuking doesn't require a specific version number for simplicity.
|
||||
function nuke() {
|
||||
updateRelease(0, 0, 0, 0, false);
|
||||
}
|
||||
|
||||
// updateSigner marks a vote for changing the status of an Ethereum user, either
|
||||
// for or against the user being an authorised signer.
|
||||
function updateSigner(address user, bool authorize) internal isSigner {
|
||||
// Gather the current votes and ensure we don't double vote
|
||||
Votes votes = authProps[user];
|
||||
for (uint i = 0; i < votes.pass.length; i++) {
|
||||
if (votes.pass[i] == msg.sender) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
for (i = 0; i < votes.fail.length; i++) {
|
||||
if (votes.fail[i] == msg.sender) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
// If no authorization proposal is open, add the user to the index for later lookups
|
||||
if (votes.pass.length == 0 && votes.fail.length == 0) {
|
||||
authPend.push(user);
|
||||
}
|
||||
// Cast the vote and return if the proposal cannot be resolved yet
|
||||
if (authorize) {
|
||||
votes.pass.push(msg.sender);
|
||||
if (votes.pass.length <= voters.length / 2) {
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
votes.fail.push(msg.sender);
|
||||
if (votes.fail.length <= voters.length / 2) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
// Proposal resolved in our favor, execute whatever we voted on
|
||||
if (authorize && !authorised[user]) {
|
||||
authorised[user] = true;
|
||||
voters.push(user);
|
||||
} else if (!authorize && authorised[user]) {
|
||||
authorised[user] = false;
|
||||
|
||||
for (i = 0; i < voters.length; i++) {
|
||||
if (voters[i] == user) {
|
||||
voters[i] = voters[voters.length - 1];
|
||||
voters.length--;
|
||||
|
||||
delete verProp; // Nuke any version proposal (no surprise releases!)
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Finally delete the resolved proposal, index and garbage collect
|
||||
delete authProps[user];
|
||||
|
||||
for (i = 0; i < authPend.length; i++) {
|
||||
if (authPend[i] == user) {
|
||||
authPend[i] = authPend[authPend.length - 1];
|
||||
authPend.length--;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateRelease votes for a particular version to be included as the next release,
|
||||
// or for the currently proposed release to be nuked out.
|
||||
function updateRelease(uint32 major, uint32 minor, uint32 patch, bytes20 commit, bool release) internal isSigner {
|
||||
// Skip nuke votes if no proposal is pending
|
||||
if (!release && verProp.votes.pass.length == 0) {
|
||||
return;
|
||||
}
|
||||
// Mark a new release if no proposal is pending
|
||||
if (verProp.votes.pass.length == 0) {
|
||||
verProp.major = major;
|
||||
verProp.minor = minor;
|
||||
verProp.patch = patch;
|
||||
verProp.commit = commit;
|
||||
}
|
||||
// Make sure positive votes match the current proposal
|
||||
if (release && (verProp.major != major || verProp.minor != minor || verProp.patch != patch || verProp.commit != commit)) {
|
||||
return;
|
||||
}
|
||||
// Gather the current votes and ensure we don't double vote
|
||||
Votes votes = verProp.votes;
|
||||
for (uint i = 0; i < votes.pass.length; i++) {
|
||||
if (votes.pass[i] == msg.sender) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
for (i = 0; i < votes.fail.length; i++) {
|
||||
if (votes.fail[i] == msg.sender) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
// Cast the vote and return if the proposal cannot be resolved yet
|
||||
if (release) {
|
||||
votes.pass.push(msg.sender);
|
||||
if (votes.pass.length <= voters.length / 2) {
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
votes.fail.push(msg.sender);
|
||||
if (votes.fail.length <= voters.length / 2) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
// Proposal resolved in our favor, execute whatever we voted on
|
||||
if (release) {
|
||||
verProp.time = uint64(now);
|
||||
releases.push(verProp);
|
||||
delete verProp;
|
||||
} else {
|
||||
delete verProp;
|
||||
}
|
||||
}
|
||||
}
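
The tally logic above resolves a proposal only once strictly more than half of the registered signers voted the same way (votes.pass.length <= voters.length / 2 keeps it pending). A tiny Go sketch of that threshold, with hypothetical vote counts:

package main

import "fmt"

// resolved mirrors the oracle's tally rule: a proposal only executes once
// strictly more than half of the registered signers cast the same vote.
func resolved(votes, voters int) bool {
	return votes > voters/2
}

func main() {
	fmt.Println(resolved(2, 4)) // false: 2 of 4 is not a strict majority
	fmt.Println(resolved(3, 4)) // true
	fmt.Println(resolved(2, 3)) // true
}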