forked from cerc-io/ipld-eth-server
vendor updates to work with go 1.11 and geth 1.8.18
parent 975f13b969
commit 58b34548f3
199 Gopkg.lock generated
@@ -3,28 +3,47 @@

[[projects]]
  branch = "master"
  digest = "1:c5e006ec5f6460f05964be0a9d34a42e3ad25bbc2564a289d7b4f6f2290c76e5"
  name = "github.com/aristanetworks/goarista"
  packages = ["monotime"]
  pruneopts = ""
  revision = "8d0e8f607a4080e7df3532e645440ed0900c64a4"

[[projects]]
  branch = "master"
  digest = "1:735be9b1e9daa0f731c20fbd2f0d4d0d3efbebe674f415f78b72c19defc07fa1"
  name = "github.com/btcsuite/btcd"
  packages = ["btcec"]
  pruneopts = ""
  revision = "2e60448ffcc6bf78332d1fe590260095f554dd78"

[[projects]]
  digest = "1:aaeffbff5bd24654cb4c190ed75d6c7b57b4f5d6741914c1a7a6bb7447e756c5"
  name = "github.com/deckarep/golang-set"
  packages = ["."]
  pruneopts = ""
  revision = "cbaa98ba5575e67703b32b4b19f73c91f3c4159e"
  version = "v1.7.1"

[[projects]]
  digest = "1:c205f1963071408c1fac73c1b37c86ef9b98d80f17e690a2239853cde255ad3d"
  name = "github.com/ethereum/go-ethereum"
  packages = [
    ".",
    "accounts",
    "accounts/abi",
    "accounts/abi/bind",
    "accounts/keystore",
    "common",
    "common/bitutil",
    "common/hexutil",
    "common/math",
    "common/mclock",
    "common/prque",
    "consensus",
    "consensus/misc",
    "core",
    "core/rawdb",
    "core/state",
    "core/types",
    "core/vm",
@@ -43,51 +62,73 @@
    "p2p",
    "p2p/discover",
    "p2p/discv5",
    "p2p/enode",
    "p2p/enr",
    "p2p/nat",
    "p2p/netutil",
    "params",
    "rlp",
    "rpc",
    "trie"
    "trie",
  ]
  revision = "b8b9f7f4476a30a0aaf6077daade6ae77f969960"
  version = "v1.8.2"
  pruneopts = ""
  revision = "58632d44021bf095b43a1bb2443e6e3690a94739"
  version = "v1.8.18"

[[projects]]
  digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd"
  name = "github.com/fsnotify/fsnotify"
  packages = ["."]
  pruneopts = ""
  revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
  version = "v1.4.7"

[[projects]]
  digest = "1:9ca737b471693542351e112c9e86be9bf7385e42256893a09ecb2a98e2036f74"
  name = "github.com/go-stack/stack"
  packages = ["."]
  pruneopts = ""
  revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc"
  version = "v1.7.0"

[[projects]]
  branch = "master"
  digest = "1:3b760d3b93f994df8eb1d9ebfad17d3e9e37edcb7f7efaa15b427c0d7a64f4e4"
  name = "github.com/golang/protobuf"
  packages = ["proto"]
  pruneopts = ""
  revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"

[[projects]]
  branch = "master"
  digest = "1:09307dfb1aa3f49a2bf869dcfa4c6c06ecd3c207221bd1c1a1141f0e51f209eb"
  name = "github.com/golang/snappy"
  packages = ["."]
  pruneopts = ""
  revision = "553a641470496b2327abcac10b36396bd98e45c9"

[[projects]]
  digest = "1:a25a2c5ae694b01713fb6cd03c3b1ac1ccc1902b9f0a922680a88ec254f968e1"
  name = "github.com/google/uuid"
  packages = ["."]
  pruneopts = ""
  revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8"
  version = "v1.1.0"

[[projects]]
  branch = "master"
  digest = "1:9c776d7d9c54b7ed89f119e449983c3f24c0023e75001d6092442412ebca6b94"
  name = "github.com/hashicorp/golang-lru"
  packages = [
    ".",
    "simplelru"
    "simplelru",
  ]
  pruneopts = ""
  revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"

[[projects]]
  branch = "master"
  digest = "1:147d671753effde6d3bcd58fc74c1d67d740196c84c280c762a5417319499972"
  name = "github.com/hashicorp/hcl"
  packages = [
    ".",
@@ -98,12 +139,14 @@
    "hcl/token",
    "json/parser",
    "json/scanner",
    "json/token"
    "json/token",
  ]
  pruneopts = ""
  revision = "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8"

[[projects]]
  branch = "master"
  digest = "1:b5ef0f034b2eda02c19c2cfcccc9754b372c4e04fa593972fbd8e3d9813d8d98"
  name = "github.com/huin/goupnp"
  packages = [
    ".",
@@ -112,59 +155,83 @@
    "httpu",
    "scpd",
    "soap",
    "ssdp"
    "ssdp",
  ]
  pruneopts = ""
  revision = "dceda08e705b2acee36aab47d765ed801f64cfc7"

[[projects]]
  digest = "1:c205f1963071408c1fac73c1b37c86ef9b98d80f17e690a2239853cde255ad3d"
  name = "github.com/i-norden/go-ethereum"
  packages = ["core/types"]
  pruneopts = ""
  revision = "58632d44021bf095b43a1bb2443e6e3690a94739"
  version = "v1.8.18"

[[projects]]
  digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
  name = "github.com/inconshreveable/mousetrap"
  packages = ["."]
  pruneopts = ""
  revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
  version = "v1.0"

[[projects]]
  digest = "1:76f836364ae83ed811c415aa92e1209ce49de9f62aad85b85fca749a8b96a110"
  name = "github.com/jackpal/go-nat-pmp"
  packages = ["."]
  pruneopts = ""
  revision = "c9cfead9f2a36ddf3daa40ba269aa7f4bbba6b62"
  version = "v1.0.1"

[[projects]]
  branch = "master"
  digest = "1:fff68ae49ca708829c24a986fb0a044ee2b5aa44b65a0b57b1840fbb0a81f9a6"
  name = "github.com/jmoiron/sqlx"
  packages = [
    ".",
    "reflectx"
    "reflectx",
  ]
  pruneopts = ""
  revision = "99f3ad6d85ae53d0fecf788ab62d0e9734b3c117"

[[projects]]
  branch = "master"
  digest = "1:3b5167b89f2203a949b71e29783418a0532531238ad36eb8610ec12e9c7b997f"
  name = "github.com/lib/pq"
  packages = [
    ".",
    "oid"
    "oid",
  ]
  pruneopts = ""
  revision = "83612a56d3dd153a94a629cd64925371c9adad78"

[[projects]]
  digest = "1:739b2038a38cebb50e922d18f4b042c042256320fea2db094814aeef8891e0c1"
  name = "github.com/magiconair/properties"
  packages = ["."]
  pruneopts = ""
  revision = "d419a98cdbed11a922bf76f257b7c4be79b50e73"
  version = "v1.7.4"

[[projects]]
  branch = "master"
  digest = "1:59d11e81d6fdd12a771321696bb22abdd9a94d26ac864787e98c9b419e428734"
  name = "github.com/mitchellh/go-homedir"
  packages = ["."]
  pruneopts = ""
  revision = "b8bc1bf767474819792c23f32d8286a45736f1c6"

[[projects]]
  branch = "master"
  digest = "1:a31c8ff06bf8ccc1826e3a28a4132bf217a53f3b096e6a03dcac7b6a9eb796f9"
  name = "github.com/mitchellh/mapstructure"
  packages = ["."]
  pruneopts = ""
  revision = "b4575eea38cca1123ec2dc90c26529b5c5acfcff"

[[projects]]
  digest = "1:32b27072cd55bd2fb7244de0425943d125da6a552ae2b6517cdd965a662baf18"
  name = "github.com/onsi/ginkgo"
  packages = [
    ".",
@@ -184,12 +251,14 @@
    "reporters/stenographer",
    "reporters/stenographer/support/go-colorable",
    "reporters/stenographer/support/go-isatty",
    "types"
    "types",
  ]
  pruneopts = ""
  revision = "9eda700730cba42af70d53180f9dcce9266bc2bc"
  version = "v1.4.0"

[[projects]]
  digest = "1:a4e59d0b2821c983b58c317f141cd77df20570979632da8a7a352e5d12698de7"
  name = "github.com/onsi/gomega"
  packages = [
    ".",
@@ -204,64 +273,98 @@
    "matchers/support/goraph/edge",
    "matchers/support/goraph/node",
    "matchers/support/goraph/util",
    "types"
    "types",
  ]
  pruneopts = ""
  revision = "c893efa28eb45626cdaa76c9f653b62488858837"
  version = "v1.2.0"

[[projects]]
  digest = "1:a5484d4fa43127138ae6e7b2299a6a52ae006c7f803d98d717f60abf3e97192e"
  name = "github.com/pborman/uuid"
  packages = ["."]
  pruneopts = ""
  revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1"
  version = "v1.2"

[[projects]]
  digest = "1:d60cfeee185019d4fcd35e8c89c83aff576e4723b6100300bf67b05be961388f"
  name = "github.com/pelletier/go-toml"
  packages = ["."]
  pruneopts = ""
  revision = "acdc4509485b587f5e675510c4f2c63e90ff68a8"
  version = "v1.1.0"

[[projects]]
  digest = "1:6938b6ee0351393c55baa54b64352e49a741a1b7b616a7d134d237106812070f"
  name = "github.com/rjeczalik/notify"
  packages = ["."]
  pruneopts = ""
  revision = "69d839f37b13a8cb7a78366f7633a4071cb43be7"
  version = "v0.9.2"

[[projects]]
  digest = "1:bfc8db90e2676a2fc0d742a536f376044a9b74f2745b2c60d339eb06c6c6988a"
  name = "github.com/rs/cors"
  packages = ["."]
  pruneopts = ""
  revision = "7af7a1e09ba336d2ea14b1ce73bf693c6837dbf6"
  version = "v1.2"

[[projects]]
  digest = "1:dae0d7dd55563fd389e7263a32d2030022ef29cceff941336e53f6520e0308c0"
  name = "github.com/spf13/afero"
  packages = [
    ".",
    "mem"
    "mem",
  ]
  pruneopts = ""
  revision = "bb8f1927f2a9d3ab41c9340aa034f6b803f4359c"
  version = "v1.0.2"

[[projects]]
  digest = "1:6ff9b74bfea2625f805edec59395dc37e4a06458dd3c14e3372337e3d35a2ed6"
  name = "github.com/spf13/cast"
  packages = ["."]
  pruneopts = ""
  revision = "acbeb36b902d72a7a4c18e8f3241075e7ab763e4"
  version = "v1.1.0"

[[projects]]
  digest = "1:2208a80fc3259291e43b30f42f844d18f4218036dff510f42c653ec9890d460a"
  name = "github.com/spf13/cobra"
  packages = ["."]
  pruneopts = ""
  revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
  version = "v0.0.1"

[[projects]]
  branch = "master"
  digest = "1:104517520aab91164020ab6524a5d6b7cafc641b2e42ac6236f6ac1deac4f66a"
  name = "github.com/spf13/jwalterweatherman"
  packages = ["."]
  pruneopts = ""
  revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394"

[[projects]]
  digest = "1:261bc565833ef4f02121450d74eb88d5ae4bd74bfe5d0e862cddb8550ec35000"
  name = "github.com/spf13/pflag"
  packages = ["."]
  pruneopts = ""
  revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
  version = "v1.0.0"

[[projects]]
  digest = "1:59354ad53dfe6ed1b941844cb029cd37c0377598eec3a0d49c03aee2375ef9c4"
  name = "github.com/spf13/viper"
  packages = ["."]
  pruneopts = ""
  revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7"
  version = "v1.0.0"

[[projects]]
  branch = "master"
  digest = "1:f87eb23fc0c2b143947616f754533344626637d3ae1b03ba077136ccb17de3f2"
  name = "github.com/syndtr/goleveldb"
  packages = [
    "leveldb",
@@ -275,42 +378,59 @@
    "leveldb/opt",
    "leveldb/storage",
    "leveldb/table",
    "leveldb/util"
    "leveldb/util",
  ]
  pruneopts = ""
  revision = "adf24ef3f94bd13ec4163060b21a5678f22b429b"

[[projects]]
  branch = "master"
  digest = "1:450f85e389d9cbd8265299827a2a062b1b6aee8d4c7b78d849d913a6eb405908"
  name = "golang.org/x/crypto"
  packages = ["ripemd160"]
  packages = [
    "pbkdf2",
    "ripemd160",
    "scrypt",
  ]
  pruneopts = ""
  revision = "613d6eafa307c6881a737a3c35c0e312e8d3a8c5"

[[projects]]
  branch = "master"
  digest = "1:950b672f2ee80d0fc4c95a15a976ba9ee573a6fb8ede8a777770b2230776367e"
  name = "golang.org/x/net"
  packages = [
    "context",
    "html",
    "html/atom",
    "html/charset",
    "websocket"
    "websocket",
  ]
  pruneopts = ""
  revision = "faacc1b5e36e3ff02cbec9661c69ac63dd5a83ad"

[[projects]]
  branch = "master"
  digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1"
  name = "golang.org/x/sync"
  packages = ["errgroup"]
  pruneopts = ""
  revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"

[[projects]]
  branch = "master"
  digest = "1:303c0ee48d6229a2423950f41b3ccb5a2067dc4c7b65f8863cfbd962bef05a85"
  name = "golang.org/x/sys"
  packages = ["unix"]
  revision = "a0f4589a76f1f83070cb9e5613809e1d07b97c13"
  packages = [
    "cpu",
    "unix",
  ]
  pruneopts = ""
  revision = "62eef0e2fa9b2c385f7b2778e763486da6880d37"

[[projects]]
  branch = "master"
  digest = "1:1c70f7bb89783a026dc32920575a3feef48e065ef6e170ad227903e8194d7a36"
  name = "golang.org/x/text"
  packages = [
    "encoding",
@@ -332,37 +452,56 @@
    "runes",
    "transform",
    "unicode/cldr",
    "unicode/norm"
    "unicode/norm",
  ]
  pruneopts = ""
  revision = "be25de41fadfae372d6470bda81ca6beb55ef551"

[[projects]]
  name = "gopkg.in/fatih/set.v0"
  packages = ["."]
  revision = "57907de300222151a123d29255ed17f5ed43fad3"
  version = "v0.1.0"

[[projects]]
  branch = "v2"
  name = "gopkg.in/karalabe/cookiejar.v2"
  packages = ["collections/prque"]
  revision = "8dcd6a7f4951f6ff3ee9cbb919a06d8925822e57"

[[projects]]
  branch = "v2"
  digest = "1:4f830ee018eb8c56d0def653ad7c9a1d2a053f0cef2ac6b2200f73b98fa6a681"
  name = "gopkg.in/natefinch/npipe.v2"
  packages = ["."]
  pruneopts = ""
  revision = "c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6"

[[projects]]
  branch = "v2"
  digest = "1:f769ed60e075e4221612c2f4162fccc9d3795ef358fa463425e3b3d7a5debb27"
  name = "gopkg.in/yaml.v2"
  packages = ["."]
  pruneopts = ""
  revision = "287cf08546ab5e7e37d55a84f7ed3fd1db036de5"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "a69d27ddc33f08d7b39242508f04e5c339b52cc65e70596100d17a870b2183a6"
  input-imports = [
    "github.com/ethereum/go-ethereum",
    "github.com/ethereum/go-ethereum/accounts/abi",
    "github.com/ethereum/go-ethereum/accounts/abi/bind",
    "github.com/ethereum/go-ethereum/common",
    "github.com/ethereum/go-ethereum/common/hexutil",
    "github.com/ethereum/go-ethereum/core",
    "github.com/ethereum/go-ethereum/core/types",
    "github.com/ethereum/go-ethereum/crypto",
    "github.com/ethereum/go-ethereum/ethclient",
    "github.com/ethereum/go-ethereum/ethdb",
    "github.com/ethereum/go-ethereum/p2p",
    "github.com/ethereum/go-ethereum/p2p/discover",
    "github.com/ethereum/go-ethereum/params",
    "github.com/ethereum/go-ethereum/rpc",
    "github.com/i-norden/go-ethereum/core/types",
    "github.com/jmoiron/sqlx",
    "github.com/lib/pq",
    "github.com/mitchellh/go-homedir",
    "github.com/onsi/ginkgo",
    "github.com/onsi/gomega",
    "github.com/onsi/gomega/ghttp",
    "github.com/spf13/cobra",
    "github.com/spf13/viper",
    "golang.org/x/net/context",
    "golang.org/x/sync/errgroup",
  ]
  solver-name = "gps-cdcl"
  solver-version = 1
Gopkg.toml

@@ -39,4 +39,4 @@

[[constraint]]
  name = "github.com/ethereum/go-ethereum"
  version = "1.8"
  version = "1.8.18"
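For context on what the pinned go-ethereum v1.8.18 dependency is used for, here is a minimal, illustrative Go sketch (not part of this diff) built only from packages that appear in the input-imports list above; the RPC endpoint is a placeholder, not a project setting:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Placeholder endpoint; any geth node's RPC address would do.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Fetch the latest header through the ethclient API pinned above.
	header, err := client.HeaderByNumber(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("latest block:", header.Number.String())
}
```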
252 vendor/github.com/btcsuite/btcd/btcec/btcec_test.go generated vendored
@@ -898,132 +898,132 @@ var testVectors = []struct {
	r, s string
	ok   bool
}{
	/*
	 * All of these tests are disabled since they are for P224, not sec256k1.
	 * they are left here as an example of test vectors for when some *real*
	 * vectors may be found.
	 * - oga@conformal.com
	{
		"09626b45493672e48f3d1226a3aff3201960e577d33a7f72c7eb055302db8fe8ed61685dd036b554942a5737cd1512cdf811ee0c00e6dd2f08c69f08643be396e85dafda664801e772cdb7396868ac47b172245b41986aa2648cb77fbbfa562581be06651355a0c4b090f9d17d8f0ab6cced4e0c9d386cf465a516630f0231bd",
		"9504b5b82d97a264d8b3735e0568decabc4b6ca275bc53cbadfc1c40",
		"03426f80e477603b10dee670939623e3da91a94267fc4e51726009ed",
		"81d3ac609f9575d742028dd496450a58a60eea2dcf8b9842994916e1",
		"96a8c5f382c992e8f30ccce9af120b067ec1d74678fa8445232f75a5",
		false,
	},
	{
		"96b2b6536f6df29be8567a72528aceeaccbaa66c66c534f3868ca9778b02faadb182e4ed34662e73b9d52ecbe9dc8e875fc05033c493108b380689ebf47e5b062e6a0cdb3dd34ce5fe347d92768d72f7b9b377c20aea927043b509c078ed2467d7113405d2ddd458811e6faf41c403a2a239240180f1430a6f4330df5d77de37",
		"851e3100368a22478a0029353045ae40d1d8202ef4d6533cfdddafd8",
		"205302ac69457dd345e86465afa72ee8c74ca97e2b0b999aec1f10c2",
		"4450c2d38b697e990721aa2dbb56578d32b4f5aeb3b9072baa955ee0",
		"e26d4b589166f7b4ba4b1c8fce823fa47aad22f8c9c396b8c6526e12",
		false,
	},
	{
		"86778dbb4a068a01047a8d245d632f636c11d2ad350740b36fad90428b454ad0f120cb558d12ea5c8a23db595d87543d06d1ef489263d01ee529871eb68737efdb8ff85bc7787b61514bed85b7e01d6be209e0a4eb0db5c8df58a5c5bf706d76cb2bdf7800208639e05b89517155d11688236e6a47ed37d8e5a2b1e0adea338e",
		"ad5bda09d319a717c1721acd6688d17020b31b47eef1edea57ceeffc",
		"c8ce98e181770a7c9418c73c63d01494b8b80a41098c5ea50692c984",
		"de5558c257ab4134e52c19d8db3b224a1899cbd08cc508ce8721d5e9",
		"745db7af5a477e5046705c0a5eff1f52cb94a79d481f0c5a5e108ecd",
		true,
	},
	{
		"4bc6ef1958556686dab1e39c3700054a304cbd8f5928603dcd97fafd1f29e69394679b638f71c9344ce6a535d104803d22119f57b5f9477e253817a52afa9bfbc9811d6cc8c8be6b6566c6ef48b439bbb532abe30627548c598867f3861ba0b154dc1c3deca06eb28df8efd28258554b5179883a36fbb1eecf4f93ee19d41e3d",
		"cc5eea2edf964018bdc0504a3793e4d2145142caa09a72ac5fb8d3e8",
		"a48d78ae5d08aa725342773975a00d4219cf7a8029bb8cf3c17c374a",
		"67b861344b4e416d4094472faf4272f6d54a497177fbc5f9ef292836",
		"1d54f3fcdad795bf3b23408ecbac3e1321d1d66f2e4e3d05f41f7020",
		false,
	},
	{
		"bb658732acbf3147729959eb7318a2058308b2739ec58907dd5b11cfa3ecf69a1752b7b7d806fe00ec402d18f96039f0b78dbb90a59c4414fb33f1f4e02e4089de4122cd93df5263a95be4d7084e2126493892816e6a5b4ed123cb705bf930c8f67af0fb4514d5769232a9b008a803af225160ce63f675bd4872c4c97b146e5e",
		"6234c936e27bf141fc7534bfc0a7eedc657f91308203f1dcbd642855",
		"27983d87ca785ef4892c3591ef4a944b1deb125dd58bd351034a6f84",
		"e94e05b42d01d0b965ffdd6c3a97a36a771e8ea71003de76c4ecb13f",
		"1dc6464ffeefbd7872a081a5926e9fc3e66d123f1784340ba17737e9",
		false,
	},
	{
		"7c00be9123bfa2c4290be1d8bc2942c7f897d9a5b7917e3aabd97ef1aab890f148400a89abd554d19bec9d8ed911ce57b22fbcf6d30ca2115f13ce0a3f569a23bad39ee645f624c49c60dcfc11e7d2be24de9c905596d8f23624d63dc46591d1f740e46f982bfae453f107e80db23545782be23ce43708245896fc54e1ee5c43",
		"9f3f037282aaf14d4772edffff331bbdda845c3f65780498cde334f1",
		"8308ee5a16e3bcb721b6bc30000a0419bc1aaedd761be7f658334066",
		"6381d7804a8808e3c17901e4d283b89449096a8fba993388fa11dc54",
		"8e858f6b5b253686a86b757bad23658cda53115ac565abca4e3d9f57",
		false,
	},
	{
		"cffc122a44840dc705bb37130069921be313d8bde0b66201aebc48add028ca131914ef2e705d6bedd19dc6cf9459bbb0f27cdfe3c50483808ffcdaffbeaa5f062e097180f07a40ef4ab6ed03fe07ed6bcfb8afeb42c97eafa2e8a8df469de07317c5e1494c41547478eff4d8c7d9f0f484ad90fedf6e1c35ee68fa73f1691601",
		"a03b88a10d930002c7b17ca6af2fd3e88fa000edf787dc594f8d4fd4",
		"e0cf7acd6ddc758e64847fe4df9915ebda2f67cdd5ec979aa57421f5",
		"387b84dcf37dc343c7d2c5beb82f0bf8bd894b395a7b894565d296c1",
		"4adc12ce7d20a89ce3925e10491c731b15ddb3f339610857a21b53b4",
		false,
	},
	{
		"26e0e0cafd85b43d16255908ccfd1f061c680df75aba3081246b337495783052ba06c60f4a486c1591a4048bae11b4d7fec4f161d80bdc9a7b79d23e44433ed625eab280521a37f23dd3e1bdc5c6a6cfaa026f3c45cf703e76dab57add93fe844dd4cda67dc3bddd01f9152579e49df60969b10f09ce9372fdd806b0c7301866",
		"9a8983c42f2b5a87c37a00458b5970320d247f0c8a88536440173f7d",
		"15e489ec6355351361900299088cfe8359f04fe0cab78dde952be80c",
		"929a21baa173d438ec9f28d6a585a2f9abcfc0a4300898668e476dc0",
		"59a853f046da8318de77ff43f26fe95a92ee296fa3f7e56ce086c872",
		true,
	},
	{
		"1078eac124f48ae4f807e946971d0de3db3748dd349b14cca5c942560fb25401b2252744f18ad5e455d2d97ed5ae745f55ff509c6c8e64606afe17809affa855c4c4cdcaf6b69ab4846aa5624ed0687541aee6f2224d929685736c6a23906d974d3c257abce1a3fb8db5951b89ecb0cda92b5207d93f6618fd0f893c32cf6a6e",
		"d6e55820bb62c2be97650302d59d667a411956138306bd566e5c3c2b",
		"631ab0d64eaf28a71b9cbd27a7a88682a2167cee6251c44e3810894f",
		"65af72bc7721eb71c2298a0eb4eed3cec96a737cc49125706308b129",
		"bd5a987c78e2d51598dbd9c34a9035b0069c580edefdacee17ad892a",
		false,
	},
	{
		"919deb1fdd831c23481dfdb2475dcbe325b04c34f82561ced3d2df0b3d749b36e255c4928973769d46de8b95f162b53cd666cad9ae145e7fcfba97919f703d864efc11eac5f260a5d920d780c52899e5d76f8fe66936ff82130761231f536e6a3d59792f784902c469aa897aabf9a0678f93446610d56d5e0981e4c8a563556b",
		"269b455b1024eb92d860a420f143ac1286b8cce43031562ae7664574",
		"baeb6ca274a77c44a0247e5eb12ca72bdd9a698b3f3ae69c9f1aaa57",
		"cb4ec2160f04613eb0dfe4608486091a25eb12aa4dec1afe91cfb008",
		"40b01d8cd06589481574f958b98ca08ade9d2a8fe31024375c01bb40",
		false,
	},
	{
		"6e012361250dacf6166d2dd1aa7be544c3206a9d43464b3fcd90f3f8cf48d08ec099b59ba6fe7d9bdcfaf244120aed1695d8be32d1b1cd6f143982ab945d635fb48a7c76831c0460851a3d62b7209c30cd9c2abdbe3d2a5282a9fcde1a6f418dd23c409bc351896b9b34d7d3a1a63bbaf3d677e612d4a80fa14829386a64b33f",
		"6d2d695efc6b43b13c14111f2109608f1020e3e03b5e21cfdbc82fcd",
		"26a4859296b7e360b69cf40be7bd97ceaffa3d07743c8489fc47ca1b",
		"9a8cb5f2fdc288b7183c5b32d8e546fc2ed1ca4285eeae00c8b572ad",
		"8c623f357b5d0057b10cdb1a1593dab57cda7bdec9cf868157a79b97",
		true,
	},
	{
		"bf6bd7356a52b234fe24d25557200971fc803836f6fec3cade9642b13a8e7af10ab48b749de76aada9d8927f9b12f75a2c383ca7358e2566c4bb4f156fce1fd4e87ef8c8d2b6b1bdd351460feb22cdca0437ac10ca5e0abbbce9834483af20e4835386f8b1c96daaa41554ceee56730aac04f23a5c765812efa746051f396566",
		"14250131b2599939cf2d6bc491be80ddfe7ad9de644387ee67de2d40",
		"b5dc473b5d014cd504022043c475d3f93c319a8bdcb7262d9e741803",
		"4f21642f2201278a95339a80f75cc91f8321fcb3c9462562f6cbf145",
		"452a5f816ea1f75dee4fd514fa91a0d6a43622981966c59a1b371ff8",
		false,
	},
	{
		"0eb7f4032f90f0bd3cf9473d6d9525d264d14c031a10acd31a053443ed5fe919d5ac35e0be77813071b4062f0b5fdf58ad5f637b76b0b305aec18f82441b6e607b44cdf6e0e3c7c57f24e6fd565e39430af4a6b1d979821ed0175fa03e3125506847654d7e1ae904ce1190ae38dc5919e257bdac2db142a6e7cd4da6c2e83770",
		"d1f342b7790a1667370a1840255ac5bbbdc66f0bc00ae977d99260ac",
		"76416cabae2de9a1000b4646338b774baabfa3db4673790771220cdb",
		"bc85e3fc143d19a7271b2f9e1c04b86146073f3fab4dda1c3b1f35ca",
		"9a5c70ede3c48d5f43307a0c2a4871934424a3303b815df4bb0f128e",
		false,
	},
	{
		"5cc25348a05d85e56d4b03cec450128727bc537c66ec3a9fb613c151033b5e86878632249cba83adcefc6c1e35dcd31702929c3b57871cda5c18d1cf8f9650a25b917efaed56032e43b6fc398509f0d2997306d8f26675f3a8683b79ce17128e006aa0903b39eeb2f1001be65de0520115e6f919de902b32c38d691a69c58c92",
		"7e49a7abf16a792e4c7bbc4d251820a2abd22d9f2fc252a7bf59c9a6",
		"44236a8fb4791c228c26637c28ae59503a2f450d4cfb0dc42aa843b9",
		"084461b4050285a1a85b2113be76a17878d849e6bc489f4d84f15cd8",
		"079b5bddcc4d45de8dbdfd39f69817c7e5afa454a894d03ee1eaaac3",
		false,
	},
	{
		"1951533ce33afb58935e39e363d8497a8dd0442018fd96dff167b3b23d7206a3ee182a3194765df4768a3284e23b8696c199b4686e670d60c9d782f08794a4bccc05cffffbd1a12acd9eb1cfa01f7ebe124da66ecff4599ea7720c3be4bb7285daa1a86ebf53b042bd23208d468c1b3aa87381f8e1ad63e2b4c2ba5efcf05845",
		"31945d12ebaf4d81f02be2b1768ed80784bf35cf5e2ff53438c11493",
		"a62bebffac987e3b9d3ec451eb64c462cdf7b4aa0b1bbb131ceaa0a4",
		"bc3c32b19e42b710bca5c6aaa128564da3ddb2726b25f33603d2af3c",
		"ed1a719cc0c507edc5239d76fe50e2306c145ad252bd481da04180c0",
		false,
	},
	*/
}

func TestVectors(t *testing.T) {
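The disabled vectors above are P224 data kept only as a template; for reference, a minimal, hedged Go sketch of the kind of secp256k1 sign/verify round-trip that real vectors for this package would exercise (illustrative only, not part of the vendored test file; it assumes the btcec v1 API vendored at this revision):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"log"

	"github.com/btcsuite/btcd/btcec"
)

func main() {
	// Generate a key pair on secp256k1, the curve this package implements.
	priv, err := btcec.NewPrivateKey(btcec.S256())
	if err != nil {
		log.Fatal(err)
	}

	// Sign a message hash and verify the signature against the public key.
	hash := sha256.Sum256([]byte("test message"))
	sig, err := priv.Sign(hash[:])
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signature valid:", sig.Verify(hash[:], priv.PubKey()))
}
```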
22 vendor/github.com/deckarep/golang-set/.gitignore generated vendored Normal file
@@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
11 vendor/github.com/deckarep/golang-set/.travis.yml generated vendored Normal file
@@ -0,0 +1,11 @@
language: go

go:
- 1.8
- 1.9
- tip

script:
- go test -race ./...
- go test -bench=.
22 vendor/gopkg.in/fatih/set.v0/LICENSE.md → vendor/github.com/deckarep/golang-set/LICENSE generated vendored
@@ -1,20 +1,22 @@
The MIT License (MIT)
Open Source Initiative OSI - The MIT License (MIT):Licensing

Copyright (c) 2013 Fatih Arslan
The MIT License (MIT)
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
95 vendor/github.com/deckarep/golang-set/README.md generated vendored Normal file
@@ -0,0 +1,95 @@
[](https://travis-ci.org/deckarep/golang-set)
[](https://goreportcard.com/report/github.com/deckarep/golang-set)
[](http://godoc.org/github.com/deckarep/golang-set)

## golang-set

The missing set collection for the Go language. Until Go has sets built-in...use this.

Coming from Python one of the things I miss is the superbly wonderful set collection. This is my attempt to mimic the primary features of the set from Python.
You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library. To those I say simply ignore this repository
and carry-on and to the rest that find this useful please contribute in helping me make it better by:

* Helping to make more idiomatic improvements to the code.
* Helping to increase the performance of it. ~~(So far, no attempt has been made, but since it uses a map internally, I expect it to be mostly performant.)~~
* Helping to make the unit-tests more robust and kick-ass.
* Helping to fill in the [documentation.](http://godoc.org/github.com/deckarep/golang-set)
* Simply offering feedback and suggestions. (Positive, constructive feedback is appreciated.)

I have to give some credit for helping seed the idea with this post on [stackoverflow.](http://programmers.stackexchange.com/questions/177428/sets-data-structure-in-golang)

*Update* - as of 3/9/2014, you can use a compile-time generic version of this package in the [gen](http://clipperhouse.github.io/gen/) framework. This framework allows you to use the golang-set in a completely generic and type-safe way by allowing you to generate a supporting .go file based on your custom types.

## Features (as of 9/22/2014)

* a CartesianProduct() method has been added with unit-tests: [Read more about the cartesian product](http://en.wikipedia.org/wiki/Cartesian_product)

## Features (as of 9/15/2014)

* a PowerSet() method has been added with unit-tests: [Read more about the Power set](http://en.wikipedia.org/wiki/Power_set)

## Features (as of 4/22/2014)

* One common interface to both implementations
* Two set implementations to choose from
  * a thread-safe implementation designed for concurrent use
  * a non-thread-safe implementation designed for performance
* 75 benchmarks for both implementations
* 35 unit tests for both implementations
* 14 concurrent tests for the thread-safe implementation

Please see the unit test file for additional usage examples. The Python set documentation will also do a better job than I can of explaining how a set typically [works.](http://docs.python.org/2/library/sets.html) Please keep in mind
however that the Python set is a built-in type and supports additional features and syntax that make it awesome.

## Examples but not exhaustive:

```go
requiredClasses := mapset.NewSet()
requiredClasses.Add("Cooking")
requiredClasses.Add("English")
requiredClasses.Add("Math")
requiredClasses.Add("Biology")

scienceSlice := []interface{}{"Biology", "Chemistry"}
scienceClasses := mapset.NewSetFromSlice(scienceSlice)

electiveClasses := mapset.NewSet()
electiveClasses.Add("Welding")
electiveClasses.Add("Music")
electiveClasses.Add("Automotive")

bonusClasses := mapset.NewSet()
bonusClasses.Add("Go Programming")
bonusClasses.Add("Python Programming")

//Show me all the available classes I can take
allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses)
fmt.Println(allClasses) //Set{Cooking, English, Math, Chemistry, Welding, Biology, Music, Automotive, Go Programming, Python Programming}

//Is cooking considered a science class?
fmt.Println(scienceClasses.Contains("Cooking")) //false

//Show me all classes that are not science classes, since I hate science.
fmt.Println(allClasses.Difference(scienceClasses)) //Set{Music, Automotive, Go Programming, Python Programming, Cooking, English, Math, Welding}

//Which science classes are also required classes?
fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology}

//How many bonus classes do you offer?
fmt.Println(bonusClasses.Cardinality()) //2

//Do you have the following classes? Welding, Automotive and English?
fmt.Println(allClasses.IsSuperset(mapset.NewSetFromSlice([]interface{}{"Welding", "Automotive", "English"}))) //true
```

Thanks!

-Ralph

[](https://bitdeli.com/free "Bitdeli Badge")

[](https://github.com/igrigorik/ga-beacon)
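The README above mentions a thread-safe and a non-thread-safe implementation behind one common interface; a brief, illustrative Go sketch (not part of the vendored files) of choosing between the two constructors that appear in this package:

```go
package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	// NewSet returns the thread-safe implementation, safe for concurrent use.
	shared := mapset.NewSet()
	shared.Add("geth")
	shared.Add("postgres")

	// NewThreadUnsafeSet skips locking; faster when only one goroutine uses it.
	local := mapset.NewThreadUnsafeSet()
	local.Add("geth")
	local.Add("postgres")

	fmt.Println(shared.Contains("geth"), shared.Cardinality()) // true 2
	fmt.Println(local.Cardinality())                           // 2
}
```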
674 vendor/github.com/deckarep/golang-set/bench_test.go generated vendored Normal file
@ -0,0 +1,674 @@
|
||||
package mapset
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func nrand(n int) []int {
|
||||
i := make([]int, n)
|
||||
for ind := range i {
|
||||
i[ind] = rand.Int()
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func toInterfaces(i []int) []interface{} {
|
||||
ifs := make([]interface{}, len(i))
|
||||
for ind, v := range i {
|
||||
ifs[ind] = v
|
||||
}
|
||||
return ifs
|
||||
}
|
||||
|
||||
func benchAdd(b *testing.B, s Set) {
|
||||
nums := nrand(b.N)
|
||||
b.ResetTimer()
|
||||
for _, v := range nums {
|
||||
s.Add(v)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAddSafe(b *testing.B) {
|
||||
benchAdd(b, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkAddUnsafe(b *testing.B) {
|
||||
benchAdd(b, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchRemove(b *testing.B, s Set) {
|
||||
nums := nrand(b.N)
|
||||
for _, v := range nums {
|
||||
s.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for _, v := range nums {
|
||||
s.Remove(v)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRemoveSafe(b *testing.B) {
|
||||
benchRemove(b, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkRemoveUnsafe(b *testing.B) {
|
||||
benchRemove(b, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchCardinality(b *testing.B, s Set) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.Cardinality()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCardinalitySafe(b *testing.B) {
|
||||
benchCardinality(b, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkCardinalityUnsafe(b *testing.B) {
|
||||
benchCardinality(b, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchClear(b *testing.B, s Set) {
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.Clear()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkClearSafe(b *testing.B) {
|
||||
benchClear(b, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkClearUnsafe(b *testing.B) {
|
||||
benchClear(b, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchClone(b *testing.B, n int, s Set) {
|
||||
nums := toInterfaces(nrand(n))
|
||||
for _, v := range nums {
|
||||
s.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.Clone()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkClone1Safe(b *testing.B) {
|
||||
benchClone(b, 1, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkClone1Unsafe(b *testing.B) {
|
||||
benchClone(b, 1, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkClone10Safe(b *testing.B) {
|
||||
benchClone(b, 10, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkClone10Unsafe(b *testing.B) {
|
||||
benchClone(b, 10, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkClone100Safe(b *testing.B) {
|
||||
benchClone(b, 100, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkClone100Unsafe(b *testing.B) {
|
||||
benchClone(b, 100, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchContains(b *testing.B, n int, s Set) {
|
||||
nums := toInterfaces(nrand(n))
|
||||
for _, v := range nums {
|
||||
s.Add(v)
|
||||
}
|
||||
|
||||
nums[n-1] = -1 // Definitely not in s
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.Contains(nums...)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkContains1Safe(b *testing.B) {
|
||||
benchContains(b, 1, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkContains1Unsafe(b *testing.B) {
|
||||
benchContains(b, 1, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkContains10Safe(b *testing.B) {
|
||||
benchContains(b, 10, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkContains10Unsafe(b *testing.B) {
|
||||
benchContains(b, 10, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkContains100Safe(b *testing.B) {
|
||||
benchContains(b, 100, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkContains100Unsafe(b *testing.B) {
|
||||
benchContains(b, 100, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchEqual(b *testing.B, n int, s, t Set) {
|
||||
nums := nrand(n)
|
||||
for _, v := range nums {
|
||||
s.Add(v)
|
||||
t.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.Equal(t)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEqual1Safe(b *testing.B) {
|
||||
benchEqual(b, 1, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkEqual1Unsafe(b *testing.B) {
|
||||
benchEqual(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkEqual10Safe(b *testing.B) {
|
||||
benchEqual(b, 10, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkEqual10Unsafe(b *testing.B) {
|
||||
benchEqual(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkEqual100Safe(b *testing.B) {
|
||||
benchEqual(b, 100, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkEqual100Unsafe(b *testing.B) {
|
||||
benchEqual(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchDifference(b *testing.B, n int, s, t Set) {
|
||||
nums := nrand(n)
|
||||
for _, v := range nums {
|
||||
s.Add(v)
|
||||
}
|
||||
for _, v := range nums[:n/2] {
|
||||
t.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.Difference(t)
|
||||
}
|
||||
}
|
||||
|
||||
func benchIsSubset(b *testing.B, n int, s, t Set) {
|
||||
nums := nrand(n)
|
||||
for _, v := range nums {
|
||||
s.Add(v)
|
||||
t.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.IsSubset(t)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkIsSubset1Safe(b *testing.B) {
|
||||
benchIsSubset(b, 1, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsSubset1Unsafe(b *testing.B) {
|
||||
benchIsSubset(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsSubset10Safe(b *testing.B) {
|
||||
benchIsSubset(b, 10, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsSubset10Unsafe(b *testing.B) {
|
||||
benchIsSubset(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsSubset100Safe(b *testing.B) {
|
||||
benchIsSubset(b, 100, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsSubset100Unsafe(b *testing.B) {
|
||||
benchIsSubset(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchIsSuperset(b *testing.B, n int, s, t Set) {
|
||||
nums := nrand(n)
|
||||
for _, v := range nums {
|
||||
s.Add(v)
|
||||
t.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.IsSuperset(t)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkIsSuperset1Safe(b *testing.B) {
|
||||
benchIsSuperset(b, 1, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsSuperset1Unsafe(b *testing.B) {
|
||||
benchIsSuperset(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsSuperset10Safe(b *testing.B) {
|
||||
benchIsSuperset(b, 10, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsSuperset10Unsafe(b *testing.B) {
|
||||
benchIsSuperset(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsSuperset100Safe(b *testing.B) {
|
||||
benchIsSuperset(b, 100, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsSuperset100Unsafe(b *testing.B) {
|
||||
benchIsSuperset(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchIsProperSubset(b *testing.B, n int, s, t Set) {
|
||||
nums := nrand(n)
|
||||
for _, v := range nums {
|
||||
s.Add(v)
|
||||
t.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.IsProperSubset(t)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkIsProperSubset1Safe(b *testing.B) {
|
||||
benchIsProperSubset(b, 1, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsProperSubset1Unsafe(b *testing.B) {
|
||||
benchIsProperSubset(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsProperSubset10Safe(b *testing.B) {
|
||||
benchIsProperSubset(b, 10, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsProperSubset10Unsafe(b *testing.B) {
|
||||
benchIsProperSubset(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsProperSubset100Safe(b *testing.B) {
|
||||
benchIsProperSubset(b, 100, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsProperSubset100Unsafe(b *testing.B) {
|
||||
benchIsProperSubset(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchIsProperSuperset(b *testing.B, n int, s, t Set) {
|
||||
nums := nrand(n)
|
||||
for _, v := range nums {
|
||||
s.Add(v)
|
||||
t.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.IsProperSuperset(t)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkIsProperSuperset1Safe(b *testing.B) {
|
||||
benchIsProperSuperset(b, 1, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsProperSuperset1Unsafe(b *testing.B) {
|
||||
benchIsProperSuperset(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsProperSuperset10Safe(b *testing.B) {
|
||||
benchIsProperSuperset(b, 10, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsProperSuperset10Unsafe(b *testing.B) {
|
||||
benchIsProperSuperset(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsProperSuperset100Safe(b *testing.B) {
|
||||
benchIsProperSuperset(b, 100, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIsProperSuperset100Unsafe(b *testing.B) {
|
||||
benchIsProperSuperset(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkDifference1Safe(b *testing.B) {
|
||||
benchDifference(b, 1, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkDifference1Unsafe(b *testing.B) {
|
||||
benchDifference(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkDifference10Safe(b *testing.B) {
|
||||
benchDifference(b, 10, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkDifference10Unsafe(b *testing.B) {
|
||||
benchDifference(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkDifference100Safe(b *testing.B) {
|
||||
benchDifference(b, 100, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkDifference100Unsafe(b *testing.B) {
|
||||
benchDifference(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchIntersect(b *testing.B, n int, s, t Set) {
|
||||
nums := nrand(int(float64(n) * float64(1.5)))
|
||||
for _, v := range nums[:n] {
|
||||
s.Add(v)
|
||||
}
|
||||
for _, v := range nums[n/2:] {
|
||||
t.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.Intersect(t)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkIntersect1Safe(b *testing.B) {
|
||||
benchIntersect(b, 1, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIntersect1Unsafe(b *testing.B) {
|
||||
benchIntersect(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkIntersect10Safe(b *testing.B) {
|
||||
benchIntersect(b, 10, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIntersect10Unsafe(b *testing.B) {
|
||||
benchIntersect(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkIntersect100Safe(b *testing.B) {
|
||||
benchIntersect(b, 100, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIntersect100Unsafe(b *testing.B) {
|
||||
benchIntersect(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchSymmetricDifference(b *testing.B, n int, s, t Set) {
|
||||
nums := nrand(int(float64(n) * float64(1.5)))
|
||||
for _, v := range nums[:n] {
|
||||
s.Add(v)
|
||||
}
|
||||
for _, v := range nums[n/2:] {
|
||||
t.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.SymmetricDifference(t)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSymmetricDifference1Safe(b *testing.B) {
|
||||
benchSymmetricDifference(b, 1, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkSymmetricDifference1Unsafe(b *testing.B) {
|
||||
benchSymmetricDifference(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkSymmetricDifference10Safe(b *testing.B) {
|
||||
benchSymmetricDifference(b, 10, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkSymmetricDifference10Unsafe(b *testing.B) {
|
||||
benchSymmetricDifference(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkSymmetricDifference100Safe(b *testing.B) {
|
||||
benchSymmetricDifference(b, 100, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkSymmetricDifference100Unsafe(b *testing.B) {
|
||||
benchSymmetricDifference(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchUnion(b *testing.B, n int, s, t Set) {
|
||||
nums := nrand(n)
|
||||
for _, v := range nums[:n/2] {
|
||||
s.Add(v)
|
||||
}
|
||||
for _, v := range nums[n/2:] {
|
||||
t.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.Union(t)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnion1Safe(b *testing.B) {
|
||||
benchUnion(b, 1, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkUnion1Unsafe(b *testing.B) {
|
||||
benchUnion(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkUnion10Safe(b *testing.B) {
|
||||
benchUnion(b, 10, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkUnion10Unsafe(b *testing.B) {
|
||||
benchUnion(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkUnion100Safe(b *testing.B) {
|
||||
benchUnion(b, 100, NewSet(), NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkUnion100Unsafe(b *testing.B) {
|
||||
benchUnion(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchEach(b *testing.B, n int, s Set) {
|
||||
nums := nrand(n)
|
||||
for _, v := range nums {
|
||||
s.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.Each(func(elem interface{}) bool {
|
||||
return false
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEach1Safe(b *testing.B) {
|
||||
benchEach(b, 1, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkEach1Unsafe(b *testing.B) {
|
||||
benchEach(b, 1, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkEach10Safe(b *testing.B) {
|
||||
benchEach(b, 10, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkEach10Unsafe(b *testing.B) {
|
||||
benchEach(b, 10, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkEach100Safe(b *testing.B) {
|
||||
benchEach(b, 100, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkEach100Unsafe(b *testing.B) {
|
||||
benchEach(b, 100, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchIter(b *testing.B, n int, s Set) {
|
||||
nums := nrand(n)
|
||||
for _, v := range nums {
|
||||
s.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c := s.Iter()
|
||||
for range c {
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkIter1Safe(b *testing.B) {
|
||||
benchIter(b, 1, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIter1Unsafe(b *testing.B) {
|
||||
benchIter(b, 1, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkIter10Safe(b *testing.B) {
|
||||
benchIter(b, 10, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIter10Unsafe(b *testing.B) {
|
||||
benchIter(b, 10, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkIter100Safe(b *testing.B) {
|
||||
benchIter(b, 100, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIter100Unsafe(b *testing.B) {
|
||||
benchIter(b, 100, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchIterator(b *testing.B, n int, s Set) {
|
||||
nums := nrand(n)
|
||||
for _, v := range nums {
|
||||
s.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c := s.Iterator().C
|
||||
for range c {
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkIterator1Safe(b *testing.B) {
|
||||
benchIterator(b, 1, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIterator1Unsafe(b *testing.B) {
|
||||
benchIterator(b, 1, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkIterator10Safe(b *testing.B) {
|
||||
benchIterator(b, 10, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIterator10Unsafe(b *testing.B) {
|
||||
benchIterator(b, 10, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkIterator100Safe(b *testing.B) {
|
||||
benchIterator(b, 100, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkIterator100Unsafe(b *testing.B) {
|
||||
benchIterator(b, 100, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchString(b *testing.B, n int, s Set) {
|
||||
nums := nrand(n)
|
||||
for _, v := range nums {
|
||||
s.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = s.String()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkString1Safe(b *testing.B) {
|
||||
benchString(b, 1, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkString1Unsafe(b *testing.B) {
|
||||
benchString(b, 1, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkString10Safe(b *testing.B) {
|
||||
benchString(b, 10, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkString10Unsafe(b *testing.B) {
|
||||
benchString(b, 10, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func BenchmarkString100Safe(b *testing.B) {
|
||||
benchString(b, 100, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkString100Unsafe(b *testing.B) {
|
||||
benchString(b, 100, NewThreadUnsafeSet())
|
||||
}
|
||||
|
||||
func benchToSlice(b *testing.B, s Set) {
|
||||
nums := nrand(b.N)
|
||||
for _, v := range nums {
|
||||
s.Add(v)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.ToSlice()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkToSliceSafe(b *testing.B) {
|
||||
benchToSlice(b, NewSet())
|
||||
}
|
||||
|
||||
func BenchmarkToSliceUnsafe(b *testing.B) {
|
||||
benchToSlice(b, NewThreadUnsafeSet())
|
||||
}
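The benchmarks above pair every operation with a thread-safe variant (NewSet) and a thread-unsafe one (NewThreadUnsafeSet) so the cost of the mutex can be compared directly. Below is a minimal sketch of driving one such benchmark body programmatically with testing.Benchmark instead of go test -bench; the helper name, element values, and import alias are illustrative and not part of the vendored file.

package main

import (
	"fmt"
	"testing"

	mapset "github.com/deckarep/golang-set"
)

// benchUnion100 roughly mirrors the benchUnion helper above: two 50-element
// sets, with Union executed b.N times.
func benchUnion100(s, t mapset.Set) testing.BenchmarkResult {
	for i := 0; i < 50; i++ {
		s.Add(i)
		t.Add(i + 50)
	}
	return testing.Benchmark(func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			s.Union(t)
		}
	})
}

func main() {
	fmt.Println("safe:  ", benchUnion100(mapset.NewSet(), mapset.NewSet()))
	fmt.Println("unsafe:", benchUnion100(mapset.NewThreadUnsafeSet(), mapset.NewThreadUnsafeSet()))
}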
|
58
vendor/github.com/deckarep/golang-set/iterator.go
generated
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
/*
|
||||
Open Source Initiative OSI - The MIT License (MIT):Licensing
|
||||
|
||||
The MIT License (MIT)
|
||||
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
package mapset
|
||||
|
||||
// Iterator defines an iterator over a Set; its C channel can be used to range over the Set's
// elements.
|
||||
type Iterator struct {
|
||||
C <-chan interface{}
|
||||
stop chan struct{}
|
||||
}
|
||||
|
||||
// Stop stops the Iterator; no further elements will be received on C, and C will be closed.
|
||||
func (i *Iterator) Stop() {
|
||||
// Allows for Stop() to be called multiple times
|
||||
// (close() panics when called on already closed channel)
|
||||
defer func() {
|
||||
recover()
|
||||
}()
|
||||
|
||||
close(i.stop)
|
||||
|
||||
// Exhaust any remaining elements.
|
||||
for range i.C {
|
||||
}
|
||||
}
|
||||
|
||||
// newIterator returns a new Iterator instance together with its item and stop channels.
|
||||
func newIterator() (*Iterator, chan<- interface{}, <-chan struct{}) {
|
||||
itemChan := make(chan interface{})
|
||||
stopChan := make(chan struct{})
|
||||
return &Iterator{
|
||||
C: itemChan,
|
||||
stop: stopChan,
|
||||
}, itemChan, stopChan
|
||||
}
|
32
vendor/github.com/deckarep/golang-set/iterator_example_test.go
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
package mapset
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type YourType struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
func ExampleIterator() {
|
||||
set := NewSetFromSlice([]interface{}{
|
||||
&YourType{Name: "Alise"},
|
||||
&YourType{Name: "Bob"},
|
||||
&YourType{Name: "John"},
|
||||
&YourType{Name: "Nick"},
|
||||
})
|
||||
|
||||
var found *YourType
|
||||
it := set.Iterator()
|
||||
|
||||
for elem := range it.C {
|
||||
if elem.(*YourType).Name == "John" {
|
||||
found = elem.(*YourType)
|
||||
it.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Found %+v\n", found)
|
||||
|
||||
// Output: Found &{Name:John}
|
||||
}
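For contrast with the Stop-based example above, here is a short sketch (not part of the vendored file; the values are illustrative) of the simpler Iter channel, which should be drained completely, since the thread-safe implementation holds its read lock until the channel is closed.

package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	s := mapset.NewSetFromSlice([]interface{}{1, 2, 3})
	// Iter returns a channel that is closed after every element has been sent,
	// so ranging to completion is the expected usage.
	for elem := range s.Iter() {
		fmt.Println(elem)
	}
}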
|
217
vendor/github.com/deckarep/golang-set/set.go
generated
vendored
Normal file
@ -0,0 +1,217 @@
|
||||
/*
|
||||
Open Source Initiative OSI - The MIT License (MIT):Licensing
|
||||
|
||||
The MIT License (MIT)
|
||||
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Package mapset implements a simple and generic set collection.
|
||||
// Items stored within it are unordered and unique. It supports
|
||||
// typical set operations: membership testing, intersection, union,
|
||||
// difference, symmetric difference and cloning.
|
||||
//
|
||||
// Package mapset provides two implementations of the Set
|
||||
// interface. The default implementation is safe for concurrent
|
||||
// access, but a non-thread-safe implementation is also provided for
|
||||
// programs that can benefit from the slight speed improvement and
|
||||
// that can enforce mutual exclusion through other means.
|
||||
package mapset
|
||||
|
||||
// Set is the primary interface provided by the mapset package. It
|
||||
// represents an unordered set of data and a large number of
|
||||
// operations that can be applied to that set.
|
||||
type Set interface {
|
||||
// Adds an element to the set. Returns whether
|
||||
// the item was added.
|
||||
Add(i interface{}) bool
|
||||
|
||||
// Returns the number of elements in the set.
|
||||
Cardinality() int
|
||||
|
||||
// Removes all elements from the set, leaving
|
||||
// the empty set.
|
||||
Clear()
|
||||
|
||||
// Returns a clone of the set using the same
|
||||
// implementation, duplicating all keys.
|
||||
Clone() Set
|
||||
|
||||
// Returns whether the given items
|
||||
// are all in the set.
|
||||
Contains(i ...interface{}) bool
|
||||
|
||||
// Returns the difference between this set
|
||||
// and other. The returned set will contain
|
||||
// all elements of this set that are not also
|
||||
// elements of other.
|
||||
//
|
||||
// Note that the argument to Difference
|
||||
// must be of the same type as the receiver
|
||||
// of the method. Otherwise, Difference will
|
||||
// panic.
|
||||
Difference(other Set) Set
|
||||
|
||||
// Determines if two sets are equal to each
|
||||
// other. If they have the same cardinality
|
||||
// and contain the same elements, they are
|
||||
// considered equal. The order in which
|
||||
// the elements were added is irrelevant.
|
||||
//
|
||||
// Note that the argument to Equal must be
|
||||
// of the same type as the receiver of the
|
||||
// method. Otherwise, Equal will panic.
|
||||
Equal(other Set) bool
|
||||
|
||||
// Returns a new set containing only the elements
// that exist in both sets.
|
||||
//
|
||||
// Note that the argument to Intersect
|
||||
// must be of the same type as the receiver
|
||||
// of the method. Otherwise, Intersect will
|
||||
// panic.
|
||||
Intersect(other Set) Set
|
||||
|
||||
// Determines if every element in this set is in
|
||||
// the other set but the two sets are not equal.
|
||||
//
|
||||
// Note that the argument to IsProperSubset
|
||||
// must be of the same type as the receiver
|
||||
// of the method. Otherwise, IsProperSubset
|
||||
// will panic.
|
||||
IsProperSubset(other Set) bool
|
||||
|
||||
// Determines if every element in the other set
|
||||
// is in this set but the two sets are not
|
||||
// equal.
|
||||
//
|
||||
// Note that the argument to IsProperSuperset
// must be of the same type as the receiver
// of the method. Otherwise, IsProperSuperset
// will panic.
IsProperSuperset(other Set) bool
|
||||
|
||||
// Determines if every element in this set is in
|
||||
// the other set.
|
||||
//
|
||||
// Note that the argument to IsSubset
|
||||
// must be of the same type as the receiver
|
||||
// of the method. Otherwise, IsSubset will
|
||||
// panic.
|
||||
IsSubset(other Set) bool
|
||||
|
||||
// Determines if every element in the other set
|
||||
// is in this set.
|
||||
//
|
||||
// Note that the argument to IsSuperset
|
||||
// must be of the same type as the receiver
|
||||
// of the method. Otherwise, IsSuperset will
|
||||
// panic.
|
||||
IsSuperset(other Set) bool
|
||||
|
||||
// Iterates over elements and executes the passed func against each element.
// If the passed func returns true, iteration stops at that element.
Each(func(interface{}) bool)
|
||||
|
||||
// Returns a channel of elements that you can
|
||||
// range over.
|
||||
Iter() <-chan interface{}
|
||||
|
||||
// Returns an Iterator object that you can
|
||||
// use to range over the set.
|
||||
Iterator() *Iterator
|
||||
|
||||
// Remove a single element from the set.
|
||||
Remove(i interface{})
|
||||
|
||||
// Provides a convenient string representation
|
||||
// of the current state of the set.
|
||||
String() string
|
||||
|
||||
// Returns a new set with all elements which are
|
||||
// in either this set or the other set but not in both.
|
||||
//
|
||||
// Note that the argument to SymmetricDifference
|
||||
// must be of the same type as the receiver
|
||||
// of the method. Otherwise, SymmetricDifference
|
||||
// will panic.
|
||||
SymmetricDifference(other Set) Set
|
||||
|
||||
// Returns a new set with all elements that are
// in either this set or the other set.
//
// Note that the argument to Union must be of the
// same type as the receiver of the method.
// Otherwise, Union will panic.
Union(other Set) Set
|
||||
|
||||
// Pop removes and returns an arbitrary item from the set.
|
||||
Pop() interface{}
|
||||
|
||||
// Returns all subsets of a given set (Power Set).
|
||||
PowerSet() Set
|
||||
|
||||
// Returns the Cartesian Product of two sets.
|
||||
CartesianProduct(other Set) Set
|
||||
|
||||
// Returns the members of the set as a slice.
|
||||
ToSlice() []interface{}
|
||||
}
|
||||
|
||||
// NewSet creates and returns a reference to an empty set. Operations
|
||||
// on the resulting set are thread-safe.
|
||||
func NewSet(s ...interface{}) Set {
|
||||
set := newThreadSafeSet()
|
||||
for _, item := range s {
|
||||
set.Add(item)
|
||||
}
|
||||
return &set
|
||||
}
|
||||
|
||||
// NewSetWith creates and returns a new set with the given elements.
|
||||
// Operations on the resulting set are thread-safe.
|
||||
func NewSetWith(elts ...interface{}) Set {
|
||||
return NewSetFromSlice(elts)
|
||||
}
|
||||
|
||||
// NewSetFromSlice creates and returns a reference to a set from an
|
||||
// existing slice. Operations on the resulting set are thread-safe.
|
||||
func NewSetFromSlice(s []interface{}) Set {
|
||||
a := NewSet(s...)
|
||||
return a
|
||||
}
|
||||
|
||||
// NewThreadUnsafeSet creates and returns a reference to an empty set.
|
||||
// Operations on the resulting set are not thread-safe.
|
||||
func NewThreadUnsafeSet() Set {
|
||||
set := newThreadUnsafeSet()
|
||||
return &set
|
||||
}
|
||||
|
||||
// NewThreadUnsafeSetFromSlice creates and returns a reference to a
|
||||
// set from an existing slice. Operations on the resulting set are
|
||||
// not thread-safe.
|
||||
func NewThreadUnsafeSetFromSlice(s []interface{}) Set {
|
||||
a := NewThreadUnsafeSet()
|
||||
for _, item := range s {
|
||||
a.Add(item)
|
||||
}
|
||||
return a
|
||||
}
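A short usage sketch of the constructors and interface above (not part of the vendored file; the import alias and element values are illustrative):

package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	required := mapset.NewSetFromSlice([]interface{}{"cooking", "biology"})
	offered := mapset.NewSet()
	offered.Add("biology")
	offered.Add("music")

	// Set-algebra methods require both operands to use the same implementation.
	fmt.Println(required.Intersect(offered))           // Set{biology}
	fmt.Println(required.Union(offered).Cardinality()) // 3
	fmt.Println(offered.IsSubset(required))            // false
}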
|
1200
vendor/github.com/deckarep/golang-set/set_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
283
vendor/github.com/deckarep/golang-set/threadsafe.go
generated
vendored
Normal file
@ -0,0 +1,283 @@
|
||||
/*
|
||||
Open Source Initiative OSI - The MIT License (MIT):Licensing
|
||||
|
||||
The MIT License (MIT)
|
||||
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
package mapset
|
||||
|
||||
import "sync"
|
||||
|
||||
type threadSafeSet struct {
|
||||
s threadUnsafeSet
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
func newThreadSafeSet() threadSafeSet {
|
||||
return threadSafeSet{s: newThreadUnsafeSet()}
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) Add(i interface{}) bool {
|
||||
set.Lock()
|
||||
ret := set.s.Add(i)
|
||||
set.Unlock()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) Contains(i ...interface{}) bool {
|
||||
set.RLock()
|
||||
ret := set.s.Contains(i...)
|
||||
set.RUnlock()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) IsSubset(other Set) bool {
|
||||
o := other.(*threadSafeSet)
|
||||
|
||||
set.RLock()
|
||||
o.RLock()
|
||||
|
||||
ret := set.s.IsSubset(&o.s)
|
||||
set.RUnlock()
|
||||
o.RUnlock()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) IsProperSubset(other Set) bool {
|
||||
o := other.(*threadSafeSet)
|
||||
|
||||
set.RLock()
|
||||
defer set.RUnlock()
|
||||
o.RLock()
|
||||
defer o.RUnlock()
|
||||
|
||||
return set.s.IsProperSubset(&o.s)
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) IsSuperset(other Set) bool {
|
||||
return other.IsSubset(set)
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) IsProperSuperset(other Set) bool {
|
||||
return other.IsProperSubset(set)
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) Union(other Set) Set {
|
||||
o := other.(*threadSafeSet)
|
||||
|
||||
set.RLock()
|
||||
o.RLock()
|
||||
|
||||
unsafeUnion := set.s.Union(&o.s).(*threadUnsafeSet)
|
||||
ret := &threadSafeSet{s: *unsafeUnion}
|
||||
set.RUnlock()
|
||||
o.RUnlock()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) Intersect(other Set) Set {
|
||||
o := other.(*threadSafeSet)
|
||||
|
||||
set.RLock()
|
||||
o.RLock()
|
||||
|
||||
unsafeIntersection := set.s.Intersect(&o.s).(*threadUnsafeSet)
|
||||
ret := &threadSafeSet{s: *unsafeIntersection}
|
||||
set.RUnlock()
|
||||
o.RUnlock()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) Difference(other Set) Set {
|
||||
o := other.(*threadSafeSet)
|
||||
|
||||
set.RLock()
|
||||
o.RLock()
|
||||
|
||||
unsafeDifference := set.s.Difference(&o.s).(*threadUnsafeSet)
|
||||
ret := &threadSafeSet{s: *unsafeDifference}
|
||||
set.RUnlock()
|
||||
o.RUnlock()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) SymmetricDifference(other Set) Set {
|
||||
o := other.(*threadSafeSet)
|
||||
|
||||
set.RLock()
|
||||
o.RLock()
|
||||
|
||||
unsafeDifference := set.s.SymmetricDifference(&o.s).(*threadUnsafeSet)
|
||||
ret := &threadSafeSet{s: *unsafeDifference}
|
||||
set.RUnlock()
|
||||
o.RUnlock()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) Clear() {
|
||||
set.Lock()
|
||||
set.s = newThreadUnsafeSet()
|
||||
set.Unlock()
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) Remove(i interface{}) {
|
||||
set.Lock()
|
||||
delete(set.s, i)
|
||||
set.Unlock()
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) Cardinality() int {
|
||||
set.RLock()
|
||||
defer set.RUnlock()
|
||||
return len(set.s)
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) Each(cb func(interface{}) bool) {
|
||||
set.RLock()
|
||||
for elem := range set.s {
|
||||
if cb(elem) {
|
||||
break
|
||||
}
|
||||
}
|
||||
set.RUnlock()
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) Iter() <-chan interface{} {
|
||||
ch := make(chan interface{})
|
||||
go func() {
|
||||
set.RLock()
|
||||
|
||||
for elem := range set.s {
|
||||
ch <- elem
|
||||
}
|
||||
close(ch)
|
||||
set.RUnlock()
|
||||
}()
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) Iterator() *Iterator {
|
||||
iterator, ch, stopCh := newIterator()
|
||||
|
||||
go func() {
|
||||
set.RLock()
|
||||
L:
|
||||
for elem := range set.s {
|
||||
select {
|
||||
case <-stopCh:
|
||||
break L
|
||||
case ch <- elem:
|
||||
}
|
||||
}
|
||||
close(ch)
|
||||
set.RUnlock()
|
||||
}()
|
||||
|
||||
return iterator
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) Equal(other Set) bool {
|
||||
o := other.(*threadSafeSet)
|
||||
|
||||
set.RLock()
|
||||
o.RLock()
|
||||
|
||||
ret := set.s.Equal(&o.s)
|
||||
set.RUnlock()
|
||||
o.RUnlock()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) Clone() Set {
|
||||
set.RLock()
|
||||
|
||||
unsafeClone := set.s.Clone().(*threadUnsafeSet)
|
||||
ret := &threadSafeSet{s: *unsafeClone}
|
||||
set.RUnlock()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) String() string {
|
||||
set.RLock()
|
||||
ret := set.s.String()
|
||||
set.RUnlock()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) PowerSet() Set {
|
||||
set.RLock()
|
||||
unsafePowerSet := set.s.PowerSet().(*threadUnsafeSet)
|
||||
set.RUnlock()
|
||||
|
||||
ret := &threadSafeSet{s: newThreadUnsafeSet()}
|
||||
for subset := range unsafePowerSet.Iter() {
|
||||
unsafeSubset := subset.(*threadUnsafeSet)
|
||||
ret.Add(&threadSafeSet{s: *unsafeSubset})
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) Pop() interface{} {
|
||||
set.Lock()
|
||||
defer set.Unlock()
|
||||
return set.s.Pop()
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) CartesianProduct(other Set) Set {
|
||||
o := other.(*threadSafeSet)
|
||||
|
||||
set.RLock()
|
||||
o.RLock()
|
||||
|
||||
unsafeCartProduct := set.s.CartesianProduct(&o.s).(*threadUnsafeSet)
|
||||
ret := &threadSafeSet{s: *unsafeCartProduct}
|
||||
set.RUnlock()
|
||||
o.RUnlock()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) ToSlice() []interface{} {
|
||||
keys := make([]interface{}, 0, set.Cardinality())
|
||||
set.RLock()
|
||||
for elem := range set.s {
|
||||
keys = append(keys, elem)
|
||||
}
|
||||
set.RUnlock()
|
||||
return keys
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) MarshalJSON() ([]byte, error) {
|
||||
set.RLock()
|
||||
b, err := set.s.MarshalJSON()
|
||||
set.RUnlock()
|
||||
|
||||
return b, err
|
||||
}
|
||||
|
||||
func (set *threadSafeSet) UnmarshalJSON(p []byte) error {
|
||||
// UnmarshalJSON adds elements, so take the write lock rather than the read lock.
set.Lock()
err := set.s.UnmarshalJSON(p)
set.Unlock()
|
||||
|
||||
return err
|
||||
}
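The wrapper above simply guards a threadUnsafeSet with a sync.RWMutex, so a set returned by NewSet can be shared between goroutines without extra locking by the caller. A minimal sketch (not part of the vendored file; the goroutine count and values are illustrative):

package main

import (
	"fmt"
	"sync"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	s := mapset.NewSet()

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			s.Add(i % 4) // duplicate adds are ignored
		}(i)
	}
	wg.Wait()

	fmt.Println(s.Cardinality()) // 4
}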
|
524
vendor/github.com/deckarep/golang-set/threadsafe_test.go
generated
vendored
Normal file
@ -0,0 +1,524 @@
|
||||
/*
|
||||
Open Source Initiative OSI - The MIT License (MIT):Licensing
|
||||
|
||||
The MIT License (MIT)
|
||||
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
package mapset
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const N = 1000
|
||||
|
||||
func Test_AddConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s := NewSet()
|
||||
ints := rand.Perm(N)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(ints))
|
||||
for i := 0; i < len(ints); i++ {
|
||||
go func(i int) {
|
||||
s.Add(i)
|
||||
wg.Done()
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
for _, i := range ints {
|
||||
if !s.Contains(i) {
|
||||
t.Errorf("Set is missing element: %v", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Test_CardinalityConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s := NewSet()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
elems := s.Cardinality()
|
||||
for i := 0; i < N; i++ {
|
||||
newElems := s.Cardinality()
|
||||
if newElems < elems {
|
||||
t.Errorf("Cardinality shrunk from %v to %v", elems, newElems)
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
for i := 0; i < N; i++ {
|
||||
s.Add(rand.Int())
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func Test_ClearConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s := NewSet()
|
||||
ints := rand.Perm(N)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(ints))
|
||||
for i := 0; i < len(ints); i++ {
|
||||
go func() {
|
||||
s.Clear()
|
||||
wg.Done()
|
||||
}()
|
||||
go func(i int) {
|
||||
s.Add(i)
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func Test_CloneConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s := NewSet()
|
||||
ints := rand.Perm(N)
|
||||
|
||||
for _, v := range ints {
|
||||
s.Add(v)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(ints))
|
||||
for i := range ints {
|
||||
go func(i int) {
|
||||
s.Remove(i)
|
||||
wg.Done()
|
||||
}(i)
|
||||
}
|
||||
|
||||
s.Clone()
|
||||
}
|
||||
|
||||
func Test_ContainsConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s := NewSet()
|
||||
ints := rand.Perm(N)
|
||||
interfaces := make([]interface{}, 0)
|
||||
for _, v := range ints {
|
||||
s.Add(v)
|
||||
interfaces = append(interfaces, v)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for range ints {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
s.Contains(interfaces...)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func Test_DifferenceConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s, ss := NewSet(), NewSet()
|
||||
ints := rand.Perm(N)
|
||||
for _, v := range ints {
|
||||
s.Add(v)
|
||||
ss.Add(v)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for range ints {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
s.Difference(ss)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func Test_EqualConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s, ss := NewSet(), NewSet()
|
||||
ints := rand.Perm(N)
|
||||
for _, v := range ints {
|
||||
s.Add(v)
|
||||
ss.Add(v)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for range ints {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
s.Equal(ss)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func Test_IntersectConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s, ss := NewSet(), NewSet()
|
||||
ints := rand.Perm(N)
|
||||
for _, v := range ints {
|
||||
s.Add(v)
|
||||
ss.Add(v)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for range ints {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
s.Intersect(ss)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func Test_IsSubsetConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s, ss := NewSet(), NewSet()
|
||||
ints := rand.Perm(N)
|
||||
for _, v := range ints {
|
||||
s.Add(v)
|
||||
ss.Add(v)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for range ints {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
s.IsSubset(ss)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func Test_IsProperSubsetConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s, ss := NewSet(), NewSet()
|
||||
ints := rand.Perm(N)
|
||||
for _, v := range ints {
|
||||
s.Add(v)
|
||||
ss.Add(v)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for range ints {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
s.IsProperSubset(ss)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func Test_IsSupersetConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s, ss := NewSet(), NewSet()
|
||||
ints := rand.Perm(N)
|
||||
for _, v := range ints {
|
||||
s.Add(v)
|
||||
ss.Add(v)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for range ints {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
s.IsSuperset(ss)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func Test_IsProperSupersetConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s, ss := NewSet(), NewSet()
|
||||
ints := rand.Perm(N)
|
||||
for _, v := range ints {
|
||||
s.Add(v)
|
||||
ss.Add(v)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for range ints {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
s.IsProperSuperset(ss)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func Test_EachConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
concurrent := 10
|
||||
|
||||
s := NewSet()
|
||||
ints := rand.Perm(N)
|
||||
for _, v := range ints {
|
||||
s.Add(v)
|
||||
}
|
||||
|
||||
var count int64
|
||||
wg := new(sync.WaitGroup)
|
||||
wg.Add(concurrent)
|
||||
for n := 0; n < concurrent; n++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
s.Each(func(elem interface{}) bool {
|
||||
atomic.AddInt64(&count, 1)
|
||||
return false
|
||||
})
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
if count != int64(N*concurrent) {
|
||||
t.Errorf("%v != %v", count, int64(N*concurrent))
|
||||
}
|
||||
}
|
||||
|
||||
func Test_IterConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s := NewSet()
|
||||
ints := rand.Perm(N)
|
||||
for _, v := range ints {
|
||||
s.Add(v)
|
||||
}
|
||||
|
||||
cs := make([]<-chan interface{}, 0)
|
||||
for range ints {
|
||||
cs = append(cs, s.Iter())
|
||||
}
|
||||
|
||||
c := make(chan interface{})
|
||||
go func() {
|
||||
for n := 0; n < len(ints)*N; {
|
||||
for _, d := range cs {
|
||||
select {
|
||||
case <-d:
|
||||
n++
|
||||
c <- nil
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
close(c)
|
||||
}()
|
||||
|
||||
for range c {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_RemoveConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s := NewSet()
|
||||
ints := rand.Perm(N)
|
||||
for _, v := range ints {
|
||||
s.Add(v)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(ints))
|
||||
for _, v := range ints {
|
||||
go func(i int) {
|
||||
s.Remove(i)
|
||||
wg.Done()
|
||||
}(v)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
if s.Cardinality() != 0 {
|
||||
t.Errorf("Expected cardinality 0; got %v", s.Cardinality())
|
||||
}
|
||||
}
|
||||
|
||||
func Test_StringConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s := NewSet()
|
||||
ints := rand.Perm(N)
|
||||
for _, v := range ints {
|
||||
s.Add(v)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(ints))
|
||||
for range ints {
|
||||
go func() {
|
||||
_ = s.String()
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func Test_SymmetricDifferenceConcurrent(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s, ss := NewSet(), NewSet()
|
||||
ints := rand.Perm(N)
|
||||
for _, v := range ints {
|
||||
s.Add(v)
|
||||
ss.Add(v)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for range ints {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
s.SymmetricDifference(ss)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func Test_ToSlice(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
s := NewSet()
|
||||
ints := rand.Perm(N)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(ints))
|
||||
for i := 0; i < len(ints); i++ {
|
||||
go func(i int) {
|
||||
s.Add(i)
|
||||
wg.Done()
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
setAsSlice := s.ToSlice()
|
||||
if len(setAsSlice) != s.Cardinality() {
|
||||
t.Errorf("Set length is incorrect: %v", len(setAsSlice))
|
||||
}
|
||||
|
||||
for _, i := range setAsSlice {
|
||||
if !s.Contains(i) {
|
||||
t.Errorf("Set is missing element: %v", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test_ToSliceDeadlock - fixes issue: https://github.com/deckarep/golang-set/issues/36
|
||||
// This code reveals the deadlock however it doesn't happen consistently.
|
||||
func Test_ToSliceDeadlock(t *testing.T) {
|
||||
runtime.GOMAXPROCS(2)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
set := NewSet()
|
||||
workers := 10
|
||||
wg.Add(workers)
|
||||
for i := 1; i <= workers; i++ {
|
||||
go func() {
|
||||
for j := 0; j < 1000; j++ {
|
||||
set.Add(1)
|
||||
set.ToSlice()
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func Test_UnmarshalJSON(t *testing.T) {
|
||||
s := []byte(`["test", 1, 2, 3, ["4,5,6"]]`)
|
||||
expected := NewSetFromSlice(
|
||||
[]interface{}{
|
||||
json.Number("1"),
|
||||
json.Number("2"),
|
||||
json.Number("3"),
|
||||
"test",
|
||||
},
|
||||
)
|
||||
actual := NewSet()
|
||||
err := json.Unmarshal(s, actual)
|
||||
if err != nil {
|
||||
t.Errorf("Error should be nil: %v", err)
|
||||
}
|
||||
|
||||
if !expected.Equal(actual) {
|
||||
t.Errorf("Expected no difference, got: %v", expected.Difference(actual))
|
||||
}
|
||||
}
|
||||
|
||||
func Test_MarshalJSON(t *testing.T) {
|
||||
expected := NewSetFromSlice(
|
||||
[]interface{}{
|
||||
json.Number("1"),
|
||||
"test",
|
||||
},
|
||||
)
|
||||
|
||||
b, err := json.Marshal(
|
||||
NewSetFromSlice(
|
||||
[]interface{}{
|
||||
1,
|
||||
"test",
|
||||
},
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("Error should be nil: %v", err)
|
||||
}
|
||||
|
||||
actual := NewSet()
|
||||
err = json.Unmarshal(b, actual)
|
||||
if err != nil {
|
||||
t.Errorf("Error should be nil: %v", err)
|
||||
}
|
||||
|
||||
if !expected.Equal(actual) {
|
||||
t.Errorf("Expected no difference, got: %v", expected.Difference(actual))
|
||||
}
|
||||
}
|
337
vendor/github.com/deckarep/golang-set/threadunsafe.go
generated
vendored
Normal file
@ -0,0 +1,337 @@
|
||||
/*
|
||||
Open Source Initiative OSI - The MIT License (MIT):Licensing
|
||||
|
||||
The MIT License (MIT)
|
||||
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
package mapset
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type threadUnsafeSet map[interface{}]struct{}
|
||||
|
||||
// An OrderedPair represents a 2-tuple of values.
|
||||
type OrderedPair struct {
|
||||
First interface{}
|
||||
Second interface{}
|
||||
}
|
||||
|
||||
func newThreadUnsafeSet() threadUnsafeSet {
|
||||
return make(threadUnsafeSet)
|
||||
}
|
||||
|
||||
// Equal says whether two 2-tuples contain the same values in the same order.
|
||||
func (pair *OrderedPair) Equal(other OrderedPair) bool {
|
||||
if pair.First == other.First &&
|
||||
pair.Second == other.Second {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) Add(i interface{}) bool {
|
||||
_, found := (*set)[i]
|
||||
if found {
|
||||
return false //False if it existed already
|
||||
}
|
||||
|
||||
(*set)[i] = struct{}{}
|
||||
return true
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) Contains(i ...interface{}) bool {
|
||||
for _, val := range i {
|
||||
if _, ok := (*set)[val]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) IsSubset(other Set) bool {
|
||||
_ = other.(*threadUnsafeSet)
|
||||
for elem := range *set {
|
||||
if !other.Contains(elem) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) IsProperSubset(other Set) bool {
|
||||
return set.IsSubset(other) && !set.Equal(other)
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) IsSuperset(other Set) bool {
|
||||
return other.IsSubset(set)
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) IsProperSuperset(other Set) bool {
|
||||
return set.IsSuperset(other) && !set.Equal(other)
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) Union(other Set) Set {
|
||||
o := other.(*threadUnsafeSet)
|
||||
|
||||
unionedSet := newThreadUnsafeSet()
|
||||
|
||||
for elem := range *set {
|
||||
unionedSet.Add(elem)
|
||||
}
|
||||
for elem := range *o {
|
||||
unionedSet.Add(elem)
|
||||
}
|
||||
return &unionedSet
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) Intersect(other Set) Set {
|
||||
o := other.(*threadUnsafeSet)
|
||||
|
||||
intersection := newThreadUnsafeSet()
|
||||
// loop over smaller set
|
||||
if set.Cardinality() < other.Cardinality() {
|
||||
for elem := range *set {
|
||||
if other.Contains(elem) {
|
||||
intersection.Add(elem)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for elem := range *o {
|
||||
if set.Contains(elem) {
|
||||
intersection.Add(elem)
|
||||
}
|
||||
}
|
||||
}
|
||||
return &intersection
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) Difference(other Set) Set {
|
||||
_ = other.(*threadUnsafeSet)
|
||||
|
||||
difference := newThreadUnsafeSet()
|
||||
for elem := range *set {
|
||||
if !other.Contains(elem) {
|
||||
difference.Add(elem)
|
||||
}
|
||||
}
|
||||
return &difference
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) SymmetricDifference(other Set) Set {
|
||||
_ = other.(*threadUnsafeSet)
|
||||
|
||||
aDiff := set.Difference(other)
|
||||
bDiff := other.Difference(set)
|
||||
return aDiff.Union(bDiff)
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) Clear() {
|
||||
*set = newThreadUnsafeSet()
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) Remove(i interface{}) {
|
||||
delete(*set, i)
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) Cardinality() int {
|
||||
return len(*set)
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) Each(cb func(interface{}) bool) {
|
||||
for elem := range *set {
|
||||
if cb(elem) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) Iter() <-chan interface{} {
|
||||
ch := make(chan interface{})
|
||||
go func() {
|
||||
for elem := range *set {
|
||||
ch <- elem
|
||||
}
|
||||
close(ch)
|
||||
}()
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) Iterator() *Iterator {
|
||||
iterator, ch, stopCh := newIterator()
|
||||
|
||||
go func() {
|
||||
L:
|
||||
for elem := range *set {
|
||||
select {
|
||||
case <-stopCh:
|
||||
break L
|
||||
case ch <- elem:
|
||||
}
|
||||
}
|
||||
close(ch)
|
||||
}()
|
||||
|
||||
return iterator
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) Equal(other Set) bool {
|
||||
_ = other.(*threadUnsafeSet)
|
||||
|
||||
if set.Cardinality() != other.Cardinality() {
|
||||
return false
|
||||
}
|
||||
for elem := range *set {
|
||||
if !other.Contains(elem) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) Clone() Set {
|
||||
clonedSet := newThreadUnsafeSet()
|
||||
for elem := range *set {
|
||||
clonedSet.Add(elem)
|
||||
}
|
||||
return &clonedSet
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) String() string {
|
||||
items := make([]string, 0, len(*set))
|
||||
|
||||
for elem := range *set {
|
||||
items = append(items, fmt.Sprintf("%v", elem))
|
||||
}
|
||||
return fmt.Sprintf("Set{%s}", strings.Join(items, ", "))
|
||||
}
|
||||
|
||||
// String outputs a 2-tuple in the form "(A, B)".
|
||||
func (pair OrderedPair) String() string {
|
||||
return fmt.Sprintf("(%v, %v)", pair.First, pair.Second)
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) Pop() interface{} {
|
||||
for item := range *set {
|
||||
delete(*set, item)
|
||||
return item
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) PowerSet() Set {
|
||||
powSet := NewThreadUnsafeSet()
|
||||
nullset := newThreadUnsafeSet()
|
||||
powSet.Add(&nullset)
|
||||
|
||||
for es := range *set {
|
||||
u := newThreadUnsafeSet()
|
||||
j := powSet.Iter()
|
||||
for er := range j {
|
||||
p := newThreadUnsafeSet()
|
||||
if reflect.TypeOf(er).Name() == "" {
|
||||
k := er.(*threadUnsafeSet)
|
||||
for ek := range *(k) {
|
||||
p.Add(ek)
|
||||
}
|
||||
} else {
|
||||
p.Add(er)
|
||||
}
|
||||
p.Add(es)
|
||||
u.Add(&p)
|
||||
}
|
||||
|
||||
powSet = powSet.Union(&u)
|
||||
}
|
||||
|
||||
return powSet
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) CartesianProduct(other Set) Set {
|
||||
o := other.(*threadUnsafeSet)
|
||||
cartProduct := NewThreadUnsafeSet()
|
||||
|
||||
for i := range *set {
|
||||
for j := range *o {
|
||||
elem := OrderedPair{First: i, Second: j}
|
||||
cartProduct.Add(elem)
|
||||
}
|
||||
}
|
||||
|
||||
return cartProduct
|
||||
}
|
||||
|
||||
func (set *threadUnsafeSet) ToSlice() []interface{} {
|
||||
keys := make([]interface{}, 0, set.Cardinality())
|
||||
for elem := range *set {
|
||||
keys = append(keys, elem)
|
||||
}
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
// MarshalJSON creates a JSON array from the set; it marshals all elements.
|
||||
func (set *threadUnsafeSet) MarshalJSON() ([]byte, error) {
|
||||
items := make([]string, 0, set.Cardinality())
|
||||
|
||||
for elem := range *set {
|
||||
b, err := json.Marshal(elem)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
items = append(items, string(b))
|
||||
}
|
||||
|
||||
return []byte(fmt.Sprintf("[%s]", strings.Join(items, ","))), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON recreates a set from a JSON array; it only decodes
|
||||
// primitive types. Numbers are decoded as json.Number.
|
||||
func (set *threadUnsafeSet) UnmarshalJSON(b []byte) error {
|
||||
var i []interface{}
|
||||
|
||||
d := json.NewDecoder(bytes.NewReader(b))
|
||||
d.UseNumber()
|
||||
err := d.Decode(&i)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, v := range i {
|
||||
switch t := v.(type) {
|
||||
case []interface{}, map[string]interface{}:
|
||||
continue
|
||||
default:
|
||||
set.Add(t)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
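PowerSet and CartesianProduct above are the less common operations; a brief sketch of both on small thread-unsafe sets (not part of the vendored file; the element values are illustrative):

package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	a := mapset.NewThreadUnsafeSetFromSlice([]interface{}{1, 2})
	b := mapset.NewThreadUnsafeSetFromSlice([]interface{}{"x", "y"})

	// PowerSet returns all 2^n subsets of a, each itself a Set.
	fmt.Println(a.PowerSet().Cardinality()) // 4

	// CartesianProduct pairs every element of a with every element of b
	// as OrderedPair values.
	fmt.Println(a.CartesianProduct(b).Cardinality()) // 4
}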
|
1
vendor/github.com/ethereum/go-ethereum/.gitattributes
generated
vendored
@ -1,2 +1,3 @@
|
||||
# Auto detect text files and perform LF normalization
|
||||
* text=auto
|
||||
*.sol linguist-language=Solidity
|
||||
|
40
vendor/github.com/ethereum/go-ethereum/.github/CODEOWNERS
generated
vendored
@ -1,11 +1,35 @@
|
||||
# Lines starting with '#' are comments.
|
||||
# Each line is a file pattern followed by one or more owners.
|
||||
|
||||
accounts/usbwallet @karalabe
|
||||
consensus @karalabe
|
||||
core/ @karalabe @holiman
|
||||
eth/ @karalabe
|
||||
les/ @zsfelfoldi
|
||||
light/ @zsfelfoldi
|
||||
mobile/ @karalabe
|
||||
p2p/ @fjl @zsfelfoldi
|
||||
accounts/usbwallet @karalabe
|
||||
consensus @karalabe
|
||||
core/ @karalabe @holiman
|
||||
eth/ @karalabe
|
||||
les/ @zsfelfoldi
|
||||
light/ @zsfelfoldi
|
||||
mobile/ @karalabe
|
||||
p2p/ @fjl @zsfelfoldi
|
||||
p2p/simulations @lmars
|
||||
p2p/protocols @zelig
|
||||
swarm/api/http @justelad
|
||||
swarm/bmt @zelig
|
||||
swarm/dev @lmars
|
||||
swarm/fuse @jmozah @holisticode
|
||||
swarm/grafana_dashboards @nonsense
|
||||
swarm/metrics @nonsense @holisticode
|
||||
swarm/multihash @nolash
|
||||
swarm/network/bitvector @zelig @janos
|
||||
swarm/network/priorityqueue @zelig @janos
|
||||
swarm/network/simulations @zelig @janos
|
||||
swarm/network/stream @janos @zelig @holisticode @justelad
|
||||
swarm/network/stream/intervals @janos
|
||||
swarm/network/stream/testing @zelig
|
||||
swarm/pot @zelig
|
||||
swarm/pss @nolash @zelig @nonsense
|
||||
swarm/services @zelig
|
||||
swarm/state @justelad
|
||||
swarm/storage/encryption @zelig @nagydani
|
||||
swarm/storage/mock @janos
|
||||
swarm/storage/feed @nolash @jpeletier
|
||||
swarm/testutil @lmars
|
||||
whisper/ @gballet @gluk256
|
||||
|
46
vendor/github.com/ethereum/go-ethereum/.github/CONTRIBUTING.md
generated
vendored
@ -1,16 +1,40 @@
|
||||
# Contributing
|
||||
|
||||
Thank you for considering to help out with the source code! We welcome
|
||||
contributions from anyone on the internet, and are grateful for even the
|
||||
smallest of fixes!
|
||||
|
||||
If you'd like to contribute to go-ethereum, please fork, fix, commit and send a
|
||||
pull request for the maintainers to review and merge into the main code base. If
|
||||
you wish to submit more complex changes though, please check up with the core
|
||||
devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum) to
|
||||
ensure those changes are in line with the general philosophy of the project
|
||||
and/or get some early feedback which can make both your efforts much lighter as
|
||||
well as our review and merge procedures quick and simple.
|
||||
|
||||
## Coding guidelines
|
||||
|
||||
Please make sure your contributions adhere to our coding guidelines:
|
||||
|
||||
* Code must adhere to the official Go
|
||||
[formatting](https://golang.org/doc/effective_go.html#formatting) guidelines
|
||||
(i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
|
||||
* Code must be documented adhering to the official Go
|
||||
[commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
|
||||
* Pull requests need to be based on and opened against the `master` branch.
|
||||
* Commit messages should be prefixed with the package(s) they modify.
|
||||
* E.g. "eth, rpc: make trace configs optional"
|
||||
|
||||
## Can I have feature X
|
||||
|
||||
Before you do a feature request please check and make sure that it isn't possible
|
||||
through some other means. The JavaScript enabled console is a powerful feature
|
||||
in the right hands. Please check our [Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info
|
||||
Before you submit a feature request, please check and make sure that it isn't
|
||||
possible through some other means. The JavaScript-enabled console is a powerful
|
||||
feature in the right hands. Please check our
|
||||
[Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info
|
||||
and help.
|
||||
|
||||
## Contributing
|
||||
## Configuration, dependencies, and tests
|
||||
|
||||
If you'd like to contribute to go-ethereum please fork, fix, commit and
|
||||
send a pull request. Commits which do not comply with the coding standards
|
||||
are ignored (use gofmt!).
|
||||
|
||||
See [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
|
||||
for more details on configuring your environment, testing, and
|
||||
dependency management.
|
||||
Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
|
||||
for more details on configuring your environment, managing project dependencies
|
||||
and testing procedures.
|
||||
|
11
vendor/github.com/ethereum/go-ethereum/.github/no-response.yml
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
# Number of days of inactivity before an Issue is closed for lack of response
|
||||
daysUntilClose: 30
|
||||
# Label requiring a response
|
||||
responseRequiredLabel: more-information-needed
|
||||
# Comment to post when closing an Issue for lack of response. Set to `false` to disable
|
||||
closeComment: >
|
||||
This issue has been automatically closed because there has been no response
|
||||
to our request for more information from the original author. With only the
|
||||
information that is currently in the issue, we don't have enough information
|
||||
to take action. Please reach out if you have more relevant information or
|
||||
answers to our questions so that we can investigate further.
|
3
vendor/github.com/ethereum/go-ethereum/.gitignore
generated
vendored
@ -42,3 +42,6 @@ profile.cov
|
||||
/dashboard/assets/node_modules
|
||||
/dashboard/assets/stats.json
|
||||
/dashboard/assets/bundle.js
|
||||
/dashboard/assets/package-lock.json
|
||||
|
||||
**/yarn-error.log
|
||||
|
91
vendor/github.com/ethereum/go-ethereum/.travis.yml
generated
vendored
@ -6,40 +6,37 @@ matrix:
|
||||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.8.x
|
||||
go: 1.10.x
|
||||
script:
|
||||
- sudo modprobe fuse
|
||||
- sudo chmod 666 /dev/fuse
|
||||
- sudo chown root:$USER /etc/fuse.conf
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage
|
||||
- sudo modprobe fuse
|
||||
- sudo chmod 666 /dev/fuse
|
||||
- sudo chown root:$USER /etc/fuse.conf
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage $TEST_PACKAGES
|
||||
|
||||
# These are the latest Go versions.
|
||||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.9.x
|
||||
go: 1.11.x
|
||||
script:
|
||||
- sudo modprobe fuse
|
||||
- sudo chmod 666 /dev/fuse
|
||||
- sudo chown root:$USER /etc/fuse.conf
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage
|
||||
- go run build/ci.go test -coverage $TEST_PACKAGES
|
||||
|
||||
- os: osx
|
||||
go: 1.9.x
|
||||
go: 1.11.x
|
||||
script:
|
||||
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
|
||||
- brew update
|
||||
- brew install caskroom/cask/brew-cask
|
||||
- brew cask install osxfuse
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage
|
||||
- go run build/ci.go test -coverage $TEST_PACKAGES
|
||||
|
||||
# This builder only tests code linters on latest version of Go
|
||||
- os: linux
|
||||
dist: trusty
|
||||
go: 1.9.x
|
||||
go: 1.11.x
|
||||
env:
|
||||
- lint
|
||||
git:
|
||||
@ -47,14 +44,13 @@ matrix:
|
||||
script:
|
||||
- go run build/ci.go lint
|
||||
|
||||
# This builder does the Ubuntu PPA and Linux Azure uploads
|
||||
- os: linux
|
||||
# This builder does the Ubuntu PPA upload
|
||||
- if: type = push
|
||||
os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.9.x
|
||||
go: 1.11.x
|
||||
env:
|
||||
- ubuntu-ppa
|
||||
- azure-linux
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
addons:
|
||||
@ -63,11 +59,26 @@ matrix:
|
||||
- devscripts
|
||||
- debhelper
|
||||
- dput
|
||||
- gcc-multilib
|
||||
- fakeroot
|
||||
script:
|
||||
# Build for the primary platforms that Trusty can manage
|
||||
- go run build/ci.go debsrc -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>" -upload ppa:ethereum/ethereum
|
||||
|
||||
# This builder does the Linux Azure uploads
|
||||
- if: type = push
|
||||
os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.11.x
|
||||
env:
|
||||
- azure-linux
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- gcc-multilib
|
||||
script:
|
||||
# Build for the primary platforms that Trusty can manage
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
- go run build/ci.go install -arch 386
|
||||
@ -87,11 +98,12 @@ matrix:
|
||||
- go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
|
||||
# This builder does the Linux Azure MIPS xgo uploads
|
||||
- os: linux
|
||||
- if: type = push
|
||||
os: linux
|
||||
dist: trusty
|
||||
services:
|
||||
- docker
|
||||
go: 1.9.x
|
||||
go: 1.11.x
|
||||
env:
|
||||
- azure-linux-mips
|
||||
git:
|
||||
@ -114,8 +126,9 @@ matrix:
|
||||
- go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
|
||||
|
||||
# This builder does the Android Maven and Azure uploads
|
||||
- os: linux
|
||||
dist: precise # Needed for the android tools
|
||||
- if: type = push
|
||||
os: linux
|
||||
dist: trusty
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
@ -135,24 +148,25 @@ matrix:
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
before_install:
|
||||
- curl https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz | tar -xz
|
||||
- curl https://storage.googleapis.com/golang/go1.11.2.linux-amd64.tar.gz | tar -xz
|
||||
- export PATH=`pwd`/go/bin:$PATH
|
||||
- export GOROOT=`pwd`/go
|
||||
- export GOPATH=$HOME/go
|
||||
script:
|
||||
# Build the Android archive and upload it to Maven Central and Azure
|
||||
- curl https://dl.google.com/android/repository/android-ndk-r15c-linux-x86_64.zip -o android-ndk-r15c.zip
|
||||
- unzip -q android-ndk-r15c.zip && rm android-ndk-r15c.zip
|
||||
- mv android-ndk-r15c $HOME
|
||||
- export ANDROID_NDK=$HOME/android-ndk-r15c
|
||||
- curl https://dl.google.com/android/repository/android-ndk-r17b-linux-x86_64.zip -o android-ndk-r17b.zip
|
||||
- unzip -q android-ndk-r17b.zip && rm android-ndk-r17b.zip
|
||||
- mv android-ndk-r17b $HOME
|
||||
- export ANDROID_NDK=$HOME/android-ndk-r17b
|
||||
|
||||
- mkdir -p $GOPATH/src/github.com/ethereum
|
||||
- ln -s `pwd` $GOPATH/src/github.com/ethereum
|
||||
- go run build/ci.go aar -signer ANDROID_SIGNING_KEY -deploy https://oss.sonatype.org -upload gethstore/builds
|
||||
|
||||
# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
|
||||
- os: osx
|
||||
go: 1.9.x
|
||||
- if: type = push
|
||||
os: osx
|
||||
go: 1.11.x
|
||||
env:
|
||||
- azure-osx
|
||||
- azure-ios
|
||||
@ -179,20 +193,13 @@ matrix:
|
||||
- go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds
|
||||
|
||||
# This builder does the Azure archive purges to avoid accumulating junk
|
||||
- os: linux
|
||||
- if: type = cron
|
||||
os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.9.x
|
||||
go: 1.11.x
|
||||
env:
|
||||
- azure-purge
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
script:
|
||||
- go run build/ci.go purge -store gethstore/builds -days 14
|
||||
|
||||
notifications:
|
||||
webhooks:
|
||||
urls:
|
||||
- https://webhooks.gitter.im/e/e09ccdce1048c5e03445
|
||||
on_success: change
|
||||
on_failure: always
|
||||
|
1
vendor/github.com/ethereum/go-ethereum/AUTHORS
generated
vendored
@ -171,3 +171,4 @@ xiekeyang <xiekeyang@users.noreply.github.com>
|
||||
yoza <yoza.is12s@gmail.com>
|
||||
ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com>
|
||||
Максим Чусовлянов <mchusovlianov@gmail.com>
|
||||
Ralph Caraveo <deckarep@gmail.com>
|
||||
|
4
vendor/github.com/ethereum/go-ethereum/Dockerfile
generated
vendored
@ -1,5 +1,5 @@
# Build Geth in a stock Go builder container
FROM golang:1.9-alpine as builder
FROM golang:1.11-alpine as builder

RUN apk add --no-cache make gcc musl-dev linux-headers

@ -12,5 +12,5 @@ FROM alpine:latest
RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/

EXPOSE 8545 8546 30303 30303/udp 30304/udp
EXPOSE 8545 8546 30303 30303/udp
ENTRYPOINT ["geth"]
4
vendor/github.com/ethereum/go-ethereum/Dockerfile.alltools
generated
vendored
@ -1,5 +1,5 @@
# Build Geth in a stock Go builder container
FROM golang:1.9-alpine as builder
FROM golang:1.11-alpine as builder

RUN apk add --no-cache make gcc musl-dev linux-headers

@ -12,4 +12,4 @@ FROM alpine:latest
RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/

EXPOSE 8545 8546 30303 30303/udp 30304/udp
EXPOSE 8545 8546 30303 30303/udp
7
vendor/github.com/ethereum/go-ethereum/Makefile
generated
vendored
@ -37,7 +37,11 @@ ios:
test: all
build/env.sh go run build/ci.go test

lint: ## Run linters.
build/env.sh go run build/ci.go lint

clean:
./build/clean_go_build_cache.sh
rm -fr build/_workspace/pkg/ $(GOBIN)/*

# The devtools target installs tools required for 'go generate'.
@ -53,6 +57,9 @@ devtools:
@type "solc" 2> /dev/null || echo 'Please install solc'
@type "protoc" 2> /dev/null || echo 'Please install protoc'

swarm-devtools:
env GOBIN= go install ./cmd/swarm/mimegen

# Cross Compilation Targets (xgo)

geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios
10
vendor/github.com/ethereum/go-ethereum/README.md
generated
vendored
@ -7,7 +7,7 @@ https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/6874
)](https://godoc.org/github.com/ethereum/go-ethereum)
[](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
[](https://travis-ci.org/ethereum/go-ethereum)
[](https://gitter.im/ethereum/go-ethereum?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
[](https://discord.gg/nthXNEv)

Automated builds are available for stable releases and the unstable master branch.
Binary archives are published at https://geth.ethereum.org/downloads/.
@ -34,13 +34,13 @@ The go-ethereum project comes with several wrappers/executables found in the `cm

| Command | Description |
|:----------:|-------------|
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default) archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
| `swarm` | swarm daemon and tools. This is the entrypoint for the swarm network. `swarm --help` for command line options and subcommands. See https://swarm-guide.readthedocs.io for swarm documentation. |
| `swarm` | Swarm daemon and tools. This is the entrypoint for the Swarm network. `swarm --help` for command line options and subcommands. See [Swarm README](https://github.com/ethereum/go-ethereum/tree/master/swarm) for more information. |
| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |

## Running geth
@ -69,7 +69,7 @@ This command will:
* Start up Geth's built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console),
(via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).
This too is optional and if you leave it out you can always attach to an already running Geth instance
This tool is optional and if you leave it out you can always attach to an already running Geth instance
with `geth attach`.

### Full node on the Ethereum test network
@ -142,7 +142,7 @@ Do not forget `--rpcaddr 0.0.0.0`, if you want to access RPC from other containe
### Programatically interfacing Geth nodes

As a developer, sooner rather than later you'll want to start interacting with Geth and the Ethereum
network via your own programs and not manually through the console. To aid this, Geth has built in
network via your own programs and not manually through the console. To aid this, Geth has built-in
support for a JSON-RPC based APIs ([standard APIs](https://github.com/ethereum/wiki/wiki/JSON-RPC) and
[Geth specific APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs)). These can be
exposed via HTTP, WebSockets and IPC (unix sockets on unix based platforms, and named pipes on Windows).
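The README paragraph above describes the JSON-RPC transports in general terms. As a rough, hedged illustration (not part of the upstream README), a Go program built against this vendored go-ethereum can reach a node over any of those transports through the ethclient wrapper; the endpoint URL below is a placeholder and the account address is reused from the pledge test data elsewhere in this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Dial accepts http://, ws:// and IPC paths alike (placeholder endpoint).
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	// Query the latest balance of an arbitrary, placeholder account.
	addr := common.HexToAddress("0x00Ce0d46d924CC8437c806721496599FC3FFA268")
	balance, err := client.BalanceAt(context.Background(), addr, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("balance:", balance)
}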
1
vendor/github.com/ethereum/go-ethereum/VERSION
generated
vendored
@ -1 +0,0 @@
1.8.2
5
vendor/github.com/ethereum/go-ethereum/accounts/abi/abi.go
generated
vendored
@ -102,7 +102,7 @@ func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, output []byt
if len(output)%32 != 0 {
return fmt.Errorf("abi: improperly formatted output")
}
return method.Outputs.Unpack(v, output)
return method.Outputs.UnpackIntoMap(v, output)
} else if event, ok := abi.Events[name]; ok {
return event.Inputs.UnpackIntoMap(v, output)
}
@ -155,6 +155,9 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
// MethodById looks up a method by the 4-byte id
// returns nil if none found
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
if len(sigdata) < 4 {
return nil, fmt.Errorf("data too short (% bytes) for abi method lookup", len(sigdata))
}
for _, method := range abi.Methods {
if bytes.Equal(method.Id(), sigdata[:4]) {
return &method, nil
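The hunk above routes ABI.UnpackIntoMap through method.Outputs.UnpackIntoMap for method outputs. A minimal sketch of how the map-based unpacking in this vendored copy is meant to be used; the ABI JSON and return data below are illustrative, not taken from the source.

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// A single view method returning one named uint256 (illustrative ABI).
	const def = `[{"constant":true,"inputs":[],"name":"count","outputs":[{"name":"total","type":"uint256"}],"type":"function"}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		log.Fatal(err)
	}
	// 32-byte return data encoding the value 7 (illustrative).
	output := common.FromHex("0x0000000000000000000000000000000000000000000000000000000000000007")

	result := map[string]interface{}{}
	if err := parsed.UnpackIntoMap(result, "count", output); err != nil {
		log.Fatal(err)
	}
	fmt.Println(result["total"]) // *big.Int value 7
}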
26
vendor/github.com/ethereum/go-ethereum/accounts/abi/abi_test.go
generated
vendored
26
vendor/github.com/ethereum/go-ethereum/accounts/abi/abi_test.go
generated
vendored
@ -621,14 +621,16 @@ func TestBareEvents(t *testing.T) {
|
||||
// TestUnpackEvent is based on this contract:
|
||||
// contract T {
|
||||
// event received(address sender, uint amount, bytes memo);
|
||||
// event receivedAddr(address sender);
|
||||
// function receive(bytes memo) external payable {
|
||||
// received(msg.sender, msg.value, memo);
|
||||
// receivedAddr(msg.sender);
|
||||
// }
|
||||
// }
|
||||
// When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
|
||||
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
|
||||
func TestUnpackEvent(t *testing.T) {
|
||||
const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
|
||||
const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]`
|
||||
abi, err := JSON(strings.NewReader(abiJSON))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -656,6 +658,17 @@ func TestUnpackEvent(t *testing.T) {
|
||||
} else {
|
||||
t.Logf("len(data): %d; received event: %+v", len(data), ev)
|
||||
}
|
||||
|
||||
type ReceivedAddrEvent struct {
|
||||
Address common.Address
|
||||
}
|
||||
var receivedAddrEv ReceivedAddrEvent
|
||||
err = abi.Unpack(&receivedAddrEv, "receivedAddr", data)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else {
|
||||
t.Logf("len(data): %d; received event: %+v", len(data), receivedAddrEv)
|
||||
}
|
||||
}
|
||||
|
||||
func TestABI_MethodById(t *testing.T) {
|
||||
@ -698,5 +711,14 @@ func TestABI_MethodById(t *testing.T) {
|
||||
t.Errorf("Method %v (id %v) not 'findable' by id in ABI", name, common.ToHex(m.Id()))
|
||||
}
|
||||
}
|
||||
|
||||
// Also test empty
|
||||
if _, err := abi.MethodById([]byte{0x00}); err == nil {
|
||||
t.Errorf("Expected error, too short to decode data")
|
||||
}
|
||||
if _, err := abi.MethodById([]byte{}); err == nil {
|
||||
t.Errorf("Expected error, too short to decode data")
|
||||
}
|
||||
if _, err := abi.MethodById(nil); err == nil {
|
||||
t.Errorf("Expected error, nil is short to decode data")
|
||||
}
|
||||
}
|
||||
|
71
vendor/github.com/ethereum/go-ethereum/accounts/abi/argument.go
generated
vendored
71
vendor/github.com/ethereum/go-ethereum/accounts/abi/argument.go
generated
vendored
@ -100,13 +100,12 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error {
|
||||
return arguments.unpackAtomic(v, marshalledValues)
|
||||
}
|
||||
|
||||
// Unpack performs the operation hexdata -> Go format
|
||||
// Unpack performs the operation hexdata -> Go map
|
||||
func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error {
|
||||
marshalledValues, err := arguments.UnpackValues(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return arguments.unpackIntoMap(v, marshalledValues)
|
||||
}
|
||||
|
||||
@ -121,18 +120,15 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa
|
||||
if err := requireUnpackKind(value, typ, kind, arguments); err != nil {
|
||||
return err
|
||||
}
|
||||
// If the output interface is a struct, make sure names don't collide
|
||||
|
||||
// If the interface is a struct, get of abi->struct_field mapping
|
||||
|
||||
var abi2struct map[string]string
|
||||
if kind == reflect.Struct {
|
||||
exists := make(map[string]bool)
|
||||
for _, arg := range arguments {
|
||||
field := capitalise(arg.Name)
|
||||
if field == "" {
|
||||
return fmt.Errorf("abi: purely underscored output cannot unpack to struct")
|
||||
}
|
||||
if exists[field] {
|
||||
return fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", field)
|
||||
}
|
||||
exists[field] = true
|
||||
var err error
|
||||
abi2struct, err = mapAbiToStructFields(arguments, value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for i, arg := range arguments.NonIndexed() {
|
||||
@ -141,13 +137,9 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa
|
||||
|
||||
switch kind {
|
||||
case reflect.Struct:
|
||||
name := capitalise(arg.Name)
|
||||
for j := 0; j < typ.NumField(); j++ {
|
||||
// TODO read tags: `abi:"fieldName"`
|
||||
if typ.Field(j).Name == name {
|
||||
if err := set(value.Field(j), reflectValue, arg); err != nil {
|
||||
return err
|
||||
}
|
||||
if structField, ok := abi2struct[arg.Name]; ok {
|
||||
if err := set(value.FieldByName(structField), reflectValue, arg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case reflect.Slice, reflect.Array:
|
||||
@ -171,30 +163,14 @@ func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interfa
|
||||
|
||||
// Unpack arguments into map
|
||||
func (arguments Arguments) unpackIntoMap(v map[string]interface{}, marshalledValues []interface{}) error {
|
||||
// Make sure fields exist in map
|
||||
exists := make(map[string]bool)
|
||||
for _, arg := range arguments {
|
||||
field := arg.Name
|
||||
if field == "" {
|
||||
return fmt.Errorf("abi: purely underscored output cannot unpack to map")
|
||||
}
|
||||
if exists[field] {
|
||||
return fmt.Errorf("abi: multiple outputs mapping to the same map field '%s'", field)
|
||||
}
|
||||
exists[field] = true
|
||||
}
|
||||
|
||||
for name, _ := range exists {
|
||||
_, ok := v[name]
|
||||
if !ok {
|
||||
return fmt.Errorf("abi: output map missing argument name")
|
||||
}
|
||||
// Make sure map is not nil
|
||||
if v == nil {
|
||||
return fmt.Errorf("abi: cannot unpack into a nil map")
|
||||
}
|
||||
|
||||
for i, arg := range arguments.NonIndexed() {
|
||||
v[arg.Name] = marshalledValues[i]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -203,9 +179,26 @@ func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues []interf
|
||||
if len(marshalledValues) != 1 {
|
||||
return fmt.Errorf("abi: wrong length, expected single value, got %d", len(marshalledValues))
|
||||
}
|
||||
|
||||
elem := reflect.ValueOf(v).Elem()
|
||||
kind := elem.Kind()
|
||||
reflectValue := reflect.ValueOf(marshalledValues[0])
|
||||
|
||||
var abi2struct map[string]string
|
||||
if kind == reflect.Struct {
|
||||
var err error
|
||||
if abi2struct, err = mapAbiToStructFields(arguments, elem); err != nil {
|
||||
return err
|
||||
}
|
||||
arg := arguments.NonIndexed()[0]
|
||||
if structField, ok := abi2struct[arg.Name]; ok {
|
||||
return set(elem.FieldByName(structField), reflectValue, arg)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return set(elem, reflectValue, arguments.NonIndexed()[0])
|
||||
|
||||
}
|
||||
|
||||
// Computes the full size of an array;
|
||||
|
63
vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/backends/simulated.go
generated
vendored
@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@ -64,11 +65,11 @@ type SimulatedBackend struct {

// NewSimulatedBackend creates a new binding backend using a simulated blockchain
// for testing purposes.
func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
database, _ := ethdb.NewMemDatabase()
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, Alloc: alloc}
func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
database := ethdb.NewMemDatabase()
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
genesis.MustCommit(database)
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{})
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil)

backend := &SimulatedBackend{
database: database,
@ -159,7 +160,7 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres

// TransactionReceipt returns the receipt of a transaction.
func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
receipt, _, _, _ := core.GetReceipt(b.database, txHash)
receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash)
return receipt, nil
}

@ -207,7 +208,7 @@ func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Ad
}

// SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated
// chain doens't have miners, we just return a gas price of 1 for any call.
// chain doesn't have miners, we just return a gas price of 1 for any call.
func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
return big.NewInt(1), nil
}
@ -307,9 +308,9 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa

blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
for _, tx := range b.pendingBlock.Transactions() {
block.AddTx(tx)
block.AddTxWithChain(b.blockchain, tx)
}
block.AddTx(tx)
block.AddTxWithChain(b.blockchain, tx)
})
statedb, _ := b.blockchain.State()

@ -323,18 +324,24 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
//
// TODO(karalabe): Deprecate when the subscription one can return past data too.
func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) {
// Initialize unset filter boundaried to run from genesis to chain head
from := int64(0)
if query.FromBlock != nil {
from = query.FromBlock.Int64()
var filter *filters.Filter
if query.BlockHash != nil {
// Block filter requested, construct a single-shot filter
filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics)
} else {
// Initialize unset filter boundaried to run from genesis to chain head
from := int64(0)
if query.FromBlock != nil {
from = query.FromBlock.Int64()
}
to := int64(-1)
if query.ToBlock != nil {
to = query.ToBlock.Int64()
}
// Construct the range filter
filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
}
to := int64(-1)
if query.ToBlock != nil {
to = query.ToBlock.Int64()
}
// Construct and execute the filter
filter := filters.New(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)

// Run the filter and return all the logs
logs, err := filter.Logs(ctx)
if err != nil {
return nil, err
@ -429,12 +436,24 @@ func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumb
return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
}

func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
return fb.bc.GetHeaderByHash(hash), nil
}

func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
return core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash)), nil
number := rawdb.ReadHeaderNumber(fb.db, hash)
if number == nil {
return nil, nil
}
return rawdb.ReadReceipts(fb.db, hash, *number), nil
}

func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
receipts := core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash))
number := rawdb.ReadHeaderNumber(fb.db, hash)
if number == nil {
return nil, nil
}
receipts := rawdb.ReadReceipts(fb.db, hash, *number)
if receipts == nil {
return nil, nil
}
@ -445,7 +464,7 @@ func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*ty
return logs, nil
}

func (fb *filterBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return event.NewSubscription(func(quit <-chan struct{}) error {
<-quit
return nil
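NewSimulatedBackend now takes an explicit block gas limit alongside the genesis allocation, as the util_test.go hunk further down also reflects. A hedged usage sketch against this vendored version; the generated key, the 10000000000 wei balance and the 8000000 gas limit are arbitrary illustration values.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// One funded test account in the genesis allocation (arbitrary values).
	key, _ := crypto.GenerateKey()
	addr := crypto.PubkeyToAddress(key.PublicKey)
	alloc := core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000)}}

	// The second argument is the new, explicit block gas limit.
	backend := backends.NewSimulatedBackend(alloc, 8000000)
	fmt.Println("simulated backend ready:", backend != nil)
}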
49
vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/bind.go
generated
vendored
@ -23,13 +23,13 @@ package bind
import (
"bytes"
"fmt"
"go/format"
"regexp"
"strings"
"text/template"
"unicode"

"github.com/ethereum/go-ethereum/accounts/abi"
"golang.org/x/tools/imports"
)

// Lang is a target programming language selector to generate bindings for.
@ -145,9 +145,9 @@ func Bind(types []string, abis []string, bytecodes []string, pkg string, lang La
if err := tmpl.Execute(buffer, data); err != nil {
return "", err
}
// For Go bindings pass the code through goimports to clean it up and double check
// For Go bindings pass the code through gofmt to clean it up
if lang == LangGo {
code, err := imports.Process(".", buffer.Bytes(), nil)
code, err := format.Source(buffer.Bytes())
if err != nil {
return "", fmt.Errorf("%v\n%s", err, buffer)
}
@ -207,7 +207,7 @@ func bindTypeGo(kind abi.Type) string {

// The inner function of bindTypeGo, this finds the inner type of stringKind.
// (Or just the type itself if it is not an array or slice)
// The length of the matched part is returned, with the the translated type.
// The length of the matched part is returned, with the translated type.
func bindUnnestedTypeGo(stringKind string) (int, string) {

switch {
@ -255,7 +255,7 @@ func bindTypeJava(kind abi.Type) string {

// The inner function of bindTypeJava, this finds the inner type of stringKind.
// (Or just the type itself if it is not an array or slice)
// The length of the matched part is returned, with the the translated type.
// The length of the matched part is returned, with the translated type.
func bindUnnestedTypeJava(stringKind string) (int, string) {

switch {
@ -385,8 +385,7 @@ var methodNormalizer = map[Lang]func(string) string{
LangJava: decapitalise,
}

// capitalise makes the first character of a string upper case, also removing any
// prefixing underscores from the variable names.
// capitalise makes a camel-case string which starts with an upper case character.
func capitalise(input string) string {
for len(input) > 0 && input[0] == '_' {
input = input[1:]
@ -394,12 +393,42 @@ func capitalise(input string) string {
if len(input) == 0 {
return ""
}
return strings.ToUpper(input[:1]) + input[1:]
return toCamelCase(strings.ToUpper(input[:1]) + input[1:])
}

// decapitalise makes the first character of a string lower case.
// decapitalise makes a camel-case string which starts with a lower case character.
func decapitalise(input string) string {
return strings.ToLower(input[:1]) + input[1:]
for len(input) > 0 && input[0] == '_' {
input = input[1:]
}
if len(input) == 0 {
return ""
}
return toCamelCase(strings.ToLower(input[:1]) + input[1:])
}

// toCamelCase converts an under-score string to a camel-case string
func toCamelCase(input string) string {
toupper := false

result := ""
for k, v := range input {
switch {
case k == 0:
result = strings.ToUpper(string(input[0]))

case toupper:
result += strings.ToUpper(string(v))
toupper = false

case v == '_':
toupper = true

default:
result += string(v)
}
}
return result
}

// structured checks whether a list of ABI data types has enough information to
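The binding generator now normalises under_scored ABI names through toCamelCase before emitting Go identifiers. The snippet below only illustrates the mapping that change produces; it re-implements the conversion locally (the bind helpers themselves are unexported) using the same logic as the hunk above.

package main

import (
	"fmt"
	"strings"
)

// camelCase mirrors the unexported capitalise/toCamelCase pair from the
// vendored bind package: strip leading underscores, upper-case the first
// rune, and upper-case any rune that follows an underscore.
func camelCase(input string) string {
	for len(input) > 0 && input[0] == '_' {
		input = input[1:]
	}
	if len(input) == 0 {
		return ""
	}
	toupper := false
	result := ""
	for k, v := range input {
		switch {
		case k == 0:
			result = strings.ToUpper(string(input[0]))
		case toupper:
			result += strings.ToUpper(string(v))
			toupper = false
		case v == '_':
			toupper = true
		default:
			result += string(v)
		}
	}
	return result
}

func main() {
	for _, name := range []string{"_value", "gas_price", "value"} {
		fmt.Printf("%q -> %q\n", name, camelCase(name)) // "_value" -> "Value", "gas_price" -> "GasPrice"
	}
}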
180
vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/bind_test.go
generated
vendored
File diff suppressed because one or more lines are too long
24
vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/template.go
generated
vendored
@ -64,6 +64,30 @@ const tmplSourceGo = `

package {{.Package}}

import (
"math/big"
"strings"

ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
)

// Reference imports to suppress errors if they are not otherwise used.
var (
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
_ = abi.U256
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
_ = event.NewSubscription
)

{{range $contract := .Contracts}}
// {{.Type}}ABI is the input ABI used to generate the binding from.
const {{.Type}}ABI = "{{.InputABI}}"
8
vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/util_test.go
generated
vendored
@ -53,9 +53,11 @@ var waitDeployedTests = map[string]struct {

func TestWaitDeployed(t *testing.T) {
for name, test := range waitDeployedTests {
backend := backends.NewSimulatedBackend(core.GenesisAlloc{
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)},
})
backend := backends.NewSimulatedBackend(
core.GenesisAlloc{
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)},
}, 10000000,
)

// Create the transaction.
tx := types.NewContractCreation(0, big.NewInt(0), test.gas, big.NewInt(1), common.FromHex(test.code))
8
vendor/github.com/ethereum/go-ethereum/accounts/abi/event.go
generated
vendored
@ -33,15 +33,15 @@ type Event struct {
Inputs Arguments
}

func (event Event) String() string {
inputs := make([]string, len(event.Inputs))
for i, input := range event.Inputs {
func (e Event) String() string {
inputs := make([]string, len(e.Inputs))
for i, input := range e.Inputs {
inputs[i] = fmt.Sprintf("%v %v", input.Name, input.Type)
if input.Indexed {
inputs[i] = fmt.Sprintf("%v indexed %v", input.Name, input.Type)
}
}
return fmt.Sprintf("event %v(%v)", event.Name, strings.Join(inputs, ", "))
return fmt.Sprintf("e %v(%v)", e.Name, strings.Join(inputs, ", "))
}

// Id returns the canonical representation of the event's signature used by the
81
vendor/github.com/ethereum/go-ethereum/accounts/abi/event_test.go
generated
vendored
81
vendor/github.com/ethereum/go-ethereum/accounts/abi/event_test.go
generated
vendored
@ -58,12 +58,28 @@ var jsonEventPledge = []byte(`{
|
||||
"type": "event"
|
||||
}`)
|
||||
|
||||
var jsonEventMixedCase = []byte(`{
|
||||
"anonymous": false,
|
||||
"inputs": [{
|
||||
"indexed": false, "name": "value", "type": "uint256"
|
||||
}, {
|
||||
"indexed": false, "name": "_value", "type": "uint256"
|
||||
}, {
|
||||
"indexed": false, "name": "Value", "type": "uint256"
|
||||
}],
|
||||
"name": "MixedCase",
|
||||
"type": "event"
|
||||
}`)
|
||||
|
||||
// 1000000
|
||||
var transferData1 = "00000000000000000000000000000000000000000000000000000000000f4240"
|
||||
|
||||
// "0x00Ce0d46d924CC8437c806721496599FC3FFA268", 2218516807680, "usd"
|
||||
var pledgeData1 = "00000000000000000000000000ce0d46d924cc8437c806721496599fc3ffa2680000000000000000000000000000000000000000000000000000020489e800007573640000000000000000000000000000000000000000000000000000000000"
|
||||
|
||||
// 1000000,2218516807680,1000001
|
||||
var mixedCaseData1 = "00000000000000000000000000000000000000000000000000000000000f42400000000000000000000000000000000000000000000000000000020489e8000000000000000000000000000000000000000000000000000000000000000f4241"
|
||||
|
||||
func TestEventId(t *testing.T) {
|
||||
var table = []struct {
|
||||
definition string
|
||||
@ -121,6 +137,27 @@ func TestEventTupleUnpack(t *testing.T) {
|
||||
Value *big.Int
|
||||
}
|
||||
|
||||
type EventTransferWithTag struct {
|
||||
// this is valid because `value` is not exportable,
|
||||
// so value is only unmarshalled into `Value1`.
|
||||
value *big.Int
|
||||
Value1 *big.Int `abi:"value"`
|
||||
}
|
||||
|
||||
type BadEventTransferWithSameFieldAndTag struct {
|
||||
Value *big.Int
|
||||
Value1 *big.Int `abi:"value"`
|
||||
}
|
||||
|
||||
type BadEventTransferWithDuplicatedTag struct {
|
||||
Value1 *big.Int `abi:"value"`
|
||||
Value2 *big.Int `abi:"value"`
|
||||
}
|
||||
|
||||
type BadEventTransferWithEmptyTag struct {
|
||||
Value *big.Int `abi:""`
|
||||
}
|
||||
|
||||
type EventPledge struct {
|
||||
Who common.Address
|
||||
Wad *big.Int
|
||||
@ -133,9 +170,16 @@ func TestEventTupleUnpack(t *testing.T) {
|
||||
Currency [3]byte
|
||||
}
|
||||
|
||||
type EventMixedCase struct {
|
||||
Value1 *big.Int `abi:"value"`
|
||||
Value2 *big.Int `abi:"_value"`
|
||||
Value3 *big.Int `abi:"Value"`
|
||||
}
|
||||
|
||||
bigint := new(big.Int)
|
||||
bigintExpected := big.NewInt(1000000)
|
||||
bigintExpected2 := big.NewInt(2218516807680)
|
||||
bigintExpected3 := big.NewInt(1000001)
|
||||
addr := common.HexToAddress("0x00Ce0d46d924CC8437c806721496599FC3FFA268")
|
||||
var testCases = []struct {
|
||||
data string
|
||||
@ -158,6 +202,34 @@ func TestEventTupleUnpack(t *testing.T) {
|
||||
jsonEventTransfer,
|
||||
"",
|
||||
"Can unpack ERC20 Transfer event into slice",
|
||||
}, {
|
||||
transferData1,
|
||||
&EventTransferWithTag{},
|
||||
&EventTransferWithTag{Value1: bigintExpected},
|
||||
jsonEventTransfer,
|
||||
"",
|
||||
"Can unpack ERC20 Transfer event into structure with abi: tag",
|
||||
}, {
|
||||
transferData1,
|
||||
&BadEventTransferWithDuplicatedTag{},
|
||||
&BadEventTransferWithDuplicatedTag{},
|
||||
jsonEventTransfer,
|
||||
"struct: abi tag in 'Value2' already mapped",
|
||||
"Can not unpack ERC20 Transfer event with duplicated abi tag",
|
||||
}, {
|
||||
transferData1,
|
||||
&BadEventTransferWithSameFieldAndTag{},
|
||||
&BadEventTransferWithSameFieldAndTag{},
|
||||
jsonEventTransfer,
|
||||
"abi: multiple variables maps to the same abi field 'value'",
|
||||
"Can not unpack ERC20 Transfer event with a field and a tag mapping to the same abi variable",
|
||||
}, {
|
||||
transferData1,
|
||||
&BadEventTransferWithEmptyTag{},
|
||||
&BadEventTransferWithEmptyTag{},
|
||||
jsonEventTransfer,
|
||||
"struct: abi tag in 'Value' is empty",
|
||||
"Can not unpack ERC20 Transfer event with an empty tag",
|
||||
}, {
|
||||
pledgeData1,
|
||||
&EventPledge{},
|
||||
@ -216,6 +288,13 @@ func TestEventTupleUnpack(t *testing.T) {
|
||||
jsonEventPledge,
|
||||
"abi: cannot unmarshal tuple into map[string]interface {}",
|
||||
"Can not unpack Pledge event into map",
|
||||
}, {
|
||||
mixedCaseData1,
|
||||
&EventMixedCase{},
|
||||
&EventMixedCase{Value1: bigintExpected, Value2: bigintExpected2, Value3: bigintExpected3},
|
||||
jsonEventMixedCase,
|
||||
"",
|
||||
"Can unpack abi variables with mixed case",
|
||||
}}
|
||||
|
||||
for _, tc := range testCases {
|
||||
@ -227,7 +306,7 @@ func TestEventTupleUnpack(t *testing.T) {
|
||||
assert.Nil(err, "Should be able to unpack event data.")
|
||||
assert.Equal(tc.expected, tc.dest, tc.name)
|
||||
} else {
|
||||
assert.EqualError(err, tc.error)
|
||||
assert.EqualError(err, tc.error, tc.name)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
4
vendor/github.com/ethereum/go-ethereum/accounts/abi/method.go
generated
vendored
@ -47,10 +47,8 @@ type Method struct {
// Please note that "int" is substitute for its canonical representation "int256"
func (method Method) Sig() string {
types := make([]string, len(method.Inputs))
i := 0
for _, input := range method.Inputs {
for i, input := range method.Inputs {
types[i] = input.Type.String()
i++
}
return fmt.Sprintf("%v(%v)", method.Name, strings.Join(types, ","))
}
37
vendor/github.com/ethereum/go-ethereum/accounts/abi/numbers.go
generated
vendored
@ -25,35 +25,20 @@ import (
)

var (
big_t = reflect.TypeOf(&big.Int{})
derefbig_t = reflect.TypeOf(big.Int{})
uint8_t = reflect.TypeOf(uint8(0))
uint16_t = reflect.TypeOf(uint16(0))
uint32_t = reflect.TypeOf(uint32(0))
uint64_t = reflect.TypeOf(uint64(0))
int_t = reflect.TypeOf(int(0))
int8_t = reflect.TypeOf(int8(0))
int16_t = reflect.TypeOf(int16(0))
int32_t = reflect.TypeOf(int32(0))
int64_t = reflect.TypeOf(int64(0))
address_t = reflect.TypeOf(common.Address{})
int_ts = reflect.TypeOf([]int(nil))
int8_ts = reflect.TypeOf([]int8(nil))
int16_ts = reflect.TypeOf([]int16(nil))
int32_ts = reflect.TypeOf([]int32(nil))
int64_ts = reflect.TypeOf([]int64(nil))
bigT = reflect.TypeOf(&big.Int{})
derefbigT = reflect.TypeOf(big.Int{})
uint8T = reflect.TypeOf(uint8(0))
uint16T = reflect.TypeOf(uint16(0))
uint32T = reflect.TypeOf(uint32(0))
uint64T = reflect.TypeOf(uint64(0))
int8T = reflect.TypeOf(int8(0))
int16T = reflect.TypeOf(int16(0))
int32T = reflect.TypeOf(int32(0))
int64T = reflect.TypeOf(int64(0))
addressT = reflect.TypeOf(common.Address{})
)

// U256 converts a big Int into a 256bit EVM number.
func U256(n *big.Int) []byte {
return math.PaddedBigBytes(math.U256(n), 32)
}

// checks whether the given reflect value is signed. This also works for slices with a number type
func isSigned(v reflect.Value) bool {
switch v.Type() {
case int_ts, int8_ts, int16_ts, int32_ts, int64_ts, int_t, int8_t, int16_t, int32_t, int64_t:
return true
}
return false
}
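U256 remains the helper that pads a big.Int on the left into the 32-byte big-endian word the EVM expects; only the surrounding type variables were renamed in this hunk. A small, hedged usage example:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// 255 encodes as 31 zero bytes followed by 0xff.
	word := abi.U256(big.NewInt(255))
	fmt.Printf("%d bytes: %x\n", len(word), word)
}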
11
vendor/github.com/ethereum/go-ethereum/accounts/abi/numbers_test.go
generated
vendored
@ -19,7 +19,6 @@ package abi
import (
"bytes"
"math/big"
"reflect"
"testing"
)

@ -32,13 +31,3 @@ func TestNumberTypes(t *testing.T) {
t.Errorf("expected %x got %x", ubytes, unsigned)
}
}

func TestSigned(t *testing.T) {
if isSigned(reflect.ValueOf(uint(10))) {
t.Error("signed")
}

if !isSigned(reflect.ValueOf(int(10))) {
t.Error("not signed")
}
}
120
vendor/github.com/ethereum/go-ethereum/accounts/abi/reflect.go
generated
vendored
120
vendor/github.com/ethereum/go-ethereum/accounts/abi/reflect.go
generated
vendored
@ -19,12 +19,13 @@ package abi
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// indirect recursively dereferences the value until it either gets the value
|
||||
// or finds a big.Int
|
||||
func indirect(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Ptr && v.Elem().Type() != derefbig_t {
|
||||
if v.Kind() == reflect.Ptr && v.Elem().Type() != derefbigT {
|
||||
return indirect(v.Elem())
|
||||
}
|
||||
return v
|
||||
@ -36,26 +37,26 @@ func reflectIntKindAndType(unsigned bool, size int) (reflect.Kind, reflect.Type)
|
||||
switch size {
|
||||
case 8:
|
||||
if unsigned {
|
||||
return reflect.Uint8, uint8_t
|
||||
return reflect.Uint8, uint8T
|
||||
}
|
||||
return reflect.Int8, int8_t
|
||||
return reflect.Int8, int8T
|
||||
case 16:
|
||||
if unsigned {
|
||||
return reflect.Uint16, uint16_t
|
||||
return reflect.Uint16, uint16T
|
||||
}
|
||||
return reflect.Int16, int16_t
|
||||
return reflect.Int16, int16T
|
||||
case 32:
|
||||
if unsigned {
|
||||
return reflect.Uint32, uint32_t
|
||||
return reflect.Uint32, uint32T
|
||||
}
|
||||
return reflect.Int32, int32_t
|
||||
return reflect.Int32, int32T
|
||||
case 64:
|
||||
if unsigned {
|
||||
return reflect.Uint64, uint64_t
|
||||
return reflect.Uint64, uint64T
|
||||
}
|
||||
return reflect.Int64, int64_t
|
||||
return reflect.Int64, int64T
|
||||
}
|
||||
return reflect.Ptr, big_t
|
||||
return reflect.Ptr, bigT
|
||||
}
|
||||
|
||||
// mustArrayToBytesSlice creates a new byte slice with the exact same size as value
|
||||
@ -110,3 +111,102 @@ func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// mapAbiToStringField maps abi to struct fields.
|
||||
// first round: for each Exportable field that contains a `abi:""` tag
|
||||
// and this field name exists in the arguments, pair them together.
|
||||
// second round: for each argument field that has not been already linked,
|
||||
// find what variable is expected to be mapped into, if it exists and has not been
|
||||
// used, pair them.
|
||||
func mapAbiToStructFields(args Arguments, value reflect.Value) (map[string]string, error) {
|
||||
|
||||
typ := value.Type()
|
||||
|
||||
abi2struct := make(map[string]string)
|
||||
struct2abi := make(map[string]string)
|
||||
|
||||
// first round ~~~
|
||||
for i := 0; i < typ.NumField(); i++ {
|
||||
structFieldName := typ.Field(i).Name
|
||||
|
||||
// skip private struct fields.
|
||||
if structFieldName[:1] != strings.ToUpper(structFieldName[:1]) {
|
||||
continue
|
||||
}
|
||||
|
||||
// skip fields that have no abi:"" tag.
|
||||
var ok bool
|
||||
var tagName string
|
||||
if tagName, ok = typ.Field(i).Tag.Lookup("abi"); !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// check if tag is empty.
|
||||
if tagName == "" {
|
||||
return nil, fmt.Errorf("struct: abi tag in '%s' is empty", structFieldName)
|
||||
}
|
||||
|
||||
// check which argument field matches with the abi tag.
|
||||
found := false
|
||||
for _, abiField := range args.NonIndexed() {
|
||||
if abiField.Name == tagName {
|
||||
if abi2struct[abiField.Name] != "" {
|
||||
return nil, fmt.Errorf("struct: abi tag in '%s' already mapped", structFieldName)
|
||||
}
|
||||
// pair them
|
||||
abi2struct[abiField.Name] = structFieldName
|
||||
struct2abi[structFieldName] = abiField.Name
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
// check if this tag has been mapped.
|
||||
if !found {
|
||||
return nil, fmt.Errorf("struct: abi tag '%s' defined but not found in abi", tagName)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// second round ~~~
|
||||
for _, arg := range args {
|
||||
|
||||
abiFieldName := arg.Name
|
||||
structFieldName := capitalise(abiFieldName)
|
||||
|
||||
if structFieldName == "" {
|
||||
return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
|
||||
}
|
||||
|
||||
// this abi has already been paired, skip it... unless there exists another, yet unassigned
|
||||
// struct field with the same field name. If so, raise an error:
|
||||
// abi: [ { "name": "value" } ]
|
||||
// struct { Value *big.Int , Value1 *big.Int `abi:"value"`}
|
||||
if abi2struct[abiFieldName] != "" {
|
||||
if abi2struct[abiFieldName] != structFieldName &&
|
||||
struct2abi[structFieldName] == "" &&
|
||||
value.FieldByName(structFieldName).IsValid() {
|
||||
return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", abiFieldName)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// return an error if this struct field has already been paired.
|
||||
if struct2abi[structFieldName] != "" {
|
||||
return nil, fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", structFieldName)
|
||||
}
|
||||
|
||||
if value.FieldByName(structFieldName).IsValid() {
|
||||
// pair them
|
||||
abi2struct[abiFieldName] = structFieldName
|
||||
struct2abi[structFieldName] = abiFieldName
|
||||
} else {
|
||||
// not paired, but annotate as used, to detect cases like
|
||||
// abi : [ { "name": "value" }, { "name": "_value" } ]
|
||||
// struct { Value *big.Int }
|
||||
struct2abi[structFieldName] = abiFieldName
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return abi2struct, nil
|
||||
}
|
||||
|
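The mapAbiToStructFields logic added above is what lets unpacking target struct fields through an abi:"..." tag instead of relying purely on name capitalisation. A rough sketch of the intended use, mirroring the tagged structs in the event tests earlier in this diff; the event definition and log data below are illustrative.

package main

import (
	"fmt"
	"log"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

// The abi:"value" tag binds the ABI output named "value" to Amount, which the
// first pass of mapAbiToStructFields pairs up explicitly.
type transferEvent struct {
	Amount *big.Int `abi:"value"`
}

func main() {
	// Illustrative single-argument, non-indexed event definition.
	const def = `[{"anonymous":false,"inputs":[{"indexed":false,"name":"value","type":"uint256"}],"name":"Transfer","type":"event"}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		log.Fatal(err)
	}
	// 32-byte log data encoding 1000000 (same value as transferData1 above).
	data := common.FromHex("00000000000000000000000000000000000000000000000000000000000f4240")

	var ev transferEvent
	if err := parsed.Unpack(&ev, "Transfer", data); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ev.Amount) // 1000000
}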
9
vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go
generated
vendored
@ -103,7 +103,12 @@ func NewType(t string) (typ Type, err error) {
return typ, err
}
// parse the type and size of the abi-type.
parsedType := typeRegex.FindAllStringSubmatch(t, -1)[0]
matches := typeRegex.FindAllStringSubmatch(t, -1)
if len(matches) == 0 {
return Type{}, fmt.Errorf("invalid type '%v'", t)
}
parsedType := matches[0]

// varSize is the size of the variable
var varSize int
if len(parsedType[3]) > 0 {
@ -135,7 +140,7 @@ func NewType(t string) (typ Type, err error) {
typ.Type = reflect.TypeOf(bool(false))
case "address":
typ.Kind = reflect.Array
typ.Type = address_t
typ.Type = addressT
typ.Size = 20
typ.T = AddressTy
case "string":
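NewType now rejects strings the type regexp cannot match instead of indexing into an empty result. A hedged sketch of the calling pattern against this vendored API; the type strings are arbitrary, and "123" is chosen only because it contains no letters and so produces no regexp match.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	for _, s := range []string{"uint256", "123"} {
		typ, err := abi.NewType(s)
		if err != nil {
			// With the guard above this surfaces as an error rather than a panic.
			fmt.Println(s, "->", err)
			continue
		}
		fmt.Println(s, "->", typ.String())
	}
}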
69
vendor/github.com/ethereum/go-ethereum/accounts/abi/type_test.go
generated
vendored
69
vendor/github.com/ethereum/go-ethereum/accounts/abi/type_test.go
generated
vendored
@ -46,36 +46,36 @@ func TestTypeRegexp(t *testing.T) {
|
||||
{"bool[2][2][2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
|
||||
{"bool[][][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
|
||||
{"bool[][2][]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
|
||||
{"int8", Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}},
|
||||
{"int16", Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}},
|
||||
{"int32", Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}},
|
||||
{"int64", Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}},
|
||||
{"int256", Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}},
|
||||
{"int8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
|
||||
{"int8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8_t, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
|
||||
{"int16[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
|
||||
{"int16[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16_t, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
|
||||
{"int32[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
|
||||
{"int32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
|
||||
{"int64[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
|
||||
{"int64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64_t, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
|
||||
{"int256[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
|
||||
{"int256[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
|
||||
{"uint8", Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}},
|
||||
{"uint16", Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}},
|
||||
{"uint32", Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}},
|
||||
{"uint64", Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}},
|
||||
{"uint256", Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}},
|
||||
{"uint8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
|
||||
{"uint8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8_t, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
|
||||
{"uint16[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
|
||||
{"uint16[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16_t, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
|
||||
{"uint32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
|
||||
{"uint32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
|
||||
{"uint64[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
|
||||
{"uint64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64_t, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
|
||||
{"uint256[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
|
||||
{"uint256[2]", Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
|
||||
{"int8", Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}},
|
||||
{"int16", Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}},
|
||||
{"int32", Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}},
|
||||
{"int64", Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}},
|
||||
{"int256", Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}},
|
||||
{"int8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
|
||||
{"int8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
|
||||
{"int16[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
|
||||
{"int16[2]", Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
|
||||
{"int32[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
|
||||
{"int32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
|
||||
{"int64[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
|
||||
{"int64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
|
||||
{"int256[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
|
||||
{"int256[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
|
||||
{"uint8", Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}},
|
||||
{"uint16", Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}},
|
||||
{"uint32", Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}},
|
||||
{"uint64", Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}},
|
||||
{"uint256", Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}},
|
||||
{"uint8[]", Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
|
||||
{"uint8[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
|
||||
{"uint16[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
|
||||
{"uint16[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
|
||||
{"uint32[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
|
||||
{"uint32[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
|
||||
{"uint64[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
|
||||
{"uint64[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
|
||||
{"uint256[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
|
||||
{"uint256[2]", Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
|
||||
{"bytes32", Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
|
||||
{"bytes[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
|
||||
{"bytes[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
|
||||
@ -84,9 +84,9 @@ func TestTypeRegexp(t *testing.T) {
{"string", Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
{"string[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
{"string[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
{"address", Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}},
{"address[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
{"address[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
{"address", Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}},
{"address[]", Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
{"address[2]", Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
// TODO when fixed types are implemented properly
// {"fixed", Type{}},
// {"fixed128x128", Type{}},
@ -252,6 +252,9 @@ func TestTypeCheck(t *testing.T) {
{"bytes20", common.Address{}, ""},
{"address", [20]byte{}, ""},
{"address", common.Address{}, ""},
{"bytes32[]]", "", "invalid arg type in abi"},
{"invalidType", "", "unsupported arg type: invalidType"},
{"invalidSlice[]", "", "unsupported arg type: invalidSlice"},
} {
typ, err := NewType(test.typ)
if err != nil && len(test.err) == 0 {
28
vendor/github.com/ethereum/go-ethereum/accounts/abi/unpack.go
generated
vendored
@ -25,8 +25,17 @@ import (
"github.com/ethereum/go-ethereum/common"
)

var (
maxUint256 = big.NewInt(0).Add(
big.NewInt(0).Exp(big.NewInt(2), big.NewInt(256), nil),
big.NewInt(-1))
maxInt256 = big.NewInt(0).Add(
big.NewInt(0).Exp(big.NewInt(2), big.NewInt(255), nil),
big.NewInt(-1))
)

// reads the integer based on its kind
func readInteger(kind reflect.Kind, b []byte) interface{} {
func readInteger(typ byte, kind reflect.Kind, b []byte) interface{} {
switch kind {
case reflect.Uint8:
return b[len(b)-1]
@ -45,7 +54,20 @@ func readInteger(kind reflect.Kind, b []byte) interface{} {
case reflect.Int64:
return int64(binary.BigEndian.Uint64(b[len(b)-8:]))
default:
return new(big.Int).SetBytes(b)
// the only case lefts for integer is int256/uint256.
// big.SetBytes can't tell if a number is negative, positive on itself.
// On EVM, if the returned number > max int256, it is negative.
ret := new(big.Int).SetBytes(b)
if typ == UintTy {
return ret
}

if ret.Cmp(maxInt256) > 0 {
ret.Add(maxUint256, big.NewInt(0).Neg(ret))
ret.Add(ret, big.NewInt(1))
ret.Neg(ret)
}
return ret
}
}

@ -179,7 +201,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
case StringTy: // variable arrays are written at the end of the return bytes
return string(output[begin : begin+end]), nil
case IntTy, UintTy:
return readInteger(t.Kind, returnOutput), nil
return readInteger(t.T, t.Kind, returnOutput), nil
case BoolTy:
return readBool(returnOutput)
case AddressTy:
22
vendor/github.com/ethereum/go-ethereum/accounts/abi/unpack_test.go
generated
vendored
@ -56,6 +56,23 @@ var unpackTests = []unpackTest{
enc: "0000000000000000000000000000000000000000000000000000000000000001",
want: true,
},
{
def: `[{ "type": "bool" }]`,
enc: "0000000000000000000000000000000000000000000000000000000000000000",
want: false,
},
{
def: `[{ "type": "bool" }]`,
enc: "0000000000000000000000000000000000000000000000000001000000000001",
want: false,
err: "abi: improperly encoded boolean value",
},
{
def: `[{ "type": "bool" }]`,
enc: "0000000000000000000000000000000000000000000000000000000000000003",
want: false,
err: "abi: improperly encoded boolean value",
},
{
def: `[{"type": "uint32"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000001",
@ -100,6 +117,11 @@ var unpackTests = []unpackTest{
enc: "0000000000000000000000000000000000000000000000000000000000000001",
want: big.NewInt(1),
},
{
def: `[{"type": "int256"}]`,
enc: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
want: big.NewInt(-1),
},
{
def: `[{"type": "address"}]`,
enc: "0000000000000000000000000100000000000000000000000000000000000000",
2
vendor/github.com/ethereum/go-ethereum/accounts/accounts.go
generated
vendored
@ -106,7 +106,7 @@ type Wallet interface {
// or optionally with the aid of any location metadata from the embedded URL field.
//
// If the wallet requires additional authentication to sign the request (e.g.
// a password to decrypt the account, or a PIN code o verify the transaction),
// a password to decrypt the account, or a PIN code to verify the transaction),
// an AuthNeededError instance will be returned, containing infos for the user
// about which fields or actions are needed. The user may retry by providing
// the needed details via SignTxWithPassphrase, or by other means (e.g. unlock
4
vendor/github.com/ethereum/go-ethereum/accounts/hd.go
generated
vendored
@ -30,8 +30,8 @@ import (
var DefaultRootDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}

// DefaultBaseDerivationPath is the base path from which custom derivation endpoints
// are incremented. As such, the first account will be at m/44'/60'/0'/0, the second
// at m/44'/60'/0'/1, etc.
// are incremented. As such, the first account will be at m/44'/60'/0'/0/0, the second
// at m/44'/60'/0'/0/1, etc.
var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}

// DefaultLedgerBaseDerivationPath is the base path from which custom derivation endpoints
12
vendor/github.com/ethereum/go-ethereum/accounts/keystore/account_cache.go
generated
vendored
@ -27,10 +27,10 @@ import (
"sync"
"time"

mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"gopkg.in/fatih/set.v0"
)

// Minimum amount of time between cache reloads. This limit applies if the platform does
@ -79,7 +79,7 @@ func newAccountCache(keydir string) (*accountCache, chan struct{}) {
keydir: keydir,
byAddr: make(map[common.Address][]accounts.Account),
notify: make(chan struct{}, 1),
fileC: fileCache{all: set.NewNonTS()},
fileC: fileCache{all: mapset.NewThreadUnsafeSet()},
}
ac.watcher = newWatcher(ac)
return ac, ac.notify
@ -237,7 +237,7 @@ func (ac *accountCache) scanAccounts() error {
log.Debug("Failed to reload keystore contents", "err", err)
return err
}
if creates.Size() == 0 && deletes.Size() == 0 && updates.Size() == 0 {
if creates.Cardinality() == 0 && deletes.Cardinality() == 0 && updates.Cardinality() == 0 {
return nil
}
// Create a helper method to scan the contents of the key files
@ -272,15 +272,15 @@ func (ac *accountCache) scanAccounts() error {
// Process all the file diffs
start := time.Now()

for _, p := range creates.List() {
for _, p := range creates.ToSlice() {
if a := readAccount(p.(string)); a != nil {
ac.add(*a)
}
}
for _, p := range deletes.List() {
for _, p := range deletes.ToSlice() {
ac.deleteByFile(p.(string))
}
for _, p := range updates.List() {
for _, p := range updates.ToSlice() {
path := p.(string)
ac.deleteByFile(path)
if a := readAccount(path); a != nil {
26
vendor/github.com/ethereum/go-ethereum/accounts/keystore/file_cache.go
generated
vendored
@ -24,20 +24,20 @@ import (
"sync"
"time"

mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/log"
set "gopkg.in/fatih/set.v0"
)

// fileCache is a cache of files seen during scan of keystore.
type fileCache struct {
all *set.SetNonTS // Set of all files from the keystore folder
lastMod time.Time // Last time instance when a file was modified
all mapset.Set // Set of all files from the keystore folder
lastMod time.Time // Last time instance when a file was modified
mu sync.RWMutex
}

// scan performs a new scan on the given directory, compares against the already
// cached filenames, and returns file sets: creates, deletes, updates.
func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, error) {
t0 := time.Now()

// List all the failes from the keystore folder
@ -51,14 +51,14 @@ func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Inte
defer fc.mu.Unlock()

// Iterate all the files and gather their metadata
all := set.NewNonTS()
mods := set.NewNonTS()
all := mapset.NewThreadUnsafeSet()
mods := mapset.NewThreadUnsafeSet()

var newLastMod time.Time
for _, fi := range files {
// Skip any non-key files from the folder
path := filepath.Join(keyDir, fi.Name())
if skipKeyFile(fi) {
// Skip any non-key files from the folder
if nonKeyFile(fi) {
log.Trace("Ignoring file on account scan", "path", path)
continue
}
@ -76,9 +76,9 @@ func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Inte
t2 := time.Now()

// Update the tracked files and return the three sets
deletes := set.Difference(fc.all, all) // Deletes = previous - current
creates := set.Difference(all, fc.all) // Creates = current - previous
updates := set.Difference(mods, creates) // Updates = modified - creates
deletes := fc.all.Difference(all) // Deletes = previous - current
creates := all.Difference(fc.all) // Creates = current - previous
updates := mods.Difference(creates) // Updates = modified - creates

fc.all, fc.lastMod = all, newLastMod
t3 := time.Now()
@ -88,8 +88,8 @@ func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Inte
return creates, deletes, updates, nil
}

// skipKeyFile ignores editor backups, hidden files and folders/symlinks.
func skipKeyFile(fi os.FileInfo) bool {
// nonKeyFile ignores editor backups, hidden files and folders/symlinks.
func nonKeyFile(fi os.FileInfo) bool {
// Skip editor backups and UNIX-style hidden files.
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
return true
24
vendor/github.com/ethereum/go-ethereum/accounts/keystore/key.go
generated
vendored
@ -66,19 +66,19 @@ type plainKeyJSON struct {

type encryptedKeyJSONV3 struct {
Address string `json:"address"`
Crypto cryptoJSON `json:"crypto"`
Crypto CryptoJSON `json:"crypto"`
Id string `json:"id"`
Version int `json:"version"`
}

type encryptedKeyJSONV1 struct {
Address string `json:"address"`
Crypto cryptoJSON `json:"crypto"`
Crypto CryptoJSON `json:"crypto"`
Id string `json:"id"`
Version string `json:"version"`
}

type cryptoJSON struct {
type CryptoJSON struct {
Cipher string `json:"cipher"`
CipherText string `json:"ciphertext"`
CipherParams cipherparamsJSON `json:"cipherparams"`
@ -179,26 +179,34 @@ func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Accou
return key, a, err
}

func writeKeyFile(file string, content []byte) error {
func writeTemporaryKeyFile(file string, content []byte) (string, error) {
// Create the keystore directory with appropriate permissions
// in case it is not present yet.
const dirPerm = 0700
if err := os.MkdirAll(filepath.Dir(file), dirPerm); err != nil {
return err
return "", err
}
// Atomic write: create a temporary hidden file first
// then move it into place. TempFile assigns mode 0600.
f, err := ioutil.TempFile(filepath.Dir(file), "."+filepath.Base(file)+".tmp")
if err != nil {
return err
return "", err
}
if _, err := f.Write(content); err != nil {
f.Close()
os.Remove(f.Name())
return err
return "", err
}
f.Close()
return os.Rename(f.Name(), file)
return f.Name(), nil
}

func writeKeyFile(file string, content []byte) error {
name, err := writeTemporaryKeyFile(file, content)
if err != nil {
return err
}
return os.Rename(name, file)
}

// keyFileName implements the naming convention for keyfiles:
4
vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore.go
generated
vendored
@ -50,7 +50,7 @@ var (
var KeyStoreType = reflect.TypeOf(&KeyStore{})

// KeyStoreScheme is the protocol scheme prefixing account and wallet URLs.
var KeyStoreScheme = "keystore"
const KeyStoreScheme = "keystore"

// Maximum time between wallet refreshes (if filesystem notifications don't work).
const walletRefreshCycle = 3 * time.Second
@ -78,7 +78,7 @@ type unlocked struct {
// NewKeyStore creates a keystore for the given directory.
func NewKeyStore(keydir string, scryptN, scryptP int) *KeyStore {
keydir, _ = filepath.Abs(keydir)
ks := &KeyStore{storage: &keyStorePassphrase{keydir, scryptN, scryptP}}
ks := &KeyStore{storage: &keyStorePassphrase{keydir, scryptN, scryptP, false}}
ks.init(keydir)
return ks
}
143
vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_passphrase.go
generated
vendored
@ -28,18 +28,19 @@ package keystore
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
crand "crypto/rand"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/crypto/randentropy"
|
||||
"github.com/pborman/uuid"
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
"golang.org/x/crypto/scrypt"
|
||||
@ -72,6 +73,10 @@ type keyStorePassphrase struct {
|
||||
keysDirPath string
|
||||
scryptN int
|
||||
scryptP int
|
||||
// skipKeyFileVerification disables the security-feature which does
|
||||
// reads and decrypts any newly created keyfiles. This should be 'false' in all
|
||||
// cases except tests -- setting this to 'true' is not recommended.
|
||||
skipKeyFileVerification bool
|
||||
}
|
||||
|
||||
func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) (*Key, error) {
|
||||
@ -93,7 +98,7 @@ func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string)
|
||||
|
||||
// StoreKey generates a key, encrypts with 'auth' and stores in the given directory
|
||||
func StoreKey(dir, auth string, scryptN, scryptP int) (common.Address, error) {
|
||||
_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, crand.Reader, auth)
|
||||
_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP, false}, rand.Reader, auth)
|
||||
return a.Address, err
|
||||
}
|
||||
|
||||
@ -102,33 +107,54 @@ func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) er
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return writeKeyFile(filename, keyjson)
|
||||
// Write into temporary file
|
||||
tmpName, err := writeTemporaryKeyFile(filename, keyjson)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ks.skipKeyFileVerification {
|
||||
// Verify that we can decrypt the file with the given password.
|
||||
_, err = ks.GetKey(key.Address, tmpName, auth)
|
||||
if err != nil {
|
||||
msg := "An error was encountered when saving and verifying the keystore file. \n" +
|
||||
"This indicates that the keystore is corrupted. \n" +
|
||||
"The corrupted file is stored at \n%v\n" +
|
||||
"Please file a ticket at:\n\n" +
|
||||
"https://github.com/ethereum/go-ethereum/issues." +
|
||||
"The error was : %s"
|
||||
return fmt.Errorf(msg, tmpName, err)
|
||||
}
|
||||
}
|
||||
return os.Rename(tmpName, filename)
|
||||
}
|
||||
|
||||
func (ks keyStorePassphrase) JoinPath(filename string) string {
|
||||
if filepath.IsAbs(filename) {
|
||||
return filename
|
||||
} else {
|
||||
return filepath.Join(ks.keysDirPath, filename)
|
||||
}
|
||||
return filepath.Join(ks.keysDirPath, filename)
|
||||
}
|
||||
|
||||
// EncryptKey encrypts a key using the specified scrypt parameters into a json
|
||||
// blob that can be decrypted later on.
|
||||
func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
|
||||
authArray := []byte(auth)
|
||||
salt := randentropy.GetEntropyCSPRNG(32)
|
||||
derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen)
|
||||
// Encryptdata encrypts the data given as 'data' with the password 'auth'.
|
||||
func EncryptDataV3(data, auth []byte, scryptN, scryptP int) (CryptoJSON, error) {
|
||||
|
||||
salt := make([]byte, 32)
|
||||
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
|
||||
panic("reading from crypto/rand failed: " + err.Error())
|
||||
}
|
||||
derivedKey, err := scrypt.Key(auth, salt, scryptN, scryptR, scryptP, scryptDKLen)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return CryptoJSON{}, err
|
||||
}
|
||||
encryptKey := derivedKey[:16]
|
||||
keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)
|
||||
|
||||
iv := randentropy.GetEntropyCSPRNG(aes.BlockSize) // 16
|
||||
cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)
|
||||
iv := make([]byte, aes.BlockSize) // 16
|
||||
if _, err := io.ReadFull(rand.Reader, iv); err != nil {
|
||||
panic("reading from crypto/rand failed: " + err.Error())
|
||||
}
|
||||
cipherText, err := aesCTRXOR(encryptKey, data, iv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return CryptoJSON{}, err
|
||||
}
|
||||
mac := crypto.Keccak256(derivedKey[16:32], cipherText)
|
||||
|
||||
@ -138,12 +164,11 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
|
||||
scryptParamsJSON["p"] = scryptP
|
||||
scryptParamsJSON["dklen"] = scryptDKLen
|
||||
scryptParamsJSON["salt"] = hex.EncodeToString(salt)
|
||||
|
||||
cipherParamsJSON := cipherparamsJSON{
|
||||
IV: hex.EncodeToString(iv),
|
||||
}
|
||||
|
||||
cryptoStruct := cryptoJSON{
|
||||
cryptoStruct := CryptoJSON{
|
||||
Cipher: "aes-128-ctr",
|
||||
CipherText: hex.EncodeToString(cipherText),
|
||||
CipherParams: cipherParamsJSON,
|
||||
@ -151,6 +176,17 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
|
||||
KDFParams: scryptParamsJSON,
|
||||
MAC: hex.EncodeToString(mac),
|
||||
}
|
||||
return cryptoStruct, nil
|
||||
}
|
||||
|
||||
// EncryptKey encrypts a key using the specified scrypt parameters into a json
|
||||
// blob that can be decrypted later on.
|
||||
func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
|
||||
keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)
|
||||
cryptoStruct, err := EncryptDataV3(keyBytes, []byte(auth), scryptN, scryptP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
encryptedKeyJSONV3 := encryptedKeyJSONV3{
|
||||
hex.EncodeToString(key.Address[:]),
|
||||
cryptoStruct,
|
||||
@ -197,43 +233,48 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
|
||||
PrivateKey: key,
|
||||
}, nil
|
||||
}
|
||||
func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) {
|
||||
if cryptoJson.Cipher != "aes-128-ctr" {
|
||||
return nil, fmt.Errorf("Cipher not supported: %v", cryptoJson.Cipher)
|
||||
}
|
||||
mac, err := hex.DecodeString(cryptoJson.MAC)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
iv, err := hex.DecodeString(cryptoJson.CipherParams.IV)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cipherText, err := hex.DecodeString(cryptoJson.CipherText)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
derivedKey, err := getKDFKey(cryptoJson, auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText)
|
||||
if !bytes.Equal(calculatedMAC, mac) {
|
||||
return nil, ErrDecrypt
|
||||
}
|
||||
|
||||
plainText, err := aesCTRXOR(derivedKey[:16], cipherText, iv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return plainText, err
|
||||
}
|
||||
|
||||
func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) {
|
||||
if keyProtected.Version != version {
|
||||
return nil, nil, fmt.Errorf("Version not supported: %v", keyProtected.Version)
|
||||
}
|
||||
|
||||
if keyProtected.Crypto.Cipher != "aes-128-ctr" {
|
||||
return nil, nil, fmt.Errorf("Cipher not supported: %v", keyProtected.Crypto.Cipher)
|
||||
}
|
||||
|
||||
keyId = uuid.Parse(keyProtected.Id)
|
||||
mac, err := hex.DecodeString(keyProtected.Crypto.MAC)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
iv, err := hex.DecodeString(keyProtected.Crypto.CipherParams.IV)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
cipherText, err := hex.DecodeString(keyProtected.Crypto.CipherText)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
derivedKey, err := getKDFKey(keyProtected.Crypto, auth)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText)
|
||||
if !bytes.Equal(calculatedMAC, mac) {
|
||||
return nil, nil, ErrDecrypt
|
||||
}
|
||||
|
||||
plainText, err := aesCTRXOR(derivedKey[:16], cipherText, iv)
|
||||
plainText, err := DecryptDataV3(keyProtected.Crypto, auth)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -274,7 +315,7 @@ func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byt
|
||||
return plainText, keyId, err
|
||||
}
|
||||
|
||||
func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) {
|
||||
func getKDFKey(cryptoJSON CryptoJSON, auth string) ([]byte, error) {
|
||||
authArray := []byte(auth)
|
||||
salt, err := hex.DecodeString(cryptoJSON.KDFParams["salt"].(string))
|
||||
if err != nil {
|
||||
3
vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_plain.go
generated
vendored
@ -56,7 +56,6 @@ func (ks keyStorePlain) StoreKey(filename string, key *Key, auth string) error {
func (ks keyStorePlain) JoinPath(filename string) string {
if filepath.IsAbs(filename) {
return filename
} else {
return filepath.Join(ks.keysDirPath, filename)
}
return filepath.Join(ks.keysDirPath, filename)
}
4
vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_plain_test.go
generated
vendored
@ -37,7 +37,7 @@ func tmpKeyStoreIface(t *testing.T, encrypted bool) (dir string, ks keyStore) {
t.Fatal(err)
}
if encrypted {
ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP}
ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP, true}
} else {
ks = &keyStorePlain{d}
}
@ -191,7 +191,7 @@ func TestV1_1(t *testing.T) {

func TestV1_2(t *testing.T) {
t.Parallel()
ks := &keyStorePassphrase{"testdata/v1", LightScryptN, LightScryptP}
ks := &keyStorePassphrase{"testdata/v1", LightScryptN, LightScryptP, true}
addr := common.HexToAddress("cb61d5a9c4896fb9658090b597ef0e7be6f7b67e")
file := "testdata/v1/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e"
k, err := ks.GetKey(addr, file, "g")
16
vendor/github.com/ethereum/go-ethereum/accounts/url.go
generated
vendored
@ -74,6 +74,22 @@ func (u URL) MarshalJSON() ([]byte, error) {
return json.Marshal(u.String())
}

// UnmarshalJSON parses url.
func (u *URL) UnmarshalJSON(input []byte) error {
var textURL string
err := json.Unmarshal(input, &textURL)
if err != nil {
return err
}
url, err := parseURL(textURL)
if err != nil {
return err
}
u.Scheme = url.Scheme
u.Path = url.Path
return nil
}

// Cmp compares x and y and returns:
//
// -1 if x < y
96
vendor/github.com/ethereum/go-ethereum/accounts/url_test.go
generated
vendored
Normal file
@ -0,0 +1,96 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package accounts

import (
"testing"
)

func TestURLParsing(t *testing.T) {
url, err := parseURL("https://ethereum.org")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if url.Scheme != "https" {
t.Errorf("expected: %v, got: %v", "https", url.Scheme)
}
if url.Path != "ethereum.org" {
t.Errorf("expected: %v, got: %v", "ethereum.org", url.Path)
}

_, err = parseURL("ethereum.org")
if err == nil {
t.Error("expected err, got: nil")
}
}

func TestURLString(t *testing.T) {
url := URL{Scheme: "https", Path: "ethereum.org"}
if url.String() != "https://ethereum.org" {
t.Errorf("expected: %v, got: %v", "https://ethereum.org", url.String())
}

url = URL{Scheme: "", Path: "ethereum.org"}
if url.String() != "ethereum.org" {
t.Errorf("expected: %v, got: %v", "ethereum.org", url.String())
}
}

func TestURLMarshalJSON(t *testing.T) {
url := URL{Scheme: "https", Path: "ethereum.org"}
json, err := url.MarshalJSON()
if err != nil {
t.Errorf("unexpcted error: %v", err)
}
if string(json) != "\"https://ethereum.org\"" {
t.Errorf("expected: %v, got: %v", "\"https://ethereum.org\"", string(json))
}
}

func TestURLUnmarshalJSON(t *testing.T) {
url := &URL{}
err := url.UnmarshalJSON([]byte("\"https://ethereum.org\""))
if err != nil {
t.Errorf("unexpcted error: %v", err)
}
if url.Scheme != "https" {
t.Errorf("expected: %v, got: %v", "https", url.Scheme)
}
if url.Path != "ethereum.org" {
t.Errorf("expected: %v, got: %v", "https", url.Path)
}
}

func TestURLComparison(t *testing.T) {
tests := []struct {
urlA URL
urlB URL
expect int
}{
{URL{"https", "ethereum.org"}, URL{"https", "ethereum.org"}, 0},
{URL{"http", "ethereum.org"}, URL{"https", "ethereum.org"}, -1},
{URL{"https", "ethereum.org/a"}, URL{"https", "ethereum.org"}, 1},
{URL{"https", "abc.org"}, URL{"https", "ethereum.org"}, -1},
}

for i, tt := range tests {
result := tt.urlA.Cmp(tt.urlB)
if result != tt.expect {
t.Errorf("test %d: cmp mismatch: expected: %d, got: %d", i, tt.expect, result)
}
}
}
2
vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/hub.go
generated
vendored
@ -127,7 +127,7 @@ func (hub *Hub) refreshWallets() {
// breaking the Ledger protocol if that is waiting for user confirmation. This
// is a bug acknowledged at Ledger, but it won't be fixed on old devices so we
// need to prevent concurrent comms ourselves. The more elegant solution would
// be to ditch enumeration in favor of hutplug events, but that don't work yet
// be to ditch enumeration in favor of hotplug events, but that don't work yet
// on Windows so if we need to hack it anyway, this is more elegant for now.
hub.commsLock.Lock()
if hub.commsPend > 0 { // A confirmation is pending, don't refresh
2
vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/internal/trezor/trezor.go
generated
vendored
@ -36,7 +36,7 @@ func Type(msg proto.Message) uint16 {
}

// Name returns the friendly message type name of a specific protocol buffer
// type numbers.
// type number.
func Name(kind uint16) string {
name := MessageType_name[int32(kind)]
if len(name) < 12 {
6
vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/ledger.go
generated
vendored
@ -53,11 +53,9 @@ const (
ledgerOpGetConfiguration ledgerOpcode = 0x06 // Returns specific wallet application configuration

ledgerP1DirectlyFetchAddress ledgerParam1 = 0x00 // Return address directly from the wallet
ledgerP1ConfirmFetchAddress ledgerParam1 = 0x01 // Require a user confirmation before returning the address
ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing
ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing
ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address
ledgerP2ReturnAddressChainCode ledgerParam2 = 0x01 // Require a user confirmation before returning the address
)

// errLedgerReplyInvalidHeader is the error message returned by a Ledger data exchange
@ -304,7 +302,7 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
for i, component := range derivationPath {
binary.BigEndian.PutUint32(path[1+4*i:], component)
}
// Create the transaction RLP based on whether legacy or EIP155 signing was requeste
// Create the transaction RLP based on whether legacy or EIP155 signing was requested
var (
txrlp []byte
err error
@ -352,7 +350,7 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
signer = new(types.HomesteadSigner)
} else {
signer = types.NewEIP155Signer(chainID)
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
signature[64] -= byte(chainID.Uint64()*2 + 35)
}
signed, err := tx.WithSignature(signer, signature)
if err != nil {
2
vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/trezor.go
generated
vendored
@ -221,7 +221,7 @@ func (w *trezorDriver) trezorSign(derivationPath []uint32, tx *types.Transaction
signer = new(types.HomesteadSigner)
} else {
signer = types.NewEIP155Signer(chainID)
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
signature[64] -= byte(chainID.Uint64()*2 + 35)
}
// Inject the final signature into the transaction and sanity check the sender
signed, err := tx.WithSignature(signer, signature)
2
vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/wallet.go
generated
vendored
@ -99,7 +99,7 @@ type wallet struct {
//
// As such, a hardware wallet needs two locks to function correctly. A state
// lock can be used to protect the wallet's software-side internal state, which
// must not be held exlusively during hardware communication. A communication
// must not be held exclusively during hardware communication. A communication
// lock can be used to achieve exclusive access to the device itself, this one
// however should allow "skipping" waiting for operations that might want to
// use the device, but can live without too (e.g. account self-derivation).
4
vendor/github.com/ethereum/go-ethereum/appveyor.yml
generated
vendored
@ -23,8 +23,8 @@ environment:
install:
- git submodule update --init
- rmdir C:\go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.9.2.windows-%GETH_ARCH%.zip
- 7z x go1.9.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.2.windows-%GETH_ARCH%.zip
- 7z x go1.11.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version

561
vendor/github.com/ethereum/go-ethereum/bmt/bmt.go
generated
vendored
@ -1,561 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package bmt provides a binary merkle tree implementation
|
||||
package bmt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
/*
|
||||
Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size
|
||||
It is defined as the root hash of the binary merkle tree built over fixed size segments
|
||||
of the underlying chunk using any base hash function (e.g keccak 256 SHA3)
|
||||
|
||||
It is used as the chunk hash function in swarm which in turn is the basis for the
|
||||
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash
|
||||
|
||||
The BMT is optimal for providing compact inclusion proofs, i.e. prove that a
|
||||
segment is a substring of a chunk starting at a particular offset
|
||||
The size of the underlying segments is fixed at 32 bytes (called the resolution
|
||||
of the BMT hash), the EVM word size to optimize for on-chain BMT verification
|
||||
as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.
|
||||
|
||||
Two implementations are provided:
|
||||
|
||||
* RefHasher is optimized for code simplicity and meant as a reference implementation
|
||||
* Hasher is optimized for speed taking advantage of concurrency with minimalistic
|
||||
control structure to coordinate the concurrent routines
|
||||
It implements the ChunkHash interface as well as the go standard hash.Hash interface
|
||||
|
||||
*/
|
||||
|
||||
const (
|
||||
// DefaultSegmentCount is the maximum number of segments of the underlying chunk
|
||||
DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches
|
||||
// DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e,
|
||||
// the maximum number of concurrent BMT hashing operations performed by the same hasher
|
||||
DefaultPoolSize = 8
|
||||
)
|
||||
|
||||
// BaseHasher is a hash.Hash constructor function used for the base hash of the BMT.
|
||||
type BaseHasher func() hash.Hash
|
||||
|
||||
// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
|
||||
// implements the hash.Hash interface
|
||||
// reuse pool of Tree-s for amortised memory allocation and resource control
|
||||
// supports order-agnostic concurrent segment writes
|
||||
// as well as sequential read and write
|
||||
// can not be called concurrently on more than one chunk
|
||||
// can be further appended after Sum
|
||||
// Reset gives back the Tree to the pool and guaranteed to leave
|
||||
// the tree and itself in a state reusable for hashing a new chunk
|
||||
type Hasher struct {
|
||||
pool *TreePool // BMT resource pool
|
||||
bmt *Tree // prebuilt BMT resource for flowcontrol and proofs
|
||||
blocksize int // segment size (size of hash) also for hash.Hash
|
||||
count int // segment count
|
||||
size int // for hash.Hash same as hashsize
|
||||
cur int // cursor position for righmost currently open chunk
|
||||
segment []byte // the rightmost open segment (not complete)
|
||||
depth int // index of last level
|
||||
result chan []byte // result channel
|
||||
hash []byte // to record the result
|
||||
max int32 // max segments for SegmentWriter interface
|
||||
blockLength []byte // The block length that needes to be added in Sum
|
||||
}
|
||||
|
||||
// New creates a reusable Hasher
|
||||
// implements the hash.Hash interface
|
||||
// pulls a new Tree from a resource pool for hashing each chunk
|
||||
func New(p *TreePool) *Hasher {
|
||||
return &Hasher{
|
||||
pool: p,
|
||||
depth: depth(p.SegmentCount),
|
||||
size: p.SegmentSize,
|
||||
blocksize: p.SegmentSize,
|
||||
count: p.SegmentCount,
|
||||
result: make(chan []byte),
|
||||
}
|
||||
}
|
||||
|
||||
// Node is a reuseable segment hasher representing a node in a BMT
|
||||
// it allows for continued writes after a Sum
|
||||
// and is left in completely reusable state after Reset
|
||||
type Node struct {
|
||||
level, index int // position of node for information/logging only
|
||||
initial bool // first and last node
|
||||
root bool // whether the node is root to a smaller BMT
|
||||
isLeft bool // whether it is left side of the parent double segment
|
||||
unbalanced bool // indicates if a node has only the left segment
|
||||
parent *Node // BMT connections
|
||||
state int32 // atomic increment impl concurrent boolean toggle
|
||||
left, right []byte
|
||||
}
|
||||
|
||||
// NewNode constructor for segment hasher nodes in the BMT
|
||||
func NewNode(level, index int, parent *Node) *Node {
|
||||
return &Node{
|
||||
parent: parent,
|
||||
level: level,
|
||||
index: index,
|
||||
initial: index == 0,
|
||||
isLeft: index%2 == 0,
|
||||
}
|
||||
}
|
||||
|
||||
// TreePool provides a pool of Trees used as resources by Hasher
|
||||
// a Tree popped from the pool is guaranteed to have clean state
|
||||
// for hashing a new chunk
|
||||
// Hasher Reset releases the Tree to the pool
|
||||
type TreePool struct {
|
||||
lock sync.Mutex
|
||||
c chan *Tree
|
||||
hasher BaseHasher
|
||||
SegmentSize int
|
||||
SegmentCount int
|
||||
Capacity int
|
||||
count int
|
||||
}
|
||||
|
||||
// NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity
|
||||
// on GetTree it reuses free Trees or creates a new one if size is not reached
|
||||
func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool {
|
||||
return &TreePool{
|
||||
c: make(chan *Tree, capacity),
|
||||
hasher: hasher,
|
||||
SegmentSize: hasher().Size(),
|
||||
SegmentCount: segmentCount,
|
||||
Capacity: capacity,
|
||||
}
|
||||
}
|
||||
|
||||
// Drain drains the pool uptil it has no more than n resources
|
||||
func (self *TreePool) Drain(n int) {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
for len(self.c) > n {
|
||||
<-self.c
|
||||
self.count--
|
||||
}
|
||||
}
|
||||
|
||||
// Reserve is blocking until it returns an available Tree
|
||||
// it reuses free Trees or creates a new one if size is not reached
|
||||
func (self *TreePool) Reserve() *Tree {
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
var t *Tree
|
||||
if self.count == self.Capacity {
|
||||
return <-self.c
|
||||
}
|
||||
select {
|
||||
case t = <-self.c:
|
||||
default:
|
||||
t = NewTree(self.hasher, self.SegmentSize, self.SegmentCount)
|
||||
self.count++
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// Release gives back a Tree to the pool.
|
||||
// This Tree is guaranteed to be in reusable state
|
||||
// does not need locking
|
||||
func (self *TreePool) Release(t *Tree) {
|
||||
self.c <- t // can never fail but...
|
||||
}
|
||||
|
||||
// Tree is a reusable control structure representing a BMT
|
||||
// organised in a binary tree
|
||||
// Hasher uses a TreePool to pick one for each chunk hash
|
||||
// the Tree is 'locked' while not in the pool
|
||||
type Tree struct {
|
||||
leaves []*Node
|
||||
}
|
||||
|
||||
// Draw draws the BMT (badly)
|
||||
func (self *Tree) Draw(hash []byte, d int) string {
|
||||
var left, right []string
|
||||
var anc []*Node
|
||||
for i, n := range self.leaves {
|
||||
left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
|
||||
if i%2 == 0 {
|
||||
anc = append(anc, n.parent)
|
||||
}
|
||||
right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
|
||||
}
|
||||
anc = self.leaves
|
||||
var hashes [][]string
|
||||
for l := 0; len(anc) > 0; l++ {
|
||||
var nodes []*Node
|
||||
hash := []string{""}
|
||||
for i, n := range anc {
|
||||
hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
|
||||
if i%2 == 0 && n.parent != nil {
|
||||
nodes = append(nodes, n.parent)
|
||||
}
|
||||
}
|
||||
hash = append(hash, "")
|
||||
hashes = append(hashes, hash)
|
||||
anc = nodes
|
||||
}
|
||||
hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
|
||||
total := 60
|
||||
del := " "
|
||||
var rows []string
|
||||
for i := len(hashes) - 1; i >= 0; i-- {
|
||||
var textlen int
|
||||
hash := hashes[i]
|
||||
for _, s := range hash {
|
||||
textlen += len(s)
|
||||
}
|
||||
if total < textlen {
|
||||
total = textlen + len(hash)
|
||||
}
|
||||
delsize := (total - textlen) / (len(hash) - 1)
|
||||
if delsize > len(del) {
|
||||
delsize = len(del)
|
||||
}
|
||||
row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
|
||||
rows = append(rows, row)
|
||||
|
||||
}
|
||||
rows = append(rows, strings.Join(left, " "))
|
||||
rows = append(rows, strings.Join(right, " "))
|
||||
return strings.Join(rows, "\n") + "\n"
|
||||
}
|
||||
|
||||
// NewTree initialises the Tree by building up the nodes of a BMT
|
||||
// segment size is stipulated to be the size of the hash
|
||||
// segmentCount needs to be positive integer and does not need to be
|
||||
// a power of two and can even be an odd number
|
||||
// segmentSize * segmentCount determines the maximum chunk size
|
||||
// hashed using the tree
|
||||
func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree {
|
||||
n := NewNode(0, 0, nil)
|
||||
n.root = true
|
||||
prevlevel := []*Node{n}
|
||||
// iterate over levels and creates 2^level nodes
|
||||
level := 1
|
||||
count := 2
|
||||
for d := 1; d <= depth(segmentCount); d++ {
|
||||
nodes := make([]*Node, count)
|
||||
for i := 0; i < len(nodes); i++ {
|
||||
parent := prevlevel[i/2]
|
||||
t := NewNode(level, i, parent)
|
||||
nodes[i] = t
|
||||
}
|
||||
prevlevel = nodes
|
||||
level++
|
||||
count *= 2
|
||||
}
|
||||
// the datanode level is the nodes on the last level where
|
||||
return &Tree{
|
||||
leaves: prevlevel,
|
||||
}
|
||||
}
|
||||
|
||||
// methods needed by hash.Hash
|
||||
|
||||
// Size returns the size
|
||||
func (self *Hasher) Size() int {
|
||||
return self.size
|
||||
}
|
||||
|
||||
// BlockSize returns the block size
|
||||
func (self *Hasher) BlockSize() int {
|
||||
return self.blocksize
|
||||
}
|
||||
|
||||
// Sum returns the hash of the buffer
|
||||
// hash.Hash interface Sum method appends the byte slice to the underlying
|
||||
// data before it calculates and returns the hash of the chunk
|
||||
func (self *Hasher) Sum(b []byte) (r []byte) {
|
||||
t := self.bmt
|
||||
i := self.cur
|
||||
n := t.leaves[i]
|
||||
j := i
|
||||
// must run strictly before all nodes calculate
|
||||
// datanodes are guaranteed to have a parent
|
||||
if len(self.segment) > self.size && i > 0 && n.parent != nil {
|
||||
n = n.parent
|
||||
} else {
|
||||
i *= 2
|
||||
}
|
||||
d := self.finalise(n, i)
|
||||
self.writeSegment(j, self.segment, d)
|
||||
c := <-self.result
|
||||
self.releaseTree()
|
||||
|
||||
// sha3(length + BMT(pure_chunk))
|
||||
if self.blockLength == nil {
|
||||
return c
|
||||
}
|
||||
res := self.pool.hasher()
|
||||
res.Reset()
|
||||
res.Write(self.blockLength)
|
||||
res.Write(c)
|
||||
return res.Sum(nil)
|
||||
}
|
||||
|
||||
// Hasher implements the SwarmHash interface
|
||||
|
||||
// Hash waits for the hasher result and returns it
|
||||
// caller must call this on a BMT Hasher being written to
|
||||
func (self *Hasher) Hash() []byte {
|
||||
return <-self.result
|
||||
}
|
||||
|
||||
// Hasher implements the io.Writer interface
|
||||
|
||||
// Write fills the buffer to hash
|
||||
// with every full segment complete launches a hasher go routine
|
||||
// that shoots up the BMT
|
||||
func (self *Hasher) Write(b []byte) (int, error) {
|
||||
l := len(b)
|
||||
if l <= 0 {
|
||||
return 0, nil
|
||||
}
|
||||
s := self.segment
|
||||
i := self.cur
|
||||
count := (self.count + 1) / 2
|
||||
need := self.count*self.size - self.cur*2*self.size
|
||||
size := self.size
|
||||
if need > size {
|
||||
size *= 2
|
||||
}
|
||||
if l < need {
|
||||
need = l
|
||||
}
|
||||
// calculate missing bit to complete current open segment
|
||||
rest := size - len(s)
|
||||
if need < rest {
|
||||
rest = need
|
||||
}
|
||||
s = append(s, b[:rest]...)
|
||||
need -= rest
|
||||
// read full segments and the last possibly partial segment
|
||||
for need > 0 && i < count-1 {
|
||||
// push all finished chunks we read
|
||||
self.writeSegment(i, s, self.depth)
|
||||
need -= size
|
||||
if need < 0 {
|
||||
size += need
|
||||
}
|
||||
s = b[rest : rest+size]
|
||||
rest += size
|
||||
i++
|
||||
}
|
||||
self.segment = s
|
||||
self.cur = i
|
||||
// otherwise, we can assume len(s) == 0, so all buffer is read and chunk is not yet full
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// Hasher implements the io.ReaderFrom interface
|
||||
|
||||
// ReadFrom reads from io.Reader and appends to the data to hash using Write
|
||||
// it reads so that chunk to hash is maximum length or reader reaches EOF
|
||||
// caller must Reset the hasher prior to call
|
||||
func (self *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
|
||||
bufsize := self.size*self.count - self.size*self.cur - len(self.segment)
|
||||
buf := make([]byte, bufsize)
|
||||
var read int
|
||||
for {
|
||||
var n int
|
||||
n, err = r.Read(buf)
|
||||
read += n
|
||||
if err == io.EOF || read == len(buf) {
|
||||
hash := self.Sum(buf[:n])
|
||||
if read == len(buf) {
|
||||
err = NewEOC(hash)
|
||||
}
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
n, err = self.Write(buf[:n])
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return int64(read), err
|
||||
}
|
||||
|
||||
// Reset needs to be called before writing to the hasher
|
||||
func (self *Hasher) Reset() {
|
||||
self.getTree()
|
||||
self.blockLength = nil
|
||||
}
|
||||
|
||||
// Hasher implements the SwarmHash interface
|
||||
|
||||
// ResetWithLength needs to be called before writing to the hasher
|
||||
// the argument is supposed to be the byte slice binary representation of
|
||||
// the legth of the data subsumed under the hash
|
||||
func (self *Hasher) ResetWithLength(l []byte) {
|
||||
self.Reset()
|
||||
self.blockLength = l
|
||||
|
||||
}
|
||||
|
||||
// Release gives back the Tree to the pool whereby it unlocks
|
||||
// it resets tree, segment and index
|
||||
func (self *Hasher) releaseTree() {
|
||||
if self.bmt != nil {
|
||||
n := self.bmt.leaves[self.cur]
|
||||
for ; n != nil; n = n.parent {
|
||||
n.unbalanced = false
|
||||
if n.parent != nil {
|
||||
n.root = false
|
||||
}
|
||||
}
|
||||
self.pool.Release(self.bmt)
|
||||
self.bmt = nil
|
||||
|
||||
}
|
||||
self.cur = 0
|
||||
self.segment = nil
|
||||
}
|
||||
|
||||
func (self *Hasher) writeSegment(i int, s []byte, d int) {
|
||||
h := self.pool.hasher()
|
||||
n := self.bmt.leaves[i]
|
||||
|
||||
if len(s) > self.size && n.parent != nil {
|
||||
go func() {
|
||||
h.Reset()
|
||||
h.Write(s)
|
||||
s = h.Sum(nil)
|
||||
|
||||
if n.root {
|
||||
self.result <- s
|
||||
return
|
||||
}
|
||||
self.run(n.parent, h, d, n.index, s)
|
||||
}()
|
||||
return
|
||||
}
|
||||
go self.run(n, h, d, i*2, s)
|
||||
}
|
||||
|
||||
func (self *Hasher) run(n *Node, h hash.Hash, d int, i int, s []byte) {
|
||||
isLeft := i%2 == 0
|
||||
for {
|
||||
if isLeft {
|
||||
n.left = s
|
||||
} else {
|
||||
n.right = s
|
||||
}
|
||||
if !n.unbalanced && n.toggle() {
|
||||
return
|
||||
}
|
||||
if !n.unbalanced || !isLeft || i == 0 && d == 0 {
|
||||
h.Reset()
|
||||
h.Write(n.left)
|
||||
h.Write(n.right)
|
||||
s = h.Sum(nil)
|
||||
|
||||
} else {
|
||||
s = append(n.left, n.right...)
|
||||
}
|
||||
|
||||
self.hash = s
|
||||
if n.root {
|
||||
self.result <- s
|
||||
return
|
||||
}
|
||||
|
||||
isLeft = n.isLeft
|
||||
n = n.parent
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
// getTree obtains a BMT resource by reserving one from the pool
|
||||
func (self *Hasher) getTree() *Tree {
|
||||
if self.bmt != nil {
|
||||
return self.bmt
|
||||
}
|
||||
t := self.pool.Reserve()
|
||||
self.bmt = t
|
||||
return t
|
||||
}
|
||||
|
||||
// atomic bool toggle implementing a concurrent reusable 2-state object
|
||||
// atomic addint with %2 implements atomic bool toggle
|
||||
// it returns true if the toggler just put it in the active/waiting state
|
||||
func (self *Node) toggle() bool {
|
||||
return atomic.AddInt32(&self.state, 1)%2 == 1
|
||||
}
|
||||
|
||||
func hashstr(b []byte) string {
|
||||
end := len(b)
|
||||
if end > 4 {
|
||||
end = 4
|
||||
}
|
||||
return fmt.Sprintf("%x", b[:end])
|
||||
}
|
||||
|
||||
func depth(n int) (d int) {
|
||||
for l := (n - 1) / 2; l > 0; l /= 2 {
|
||||
d++
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// finalise is following the zigzags on the tree belonging
|
||||
// to the final datasegment
|
||||
func (self *Hasher) finalise(n *Node, i int) (d int) {
|
||||
isLeft := i%2 == 0
|
||||
for {
|
||||
// when the final segment's path is going via left segments
|
||||
// the incoming data is pushed to the parent upon pulling the left
|
||||
// we do not need toogle the state since this condition is
|
||||
// detectable
|
||||
n.unbalanced = isLeft
|
||||
n.right = nil
|
||||
if n.initial {
|
||||
n.root = true
|
||||
return d
|
||||
}
|
||||
isLeft = n.isLeft
|
||||
n = n.parent
|
||||
d++
|
||||
}
|
||||
}
|
||||
|
||||
// EOC (end of chunk) implements the error interface
|
||||
type EOC struct {
|
||||
Hash []byte // read the hash of the chunk off the error
|
||||
}
|
||||
|
||||
// Error returns the error string
|
||||
func (self *EOC) Error() string {
|
||||
return fmt.Sprintf("hasher limit reached, chunk hash: %x", self.Hash)
|
||||
}
|
||||
|
||||
// NewEOC creates new end of chunk error with the hash
|
||||
func NewEOC(hash []byte) *EOC {
|
||||
return &EOC{hash}
|
||||
}
|
85  vendor/github.com/ethereum/go-ethereum/bmt/bmt_r.go (generated, vendored)
@ -1,85 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// simple nonconcurrent reference implementation for hashsize segment based
// Binary Merkle tree hash on arbitrary but fixed maximum chunksize
//
// This implementation does not take advantage of any parallelism and uses
// far more memory than necessary, but it is easy to see that it is correct.
// It can be used for generating test cases for optimized implementations.
// See the testBMTHasherCorrectness function in bmt_test.go
|
||||
package bmt
|
||||
|
||||
import (
|
||||
"hash"
|
||||
)
|
||||
|
||||
// RefHasher is the non-optimized easy to read reference implementation of BMT
|
||||
type RefHasher struct {
|
||||
span int
|
||||
section int
|
||||
cap int
|
||||
h hash.Hash
|
||||
}
|
||||
|
||||
// NewRefHasher returns a new RefHasher
|
||||
func NewRefHasher(hasher BaseHasher, count int) *RefHasher {
|
||||
h := hasher()
|
||||
hashsize := h.Size()
|
||||
maxsize := hashsize * count
|
||||
c := 2
|
||||
for ; c < count; c *= 2 {
|
||||
}
|
||||
if c > 2 {
|
||||
c /= 2
|
||||
}
|
||||
return &RefHasher{
|
||||
section: 2 * hashsize,
|
||||
span: c * hashsize,
|
||||
cap: maxsize,
|
||||
h: h,
|
||||
}
|
||||
}
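The loop in `NewRefHasher` rounds the segment count down to a power of two: for `count > 2`, `c` ends up as the largest power of two strictly below `count` (and never less than 2), which then sizes the initial span. A quick, hypothetical check of that rounding logic in isolation:

```go
// Sanity check of the power-of-two rounding used in NewRefHasher above
// (standalone snippet, not the vendored code).
package main

import "fmt"

func roundSpan(count int) int {
	c := 2
	for ; c < count; c *= 2 {
	}
	if c > 2 {
		c /= 2
	}
	return c
}

func main() {
	for _, count := range []int{2, 3, 5, 128, 129} {
		fmt.Println(count, "->", roundSpan(count)) // prints: 2 -> 2, 3 -> 2, 5 -> 4, 128 -> 64, 129 -> 128
	}
}
```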
|
||||
|
||||
// Hash returns the BMT hash of the byte slice
|
||||
// implements the SwarmHash interface
|
||||
func (rh *RefHasher) Hash(d []byte) []byte {
|
||||
if len(d) > rh.cap {
|
||||
d = d[:rh.cap]
|
||||
}
|
||||
|
||||
return rh.hash(d, rh.span)
|
||||
}
|
||||
|
||||
func (rh *RefHasher) hash(d []byte, s int) []byte {
|
||||
l := len(d)
|
||||
left := d
|
||||
var right []byte
|
||||
if l > rh.section {
|
||||
for ; s >= l; s /= 2 {
|
||||
}
|
||||
left = rh.hash(d[:s], s)
|
||||
right = d[s:]
|
||||
if l-s > rh.section/2 {
|
||||
right = rh.hash(right, s)
|
||||
}
|
||||
}
|
||||
defer rh.h.Reset()
|
||||
rh.h.Write(left)
|
||||
rh.h.Write(right)
|
||||
h := rh.h.Sum(nil)
|
||||
return h
|
||||
}
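The recursion in `hash` is easier to see in isolation: halve the span until it is smaller than the data, hash the left span recursively, hash the right remainder only if it is longer than one segment, then hash the concatenation. A self-contained sketch of that idea, using `crypto/sha256` as a stand-in base hash (illustrative only; the vendored code uses Keccak-256 via a `BaseHasher`):

```go
// Illustrative reference binary-Merkle-tree hash over 32-byte segments,
// mirroring the RefHasher recursion with a stand-in base hash.
package main

import (
	"crypto/sha256"
	"fmt"
)

const segment = 32 // stand-in hash size

// refHash hashes data whose length is at most the initial span.
func refHash(data []byte, span int) []byte {
	if len(data) <= 2*segment {
		sum := sha256.Sum256(data)
		return sum[:]
	}
	// shrink the span until it is the largest half that still splits the data
	for span >= len(data) {
		span /= 2
	}
	left := refHash(data[:span], span)
	right := data[span:]
	if len(right) > segment {
		right = refHash(right, span)
	}
	sum := sha256.Sum256(append(left, right...))
	return sum[:]
}

func main() {
	data := make([]byte, 100)
	fmt.Printf("%x\n", refHash(data, 128*segment))
}
```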
|
481  vendor/github.com/ethereum/go-ethereum/bmt/bmt_test.go (generated, vendored)
@ -1,481 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bmt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
crand "crypto/rand"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
||||
)
|
||||
|
||||
const (
|
||||
maxproccnt = 8
|
||||
)
|
||||
|
||||
// TestRefHasher tests that the RefHasher computes the expected BMT hash for
|
||||
// all data lengths between 0 and 256 bytes
|
||||
func TestRefHasher(t *testing.T) {
|
||||
hashFunc := sha3.NewKeccak256
|
||||
|
||||
sha3 := func(data ...[]byte) []byte {
|
||||
h := hashFunc()
|
||||
for _, v := range data {
|
||||
h.Write(v)
|
||||
}
|
||||
return h.Sum(nil)
|
||||
}
|
||||
|
||||
// the test struct is used to specify the expected BMT hash for data
|
||||
// lengths between "from" and "to"
|
||||
type test struct {
|
||||
from int64
|
||||
to int64
|
||||
expected func([]byte) []byte
|
||||
}
|
||||
|
||||
var tests []*test
|
||||
|
||||
// all lengths in [0,64] should be:
|
||||
//
|
||||
// sha3(data)
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 0,
|
||||
to: 64,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(data)
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [65,96] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// data[64:]
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 65,
|
||||
to: 96,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(data[:64]), data[64:])
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [97,128] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:])
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 97,
|
||||
to: 128,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(data[:64]), sha3(data[64:]))
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [129,160] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// data[128:]
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 129,
|
||||
to: 160,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), data[128:])
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [161,192] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// sha3(data[128:])
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 161,
|
||||
to: 192,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(data[128:]))
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [193,224] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// sha3(
|
||||
// sha3(data[128:192])
|
||||
// data[192:]
|
||||
// )
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 193,
|
||||
to: 224,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), data[192:]))
|
||||
},
|
||||
})
|
||||
|
||||
// all lengths in [225,256] should be:
|
||||
//
|
||||
// sha3(
|
||||
// sha3(
|
||||
// sha3(data[:64])
|
||||
// sha3(data[64:128])
|
||||
// )
|
||||
// sha3(
|
||||
// sha3(data[128:192])
|
||||
// sha3(data[192:])
|
||||
// )
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{
|
||||
from: 225,
|
||||
to: 256,
|
||||
expected: func(data []byte) []byte {
|
||||
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), sha3(data[192:])))
|
||||
},
|
||||
})
|
||||
|
||||
// run the tests
|
||||
for _, x := range tests {
|
||||
for length := x.from; length <= x.to; length++ {
|
||||
t.Run(fmt.Sprintf("%d_bytes", length), func(t *testing.T) {
|
||||
data := make([]byte, length)
|
||||
if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := x.expected(data)
|
||||
actual := NewRefHasher(hashFunc, 128).Hash(data)
|
||||
if !bytes.Equal(actual, expected) {
|
||||
t.Fatalf("expected %x, got %x", expected, actual)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testDataReader(l int) (r io.Reader) {
|
||||
return io.LimitReader(crand.Reader, int64(l))
|
||||
}
|
||||
|
||||
func TestHasherCorrectness(t *testing.T) {
|
||||
err := testHasher(testBaseHasher)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testHasher(f func(BaseHasher, []byte, int, int) error) error {
|
||||
tdata := testDataReader(4128)
|
||||
data := make([]byte, 4128)
|
||||
tdata.Read(data)
|
||||
hasher := sha3.NewKeccak256
|
||||
size := hasher().Size()
|
||||
counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128}
|
||||
|
||||
var err error
|
||||
for _, count := range counts {
|
||||
max := count * size
|
||||
incr := 1
|
||||
for n := 0; n <= max+incr; n += incr {
|
||||
err = f(hasher, data, n, count)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestHasherReuseWithoutRelease(t *testing.T) {
|
||||
testHasherReuse(1, t)
|
||||
}
|
||||
|
||||
func TestHasherReuseWithRelease(t *testing.T) {
|
||||
testHasherReuse(maxproccnt, t)
|
||||
}
|
||||
|
||||
func testHasherReuse(i int, t *testing.T) {
|
||||
hasher := sha3.NewKeccak256
|
||||
pool := NewTreePool(hasher, 128, i)
|
||||
defer pool.Drain(0)
|
||||
bmt := New(pool)
|
||||
|
||||
for i := 0; i < 500; i++ {
|
||||
n := rand.Intn(4096)
|
||||
tdata := testDataReader(n)
|
||||
data := make([]byte, n)
|
||||
tdata.Read(data)
|
||||
|
||||
err := testHasherCorrectness(bmt, hasher, data, n, 128)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasherConcurrency(t *testing.T) {
|
||||
hasher := sha3.NewKeccak256
|
||||
pool := NewTreePool(hasher, 128, maxproccnt)
|
||||
defer pool.Drain(0)
|
||||
wg := sync.WaitGroup{}
|
||||
cycles := 100
|
||||
wg.Add(maxproccnt * cycles)
|
||||
errc := make(chan error)
|
||||
|
||||
for p := 0; p < maxproccnt; p++ {
|
||||
for i := 0; i < cycles; i++ {
|
||||
go func() {
|
||||
bmt := New(pool)
|
||||
n := rand.Intn(4096)
|
||||
tdata := testDataReader(n)
|
||||
data := make([]byte, n)
|
||||
tdata.Read(data)
|
||||
err := testHasherCorrectness(bmt, hasher, data, n, 128)
|
||||
wg.Done()
|
||||
if err != nil {
|
||||
errc <- err
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(errc)
|
||||
}()
|
||||
var err error
|
||||
select {
|
||||
case <-time.NewTimer(5 * time.Second).C:
|
||||
err = fmt.Errorf("timed out")
|
||||
case err = <-errc:
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testBaseHasher(hasher BaseHasher, d []byte, n, count int) error {
|
||||
pool := NewTreePool(hasher, count, 1)
|
||||
defer pool.Drain(0)
|
||||
bmt := New(pool)
|
||||
return testHasherCorrectness(bmt, hasher, d, n, count)
|
||||
}
|
||||
|
||||
func testHasherCorrectness(bmt hash.Hash, hasher BaseHasher, d []byte, n, count int) (err error) {
|
||||
data := d[:n]
|
||||
rbmt := NewRefHasher(hasher, count)
|
||||
exp := rbmt.Hash(data)
|
||||
timeout := time.NewTimer(time.Second)
|
||||
c := make(chan error)
|
||||
|
||||
go func() {
|
||||
bmt.Reset()
|
||||
bmt.Write(data)
|
||||
got := bmt.Sum(nil)
|
||||
if !bytes.Equal(got, exp) {
|
||||
c <- fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
|
||||
}
|
||||
close(c)
|
||||
}()
|
||||
select {
|
||||
case <-timeout.C:
|
||||
err = fmt.Errorf("BMT hash calculation timed out")
|
||||
case err = <-c:
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func BenchmarkSHA3_4k(t *testing.B) { benchmarkSHA3(4096, t) }
|
||||
func BenchmarkSHA3_2k(t *testing.B) { benchmarkSHA3(4096/2, t) }
|
||||
func BenchmarkSHA3_1k(t *testing.B) { benchmarkSHA3(4096/4, t) }
|
||||
func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) }
|
||||
func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) }
|
||||
func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) }
|
||||
|
||||
func BenchmarkBMTBaseline_4k(t *testing.B) { benchmarkBMTBaseline(4096, t) }
|
||||
func BenchmarkBMTBaseline_2k(t *testing.B) { benchmarkBMTBaseline(4096/2, t) }
|
||||
func BenchmarkBMTBaseline_1k(t *testing.B) { benchmarkBMTBaseline(4096/4, t) }
|
||||
func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) }
|
||||
func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) }
|
||||
func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) }
|
||||
|
||||
func BenchmarkRefHasher_4k(t *testing.B) { benchmarkRefHasher(4096, t) }
|
||||
func BenchmarkRefHasher_2k(t *testing.B) { benchmarkRefHasher(4096/2, t) }
|
||||
func BenchmarkRefHasher_1k(t *testing.B) { benchmarkRefHasher(4096/4, t) }
|
||||
func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) }
|
||||
func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) }
|
||||
func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) }
|
||||
|
||||
func BenchmarkHasher_4k(t *testing.B) { benchmarkHasher(4096, t) }
|
||||
func BenchmarkHasher_2k(t *testing.B) { benchmarkHasher(4096/2, t) }
|
||||
func BenchmarkHasher_1k(t *testing.B) { benchmarkHasher(4096/4, t) }
|
||||
func BenchmarkHasher_512b(t *testing.B) { benchmarkHasher(4096/8, t) }
|
||||
func BenchmarkHasher_256b(t *testing.B) { benchmarkHasher(4096/16, t) }
|
||||
func BenchmarkHasher_128b(t *testing.B) { benchmarkHasher(4096/32, t) }
|
||||
|
||||
func BenchmarkHasherNoReuse_4k(t *testing.B) { benchmarkHasherReuse(1, 4096, t) }
|
||||
func BenchmarkHasherNoReuse_2k(t *testing.B) { benchmarkHasherReuse(1, 4096/2, t) }
|
||||
func BenchmarkHasherNoReuse_1k(t *testing.B) { benchmarkHasherReuse(1, 4096/4, t) }
|
||||
func BenchmarkHasherNoReuse_512b(t *testing.B) { benchmarkHasherReuse(1, 4096/8, t) }
|
||||
func BenchmarkHasherNoReuse_256b(t *testing.B) { benchmarkHasherReuse(1, 4096/16, t) }
|
||||
func BenchmarkHasherNoReuse_128b(t *testing.B) { benchmarkHasherReuse(1, 4096/32, t) }
|
||||
|
||||
func BenchmarkHasherReuse_4k(t *testing.B) { benchmarkHasherReuse(16, 4096, t) }
|
||||
func BenchmarkHasherReuse_2k(t *testing.B) { benchmarkHasherReuse(16, 4096/2, t) }
|
||||
func BenchmarkHasherReuse_1k(t *testing.B) { benchmarkHasherReuse(16, 4096/4, t) }
|
||||
func BenchmarkHasherReuse_512b(t *testing.B) { benchmarkHasherReuse(16, 4096/8, t) }
|
||||
func BenchmarkHasherReuse_256b(t *testing.B) { benchmarkHasherReuse(16, 4096/16, t) }
|
||||
func BenchmarkHasherReuse_128b(t *testing.B) { benchmarkHasherReuse(16, 4096/32, t) }
|
||||
|
||||
// benchmarks the minimum hashing time for a balanced (for simplicity) BMT
// by doing count/segmentsize parallel hashings of 2*segmentsize bytes,
// running them on maxproccnt goroutines, each reusing the base hasher;
// the premise is that this is the minimum computation needed for a BMT,
// therefore this serves as a theoretical optimum for concurrent implementations
|
||||
func benchmarkBMTBaseline(n int, t *testing.B) {
|
||||
tdata := testDataReader(64)
|
||||
data := make([]byte, 64)
|
||||
tdata.Read(data)
|
||||
hasher := sha3.NewKeccak256
|
||||
|
||||
t.ReportAllocs()
|
||||
t.ResetTimer()
|
||||
for i := 0; i < t.N; i++ {
|
||||
count := int32((n-1)/hasher().Size() + 1)
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(maxproccnt)
|
||||
var i int32
|
||||
for j := 0; j < maxproccnt; j++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
h := hasher()
|
||||
for atomic.AddInt32(&i, 1) < count {
|
||||
h.Reset()
|
||||
h.Write(data)
|
||||
h.Sum(nil)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkHasher(n int, t *testing.B) {
|
||||
tdata := testDataReader(n)
|
||||
data := make([]byte, n)
|
||||
tdata.Read(data)
|
||||
|
||||
size := 1
|
||||
hasher := sha3.NewKeccak256
|
||||
segmentCount := 128
|
||||
pool := NewTreePool(hasher, segmentCount, size)
|
||||
bmt := New(pool)
|
||||
|
||||
t.ReportAllocs()
|
||||
t.ResetTimer()
|
||||
for i := 0; i < t.N; i++ {
|
||||
bmt.Reset()
|
||||
bmt.Write(data)
|
||||
bmt.Sum(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkHasherReuse(poolsize, n int, t *testing.B) {
|
||||
tdata := testDataReader(n)
|
||||
data := make([]byte, n)
|
||||
tdata.Read(data)
|
||||
|
||||
hasher := sha3.NewKeccak256
|
||||
segmentCount := 128
|
||||
pool := NewTreePool(hasher, segmentCount, poolsize)
|
||||
cycles := 200
|
||||
|
||||
t.ReportAllocs()
|
||||
t.ResetTimer()
|
||||
for i := 0; i < t.N; i++ {
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(cycles)
|
||||
for j := 0; j < cycles; j++ {
|
||||
bmt := New(pool)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
bmt.Reset()
|
||||
bmt.Write(data)
|
||||
bmt.Sum(nil)
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkSHA3(n int, t *testing.B) {
|
||||
data := make([]byte, n)
|
||||
tdata := testDataReader(n)
|
||||
tdata.Read(data)
|
||||
hasher := sha3.NewKeccak256
|
||||
h := hasher()
|
||||
|
||||
t.ReportAllocs()
|
||||
t.ResetTimer()
|
||||
for i := 0; i < t.N; i++ {
|
||||
h.Reset()
|
||||
h.Write(data)
|
||||
h.Sum(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkRefHasher(n int, t *testing.B) {
|
||||
data := make([]byte, n)
|
||||
tdata := testDataReader(n)
|
||||
tdata.Read(data)
|
||||
hasher := sha3.NewKeccak256
|
||||
rbmt := NewRefHasher(hasher, 128)
|
||||
|
||||
t.ReportAllocs()
|
||||
t.ResetTimer()
|
||||
for i := 0; i < t.N; i++ {
|
||||
rbmt.Hash(data)
|
||||
}
|
||||
}
|
13  vendor/github.com/ethereum/go-ethereum/build/ci-notes.md (generated, vendored)
@ -2,12 +2,7 @@
|
||||
|
||||
Tagged releases and develop branch commits are available as installable Debian packages
|
||||
for Ubuntu. Packages are built for all Ubuntu versions which are supported by
|
||||
Canonical:
|
||||
|
||||
- Trusty Tahr (14.04 LTS)
|
||||
- Xenial Xerus (16.04 LTS)
|
||||
- Yakkety Yak (16.10)
|
||||
- Zesty Zapus (17.04)
|
||||
Canonical.
|
||||
|
||||
Packages of develop branch commits have suffix -unstable and cannot be installed alongside
|
||||
the stable version. Switching between release streams requires user intervention.
|
||||
@ -21,18 +16,18 @@ variable which Travis CI makes available to certain builds.
|
||||
We want to build go-ethereum with the most recent version of Go, irrespective of the Go
|
||||
version that is available in the main Ubuntu repository. In order to make this possible,
|
||||
our PPA depends on the ~gophers/ubuntu/archive PPA. Our source package build-depends on
|
||||
golang-1.9, which is co-installable alongside the regular golang package. PPA dependencies
|
||||
golang-1.10, which is co-installable alongside the regular golang package. PPA dependencies
|
||||
can be edited at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies
|
||||
|
||||
## Building Packages Locally (for testing)
|
||||
|
||||
You need to run Ubuntu to do test packaging.
|
||||
|
||||
Add the gophers PPA and install Go 1.9 and Debian packaging tools:
|
||||
Add the gophers PPA and install Go 1.10 and Debian packaging tools:
|
||||
|
||||
$ sudo apt-add-repository ppa:gophers/ubuntu/archive
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install build-essential golang-1.9 devscripts debhelper
|
||||
$ sudo apt-get install build-essential golang-1.10 devscripts debhelper
|
||||
|
||||
Create the source packages:
|
||||
|
||||
|
273  vendor/github.com/ethereum/go-ethereum/build/ci.go (generated, vendored)
@ -26,7 +26,7 @@ Available commands are:
|
||||
install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables
|
||||
test [ -coverage ] [ packages... ] -- runs the tests
|
||||
lint -- runs certain pre-selected linters
|
||||
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artefacts
|
||||
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artifacts
|
||||
importkeys -- imports signing keys from env
|
||||
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
|
||||
nsis -- creates a Windows NSIS installer
|
||||
@ -59,6 +59,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/internal/build"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
sv "github.com/ethereum/go-ethereum/swarm/version"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -77,52 +79,84 @@ var (
|
||||
executablePath("geth"),
|
||||
executablePath("puppeth"),
|
||||
executablePath("rlpdump"),
|
||||
executablePath("swarm"),
|
||||
executablePath("wnode"),
|
||||
}
|
||||
|
||||
// Files that end up in the swarm*.zip archive.
|
||||
swarmArchiveFiles = []string{
|
||||
"COPYING",
|
||||
executablePath("swarm"),
|
||||
}
|
||||
|
||||
// A debian package is created for all executables listed here.
|
||||
debExecutables = []debExecutable{
|
||||
{
|
||||
Name: "abigen",
|
||||
BinaryName: "abigen",
|
||||
Description: "Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages.",
|
||||
},
|
||||
{
|
||||
Name: "bootnode",
|
||||
BinaryName: "bootnode",
|
||||
Description: "Ethereum bootnode.",
|
||||
},
|
||||
{
|
||||
Name: "evm",
|
||||
BinaryName: "evm",
|
||||
Description: "Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode.",
|
||||
},
|
||||
{
|
||||
Name: "geth",
|
||||
BinaryName: "geth",
|
||||
Description: "Ethereum CLI client.",
|
||||
},
|
||||
{
|
||||
Name: "puppeth",
|
||||
BinaryName: "puppeth",
|
||||
Description: "Ethereum private network manager.",
|
||||
},
|
||||
{
|
||||
Name: "rlpdump",
|
||||
BinaryName: "rlpdump",
|
||||
Description: "Developer utility tool that prints RLP structures.",
|
||||
},
|
||||
{
|
||||
Name: "swarm",
|
||||
Description: "Ethereum Swarm daemon and tools",
|
||||
},
|
||||
{
|
||||
Name: "wnode",
|
||||
BinaryName: "wnode",
|
||||
Description: "Ethereum Whisper diagnostic tool",
|
||||
},
|
||||
}
|
||||
|
||||
// A debian package is created for all executables listed here.
|
||||
debSwarmExecutables = []debExecutable{
|
||||
{
|
||||
BinaryName: "swarm",
|
||||
PackageName: "ethereum-swarm",
|
||||
Description: "Ethereum Swarm daemon and tools",
|
||||
},
|
||||
}
|
||||
|
||||
debEthereum = debPackage{
|
||||
Name: "ethereum",
|
||||
Version: params.Version,
|
||||
Executables: debExecutables,
|
||||
}
|
||||
|
||||
debSwarm = debPackage{
|
||||
Name: "ethereum-swarm",
|
||||
Version: sv.Version,
|
||||
Executables: debSwarmExecutables,
|
||||
}
|
||||
|
||||
// Debian meta packages to build and push to Ubuntu PPA
|
||||
debPackages = []debPackage{
|
||||
debSwarm,
|
||||
debEthereum,
|
||||
}
|
||||
|
||||
// Packages to be cross-compiled by the xgo command
|
||||
allCrossCompiledArchiveFiles = append(allToolsArchiveFiles, swarmArchiveFiles...)
|
||||
|
||||
// Distros for which packages are created.
|
||||
	// Note: vivid is unsupported because there is no golang-1.6 package for it.
	// Note: wily is unsupported because it was officially deprecated on launchpad.
	// Note: yakkety is unsupported because it was officially deprecated on launchpad.
	// Note: zesty is unsupported because it was officially deprecated on launchpad.
	debDistros = []string{"trusty", "xenial", "artful", "bionic"}
	// Note: artful is unsupported because it was officially deprecated on launchpad.
	debDistros = []string{"trusty", "xenial", "bionic", "cosmic"}
|
||||
)
|
||||
|
||||
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
|
||||
@ -182,13 +216,13 @@ func doInstall(cmdline []string) {
|
||||
// Check Go version. People regularly open issues about compilation
|
||||
// failure with outdated Go. This should save them the trouble.
|
||||
if !strings.Contains(runtime.Version(), "devel") {
|
||||
// Figure out the minor version number since we can't textually compare (1.10 < 1.8)
|
||||
// Figure out the minor version number since we can't textually compare (1.10 < 1.9)
|
||||
var minor int
|
||||
fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
|
||||
|
||||
if minor < 8 {
|
||||
if minor < 9 {
|
||||
log.Println("You have Go version", runtime.Version())
|
||||
log.Println("go-ethereum requires at least Go version 1.8 and cannot")
|
||||
log.Println("go-ethereum requires at least Go version 1.9 and cannot")
|
||||
log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
|
||||
os.Exit(1)
|
||||
}
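The numeric parse is the whole point of the comment in this hunk: comparing "go1.10" and "go1.9" as strings gets the order wrong, so the minor number is extracted first. A small illustrative snippet of the same Sscanf approach (hypothetical, not part of the diff):

```go
// Demonstrates why the minor Go version is parsed numerically instead of
// compared as a string, mirroring the check in ci.go above.
package main

import (
	"fmt"
	"strings"
)

func minorOf(version string) int {
	var minor int
	fmt.Sscanf(strings.TrimPrefix(version, "go1."), "%d", &minor)
	return minor
}

func main() {
	fmt.Println("go1.10" < "go1.9")                   // true: lexical order is wrong
	fmt.Println(minorOf("go1.10") < minorOf("go1.9")) // false: numeric order is right
	fmt.Println(minorOf("go1.11.4"))                  // 11: the trailing ".4" is ignored by %d
}
```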
|
||||
@ -262,16 +296,6 @@ func goTool(subcmd string, args ...string) *exec.Cmd {
|
||||
|
||||
func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd {
|
||||
cmd := build.GoTool(subcmd, args...)
|
||||
if subcmd == "build" || subcmd == "install" || subcmd == "test" {
|
||||
// Go CGO has a Windows linker error prior to 1.8 (https://github.com/golang/go/issues/8756).
|
||||
// Work around issue by allowing multiple definitions for <1.8 builds.
|
||||
var minor int
|
||||
fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
|
||||
|
||||
if runtime.GOOS == "windows" && minor < 8 {
|
||||
cmd.Args = append(cmd.Args, []string{"-ldflags", "-extldflags -Wl,--allow-multiple-definition"}...)
|
||||
}
|
||||
}
|
||||
cmd.Env = []string{"GOPATH=" + build.GOPATH()}
|
||||
if arch == "" || arch == runtime.GOARCH {
|
||||
cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
|
||||
@ -296,9 +320,7 @@ func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd
|
||||
// "tests" also includes static analysis tools such as vet.
|
||||
|
||||
func doTest(cmdline []string) {
|
||||
var (
|
||||
coverage = flag.Bool("coverage", false, "Whether to record code coverage")
|
||||
)
|
||||
coverage := flag.Bool("coverage", false, "Whether to record code coverage")
|
||||
flag.CommandLine.Parse(cmdline)
|
||||
env := build.Env()
|
||||
|
||||
@ -308,14 +330,11 @@ func doTest(cmdline []string) {
|
||||
}
|
||||
packages = build.ExpandPackagesNoVendor(packages)
|
||||
|
||||
// Run analysis tools before the tests.
|
||||
build.MustRun(goTool("vet", packages...))
|
||||
|
||||
// Run the actual tests.
|
||||
gotest := goTool("test", buildFlags(env)...)
|
||||
// Test a single package at a time. CI builders are slow
|
||||
// and some tests run into timeouts under load.
|
||||
gotest.Args = append(gotest.Args, "-p", "1")
|
||||
gotest := goTool("test", buildFlags(env)...)
|
||||
gotest.Args = append(gotest.Args, "-p", "1", "-timeout", "5m")
|
||||
if *coverage {
|
||||
gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
|
||||
}
|
||||
@ -339,7 +358,11 @@ func doLint(cmdline []string) {
|
||||
// Run fast linters batched together
|
||||
configs := []string{
|
||||
"--vendor",
|
||||
"--tests",
|
||||
"--deadline=2m",
|
||||
"--disable-all",
|
||||
"--enable=goimports",
|
||||
"--enable=varcheck",
|
||||
"--enable=vet",
|
||||
"--enable=gofmt",
|
||||
"--enable=misspell",
|
||||
@ -350,13 +373,12 @@ func doLint(cmdline []string) {
|
||||
|
||||
// Run slow linters one by one
|
||||
for _, linter := range []string{"unconvert", "gosimple"} {
|
||||
configs = []string{"--vendor", "--deadline=10m", "--disable-all", "--enable=" + linter}
|
||||
configs = []string{"--vendor", "--tests", "--deadline=10m", "--disable-all", "--enable=" + linter}
|
||||
build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v2"), append(configs, packages...)...)
|
||||
}
|
||||
}
|
||||
|
||||
// Release Packaging
|
||||
|
||||
func doArchive(cmdline []string) {
|
||||
var (
|
||||
arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging")
|
||||
@ -376,10 +398,14 @@ func doArchive(cmdline []string) {
|
||||
}
|
||||
|
||||
var (
|
||||
env = build.Env()
|
||||
base = archiveBasename(*arch, env)
|
||||
geth = "geth-" + base + ext
|
||||
alltools = "geth-alltools-" + base + ext
|
||||
env = build.Env()
|
||||
|
||||
basegeth = archiveBasename(*arch, params.ArchiveVersion(env.Commit))
|
||||
geth = "geth-" + basegeth + ext
|
||||
alltools = "geth-alltools-" + basegeth + ext
|
||||
|
||||
baseswarm = archiveBasename(*arch, sv.ArchiveVersion(env.Commit))
|
||||
swarm = "swarm-" + baseswarm + ext
|
||||
)
|
||||
maybeSkipArchive(env)
|
||||
if err := build.WriteArchive(geth, gethArchiveFiles); err != nil {
|
||||
@ -388,14 +414,17 @@ func doArchive(cmdline []string) {
|
||||
if err := build.WriteArchive(alltools, allToolsArchiveFiles); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
for _, archive := range []string{geth, alltools} {
|
||||
if err := build.WriteArchive(swarm, swarmArchiveFiles); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
for _, archive := range []string{geth, alltools, swarm} {
|
||||
if err := archiveUpload(archive, *upload, *signer); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func archiveBasename(arch string, env build.Environment) string {
|
||||
func archiveBasename(arch string, archiveVersion string) string {
|
||||
platform := runtime.GOOS + "-" + arch
|
||||
if arch == "arm" {
|
||||
platform += os.Getenv("GOARM")
|
||||
@ -406,18 +435,7 @@ func archiveBasename(arch string, env build.Environment) string {
|
||||
if arch == "ios" {
|
||||
platform = "ios-all"
|
||||
}
|
||||
return platform + "-" + archiveVersion(env)
|
||||
}
|
||||
|
||||
func archiveVersion(env build.Environment) string {
|
||||
version := build.VERSION()
|
||||
if isUnstableBuild(env) {
|
||||
version += "-unstable"
|
||||
}
|
||||
if env.Commit != "" {
|
||||
version += "-" + env.Commit[:8]
|
||||
}
|
||||
return version
|
||||
return platform + "-" + archiveVersion
|
||||
}
|
||||
|
||||
func archiveUpload(archive string, blobstore string, signer string) error {
|
||||
@ -467,7 +485,6 @@ func maybeSkipArchive(env build.Environment) {
|
||||
}
|
||||
|
||||
// Debian Packaging
|
||||
|
||||
func doDebianSource(cmdline []string) {
|
||||
var (
|
||||
signer = flag.String("signer", "", `Signing key name, also used as package author`)
|
||||
@ -491,21 +508,23 @@ func doDebianSource(cmdline []string) {
|
||||
build.MustRun(gpg)
|
||||
}
|
||||
|
||||
// Create the packages.
|
||||
for _, distro := range debDistros {
|
||||
meta := newDebMetadata(distro, *signer, env, now)
|
||||
pkgdir := stageDebianSource(*workdir, meta)
|
||||
debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc")
|
||||
debuild.Dir = pkgdir
|
||||
build.MustRun(debuild)
|
||||
// Create Debian packages and upload them
|
||||
for _, pkg := range debPackages {
|
||||
for _, distro := range debDistros {
|
||||
meta := newDebMetadata(distro, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
|
||||
pkgdir := stageDebianSource(*workdir, meta)
|
||||
debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc")
|
||||
debuild.Dir = pkgdir
|
||||
build.MustRun(debuild)
|
||||
|
||||
changes := fmt.Sprintf("%s_%s_source.changes", meta.Name(), meta.VersionString())
|
||||
changes = filepath.Join(*workdir, changes)
|
||||
if *signer != "" {
|
||||
build.MustRunCommand("debsign", changes)
|
||||
}
|
||||
if *upload != "" {
|
||||
build.MustRunCommand("dput", *upload, changes)
|
||||
changes := fmt.Sprintf("%s_%s_source.changes", meta.Name(), meta.VersionString())
|
||||
changes = filepath.Join(*workdir, changes)
|
||||
if *signer != "" {
|
||||
build.MustRunCommand("debsign", changes)
|
||||
}
|
||||
if *upload != "" {
|
||||
build.MustRunCommand("dput", *upload, changes)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -530,9 +549,17 @@ func isUnstableBuild(env build.Environment) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type debPackage struct {
|
||||
Name string // the name of the Debian package to produce, e.g. "ethereum", or "ethereum-swarm"
|
||||
Version string // the clean version of the debPackage, e.g. 1.8.12 or 0.3.0, without any metadata
|
||||
Executables []debExecutable // executables to be included in the package
|
||||
}
|
||||
|
||||
type debMetadata struct {
|
||||
Env build.Environment
|
||||
|
||||
PackageName string
|
||||
|
||||
// go-ethereum version being built. Note that this
|
||||
// is not the debian package version. The package version
|
||||
// is constructed by VersionString.
|
||||
@ -544,21 +571,33 @@ type debMetadata struct {
|
||||
}
|
||||
|
||||
type debExecutable struct {
|
||||
Name, Description string
|
||||
PackageName string
|
||||
BinaryName string
|
||||
Description string
|
||||
}
|
||||
|
||||
func newDebMetadata(distro, author string, env build.Environment, t time.Time) debMetadata {
|
||||
// Package returns the name of the package if present, or
// falls back to BinaryName
|
||||
func (d debExecutable) Package() string {
|
||||
if d.PackageName != "" {
|
||||
return d.PackageName
|
||||
}
|
||||
return d.BinaryName
|
||||
}
|
||||
|
||||
func newDebMetadata(distro, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata {
|
||||
if author == "" {
|
||||
// No signing key, use default author.
|
||||
author = "Ethereum Builds <fjl@ethereum.org>"
|
||||
}
|
||||
return debMetadata{
|
||||
PackageName: name,
|
||||
Env: env,
|
||||
Author: author,
|
||||
Distro: distro,
|
||||
Version: build.VERSION(),
|
||||
Version: version,
|
||||
Time: t.Format(time.RFC1123Z),
|
||||
Executables: debExecutables,
|
||||
Executables: exes,
|
||||
}
|
||||
}
|
||||
|
||||
@ -566,9 +605,9 @@ func newDebMetadata(distro, author string, env build.Environment, t time.Time) d
|
||||
// on all executable packages.
|
||||
func (meta debMetadata) Name() string {
|
||||
if isUnstableBuild(meta.Env) {
|
||||
return "ethereum-unstable"
|
||||
return meta.PackageName + "-unstable"
|
||||
}
|
||||
return "ethereum"
|
||||
return meta.PackageName
|
||||
}
|
||||
|
||||
// VersionString returns the debian version of the packages.
|
||||
@ -595,9 +634,9 @@ func (meta debMetadata) ExeList() string {
|
||||
// ExeName returns the package name of an executable package.
|
||||
func (meta debMetadata) ExeName(exe debExecutable) string {
|
||||
if isUnstableBuild(meta.Env) {
|
||||
return exe.Name + "-unstable"
|
||||
return exe.Package() + "-unstable"
|
||||
}
|
||||
return exe.Name
|
||||
return exe.Package()
|
||||
}
|
||||
|
||||
// ExeConflicts returns the content of the Conflicts field
|
||||
@ -612,7 +651,7 @@ func (meta debMetadata) ExeConflicts(exe debExecutable) string {
|
||||
// be preferred and the conflicting files should be handled via
|
||||
// alternates. We might do this eventually but using a conflict is
|
||||
// easier now.
|
||||
return "ethereum, " + exe.Name
|
||||
return "ethereum, " + exe.Package()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
@ -629,24 +668,23 @@ func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) {
|
||||
|
||||
// Put the debian build files in place.
|
||||
debian := filepath.Join(pkgdir, "debian")
|
||||
build.Render("build/deb.rules", filepath.Join(debian, "rules"), 0755, meta)
|
||||
build.Render("build/deb.changelog", filepath.Join(debian, "changelog"), 0644, meta)
|
||||
build.Render("build/deb.control", filepath.Join(debian, "control"), 0644, meta)
|
||||
build.Render("build/deb.copyright", filepath.Join(debian, "copyright"), 0644, meta)
|
||||
build.Render("build/deb/"+meta.PackageName+"/deb.rules", filepath.Join(debian, "rules"), 0755, meta)
|
||||
build.Render("build/deb/"+meta.PackageName+"/deb.changelog", filepath.Join(debian, "changelog"), 0644, meta)
|
||||
build.Render("build/deb/"+meta.PackageName+"/deb.control", filepath.Join(debian, "control"), 0644, meta)
|
||||
build.Render("build/deb/"+meta.PackageName+"/deb.copyright", filepath.Join(debian, "copyright"), 0644, meta)
|
||||
build.RenderString("8\n", filepath.Join(debian, "compat"), 0644, meta)
|
||||
build.RenderString("3.0 (native)\n", filepath.Join(debian, "source/format"), 0644, meta)
|
||||
for _, exe := range meta.Executables {
|
||||
install := filepath.Join(debian, meta.ExeName(exe)+".install")
|
||||
docs := filepath.Join(debian, meta.ExeName(exe)+".docs")
|
||||
build.Render("build/deb.install", install, 0644, exe)
|
||||
build.Render("build/deb.docs", docs, 0644, exe)
|
||||
build.Render("build/deb/"+meta.PackageName+"/deb.install", install, 0644, exe)
|
||||
build.Render("build/deb/"+meta.PackageName+"/deb.docs", docs, 0644, exe)
|
||||
}
|
||||
|
||||
return pkgdir
|
||||
}
|
||||
|
||||
// Windows installer
|
||||
|
||||
func doWindowsInstaller(cmdline []string) {
|
||||
// Parse the flags and maybe skip installer generation on PRs
|
||||
var (
|
||||
@ -696,11 +734,11 @@ func doWindowsInstaller(cmdline []string) {
|
||||
// Build the installer. This assumes that all the needed files have been previously
|
||||
// built (don't mix building and packaging to keep cross compilation complexity to a
|
||||
// minimum).
|
||||
version := strings.Split(build.VERSION(), ".")
|
||||
version := strings.Split(params.Version, ".")
|
||||
if env.Commit != "" {
|
||||
version[2] += "-" + env.Commit[:8]
|
||||
}
|
||||
installer, _ := filepath.Abs("geth-" + archiveBasename(*arch, env) + ".exe")
|
||||
installer, _ := filepath.Abs("geth-" + archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + ".exe")
|
||||
build.MustRunCommand("makensis.exe",
|
||||
"/DOUTPUTFILE="+installer,
|
||||
"/DMAJORVERSION="+version[0],
|
||||
@ -736,9 +774,9 @@ func doAndroidArchive(cmdline []string) {
|
||||
log.Fatal("Please ensure ANDROID_NDK points to your Android NDK")
|
||||
}
|
||||
// Build the Android archive and Maven resources
|
||||
build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile"))
|
||||
build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
|
||||
build.MustRun(gomobileTool("init", "--ndk", os.Getenv("ANDROID_NDK")))
|
||||
build.MustRun(gomobileTool("bind", "--target", "android", "--javapkg", "org.ethereum", "-v", "github.com/ethereum/go-ethereum/mobile"))
|
||||
build.MustRun(gomobileTool("bind", "-ldflags", "-s -w", "--target", "android", "--javapkg", "org.ethereum", "-v", "github.com/ethereum/go-ethereum/mobile"))
|
||||
|
||||
if *local {
|
||||
// If we're building locally, copy bundle to build dir and skip Maven
|
||||
@ -752,7 +790,7 @@ func doAndroidArchive(cmdline []string) {
|
||||
maybeSkipArchive(env)
|
||||
|
||||
// Sign and upload the archive to Azure
|
||||
archive := "geth-" + archiveBasename("android", env) + ".aar"
|
||||
archive := "geth-" + archiveBasename("android", params.ArchiveVersion(env.Commit)) + ".aar"
|
||||
os.Rename("geth.aar", archive)
|
||||
|
||||
if err := archiveUpload(archive, *upload, *signer); err != nil {
|
||||
@ -762,22 +800,27 @@ func doAndroidArchive(cmdline []string) {
|
||||
os.Rename(archive, meta.Package+".aar")
|
||||
if *signer != "" && *deploy != "" {
|
||||
// Import the signing key into the local GPG instance
|
||||
if b64key := os.Getenv(*signer); b64key != "" {
|
||||
key, err := base64.StdEncoding.DecodeString(b64key)
|
||||
if err != nil {
|
||||
log.Fatalf("invalid base64 %s", *signer)
|
||||
}
|
||||
gpg := exec.Command("gpg", "--import")
|
||||
gpg.Stdin = bytes.NewReader(key)
|
||||
build.MustRun(gpg)
|
||||
b64key := os.Getenv(*signer)
|
||||
key, err := base64.StdEncoding.DecodeString(b64key)
|
||||
if err != nil {
|
||||
log.Fatalf("invalid base64 %s", *signer)
|
||||
}
|
||||
gpg := exec.Command("gpg", "--import")
|
||||
gpg.Stdin = bytes.NewReader(key)
|
||||
build.MustRun(gpg)
|
||||
|
||||
keyID, err := build.PGPKeyID(string(key))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// Upload the artifacts to Sonatype and/or Maven Central
|
||||
repo := *deploy + "/service/local/staging/deploy/maven2"
|
||||
if meta.Develop {
|
||||
repo = *deploy + "/content/repositories/snapshots"
|
||||
}
|
||||
build.MustRunCommand("mvn", "gpg:sign-and-deploy-file",
|
||||
build.MustRunCommand("mvn", "gpg:sign-and-deploy-file", "-e", "-X",
|
||||
"-settings=build/mvn.settings", "-Durl="+repo, "-DrepositoryId=ossrh",
|
||||
"-Dgpg.keyname="+keyID,
|
||||
"-DpomFile="+meta.Package+".pom", "-Dfile="+meta.Package+".aar")
|
||||
}
|
||||
}
|
||||
@ -787,9 +830,10 @@ func gomobileTool(subcmd string, args ...string) *exec.Cmd {
|
||||
cmd.Args = append(cmd.Args, args...)
|
||||
cmd.Env = []string{
|
||||
"GOPATH=" + build.GOPATH(),
|
||||
"PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"),
|
||||
}
|
||||
for _, e := range os.Environ() {
|
||||
if strings.HasPrefix(e, "GOPATH=") {
|
||||
if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "PATH=") {
|
||||
continue
|
||||
}
|
||||
cmd.Env = append(cmd.Env, e)
|
||||
@ -831,7 +875,7 @@ func newMavenMetadata(env build.Environment) mavenMetadata {
|
||||
}
|
||||
}
|
||||
// Render the version and package strings
|
||||
version := build.VERSION()
|
||||
version := params.Version
|
||||
if isUnstableBuild(env) {
|
||||
version += "-SNAPSHOT"
|
||||
}
|
||||
@ -856,9 +900,9 @@ func doXCodeFramework(cmdline []string) {
|
||||
env := build.Env()
|
||||
|
||||
// Build the iOS XCode framework
|
||||
build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile"))
|
||||
build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
|
||||
build.MustRun(gomobileTool("init"))
|
||||
bind := gomobileTool("bind", "--target", "ios", "--tags", "ios", "-v", "github.com/ethereum/go-ethereum/mobile")
|
||||
bind := gomobileTool("bind", "-ldflags", "-s -w", "--target", "ios", "--tags", "ios", "-v", "github.com/ethereum/go-ethereum/mobile")
|
||||
|
||||
if *local {
|
||||
// If we're building locally, use the build folder and stop afterwards
|
||||
@ -866,7 +910,7 @@ func doXCodeFramework(cmdline []string) {
|
||||
build.MustRun(bind)
|
||||
return
|
||||
}
|
||||
archive := "geth-" + archiveBasename("ios", env)
|
||||
archive := "geth-" + archiveBasename("ios", params.ArchiveVersion(env.Commit))
|
||||
if err := os.Mkdir(archive, os.ModePerm); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@ -922,7 +966,7 @@ func newPodMetadata(env build.Environment, archive string) podMetadata {
|
||||
}
|
||||
}
|
||||
}
|
||||
version := build.VERSION()
|
||||
version := params.Version
|
||||
if isUnstableBuild(env) {
|
||||
version += "-unstable." + env.Buildnum
|
||||
}
|
||||
@ -952,7 +996,7 @@ func doXgo(cmdline []string) {
|
||||
|
||||
if *alltools {
|
||||
args = append(args, []string{"--dest", GOBIN}...)
|
||||
for _, res := range allToolsArchiveFiles {
|
||||
for _, res := range allCrossCompiledArchiveFiles {
|
||||
if strings.HasPrefix(res, GOBIN) {
|
||||
// Binary tool found, cross build it explicitly
|
||||
args = append(args, "./"+filepath.Join("cmd", filepath.Base(res)))
|
||||
@ -991,7 +1035,7 @@ func xgoTool(args []string) *exec.Cmd {
|
||||
func doPurge(cmdline []string) {
|
||||
var (
|
||||
store = flag.String("store", "", `Destination from where to purge archives (usually "gethstore/builds")`)
|
||||
limit = flag.Int("days", 30, `Age threshold above which to delete unstalbe archives`)
|
||||
limit = flag.Int("days", 30, `Age threshold above which to delete unstable archives`)
|
||||
)
|
||||
flag.CommandLine.Parse(cmdline)
|
||||
|
||||
@ -1018,23 +1062,14 @@ func doPurge(cmdline []string) {
|
||||
}
|
||||
for i := 0; i < len(blobs); i++ {
|
||||
for j := i + 1; j < len(blobs); j++ {
|
||||
iTime, err := time.Parse(time.RFC1123, blobs[i].Properties.LastModified)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
jTime, err := time.Parse(time.RFC1123, blobs[j].Properties.LastModified)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if iTime.After(jTime) {
|
||||
if blobs[i].Properties.LastModified.After(blobs[j].Properties.LastModified) {
|
||||
blobs[i], blobs[j] = blobs[j], blobs[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
// Filter out all archives more recent than the given threshold
|
||||
for i, blob := range blobs {
|
||||
timestamp, _ := time.Parse(time.RFC1123, blob.Properties.LastModified)
|
||||
if time.Since(timestamp) < time.Duration(*limit)*24*time.Hour {
|
||||
if time.Since(blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour {
|
||||
blobs = blobs[:i]
|
||||
break
|
||||
}
|
||||
|
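The purge hunk above keeps the hand-rolled pairwise swap sort and only simplifies the comparison, now that LastModified is already a time.Time in the newer Azure SDK. For reference, the same oldest-first ordering can be expressed with sort.Slice; the Blob/Properties types below are simplified stand-ins for the SDK types, so this is a sketch rather than a drop-in replacement:

```go
// Oldest-first ordering of blob listings by LastModified, equivalent to the
// pairwise swap loop in doPurge above (stand-in types, illustrative only).
package main

import (
	"fmt"
	"sort"
	"time"
)

type Properties struct{ LastModified time.Time }

type Blob struct {
	Name       string
	Properties Properties
}

func main() {
	now := time.Now()
	blobs := []Blob{
		{"new", Properties{now}},
		{"old", Properties{now.Add(-48 * time.Hour)}},
		{"mid", Properties{now.Add(-24 * time.Hour)}},
	}
	sort.Slice(blobs, func(i, j int) bool {
		return blobs[i].Properties.LastModified.Before(blobs[j].Properties.LastModified)
	})
	for _, b := range blobs {
		fmt.Println(b.Name) // prints: old, mid, new
	}
}
```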
19  vendor/github.com/ethereum/go-ethereum/build/clean_go_build_cache.sh (generated, vendored, executable file)
@ -0,0 +1,19 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Cleaning the Go cache only makes sense if we actually have Go installed... or
|
||||
# if Go is actually callable. This does not hold true during deb packaging, so
|
||||
# we need an explicit check to avoid build failures.
|
||||
if ! command -v go > /dev/null; then
|
||||
exit
|
||||
fi
|
||||
|
||||
version_gt() {
|
||||
test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"
|
||||
}
|
||||
|
||||
golang_version=$(go version |cut -d' ' -f3 |sed 's/go//')
|
||||
|
||||
# Clean go build cache when go version is greater than or equal to 1.10
|
||||
if !(version_gt 1.10 $golang_version); then
|
||||
go clean -cache
|
||||
fi
|
1  vendor/github.com/ethereum/go-ethereum/build/deb.install (generated, vendored)
@ -1 +0,0 @@
|
||||
build/bin/{{.Name}} usr/bin
|
13  vendor/github.com/ethereum/go-ethereum/build/deb.rules (generated, vendored)
@ -1,13 +0,0 @@
|
||||
#!/usr/bin/make -f
|
||||
# -*- makefile -*-
|
||||
|
||||
# Uncomment this to turn on verbose mode.
|
||||
#export DH_VERBOSE=1
|
||||
|
||||
override_dh_auto_build:
|
||||
build/env.sh /usr/lib/go-1.9/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
|
||||
|
||||
override_dh_auto_test:
|
||||
|
||||
%:
|
||||
dh $@
|
19  vendor/github.com/ethereum/go-ethereum/build/deb/ethereum-swarm/deb.control (generated, vendored, new file)
@ -0,0 +1,19 @@
|
||||
Source: {{.Name}}
|
||||
Section: science
|
||||
Priority: extra
|
||||
Maintainer: {{.Author}}
|
||||
Build-Depends: debhelper (>= 8.0.0), golang-1.10
|
||||
Standards-Version: 3.9.5
|
||||
Homepage: https://ethereum.org
|
||||
Vcs-Git: git://github.com/ethereum/go-ethereum.git
|
||||
Vcs-Browser: https://github.com/ethereum/go-ethereum
|
||||
|
||||
{{range .Executables}}
|
||||
Package: {{$.ExeName .}}
|
||||
Conflicts: {{$.ExeConflicts .}}
|
||||
Architecture: any
|
||||
Depends: ${shlibs:Depends}, ${misc:Depends}
|
||||
Built-Using: ${misc:Built-Using}
|
||||
Description: {{.Description}}
|
||||
{{.Description}}
|
||||
{{end}}
|
@ -1,4 +1,4 @@
|
||||
Copyright 2016 The go-ethereum Authors
|
||||
Copyright 2018 The go-ethereum Authors
|
||||
|
||||
go-ethereum is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
1  vendor/github.com/ethereum/go-ethereum/build/deb/ethereum-swarm/deb.install (generated, vendored, new file)
@ -0,0 +1 @@
|
||||
build/bin/{{.BinaryName}} usr/bin
|
13  vendor/github.com/ethereum/go-ethereum/build/deb/ethereum-swarm/deb.rules (generated, vendored, new file)
@ -0,0 +1,13 @@
|
||||
#!/usr/bin/make -f
|
||||
# -*- makefile -*-
|
||||
|
||||
# Uncomment this to turn on verbose mode.
|
||||
#export DH_VERBOSE=1
|
||||
|
||||
override_dh_auto_build:
|
||||
build/env.sh /usr/lib/go-1.10/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
|
||||
|
||||
override_dh_auto_test:
|
||||
|
||||
%:
|
||||
dh $@
|
5  vendor/github.com/ethereum/go-ethereum/build/deb/ethereum/deb.changelog (generated, vendored, new file)
@ -0,0 +1,5 @@
|
||||
{{.Name}} ({{.VersionString}}) {{.Distro}}; urgency=low
|
||||
|
||||
* git build of {{.Env.Commit}}
|
||||
|
||||
-- {{.Author}} {{.Time}}
|
@ -2,7 +2,7 @@ Source: {{.Name}}
|
||||
Section: science
|
||||
Priority: extra
|
||||
Maintainer: {{.Author}}
|
||||
Build-Depends: debhelper (>= 8.0.0), golang-1.9
|
||||
Build-Depends: debhelper (>= 8.0.0), golang-1.10
|
||||
Standards-Version: 3.9.5
|
||||
Homepage: https://ethereum.org
|
||||
Vcs-Git: git://github.com/ethereum/go-ethereum.git
|
||||
@ -11,8 +11,8 @@ Vcs-Browser: https://github.com/ethereum/go-ethereum
|
||||
Package: {{.Name}}
|
||||
Architecture: any
|
||||
Depends: ${misc:Depends}, {{.ExeList}}
|
||||
Description: Meta-package to install geth and other tools
|
||||
Meta-package to install geth and other tools
|
||||
Description: Meta-package to install geth, swarm, and other tools
|
||||
Meta-package to install geth, swarm and other tools
|
||||
|
||||
{{range .Executables}}
|
||||
Package: {{$.ExeName .}}
|
14  vendor/github.com/ethereum/go-ethereum/build/deb/ethereum/deb.copyright (generated, vendored, new file)
@ -0,0 +1,14 @@
|
||||
Copyright 2018 The go-ethereum Authors
|
||||
|
||||
go-ethereum is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
go-ethereum is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
1  vendor/github.com/ethereum/go-ethereum/build/deb/ethereum/deb.docs (generated, vendored, new file)
@ -0,0 +1 @@
|
||||
AUTHORS
|
1  vendor/github.com/ethereum/go-ethereum/build/deb/ethereum/deb.install (generated, vendored, new file)
@ -0,0 +1 @@
|
||||
build/bin/{{.BinaryName}} usr/bin
|
13  vendor/github.com/ethereum/go-ethereum/build/deb/ethereum/deb.rules (generated, vendored, new file)
@ -0,0 +1,13 @@
|
||||
#!/usr/bin/make -f
|
||||
# -*- makefile -*-
|
||||
|
||||
# Uncomment this to turn on verbose mode.
|
||||
#export DH_VERBOSE=1
|
||||
|
||||
override_dh_auto_build:
|
||||
build/env.sh /usr/lib/go-1.10/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
|
||||
|
||||
override_dh_auto_test:
|
||||
|
||||
%:
|
||||
dh $@
|
18  vendor/github.com/ethereum/go-ethereum/build/goimports.sh (generated, vendored, executable file)
@ -0,0 +1,18 @@
|
||||
#!/bin/sh
|
||||
|
||||
find_files() {
|
||||
find . ! \( \
|
||||
\( \
|
||||
-path '.github' \
|
||||
-o -path './build/_workspace' \
|
||||
-o -path './build/bin' \
|
||||
-o -path './crypto/bn256' \
|
||||
-o -path '*/vendor/*' \
|
||||
\) -prune \
|
||||
\) -name '*.go'
|
||||
}
|
||||
|
||||
GOFMT="gofmt -s -w"
|
||||
GOIMPORTS="goimports -w"
|
||||
find_files | xargs $GOFMT
|
||||
find_files | xargs $GOIMPORTS
|
39  vendor/github.com/ethereum/go-ethereum/cmd/abigen/main.go (generated, vendored)
@ -29,7 +29,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
abiFlag = flag.String("abi", "", "Path to the Ethereum contract ABI json to bind")
|
||||
abiFlag = flag.String("abi", "", "Path to the Ethereum contract ABI json to bind, - for STDIN")
|
||||
binFlag = flag.String("bin", "", "Path to the Ethereum contract bytecode (generate deploy method)")
|
||||
typFlag = flag.String("type", "", "Struct name for the binding (default = package name)")
|
||||
|
||||
@ -75,16 +75,27 @@ func main() {
|
||||
bins []string
|
||||
types []string
|
||||
)
|
||||
if *solFlag != "" {
|
||||
if *solFlag != "" || (*abiFlag == "-" && *pkgFlag == "") {
|
||||
// Generate the list of types to exclude from binding
|
||||
exclude := make(map[string]bool)
|
||||
for _, kind := range strings.Split(*excFlag, ",") {
|
||||
exclude[strings.ToLower(kind)] = true
|
||||
}
|
||||
contracts, err := compiler.CompileSolidity(*solcFlag, *solFlag)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to build Solidity contract: %v\n", err)
|
||||
os.Exit(-1)
|
||||
|
||||
var contracts map[string]*compiler.Contract
|
||||
var err error
|
||||
if *solFlag != "" {
|
||||
contracts, err = compiler.CompileSolidity(*solcFlag, *solFlag)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to build Solidity contract: %v\n", err)
|
||||
os.Exit(-1)
|
||||
}
|
||||
} else {
|
||||
contracts, err = contractsFromStdin()
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to read input ABIs from STDIN: %v\n", err)
|
||||
os.Exit(-1)
|
||||
}
|
||||
}
|
||||
// Gather all non-excluded contract for binding
|
||||
for name, contract := range contracts {
|
||||
@ -100,7 +111,13 @@ func main() {
|
||||
}
|
||||
} else {
|
||||
// Otherwise load up the ABI, optional bytecode and type name from the parameters
|
||||
abi, err := ioutil.ReadFile(*abiFlag)
|
||||
var abi []byte
|
||||
var err error
|
||||
if *abiFlag == "-" {
|
||||
abi, err = ioutil.ReadAll(os.Stdin)
|
||||
} else {
|
||||
abi, err = ioutil.ReadFile(*abiFlag)
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to read input ABI: %v\n", err)
|
||||
os.Exit(-1)
|
||||
@ -138,3 +155,11 @@ func main() {
|
||||
os.Exit(-1)
|
||||
}
|
||||
}
|
||||
|
||||
func contractsFromStdin() (map[string]*compiler.Contract, error) {
|
||||
bytes, err := ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return compiler.ParseCombinedJSON(bytes, "", "", "", "")
|
||||
}
|
||||
|
16  vendor/github.com/ethereum/go-ethereum/cmd/bootnode/main.go (generated, vendored)
@ -29,6 +29,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/discv5"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
"github.com/ethereum/go-ethereum/p2p/netutil"
|
||||
)
|
||||
@ -37,7 +38,7 @@ func main() {
|
||||
var (
|
||||
listenAddr = flag.String("addr", ":30301", "listen address")
|
||||
genKey = flag.String("genkey", "", "generate a node key")
|
||||
writeAddr = flag.Bool("writeaddress", false, "write out the node's pubkey hash and quit")
|
||||
writeAddr = flag.Bool("writeaddress", false, "write out the node's public key and quit")
|
||||
nodeKeyFile = flag.String("nodekey", "", "private key filename")
|
||||
nodeKeyHex = flag.String("nodekeyhex", "", "private key as hex (for testing)")
|
||||
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")
|
||||
@ -85,7 +86,7 @@ func main() {
|
||||
}
|
||||
|
||||
if *writeAddr {
|
||||
fmt.Printf("%v\n", discover.PubkeyID(&nodeKey.PublicKey))
|
||||
fmt.Printf("%x\n", crypto.FromECDSAPub(&nodeKey.PublicKey)[1:])
|
||||
os.Exit(0)
|
||||
}
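The -writeaddress hunk above replaces the removed discover.PubkeyID helper with a direct dump of the node's public key. A small illustrative snippet of what the new line prints, assuming the vendored go-ethereum crypto package (not part of the diff itself):

```go
// Illustration of the new bootnode -writeaddress output: the 64-byte
// uncompressed secp256k1 public key with the leading 0x04 byte stripped.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	pub := crypto.FromECDSAPub(&key.PublicKey) // 65 bytes, 0x04 prefix
	fmt.Printf("%x\n", pub[1:])                // 64-byte node identity
}
```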
|
||||
|
||||
@ -118,16 +119,17 @@ func main() {
|
||||
}
|
||||
|
||||
if *runv5 {
|
||||
if _, err := discv5.ListenUDP(nodeKey, conn, realaddr, "", restrictList); err != nil {
|
||||
if _, err := discv5.ListenUDP(nodeKey, conn, "", restrictList); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
} else {
|
||||
db, _ := enode.OpenDB("")
|
||||
ln := enode.NewLocalNode(db, nodeKey)
|
||||
cfg := discover.Config{
|
||||
PrivateKey: nodeKey,
|
||||
AnnounceAddr: realaddr,
|
||||
NetRestrict: restrictList,
|
||||
PrivateKey: nodeKey,
|
||||
NetRestrict: restrictList,
|
||||
}
|
||||
if _, err := discover.ListenUDP(conn, cfg); err != nil {
|
||||
if _, err := discover.ListenUDP(conn, ln, cfg); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
1
vendor/github.com/ethereum/go-ethereum/cmd/clef/4byte.json
generated
vendored
Normal file
1
vendor/github.com/ethereum/go-ethereum/cmd/clef/4byte.json
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
878
vendor/github.com/ethereum/go-ethereum/cmd/clef/README.md
generated
vendored
Normal file
878
vendor/github.com/ethereum/go-ethereum/cmd/clef/README.md
generated
vendored
Normal file
@ -0,0 +1,878 @@
|
||||
Clef
|
||||
----
|
||||
Clef can be used to sign transactions and data and is meant as a replacement for geth's account management.
|
||||
This allows DApps not to depend on geth's account management. When a DApp wants to sign data it can send the data to
|
||||
the signer; the signer will then provide the user with context and ask the user for permission to sign the data. If
the user grants the signing request, the signer will send the signature back to the DApp.
|
||||
|
||||
This setup allows a DApp to connect to a remote Ethereum node and send transactions that are locally signed. This can
|
||||
help in situations where a DApp is connected to a remote node because a local Ethereum node is not available or not
synchronised with the chain, or where a particular Ethereum node has no (or only limited) built-in account management.
|
||||
|
||||
Clef can run as a daemon on the same machine, off a USB stick like [usb armory](https://inversepath.com/usbarmory),
or in a separate VM in a [QubesOS](https://www.qubes-os.org/)-type OS setup.
|
||||
|
||||
Check out
|
||||
|
||||
* the [tutorial](tutorial.md) for some concrete examples on how the signer works.
|
||||
* the [setup docs](docs/setup.md) for some information on how to configure it to work on QubesOS or USBArmory.
|
||||
|
||||
|
||||
## Command line flags
|
||||
Clef accepts the following command line options:
|
||||
```
|
||||
COMMANDS:
|
||||
init Initialize the signer, generate secret storage
|
||||
attest Attest that a js-file is to be used
|
||||
addpw Store a credential for a keystore file
|
||||
help Shows a list of commands or help for one command
|
||||
|
||||
GLOBAL OPTIONS:
|
||||
--loglevel value log level to emit to the screen (default: 4)
|
||||
--keystore value Directory for the keystore (default: "$HOME/.ethereum/keystore")
|
||||
--configdir value Directory for clef configuration (default: "$HOME/.clef")
|
||||
--networkid value Network identifier (integer, 1=Frontier, 2=Morden (disused), 3=Ropsten, 4=Rinkeby) (default: 1)
|
||||
--lightkdf Reduce key-derivation RAM & CPU usage at some expense of KDF strength
|
||||
--nousb Disables monitoring for and managing USB hardware wallets
|
||||
--rpcaddr value HTTP-RPC server listening interface (default: "localhost")
|
||||
--rpcport value HTTP-RPC server listening port (default: 8550)
|
||||
--signersecret value A file containing the password used to encrypt signer credentials, e.g. keystore credentials and ruleset hash
|
||||
--4bytedb value File containing 4byte-identifiers (default: "./4byte.json")
|
||||
--4bytedb-custom value File used for writing new 4byte-identifiers submitted via API (default: "./4byte-custom.json")
|
||||
--auditlog value File used to emit audit logs. Set to "" to disable (default: "audit.log")
|
||||
--rules value Enable rule-engine (default: "rules.json")
|
||||
--stdio-ui Use STDIN/STDOUT as a channel for an external UI. This means that an STDIN/STDOUT is used for RPC-communication with a e.g. a graphical user interface, and can be used when the signer is started by an external process.
|
||||
--stdio-ui-test Mechanism to test interface between signer and UI. Requires 'stdio-ui'.
|
||||
--help, -h show help
|
||||
--version, -v print the version
|
||||
|
||||
```
|
||||
|
||||
|
||||
Example:
|
||||
```
|
||||
signer -keystore /my/keystore -chainid 4
|
||||
```
|
||||
|
||||
|
||||
## Security model
|
||||
|
||||
The security model of the signer is as follows:
|
||||
|
||||
* One critical component (the signer binary / daemon) is responsible for handling cryptographic operations: signing, private keys, encryption/decryption of keystore files.
|
||||
* The signer binary has a well-defined 'external' API.
|
||||
* The 'external' API is considered UNTRUSTED.
|
||||
* The signer binary also communicates with whatever process that invoked the binary, via stdin/stdout.
|
||||
* This channel is considered 'trusted'. Over this channel, approvals and passwords are communicated.
|
||||
|
||||
The general flow for signing a transaction using e.g. geth is as follows:
|
||||

|
||||
|
||||
In this case, `geth` would be started with `--externalsigner=http://localhost:8550` and would relay requests to `eth.sendTransaction`.
|
||||
|
||||
## TODOs
|
||||
|
||||
Some snags and todos
|
||||
|
||||
* [ ] The signer should take a startup param "--no-change", for UIs that do not contain the capability
|
||||
to perform changes to things, only approve/deny. Such a UI should be able to start the signer in
|
||||
a more secure mode by telling it that it only wants approve/deny capabilities.
|
||||
|
||||
* [x] It would be nice if the signer could collect new 4byte-id:s/method selectors, and have a
|
||||
secondary database for those (`4byte_custom.json`). Users could then (optionally) submit their collections for
|
||||
inclusion upstream.
|
||||
|
||||
* It should be possible to configure the signer to check if an account is indeed known to it, before
|
||||
passing on to the UI. The reason it currently does not, is that it would make it possible to enumerate
|
||||
accounts if it immediately returned "unknown account".
|
||||
* [x] It should be possible to configure the signer to auto-allow listing (certain) accounts, instead of asking every time.
|
||||
* [x] Upon startup, the signer should emit some info to the caller (particularly important when executed in `stdio-ui`-mode),
|
||||
invoking methods with the following info:
|
||||
* [x] Version info about the signer
|
||||
* [x] Address of API (http/ipc)
|
||||
* [ ] List of known accounts
|
||||
* [ ] Have a default timeout on signing operations, so that if the user has not answered within e.g. 60 seconds, the request is rejected.
|
||||
* [ ] `account_signRawTransaction`
|
||||
* [ ] `account_bulkSignTransactions([] transactions)` should
|
||||
* only exist if enabled via config/flag
|
||||
* only allow non-data-sending transactions
|
||||
* all txs must use the same `from`-account
|
||||
* let the user confirm, showing
|
||||
* the total amount
|
||||
* the number of unique recipients
|
||||
|
||||
* Geth todos
|
||||
- The signer should pass the `Origin` header as call-info to the UI. As of right now, the way that info about the request is
|
||||
put together is a bit of a hack into the http server. This could probably be greatly improved
|
||||
- Relay: Geth should be started in `geth --external_signer localhost:8550`.
|
||||
- Currently, the Geth APIs use `common.Address` in the arguments to transaction submission (e.g `to` field). This
|
||||
type is 20 `bytes`, and is incapable of carrying checksum information. The signer uses `common.MixedcaseAddress`, which
|
||||
retains the original input.
|
||||
- The Geth api should switch to use the same type, and relay `to`-account verbatim to the external api.
|
||||
|
||||
* [x] Storage
|
||||
* [x] An encrypted key-value storage should be implemented
|
||||
* See [rules.md](rules.md) for more info about this.
|
||||
|
||||
* Another potential thing to introduce is pairing.
|
||||
* To prevent spurious requests which users just accept, implement a way to "pair" the caller with the signer (external API).
|
||||
* Thus geth/mist/cpp would cryptographically handshake and afterwards the caller would be allowed to make signing requests.
|
||||
* This feature would make the addition of rules less dangerous.
|
||||
|
||||
* Wallets / accounts. Add API methods for wallets.
|
||||
|
||||
## Communication
|
||||
|
||||
### External API
|
||||
|
||||
The signer listens to HTTP requests on `rpcaddr`:`rpcport`, with the same JSONRPC standard as Geth. The messages are
|
||||
expected to be JSON [jsonrpc 2.0 standard](http://www.jsonrpc.org/specification).
|
||||
|
||||
Some of these calls may require user interaction. Clients must be aware that responses
may be delayed significantly, or may never be received, if a user decides to ignore the confirmation request.
|
||||
|
||||
The External API is **untrusted**: it does not accept credentials over this API, nor does it expect
that requests have any authority.
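
For illustration, the snippet below is a minimal Go sketch that performs such a JSON-RPC 2.0 call over plain HTTP using only the standard library. It assumes the signer is reachable on the default `http://localhost:8550` and calls `account_list`; it is a sketch, not part of the signer.

```golang
// Minimal JSON-RPC 2.0 call against Clef's external HTTP API (sketch).
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Single request body; account_list takes no parameters.
	req := []byte(`{"id":1,"jsonrpc":"2.0","method":"account_list","params":[]}`)

	resp, err := http.Post("http://localhost:8550", "application/json", bytes.NewReader(req))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	// The response may be delayed until the user approves or denies the listing.
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```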
|
||||
|
||||
### UI API
|
||||
|
||||
The signer has one native console-based UI, for operation without any standalone tools.
|
||||
However, there is also an API to communicate with an external UI. To enable that UI,
|
||||
the signer needs to be executed with the `--stdio-ui` option, which allocates the
|
||||
`stdin`/`stdout` for the UI-api.
|
||||
|
||||
An example (insecure) proof-of-concept has been implemented in `pythonsigner.py`.
|
||||
|
||||
The model is as follows:
|
||||
|
||||
* The user starts the UI app (`pythonsigner.py`).
|
||||
* The UI app starts the `signer` with `--stdio-ui`, and listens to the
|
||||
process output for confirmation-requests.
|
||||
* The `signer` opens the external http api.
|
||||
* When the `signer` receives requests, it sends a `jsonrpc` request via `stdout`.
|
||||
* The UI app prompts the user accordingly, and responds to the `signer`
|
||||
* The `signer` signs (or not), and responds to the original request.
|
||||
|
||||
## External API
|
||||
|
||||
See the [external api changelog](extapi_changelog.md) for information about changes to this API.
|
||||
|
||||
### Encoding
|
||||
- number: positive integers that are hex encoded
|
||||
- data: hex encoded data
|
||||
- string: ASCII string
|
||||
|
||||
All hex encoded values must be prefixed with `0x`.
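
As a small illustration of the conventions above (not part of the API itself), the following Go sketch produces the `0x`-prefixed encodings for a number and for raw data:

```golang
// Producing the 0x-prefixed hex encodings described above (sketch).
package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
)

func main() {
	// number: positive integer, hex encoded with a 0x prefix.
	gas := big.NewInt(0x55555)
	fmt.Printf("gas:  0x%x\n", gas) // -> 0x55555

	// data: raw bytes, hex encoded with a 0x prefix.
	calldata := []byte{0xab, 0xcd}
	fmt.Println("data: 0x" + hex.EncodeToString(calldata)) // -> 0xabcd
}
```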
|
||||
|
||||
## Methods
|
||||
|
||||
### account_new
|
||||
|
||||
#### Create new password protected account
|
||||
|
||||
The signer will generate a new private key, encrypt it according to the [web3 keystore spec](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) and store it in the keystore directory.
|
||||
The client is responsible for creating a backup of the keystore. If the keystore is lost there is no method of retrieving lost accounts.
|
||||
|
||||
#### Arguments
|
||||
|
||||
None
|
||||
|
||||
#### Result
|
||||
- address [string]: account address that is derived from the generated key
|
||||
- url [string]: location of the keyfile
|
||||
|
||||
#### Sample call
|
||||
```json
|
||||
{
|
||||
"id": 0,
|
||||
"jsonrpc": "2.0",
|
||||
"method": "account_new",
|
||||
"params": []
|
||||
}
|
||||
|
||||
{
|
||||
"id": 0,
|
||||
"jsonrpc": "2.0",
|
||||
"result": {
|
||||
"address": "0xbea9183f8f4f03d427f6bcea17388bdff1cab133",
|
||||
"url": "keystore:///my/keystore/UTC--2017-08-24T08-40-15.419655028Z--bea9183f8f4f03d427f6bcea17388bdff1cab133"
|
||||
}
|
||||
}
|
||||
```
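
For illustration, the same call can be made through the go-ethereum `rpc` client; the endpoint and the result fields below are assumed from the defaults and the sample response above, so treat this as a sketch only.

```golang
// Calling account_new through the go-ethereum rpc client (sketch).
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8550")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer client.Close()

	// account_new takes no arguments; the call blocks until the user has
	// approved (or denied) the request and entered a passphrase.
	var result struct {
		Address string `json:"address"`
		URL     string `json:"url"`
	}
	if err := client.Call(&result, "account_new"); err != nil {
		fmt.Println("account_new failed:", err)
		return
	}
	fmt.Println("new account:", result.Address, "at", result.URL)
}
```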
|
||||
|
||||
### account_list
|
||||
|
||||
#### List available accounts
|
||||
List all accounts that this signer currently manages
|
||||
|
||||
#### Arguments
|
||||
|
||||
None
|
||||
|
||||
#### Result
|
||||
- array with account records:
|
||||
- account.address [string]: account address that is derived from the generated key
|
||||
- account.type [string]: type of the account
|
||||
- account.url [string]: location of the account
|
||||
|
||||
#### Sample call
|
||||
```json
|
||||
{
|
||||
"id": 1,
|
||||
"jsonrpc": "2.0",
|
||||
"method": "account_list"
|
||||
}
|
||||
|
||||
{
|
||||
"id": 1,
|
||||
"jsonrpc": "2.0",
|
||||
"result": [
|
||||
{
|
||||
"address": "0xafb2f771f58513609765698f65d3f2f0224a956f",
|
||||
"type": "account",
|
||||
"url": "keystore:///tmp/keystore/UTC--2017-08-24T07-26-47.162109726Z--afb2f771f58513609765698f65d3f2f0224a956f"
|
||||
},
|
||||
{
|
||||
"address": "0xbea9183f8f4f03d427f6bcea17388bdff1cab133",
|
||||
"type": "account",
|
||||
"url": "keystore:///tmp/keystore/UTC--2017-08-24T08-40-15.419655028Z--bea9183f8f4f03d427f6bcea17388bdff1cab133"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### account_signTransaction
|
||||
|
||||
#### Sign transactions
|
||||
Signs a transaction and responds with the signed transaction in RLP-encoded form.
|
||||
|
||||
#### Arguments
|
||||
1. transaction object:
|
||||
- `from` [address]: account to send the transaction from
|
||||
- `to` [address]: receiver account. If omitted or `0x`, will cause contract creation.
|
||||
- `gas` [number]: maximum amount of gas to burn
|
||||
- `gasPrice` [number]: gas price
|
||||
- `value` [number:optional]: amount of Wei to send with the transaction
|
||||
- `data` [data:optional]: input data
|
||||
- `nonce` [number]: account nonce
|
||||
2. method signature [string:optional]
|
||||
- The method signature, if present, is to aid decoding the calldata. Should consist of `methodname(paramtype,...)`, e.g. `transfer(uint256,address)`. The signer may use this data to parse the supplied calldata, and show the user. The data, however, is considered totally untrusted, and reliability is not expected.
|
||||
|
||||
|
||||
#### Result
|
||||
- signed transaction in RLP encoded form [data]
|
||||
|
||||
#### Sample call
|
||||
```json
|
||||
{
|
||||
"id": 2,
|
||||
"jsonrpc": "2.0",
|
||||
"method": "account_signTransaction",
|
||||
"params": [
|
||||
{
|
||||
"from": "0x1923f626bb8dc025849e00f99c25fe2b2f7fb0db",
|
||||
"gas": "0x55555",
|
||||
"gasPrice": "0x1234",
|
||||
"input": "0xabcd",
|
||||
"nonce": "0x0",
|
||||
"to": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
|
||||
"value": "0x1234"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
Response
|
||||
|
||||
```json
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 67,
|
||||
"error": {
|
||||
"code": -32000,
|
||||
"message": "Request denied"
|
||||
}
|
||||
}
|
||||
```
|
||||
#### Sample call with ABI-data
|
||||
|
||||
|
||||
```json
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "account_signTransaction",
|
||||
"params": [
|
||||
{
|
||||
"from": "0x694267f14675d7e1b9494fd8d72fefe1755710fa",
|
||||
"gas": "0x333",
|
||||
"gasPrice": "0x1",
|
||||
"nonce": "0x0",
|
||||
"to": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
|
||||
"value": "0x0",
|
||||
"data": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"
|
||||
},
|
||||
"safeSend(address)"
|
||||
],
|
||||
"id": 67
|
||||
}
|
||||
```
|
||||
Response
|
||||
|
||||
```json
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 67,
|
||||
"result": {
|
||||
"raw": "0xf88380018203339407a565b7ed7d7a678680a4c162885bedbb695fe080a44401a6e4000000000000000000000000000000000000000000000000000000000000001226a0223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20ea02aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663",
|
||||
"tx": {
|
||||
"nonce": "0x0",
|
||||
"gasPrice": "0x1",
|
||||
"gas": "0x333",
|
||||
"to": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
|
||||
"value": "0x0",
|
||||
"input": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012",
|
||||
"v": "0x26",
|
||||
"r": "0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e",
|
||||
"s": "0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663",
|
||||
"hash": "0xeba2df809e7a612a0a0d444ccfa5c839624bdc00dd29e3340d46df3870f8a30e"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Bash example:
|
||||
```bash
|
||||
#curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x694267f14675d7e1b9494fd8d72fefe1755710fa","gas":"0x333","gasPrice":"0x1","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x0", "data":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"},"safeSend(address)"],"id":67}' http://localhost:8550/
|
||||
|
||||
{"jsonrpc":"2.0","id":67,"result":{"raw":"0xf88380018203339407a565b7ed7d7a678680a4c162885bedbb695fe080a44401a6e4000000000000000000000000000000000000000000000000000000000000001226a0223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20ea02aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663","tx":{"nonce":"0x0","gasPrice":"0x1","gas":"0x333","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0","value":"0x0","input":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012","v":"0x26","r":"0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e","s":"0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663","hash":"0xeba2df809e7a612a0a0d444ccfa5c839624bdc00dd29e3340d46df3870f8a30e"}}}
|
||||
```
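
For illustration, the returned `raw` blob can be decoded locally. The Go sketch below uses the go-ethereum `hexutil`, `types` and `rlp` packages and the sample value from the response above; it is a sketch, not part of the signer.

```golang
// Decoding the raw RLP transaction returned by account_signTransaction (sketch).
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// The `raw` field from the sample response above.
	raw := "0xf88380018203339407a565b7ed7d7a678680a4c162885bedbb695fe080a44401a6e4000000000000000000000000000000000000000000000000000000000000001226a0223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20ea02aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663"

	blob, err := hexutil.Decode(raw)
	if err != nil {
		fmt.Println("bad hex:", err)
		return
	}
	tx := new(types.Transaction)
	if err := rlp.DecodeBytes(blob, tx); err != nil {
		fmt.Println("bad RLP:", err)
		return
	}
	// These fields should match the `tx` object in the sample response.
	fmt.Println("hash: ", tx.Hash().Hex())
	fmt.Println("to:   ", tx.To().Hex())
	fmt.Println("nonce:", tx.Nonce())
	fmt.Println("gas:  ", tx.Gas())
}
```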
|
||||
|
||||
|
||||
### account_sign
|
||||
|
||||
#### Sign data
|
||||
Signs a chunk of data and returns the calculated signature.
|
||||
|
||||
#### Arguments
|
||||
- account [address]: account to sign with
|
||||
- data [data]: data to sign
|
||||
|
||||
#### Result
|
||||
- calculated signature [data]
|
||||
|
||||
#### Sample call
|
||||
```json
|
||||
{
|
||||
"id": 3,
|
||||
"jsonrpc": "2.0",
|
||||
"method": "account_sign",
|
||||
"params": [
|
||||
"0x1923f626bb8dc025849e00f99c25fe2b2f7fb0db",
|
||||
"0xaabbccdd"
|
||||
]
|
||||
}
|
||||
```
|
||||
Response
|
||||
|
||||
```json
|
||||
{
|
||||
"id": 3,
|
||||
"jsonrpc": "2.0",
|
||||
"result": "0x5b6693f153b48ec1c706ba4169960386dbaa6903e249cc79a8e6ddc434451d417e1e57327872c7f538beeb323c300afa9999a3d4a5de6caf3be0d5ef832b67ef1c"
|
||||
}
|
||||
```
|
||||
|
||||
### account_ecRecover
|
||||
|
||||
#### Recover address
|
||||
Derive the address of the account that was used to sign the data, given the data and the signature.
|
||||
|
||||
#### Arguments
|
||||
- data [data]: data that was signed
|
||||
- signature [data]: the signature to verify
|
||||
|
||||
#### Result
|
||||
- derived account [address]
|
||||
|
||||
#### Sample call
|
||||
```json
|
||||
{
|
||||
"id": 4,
|
||||
"jsonrpc": "2.0",
|
||||
"method": "account_ecRecover",
|
||||
"params": [
|
||||
"0xaabbccdd",
|
||||
"0x5b6693f153b48ec1c706ba4169960386dbaa6903e249cc79a8e6ddc434451d417e1e57327872c7f538beeb323c300afa9999a3d4a5de6caf3be0d5ef832b67ef1c"
|
||||
]
|
||||
}
|
||||
```
|
||||
Response
|
||||
|
||||
```json
|
||||
{
|
||||
"id": 4,
|
||||
"jsonrpc": "2.0",
|
||||
"result": "0x1923f626bb8dc025849e00f99c25fe2b2f7fb0db"
|
||||
}
|
||||
|
||||
```
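
For illustration, the recovery can also be reproduced locally with the go-ethereum `crypto` package. The sketch assumes the signature was made over the standard `\x19Ethereum Signed Message:\n<length>` prefix and that the recovery id is encoded as 27/28, which the trailing `1c` in the sample suggests.

```golang
// Reproducing account_ecRecover locally (sketch, assumptions noted above).
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	data := hexutil.MustDecode("0xaabbccdd")
	sig := hexutil.MustDecode("0x5b6693f153b48ec1c706ba4169960386dbaa6903e249cc79a8e6ddc434451d417e1e57327872c7f538beeb323c300afa9999a3d4a5de6caf3be0d5ef832b67ef1c")

	// Hash the prefixed message, then normalise V from 27/28 to 0/1 for recovery.
	msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
	hash := crypto.Keccak256([]byte(msg))
	sig[64] -= 27

	pub, err := crypto.SigToPub(hash, sig)
	if err != nil {
		fmt.Println("recover failed:", err)
		return
	}
	fmt.Println("signer:", crypto.PubkeyToAddress(*pub).Hex())
	// Expected (per the sample above): 0x1923f626bb8dc025849e00f99c25fe2b2f7fb0db
}
```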
|
||||
|
||||
### account_import
|
||||
|
||||
#### Import account
|
||||
Import a private key into the keystore. The imported key is expected to be encrypted according to the web3 keystore
|
||||
format.
|
||||
|
||||
#### Arguments
|
||||
- account [object]: key in [web3 keystore format](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) (retrieved with account_export)
|
||||
|
||||
#### Result
|
||||
- imported key [object]:
|
||||
- key.address [address]: address of the imported key
|
||||
- key.type [string]: type of the account
|
||||
- key.url [string]: key URL
|
||||
|
||||
#### Sample call
|
||||
```json
|
||||
{
|
||||
"id": 6,
|
||||
"jsonrpc": "2.0",
|
||||
"method": "account_import",
|
||||
"params": [
|
||||
{
|
||||
"address": "c7412fc59930fd90099c917a50e5f11d0934b2f5",
|
||||
"crypto": {
|
||||
"cipher": "aes-128-ctr",
|
||||
"cipherparams": {
|
||||
"iv": "401c39a7c7af0388491c3d3ecb39f532"
|
||||
},
|
||||
"ciphertext": "eb045260b18dd35cd0e6d99ead52f8fa1e63a6b0af2d52a8de198e59ad783204",
|
||||
"kdf": "scrypt",
|
||||
"kdfparams": {
|
||||
"dklen": 32,
|
||||
"n": 262144,
|
||||
"p": 1,
|
||||
"r": 8,
|
||||
"salt": "9a657e3618527c9b5580ded60c12092e5038922667b7b76b906496f021bb841a"
|
||||
},
|
||||
"mac": "880dc10bc06e9cec78eb9830aeb1e7a4a26b4c2c19615c94acb632992b952806"
|
||||
},
|
||||
"id": "09bccb61-b8d3-4e93-bf4f-205a8194f0b9",
|
||||
"version": 3
|
||||
},
|
||||
]
|
||||
}
|
||||
```
|
||||
Response
|
||||
|
||||
```json
|
||||
{
|
||||
"id": 6,
|
||||
"jsonrpc": "2.0",
|
||||
"result": {
|
||||
"address": "0xc7412fc59930fd90099c917a50e5f11d0934b2f5",
|
||||
"type": "account",
|
||||
"url": "keystore:///tmp/keystore/UTC--2017-08-24T11-00-42.032024108Z--c7412fc59930fd90099c917a50e5f11d0934b2f5"
|
||||
}
|
||||
}
|
||||
```
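
For illustration, such a web3 keystore blob can also be decrypted locally with the go-ethereum `accounts/keystore` package; the file path and passphrase below are hypothetical.

```golang
// Decrypting a web3 (v3) keystore file locally (sketch, hypothetical inputs).
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/ethereum/go-ethereum/accounts/keystore"
)

func main() {
	// Hypothetical path; the file content is a blob like the "params" object above.
	keyjson, err := ioutil.ReadFile("keyfile.json")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	key, err := keystore.DecryptKey(keyjson, "the original passphrase")
	if err != nil {
		fmt.Println("decrypt failed:", err)
		return
	}
	fmt.Println("address:", key.Address.Hex())
}
```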
|
||||
|
||||
### account_export
|
||||
|
||||
#### Export account from keystore
|
||||
Export a private key from the keystore. The exported private key is encrypted with the original passphrase. When the
|
||||
key is imported later this passphrase is required.
|
||||
|
||||
#### Arguments
|
||||
- account [address]: export the private key that is associated with this account
|
||||
|
||||
#### Result
|
||||
- exported key, see [web3 keystore format](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) for
|
||||
more information
|
||||
|
||||
#### Sample call
|
||||
```json
|
||||
{
|
||||
"id": 5,
|
||||
"jsonrpc": "2.0",
|
||||
"method": "account_export",
|
||||
"params": [
|
||||
"0xc7412fc59930fd90099c917a50e5f11d0934b2f5"
|
||||
]
|
||||
}
|
||||
```
|
||||
Response
|
||||
|
||||
```json
|
||||
{
|
||||
"id": 5,
|
||||
"jsonrpc": "2.0",
|
||||
"result": {
|
||||
"address": "c7412fc59930fd90099c917a50e5f11d0934b2f5",
|
||||
"crypto": {
|
||||
"cipher": "aes-128-ctr",
|
||||
"cipherparams": {
|
||||
"iv": "401c39a7c7af0388491c3d3ecb39f532"
|
||||
},
|
||||
"ciphertext": "eb045260b18dd35cd0e6d99ead52f8fa1e63a6b0af2d52a8de198e59ad783204",
|
||||
"kdf": "scrypt",
|
||||
"kdfparams": {
|
||||
"dklen": 32,
|
||||
"n": 262144,
|
||||
"p": 1,
|
||||
"r": 8,
|
||||
"salt": "9a657e3618527c9b5580ded60c12092e5038922667b7b76b906496f021bb841a"
|
||||
},
|
||||
"mac": "880dc10bc06e9cec78eb9830aeb1e7a4a26b4c2c19615c94acb632992b952806"
|
||||
},
|
||||
"id": "09bccb61-b8d3-4e93-bf4f-205a8194f0b9",
|
||||
"version": 3
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
||||
## UI API
|
||||
|
||||
These methods need to be implemented by a UI listener.
|
||||
|
||||
When the signer is started with the switch `--stdio-ui-test`, it will invoke all known methods and expect the UI to respond with
denials. This can be used during development to ensure that the API is (at least somewhat) correctly implemented.
|
||||
See `pythonsigner`, which can be invoked via `python3 pythonsigner.py test` to perform the 'denial-handshake-test'.
|
||||
|
||||
All methods in this API use object-based parameters, so that there can be no mix-ups of parameters: each piece of data is accessed by key.
|
||||
|
||||
See the [ui api changelog](intapi_changelog.md) for information about changes to this API.
|
||||
|
||||
Note: a slight deviation from the `json` standard is in place: every request and response should be confined to a single line.
Whereas the `json` specification allows for linebreaks, linebreaks __should not__ be used in this communication channel, to make
things simpler for both parties.
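
For illustration, here is a minimal Go stub of such an external UI: it reads one request per line from `stdin` and denies every `Approve*` call, in the spirit of the denial-handshake-test. The error shape and code are illustrative only, not taken from the signer source.

```golang
// Minimal stdio UI stub that denies every Approve* request (sketch).
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
	"strings"
)

type request struct {
	ID     json.RawMessage `json:"id"`
	Method string          `json:"method"`
}

func main() {
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // requests can be long

	for scanner.Scan() {
		var req request
		if err := json.Unmarshal(scanner.Bytes(), &req); err != nil {
			continue // not a well-formed single-line request
		}
		// ShowInfo, ShowError etc. do not expect a response.
		if !strings.HasPrefix(req.Method, "Approve") {
			continue
		}
		// One response per line, no linebreaks inside the payload.
		fmt.Printf(`{"jsonrpc":"2.0","id":%s,"error":{"code":-32000,"message":"denied by UI stub"}}`+"\n", req.ID)
	}
}
```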
|
||||
|
||||
### ApproveTx
|
||||
|
||||
Invoked when there's a transaction for approval.
|
||||
|
||||
|
||||
#### Sample call
|
||||
|
||||
Here's a method invocation:
|
||||
```bash
|
||||
|
||||
curl -i -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x694267f14675d7e1b9494fd8d72fefe1755710fa","gas":"0x333","gasPrice":"0x1","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x0", "data":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"},"safeSend(address)"],"id":67}' http://localhost:8550/
|
||||
```
|
||||
|
||||
```json
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"method": "ApproveTx",
|
||||
"params": [
|
||||
{
|
||||
"transaction": {
|
||||
"from": "0x0x694267f14675d7e1b9494fd8d72fefe1755710fa",
|
||||
"to": "0x0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
|
||||
"gas": "0x333",
|
||||
"gasPrice": "0x1",
|
||||
"value": "0x0",
|
||||
"nonce": "0x0",
|
||||
"data": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012",
|
||||
"input": null
|
||||
},
|
||||
"call_info": [
|
||||
{
|
||||
"type": "WARNING",
|
||||
"message": "Invalid checksum on to-address"
|
||||
},
|
||||
{
|
||||
"type": "Info",
|
||||
"message": "safeSend(address: 0x0000000000000000000000000000000000000012)"
|
||||
}
|
||||
],
|
||||
"meta": {
|
||||
"remote": "127.0.0.1:48486",
|
||||
"local": "localhost:8550",
|
||||
"scheme": "HTTP/1.1"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
The same method invocation, but with invalid data:
|
||||
```bash
|
||||
|
||||
curl -i -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x694267f14675d7e1b9494fd8d72fefe1755710fa","gas":"0x333","gasPrice":"0x1","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x0", "data":"0x4401a6e40000000000000002000000000000000000000000000000000000000000000012"},"safeSend(address)"],"id":67}' http://localhost:8550/
|
||||
```
|
||||
|
||||
```json
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"method": "ApproveTx",
|
||||
"params": [
|
||||
{
|
||||
"transaction": {
|
||||
"from": "0x0x694267f14675d7e1b9494fd8d72fefe1755710fa",
|
||||
"to": "0x0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
|
||||
"gas": "0x333",
|
||||
"gasPrice": "0x1",
|
||||
"value": "0x0",
|
||||
"nonce": "0x0",
|
||||
"data": "0x4401a6e40000000000000002000000000000000000000000000000000000000000000012",
|
||||
"input": null
|
||||
},
|
||||
"call_info": [
|
||||
{
|
||||
"type": "WARNING",
|
||||
"message": "Invalid checksum on to-address"
|
||||
},
|
||||
{
|
||||
"type": "WARNING",
|
||||
"message": "Transaction data did not match ABI-interface: WARNING: Supplied data is stuffed with extra data. \nWant 0000000000000002000000000000000000000000000000000000000000000012\nHave 0000000000000000000000000000000000000000000000000000000000000012\nfor method safeSend(address)"
|
||||
}
|
||||
],
|
||||
"meta": {
|
||||
"remote": "127.0.0.1:48492",
|
||||
"local": "localhost:8550",
|
||||
"scheme": "HTTP/1.1"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
```
|
||||
|
||||
One with a missing `to`, but with no `data`:
|
||||
|
||||
|
||||
```json
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 3,
|
||||
"method": "ApproveTx",
|
||||
"params": [
|
||||
{
|
||||
"transaction": {
|
||||
"from": "",
|
||||
"to": null,
|
||||
"gas": "0x0",
|
||||
"gasPrice": "0x0",
|
||||
"value": "0x0",
|
||||
"nonce": "0x0",
|
||||
"data": null,
|
||||
"input": null
|
||||
},
|
||||
"call_info": [
|
||||
{
|
||||
"type": "CRITICAL",
|
||||
"message": "Tx will create contract with empty code!"
|
||||
}
|
||||
],
|
||||
"meta": {
|
||||
"remote": "signer binary",
|
||||
"local": "main",
|
||||
"scheme": "in-proc"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
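
For illustration, the Go sketch below defines types matching the `ApproveTx` payloads shown above and decodes a trimmed version of the last sample; the field names are inferred from the samples, not taken from the signer source.

```golang
// Go types inferred from the ApproveTx samples above (sketch).
package main

import (
	"encoding/json"
	"fmt"
)

type approveTxParam struct {
	Transaction map[string]interface{} `json:"transaction"`
	CallInfo    []struct {
		Type    string `json:"type"`
		Message string `json:"message"`
	} `json:"call_info"`
	Meta struct {
		Remote string `json:"remote"`
		Local  string `json:"local"`
		Scheme string `json:"scheme"`
	} `json:"meta"`
}

func main() {
	// Trimmed version of the sample above (missing `to`, no `data`).
	blob := []byte(`{"transaction":{"from":"","to":null,"gas":"0x0","gasPrice":"0x0","value":"0x0","nonce":"0x0","data":null,"input":null},"call_info":[{"type":"CRITICAL","message":"Tx will create contract with empty code!"}],"meta":{"remote":"signer binary","local":"main","scheme":"in-proc"}}`)

	var p approveTxParam
	if err := json.Unmarshal(blob, &p); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	for _, ci := range p.CallInfo {
		fmt.Printf("%s: %s\n", ci.Type, ci.Message)
	}
}
```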
|
||||
|
||||
### ApproveExport
|
||||
|
||||
Invoked when a request to export an account has been made.
|
||||
|
||||
#### Sample call
|
||||
|
||||
```json
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 7,
|
||||
"method": "ApproveExport",
|
||||
"params": [
|
||||
{
|
||||
"address": "0x0000000000000000000000000000000000000000",
|
||||
"meta": {
|
||||
"remote": "signer binary",
|
||||
"local": "main",
|
||||
"scheme": "in-proc"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### ApproveListing
|
||||
|
||||
Invoked when a request for account listing has been made.
|
||||
|
||||
#### Sample call
|
||||
|
||||
```json
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 5,
|
||||
"method": "ApproveListing",
|
||||
"params": [
|
||||
{
|
||||
"accounts": [
|
||||
{
|
||||
"type": "Account",
|
||||
"url": "keystore:///home/bazonk/.ethereum/keystore/UTC--2017-11-20T14-44-54.089682944Z--123409812340981234098123409812deadbeef42",
|
||||
"address": "0x123409812340981234098123409812deadbeef42"
|
||||
},
|
||||
{
|
||||
"type": "Account",
|
||||
"url": "keystore:///home/bazonk/.ethereum/keystore/UTC--2017-11-23T21-59-03.199240693Z--cafebabedeadbeef34098123409812deadbeef42",
|
||||
"address": "0xcafebabedeadbeef34098123409812deadbeef42"
|
||||
}
|
||||
],
|
||||
"meta": {
|
||||
"remote": "signer binary",
|
||||
"local": "main",
|
||||
"scheme": "in-proc"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
|
||||
### ApproveSignData
|
||||
|
||||
#### Sample call
|
||||
|
||||
```json
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 4,
|
||||
"method": "ApproveSignData",
|
||||
"params": [
|
||||
{
|
||||
"address": "0x123409812340981234098123409812deadbeef42",
|
||||
"raw_data": "0x01020304",
|
||||
"message": "\u0019Ethereum Signed Message:\n4\u0001\u0002\u0003\u0004",
|
||||
"hash": "0x7e3a4e7a9d1744bc5c675c25e1234ca8ed9162bd17f78b9085e48047c15ac310",
|
||||
"meta": {
|
||||
"remote": "signer binary",
|
||||
"local": "main",
|
||||
"scheme": "in-proc"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### ShowInfo
|
||||
|
||||
The UI should show the info to the user. No response is expected.
|
||||
|
||||
#### Sample call
|
||||
|
||||
```json
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 9,
|
||||
"method": "ShowInfo",
|
||||
"params": [
|
||||
{
|
||||
"text": "Tests completed"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### ShowError
|
||||
|
||||
The UI should show the error message to the user. No response is expected.
|
||||
|
||||
```json
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 2,
|
||||
"method": "ShowError",
|
||||
"params": [
|
||||
{
|
||||
"text": "Testing 'ShowError'"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### OnApproved
|
||||
|
||||
`OnApprovedTx` is called when a transaction has been approved and signed. The call contains the return value that will be sent to the external caller. The return value from this method is ignored - the reason for having this callback is to allow the ruleset to keep track of approved transactions.
|
||||
|
||||
When implementing rate-limited rules, this callback should be used.
|
||||
|
||||
TLDR; Use this method to keep track of signed transactions, instead of using the data in `ApproveTx`.
|
||||
|
||||
### OnSignerStartup
|
||||
|
||||
This method provides the UI with information about which API versions the signer uses (both internal and external) as well as build info and the external API endpoints,
in key/value form.
|
||||
|
||||
Example call:
|
||||
```json
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"method": "OnSignerStartup",
|
||||
"params": [
|
||||
{
|
||||
"info": {
|
||||
"extapi_http": "http://localhost:8550",
|
||||
"extapi_ipc": null,
|
||||
"extapi_version": "2.0.0",
|
||||
"intapi_version": "1.2.0"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
|
||||
### Rules for UI apis
|
||||
|
||||
A UI should conform to the following rules.
|
||||
|
||||
* A UI MUST NOT load any external resources that were not embedded/part of the UI package.
|
||||
* For example, not load icons, stylesheets from the internet
|
||||
* Not load files from the filesystem, unless they reside in the same local directory (e.g. config files)
|
||||
* A Graphical UI MUST show the blocky-identicon for ethereum addresses.
|
||||
* A UI MUST display an appropriate warning if the destination account is formatted with an invalid checksum.
|
||||
* A UI MUST NOT open any ports or services
|
||||
* The signer opens the public port
|
||||
* A UI SHOULD verify the permissions on the signer binary, and refuse to execute or warn if permissions allow non-user write.
|
||||
* A UI SHOULD inform the user about the `SHA256` or `MD5` hash of the binary being executed
|
||||
* A UI SHOULD NOT maintain a secondary storage of data, e.g. list of accounts
|
||||
* The signer provides accounts
|
||||
* A UI SHOULD, to the best extent possible, use static linking / bundling, so that required libraries are bundled
|
||||
along with the UI.
|
||||
|
||||
|
||||
### UI Implementations
|
||||
|
||||
There are a couple of implementations of a UI. We'll try to keep this list up to date.
|
||||
|
||||
| Name | Repo | UI type| No external resources| Blocky support| Verifies permissions | Hash information | No secondary storage | Statically linked| Can modify parameters|
|
||||
| ---- | ---- | -------| ---- | ---- | ---- |---- | ---- | ---- | ---- |
|
||||
| QtSigner| https://github.com/holiman/qtsigner/| Python3/QT-based| :+1:| :+1:| :+1:| :+1:| :+1:| :x: | :+1: (partially)|
|
||||
| GtkSigner| https://github.com/holiman/gtksigner| Python3/GTK-based| :+1:| :x:| :x:| :+1:| :+1:| :x: | :x: |
|
||||
| Frame | https://github.com/floating/frame/commits/go-signer| Electron-based| :x:| :x:| :x:| :x:| ?| :x: | :x: |
|
||||
| Clef UI| https://github.com/kyokan/clef-ui| Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: | :+1: (approve tx only)|
|
BIN
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/qubes/clef_qubes_http.png
generated
vendored
Normal file
BIN
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/qubes/clef_qubes_http.png
generated
vendored
Normal file
Binary file not shown.
After Width: | Height: | Size: 12 KiB |
BIN
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/qubes/clef_qubes_qrexec.png
generated
vendored
Normal file
BIN
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/qubes/clef_qubes_qrexec.png
generated
vendored
Normal file
Binary file not shown.
After Width: | Height: | Size: 17 KiB |
BIN
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/qubes/qrexec-example.png
generated
vendored
Normal file
BIN
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/qubes/qrexec-example.png
generated
vendored
Normal file
Binary file not shown.
After Width: | Height: | Size: 16 KiB |
23
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/qubes/qubes-client.py
generated
vendored
Normal file
23
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/qubes/qubes-client.py
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
"""
|
||||
This implements a dispatcher which listens to localhost:8550, and proxies
|
||||
requests via qrexec to the service qubes.EthSign on a target domain
|
||||
"""
|
||||
|
||||
import http.server
|
||||
import socketserver,subprocess
|
||||
|
||||
PORT=8550
|
||||
TARGET_DOMAIN= 'debian-work'
|
||||
|
||||
class Dispatcher(http.server.BaseHTTPRequestHandler):
|
||||
def do_POST(self):
|
||||
post_data = self.rfile.read(int(self.headers['Content-Length']))
|
||||
p = subprocess.Popen(['/usr/bin/qrexec-client-vm',TARGET_DOMAIN,'qubes.Clefsign'],stdin=subprocess.PIPE, stdout=subprocess.PIPE)
|
||||
output = p.communicate(post_data)[0]
|
||||
self.wfile.write(output)
|
||||
|
||||
|
||||
with socketserver.TCPServer(("",PORT), Dispatcher) as httpd:
|
||||
print("Serving at port", PORT)
|
||||
httpd.serve_forever()
|
||||
|
16
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/qubes/qubes.Clefsign
generated
vendored
Normal file
16
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/qubes/qubes.Clefsign
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
#!/bin/bash
|
||||
|
||||
SIGNER_BIN="/home/user/tools/clef/clef"
|
||||
SIGNER_CMD="/home/user/tools/gtksigner/gtkui.py -s $SIGNER_BIN"
|
||||
|
||||
# Start clef if not already started
|
||||
if [ ! -S /home/user/.clef/clef.ipc ]; then
|
||||
$SIGNER_CMD &
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
# Should be started by now
|
||||
if [ -S /home/user/.clef/clef.ipc ]; then
|
||||
# Post incoming request to HTTP channel
|
||||
curl -H "Content-Type: application/json" -X POST -d @- http://localhost:8550 2>/dev/null
|
||||
fi
|
BIN
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/qubes/qubes_newaccount-1.png
generated
vendored
Normal file
BIN
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/qubes/qubes_newaccount-1.png
generated
vendored
Normal file
Binary file not shown.
After Width: | Height: | Size: 22 KiB |
BIN
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/qubes/qubes_newaccount-2.png
generated
vendored
Normal file
BIN
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/qubes/qubes_newaccount-2.png
generated
vendored
Normal file
Binary file not shown.
After Width: | Height: | Size: 36 KiB |
198
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/setup.md
generated
vendored
Normal file
198
vendor/github.com/ethereum/go-ethereum/cmd/clef/docs/setup.md
generated
vendored
Normal file
@ -0,0 +1,198 @@
|
||||
# Setting up Clef
|
||||
|
||||
This document describes how Clef can be used in a more secure manner than executing it from your everyday laptop,
|
||||
in order to ensure that the keys remain safe in the event that your computer should get compromised.
|
||||
|
||||
## Qubes OS
|
||||
|
||||
|
||||
### Background
|
||||
|
||||
The Qubes operating system is based around virtual machines (qubes), where a set of virtual machines are configured, typically for
|
||||
different purposes such as:
|
||||
|
||||
- personal
|
||||
- Your personal email, browsing etc
|
||||
- work
|
||||
- Work email etc
|
||||
- vault
|
||||
- a VM without network access, where gpg-keys and/or keepass credentials are stored.
|
||||
|
||||
A couple of dedicated virtual machines handle externalities:
|
||||
|
||||
- sys-net provides networking to all other (network-enabled) machines
|
||||
- sys-firewall handles firewall rules
|
||||
- sys-usb handles USB devices, and can map usb-devices to certain qubes.
|
||||
|
||||
The goal of this document is to describe how we can set up clef to provide secure transaction
|
||||
signing from a `vault` vm, to another networked qube which runs Dapps.
|
||||
|
||||
### Setup
|
||||
|
||||
There are two ways that this can be achieved: integrated via Qubes or integrated via networking.
|
||||
|
||||
|
||||
#### 1. Qubes Integrated
|
||||
|
||||
Qubes provides a facility for inter-qube communication via `qrexec`. A qube can request to make a cross-qube RPC request
to another qube. The OS then asks the user whether the call is permitted.
|
||||
|
||||

|
||||
|
||||
A policy-file can be created to allow such interaction. On the `target` domain, a service is invoked which can read the
|
||||
`stdin` from the `client` qube.
|
||||
|
||||
This is how [Split GPG](https://www.qubes-os.org/doc/split-gpg/) is implemented. We can set up Clef the same way:
|
||||
|
||||
##### Server
|
||||
|
||||

|
||||
|
||||
On the `target` qube, we need to define the RPC service.
|
||||
|
||||
[qubes.Clefsign](qubes/qubes.Clefsign):
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
|
||||
SIGNER_BIN="/home/user/tools/clef/clef"
|
||||
SIGNER_CMD="/home/user/tools/gtksigner/gtkui.py -s $SIGNER_BIN"
|
||||
|
||||
# Start clef if not already started
|
||||
if [ ! -S /home/user/.clef/clef.ipc ]; then
|
||||
$SIGNER_CMD &
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
# Should be started by now
|
||||
if [ -S /home/user/.clef/clef.ipc ]; then
|
||||
# Post incoming request to HTTP channel
|
||||
curl -H "Content-Type: application/json" -X POST -d @- http://localhost:8550 2>/dev/null
|
||||
fi
|
||||
|
||||
```
|
||||
This RPC service is not complete (see notes about HTTP headers below), but works as a proof-of-concept.
|
||||
It will forward the data received on `stdin` (forwarded by the OS) to Clef's HTTP channel.
|
||||
|
||||
It would have been possible to send data directly to the `/home/user/.clef/.clef.ipc`
|
||||
socket via e.g `nc -U /home/user/.clef/clef.ipc`, but the reason for sending the request
|
||||
data over `HTTP` instead of `IPC` is that we want the ability to forward `HTTP` headers.
|
||||
|
||||
To enable the service:
|
||||
|
||||
``` bash
|
||||
sudo cp qubes.Clefsign /etc/qubes-rpc/
|
||||
sudo chmod +x /etc/qubes-rpc/qubes.Clefsign
|
||||
```
|
||||
|
||||
This setup uses [gtksigner](https://github.com/holiman/gtksigner), which is a very minimal GTK-based UI that works well
|
||||
with minimal requirements.
|
||||
|
||||
##### Client
|
||||
|
||||
|
||||
On the `client` qube, we need to create a listener which will receive the request from the Dapp, and proxy it.
|
||||
|
||||
|
||||
[qubes-client.py](qubes/client/qubes-client.py):
|
||||
|
||||
```python
|
||||
|
||||
"""
|
||||
This implements a dispatcher which listens to localhost:8550, and proxies
|
||||
requests via qrexec to the service qubes.EthSign on a target domain
|
||||
"""
|
||||
|
||||
import http.server
|
||||
import socketserver,subprocess
|
||||
|
||||
PORT=8550
|
||||
TARGET_DOMAIN= 'debian-work'
|
||||
|
||||
class Dispatcher(http.server.BaseHTTPRequestHandler):
|
||||
def do_POST(self):
|
||||
post_data = self.rfile.read(int(self.headers['Content-Length']))
|
||||
p = subprocess.Popen(['/usr/bin/qrexec-client-vm',TARGET_DOMAIN,'qubes.Clefsign'],stdin=subprocess.PIPE, stdout=subprocess.PIPE)
|
||||
output = p.communicate(post_data)[0]
|
||||
self.wfile.write(output)
|
||||
|
||||
|
||||
with socketserver.TCPServer(("",PORT), Dispatcher) as httpd:
|
||||
print("Serving at port", PORT)
|
||||
httpd.serve_forever()
|
||||
|
||||
|
||||
```
|
||||
|
||||
#### Testing
|
||||
|
||||
To test the flow, if we have set up `debian-work` as the `target`, we can do
|
||||
|
||||
```bash
|
||||
$ cat newaccnt.json
|
||||
{ "id": 0, "jsonrpc": "2.0","method": "account_new","params": []}
|
||||
|
||||
$ cat newaccnt.json| qrexec-client-vm debian-work qubes.Clefsign
|
||||
```
|
||||
|
||||
This should pop up first a dialog to allow the IPC call:
|
||||
|
||||

|
||||
|
||||
Followed by a GTK-dialog to approve the operation
|
||||
|
||||

|
||||
|
||||
To test the full flow, we use the client wrapper. Start it on the `client` qube:
|
||||
```
|
||||
[user@work qubes]$ python3 qubes-client.py
|
||||
```
|
||||
|
||||
Make the request over http (`client` qube):
|
||||
```
|
||||
[user@work clef]$ cat newaccnt.json | curl -X POST -d @- http://localhost:8550
|
||||
```
|
||||
And it should show the same popups again.
|
||||
|
||||
##### Pros and cons
|
||||
|
||||
The benefits of this setup are:
|
||||
|
||||
- This is the qubes-os intended model for inter-qube communication,
|
||||
- and thus benefits from qubes-os dialogs and policies for user approval
|
||||
|
||||
However, it comes with a couple of drawbacks:
|
||||
|
||||
- The `qubes-gpg-client` must forward the http request via RPC to the `target` qube. When doing so, the proxy
|
||||
will either drop important headers, or replace them.
|
||||
- The `Host` header is most likely `localhost`
|
||||
- The `Origin` header must be forwarded
|
||||
- Information about the remote ip must be added as a `X-Forwarded-For`. However, Clef cannot always trust an `XFF` header,
|
||||
since malicious clients may lie about `XFF` in order to fool the http server into believing it comes from another address.
|
||||
- Even with a policy in place to allow rpc-calls between `caller` and `target`, there will be several popups:
|
||||
- One qubes-specific where the user specifies the `target` vm
|
||||
- One clef-specific to approve the transaction
|
||||
|
||||
|
||||
#### 2. Network integrated
|
||||
|
||||
The second way to set up Clef on a Qubes system is to allow networking, and have Clef listen on a port which is accessible
from other qubes.
|
||||
|
||||

|
||||
|
||||
|
||||
|
||||
|
||||
## USBArmory
|
||||
|
||||
The [USB armory](https://inversepath.com/usbarmory) is an open source hardware design with an 800 MHz ARM processor. It is a pocket-size
|
||||
computer. When inserted into a laptop, it identifies itself as a USB network interface, basically adding another network
|
||||
to your computer. Over this new network interface, you can SSH into the device.
|
||||
|
||||
Running Clef off a USB armory means that you can use the armory as a very versatile offline computer, which only
|
||||
ever connects to a local network between your computer and the device itself.
|
||||
|
||||
Needless to say, while this model should be fairly secure against remote attacks, an attacker with physical access
to the USB Armory would trivially be able to extract the contents of the device filesystem.
|
||||
|
32
vendor/github.com/ethereum/go-ethereum/cmd/clef/extapi_changelog.md
generated
vendored
Normal file
32
vendor/github.com/ethereum/go-ethereum/cmd/clef/extapi_changelog.md
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
### Changelog for external API
|
||||
|
||||
#### 4.0.0
|
||||
|
||||
* The external `account_Ecrecover`-method was removed.
|
||||
* The external `account_Import`-method was removed.
|
||||
|
||||
#### 3.0.0
|
||||
|
||||
* The external `account_List`-method was changed to not expose `url`, which contained info about the local filesystem. It now returns only a list of addresses.
|
||||
|
||||
#### 2.0.0
|
||||
|
||||
* Commit `73abaf04b1372fa4c43201fb1b8019fe6b0a6f8d`, move `from` into `transaction` object in `signTransaction`. This
|
||||
makes the `accounts_signTransaction` identical to the old `eth_signTransaction`.
|
||||
|
||||
|
||||
#### 1.0.0
|
||||
|
||||
Initial release.
|
||||
|
||||
### Versioning
|
||||
|
||||
The API uses [semantic versioning](https://semver.org/).
|
||||
|
||||
TLDR; Given a version number MAJOR.MINOR.PATCH, increment the:
|
||||
|
||||
* MAJOR version when you make incompatible API changes,
|
||||
* MINOR version when you add functionality in a backwards-compatible manner, and
|
||||
* PATCH version when you make backwards-compatible bug fixes.
|
||||
|
||||
Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format.
|
105
vendor/github.com/ethereum/go-ethereum/cmd/clef/intapi_changelog.md
generated
vendored
Normal file
105
vendor/github.com/ethereum/go-ethereum/cmd/clef/intapi_changelog.md
generated
vendored
Normal file
@ -0,0 +1,105 @@
|
||||
### Changelog for internal API (ui-api)
|
||||
|
||||
### 3.0.0
|
||||
|
||||
* Make use of `OnInputRequired(info UserInputRequest)` for obtaining master password during startup
|
||||
|
||||
### 2.1.0
|
||||
|
||||
* Add `OnInputRequired(info UserInputRequest)` to internal API. This method is used when Clef needs user input, e.g. passwords.
|
||||
|
||||
The following structures are used:
|
||||
```golang
|
||||
UserInputRequest struct {
|
||||
Prompt string `json:"prompt"`
|
||||
Title string `json:"title"`
|
||||
IsPassword bool `json:"isPassword"`
|
||||
}
|
||||
UserInputResponse struct {
|
||||
Text string `json:"text"`
|
||||
}
```
|
||||
|
||||
### 2.0.0
|
||||
|
||||
* Modify how `call_info` on a transaction is conveyed. New format:
|
||||
|
||||
```
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 2,
|
||||
"method": "ApproveTx",
|
||||
"params": [
|
||||
{
|
||||
"transaction": {
|
||||
"from": "0x82A2A876D39022B3019932D30Cd9c97ad5616813",
|
||||
"to": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
|
||||
"gas": "0x333",
|
||||
"gasPrice": "0x123",
|
||||
"value": "0x10",
|
||||
"nonce": "0x0",
|
||||
"data": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012",
|
||||
"input": null
|
||||
},
|
||||
"call_info": [
|
||||
{
|
||||
"type": "WARNING",
|
||||
"message": "Invalid checksum on to-address"
|
||||
},
|
||||
{
|
||||
"type": "WARNING",
|
||||
"message": "Tx contains data, but provided ABI signature could not be matched: Did not match: test (0 matches)"
|
||||
}
|
||||
],
|
||||
"meta": {
|
||||
"remote": "127.0.0.1:54286",
|
||||
"local": "localhost:8550",
|
||||
"scheme": "HTTP/1.1"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### 1.2.0
|
||||
|
||||
* Add `OnStartup` method, to provide the UI with information about which API versions
the signer uses (both internal and external) as well as build info and the external API.
|
||||
|
||||
Example call:
|
||||
```json
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"method": "OnSignerStartup",
|
||||
"params": [
|
||||
{
|
||||
"info": {
|
||||
"extapi_http": "http://localhost:8550",
|
||||
"extapi_ipc": null,
|
||||
"extapi_version": "2.0.0",
|
||||
"intapi_version": "1.2.0"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### 1.1.0
|
||||
|
||||
* Add `OnApproved` method
|
||||
|
||||
#### 1.0.0
|
||||
|
||||
Initial release.
|
||||
|
||||
### Versioning
|
||||
|
||||
The API uses [semantic versioning](https://semver.org/).
|
||||
|
||||
TLDR; Given a version number MAJOR.MINOR.PATCH, increment the:
|
||||
|
||||
* MAJOR version when you make incompatible API changes,
|
||||
* MINOR version when you add functionality in a backwards-compatible manner, and
|
||||
* PATCH version when you make backwards-compatible bug fixes.
|
||||
|
||||
Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format.
|
735
vendor/github.com/ethereum/go-ethereum/cmd/clef/main.go
generated
vendored
Normal file
735
vendor/github.com/ethereum/go-ethereum/cmd/clef/main.go
generated
vendored
Normal file
@ -0,0 +1,735 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// signer is a utility that can be used so sign transactions and
|
||||
// arbitrary data.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/signal"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/console"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/signer/core"
|
||||
"github.com/ethereum/go-ethereum/signer/rules"
|
||||
"github.com/ethereum/go-ethereum/signer/storage"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
// ExternalAPIVersion -- see extapi_changelog.md
|
||||
const ExternalAPIVersion = "4.0.0"
|
||||
|
||||
// InternalAPIVersion -- see intapi_changelog.md
|
||||
const InternalAPIVersion = "3.0.0"
|
||||
|
||||
const legalWarning = `
|
||||
WARNING!
|
||||
|
||||
Clef is alpha software, and not yet publically released. This software has _not_ been audited, and there
|
||||
are no guarantees about the workings of this software. It may contain severe flaws. You should not use this software
|
||||
unless you agree to take full responsibility for doing so, and know what you are doing.
|
||||
|
||||
TLDR; THIS IS NOT PRODUCTION-READY SOFTWARE!
|
||||
|
||||
`
|
||||
|
||||
var (
|
||||
logLevelFlag = cli.IntFlag{
|
||||
Name: "loglevel",
|
||||
Value: 4,
|
||||
Usage: "log level to emit to the screen",
|
||||
}
|
||||
advancedMode = cli.BoolFlag{
|
||||
Name: "advanced",
|
||||
Usage: "If enabled, issues warnings instead of rejections for suspicious requests. Default off",
|
||||
}
|
||||
keystoreFlag = cli.StringFlag{
|
||||
Name: "keystore",
|
||||
Value: filepath.Join(node.DefaultDataDir(), "keystore"),
|
||||
Usage: "Directory for the keystore",
|
||||
}
|
||||
configdirFlag = cli.StringFlag{
|
||||
Name: "configdir",
|
||||
Value: DefaultConfigDir(),
|
||||
Usage: "Directory for Clef configuration",
|
||||
}
|
||||
rpcPortFlag = cli.IntFlag{
|
||||
Name: "rpcport",
|
||||
Usage: "HTTP-RPC server listening port",
|
||||
Value: node.DefaultHTTPPort + 5,
|
||||
}
|
||||
signerSecretFlag = cli.StringFlag{
|
||||
Name: "signersecret",
|
||||
Usage: "A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash",
|
||||
}
|
||||
dBFlag = cli.StringFlag{
|
||||
Name: "4bytedb",
|
||||
Usage: "File containing 4byte-identifiers",
|
||||
Value: "./4byte.json",
|
||||
}
|
||||
customDBFlag = cli.StringFlag{
|
||||
Name: "4bytedb-custom",
|
||||
Usage: "File used for writing new 4byte-identifiers submitted via API",
|
||||
Value: "./4byte-custom.json",
|
||||
}
|
||||
auditLogFlag = cli.StringFlag{
|
||||
Name: "auditlog",
|
||||
Usage: "File used to emit audit logs. Set to \"\" to disable",
|
||||
Value: "audit.log",
|
||||
}
|
||||
ruleFlag = cli.StringFlag{
|
||||
Name: "rules",
|
||||
Usage: "Enable rule-engine",
|
||||
Value: "rules.json",
|
||||
}
|
||||
stdiouiFlag = cli.BoolFlag{
|
||||
Name: "stdio-ui",
|
||||
Usage: "Use STDIN/STDOUT as a channel for an external UI. " +
|
||||
"This means that an STDIN/STDOUT is used for RPC-communication with a e.g. a graphical user " +
|
||||
"interface, and can be used when Clef is started by an external process.",
|
||||
}
|
||||
testFlag = cli.BoolFlag{
|
||||
Name: "stdio-ui-test",
|
||||
Usage: "Mechanism to test interface between Clef and UI. Requires 'stdio-ui'.",
|
||||
}
|
||||
app = cli.NewApp()
|
||||
initCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(initializeSecrets),
|
||||
Name: "init",
|
||||
Usage: "Initialize the signer, generate secret storage",
|
||||
ArgsUsage: "",
|
||||
Flags: []cli.Flag{
|
||||
logLevelFlag,
|
||||
configdirFlag,
|
||||
},
|
||||
Description: `
|
||||
The init command generates a master seed which Clef can use to store credentials and data needed for
|
||||
the rule-engine to work.`,
|
||||
}
|
||||
attestCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(attestFile),
|
||||
Name: "attest",
|
||||
Usage: "Attest that a js-file is to be used",
|
||||
ArgsUsage: "<sha256sum>",
|
||||
Flags: []cli.Flag{
|
||||
logLevelFlag,
|
||||
configdirFlag,
|
||||
signerSecretFlag,
|
||||
},
|
||||
Description: `
|
||||
The attest command stores the sha256 of the rule.js-file that you want to use for automatic processing of
|
||||
incoming requests.
|
||||
|
||||
Whenever you make an edit to the rule file, you need to use attestation to tell
|
||||
Clef that the file is 'safe' to execute.`,
|
||||
}
|
||||
|
||||
setCredentialCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(setCredential),
|
||||
Name: "setpw",
|
||||
Usage: "Store a credential for a keystore file",
|
||||
ArgsUsage: "<address>",
|
||||
Flags: []cli.Flag{
|
||||
logLevelFlag,
|
||||
configdirFlag,
|
||||
signerSecretFlag,
|
||||
},
|
||||
Description: `
|
||||
The setpw command stores a password for a given address (keyfile). If you enter a blank passphrase, it will
|
||||
remove any stored credential for that address (keyfile)
|
||||
`,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
app.Name = "Clef"
|
||||
app.Usage = "Manage Ethereum account operations"
|
||||
app.Flags = []cli.Flag{
|
||||
logLevelFlag,
|
||||
keystoreFlag,
|
||||
configdirFlag,
|
||||
utils.NetworkIdFlag,
|
||||
utils.LightKDFFlag,
|
||||
utils.NoUSBFlag,
|
||||
utils.RPCListenAddrFlag,
|
||||
utils.RPCVirtualHostsFlag,
|
||||
utils.IPCDisabledFlag,
|
||||
utils.IPCPathFlag,
|
||||
utils.RPCEnabledFlag,
|
||||
rpcPortFlag,
|
||||
signerSecretFlag,
|
||||
dBFlag,
|
||||
customDBFlag,
|
||||
auditLogFlag,
|
||||
ruleFlag,
|
||||
stdiouiFlag,
|
||||
testFlag,
|
||||
advancedMode,
|
||||
}
|
||||
app.Action = signer
|
||||
app.Commands = []cli.Command{initCommand, attestCommand, setCredentialCommand}
|
||||
|
||||
}
|
||||
func main() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func initializeSecrets(c *cli.Context) error {
|
||||
if err := initialize(c); err != nil {
|
||||
return err
|
||||
}
|
||||
configDir := c.GlobalString(configdirFlag.Name)
|
||||
|
||||
masterSeed := make([]byte, 256)
|
||||
num, err := io.ReadFull(rand.Reader, masterSeed)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if num != len(masterSeed) {
|
||||
return fmt.Errorf("failed to read enough random")
|
||||
}
|
||||
|
||||
n, p := keystore.StandardScryptN, keystore.StandardScryptP
|
||||
if c.GlobalBool(utils.LightKDFFlag.Name) {
|
||||
n, p = keystore.LightScryptN, keystore.LightScryptP
|
||||
}
|
||||
text := "The master seed of clef is locked with a password. Please give a password. Do not forget this password."
|
||||
var password string
|
||||
for {
|
||||
password = getPassPhrase(text, true)
|
||||
if err := core.ValidatePasswordFormat(password); err != nil {
|
||||
fmt.Printf("invalid password: %v\n", err)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
cipherSeed, err := encryptSeed(masterSeed, []byte(password), n, p)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to encrypt master seed: %v", err)
|
||||
}
|
||||
|
||||
err = os.Mkdir(configDir, 0700)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
location := filepath.Join(configDir, "masterseed.json")
|
||||
if _, err := os.Stat(location); err == nil {
|
||||
return fmt.Errorf("file %v already exists, will not overwrite", location)
|
||||
}
|
||||
err = ioutil.WriteFile(location, cipherSeed, 0400)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("A master seed has been generated into %s\n", location)
|
||||
fmt.Printf(`
|
||||
This is required to be able to store credentials, such as :
|
||||
* Passwords for keystores (used by rule engine)
|
||||
* Storage for javascript rules
|
||||
* Hash of rule-file
|
||||
|
||||
You should treat that file with utmost secrecy, and make a backup of it.
|
||||
NOTE: This file does not contain your accounts. Those need to be backed up separately!
|
||||
|
||||
`)
|
||||
return nil
|
||||
}
|
||||
func attestFile(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) < 1 {
|
||||
utils.Fatalf("This command requires an argument.")
|
||||
}
|
||||
if err := initialize(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
stretchedKey, err := readMasterKey(ctx, nil)
|
||||
if err != nil {
|
||||
utils.Fatalf(err.Error())
|
||||
}
|
||||
configDir := ctx.GlobalString(configdirFlag.Name)
|
||||
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10]))
|
||||
confKey := crypto.Keccak256([]byte("config"), stretchedKey)
|
||||
|
||||
// Initialize the encrypted storages
|
||||
configStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "config.json"), confKey)
|
||||
val := ctx.Args().First()
|
||||
configStorage.Put("ruleset_sha256", val)
|
||||
log.Info("Ruleset attestation updated", "sha256", val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func setCredential(ctx *cli.Context) error {
|
||||
if len(ctx.Args()) < 1 {
|
||||
utils.Fatalf("This command requires an address to be passed as an argument.")
|
||||
}
|
||||
if err := initialize(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
address := ctx.Args().First()
|
||||
password := getPassPhrase("Enter a passphrase to store with this address.", true)
|
||||
|
||||
stretchedKey, err := readMasterKey(ctx, nil)
|
||||
if err != nil {
|
||||
utils.Fatalf(err.Error())
|
||||
}
|
||||
configDir := ctx.GlobalString(configdirFlag.Name)
|
||||
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10]))
|
||||
pwkey := crypto.Keccak256([]byte("credentials"), stretchedKey)
|
||||
|
||||
// Initialize the encrypted storages
|
||||
pwStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "credentials.json"), pwkey)
|
||||
pwStorage.Put(address, password)
|
||||
log.Info("Credential store updated", "key", address)
|
||||
return nil
|
||||
}
|
||||
|
||||
func initialize(c *cli.Context) error {
|
||||
// Set up the logger to print everything
|
||||
logOutput := os.Stdout
|
||||
if c.GlobalBool(stdiouiFlag.Name) {
|
||||
logOutput = os.Stderr
|
||||
// If using the stdioui, we can't do the 'confirm'-flow
|
||||
fmt.Fprintf(logOutput, legalWarning)
|
||||
} else {
|
||||
if !confirm(legalWarning) {
|
||||
return fmt.Errorf("aborted by user")
|
||||
}
|
||||
}
|
||||
|
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(c.Int(logLevelFlag.Name)), log.StreamHandler(logOutput, log.TerminalFormat(true))))
|
||||
return nil
|
||||
}
|
||||
|
||||
func signer(c *cli.Context) error {
|
||||
if err := initialize(c); err != nil {
|
||||
return err
|
||||
}
|
||||
var (
|
||||
ui core.SignerUI
|
||||
)
|
||||
if c.GlobalBool(stdiouiFlag.Name) {
|
||||
log.Info("Using stdin/stdout as UI-channel")
|
||||
ui = core.NewStdIOUI()
|
||||
} else {
|
||||
log.Info("Using CLI as UI-channel")
|
||||
ui = core.NewCommandlineUI()
|
||||
}
|
||||
fourByteDb := c.GlobalString(dBFlag.Name)
|
||||
fourByteLocal := c.GlobalString(customDBFlag.Name)
|
||||
db, err := core.NewAbiDBFromFiles(fourByteDb, fourByteLocal)
|
||||
if err != nil {
|
||||
utils.Fatalf(err.Error())
|
||||
}
|
||||
log.Info("Loaded 4byte db", "signatures", db.Size(), "file", fourByteDb, "local", fourByteLocal)
|
||||
|
||||
var (
|
||||
api core.ExternalAPI
|
||||
)
|
||||
|
||||
configDir := c.GlobalString(configdirFlag.Name)
|
||||
if stretchedKey, err := readMasterKey(c, ui); err != nil {
|
||||
log.Info("No master seed provided, rules disabled", "error", err)
|
||||
} else {
|
||||
|
||||
if err != nil {
|
||||
utils.Fatalf(err.Error())
|
||||
}
|
||||
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10]))
|
||||
|
||||
// Generate domain specific keys
|
||||
pwkey := crypto.Keccak256([]byte("credentials"), stretchedKey)
|
||||
jskey := crypto.Keccak256([]byte("jsstorage"), stretchedKey)
|
||||
confkey := crypto.Keccak256([]byte("config"), stretchedKey)
|
||||
|
||||
// Initialize the encrypted storages
|
||||
pwStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "credentials.json"), pwkey)
|
||||
jsStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "jsstorage.json"), jskey)
|
||||
configStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "config.json"), confkey)
|
||||
|
||||
//Do we have a rule-file?
|
||||
ruleJS, err := ioutil.ReadFile(c.GlobalString(ruleFlag.Name))
|
||||
if err != nil {
|
||||
log.Info("Could not load rulefile, rules not enabled", "file", "rulefile")
|
||||
} else {
|
||||
hasher := sha256.New()
|
||||
hasher.Write(ruleJS)
|
||||
shasum := hasher.Sum(nil)
|
||||
storedShasum := configStorage.Get("ruleset_sha256")
|
||||
if storedShasum != hex.EncodeToString(shasum) {
|
||||
log.Info("Could not validate ruleset hash, rules not enabled", "got", hex.EncodeToString(shasum), "expected", storedShasum)
|
||||
} else {
|
||||
// Initialize rules
|
||||
ruleEngine, err := rules.NewRuleEvaluator(ui, jsStorage, pwStorage)
|
||||
if err != nil {
|
||||
utils.Fatalf(err.Error())
|
||||
}
|
||||
ruleEngine.Init(string(ruleJS))
|
||||
ui = ruleEngine
|
||||
log.Info("Rule engine configured", "file", c.String(ruleFlag.Name))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
apiImpl := core.NewSignerAPI(
|
||||
c.GlobalInt64(utils.NetworkIdFlag.Name),
|
||||
c.GlobalString(keystoreFlag.Name),
|
||||
c.GlobalBool(utils.NoUSBFlag.Name),
|
||||
ui, db,
|
||||
c.GlobalBool(utils.LightKDFFlag.Name),
|
||||
c.GlobalBool(advancedMode.Name))
|
||||
api = apiImpl
|
||||
// Audit logging
|
||||
if logfile := c.GlobalString(auditLogFlag.Name); logfile != "" {
|
||||
api, err = core.NewAuditLogger(logfile, api)
|
||||
if err != nil {
|
||||
utils.Fatalf(err.Error())
|
||||
}
|
||||
log.Info("Audit logs configured", "file", logfile)
|
||||
}
|
||||
// register signer API with server
|
||||
var (
|
||||
extapiURL = "n/a"
|
||||
ipcapiURL = "n/a"
|
||||
)
|
||||
rpcAPI := []rpc.API{
|
||||
{
|
||||
Namespace: "account",
|
||||
Public: true,
|
||||
Service: api,
|
||||
Version: "1.0"},
|
||||
}
|
||||
if c.GlobalBool(utils.RPCEnabledFlag.Name) {
|
||||
|
||||
vhosts := splitAndTrim(c.GlobalString(utils.RPCVirtualHostsFlag.Name))
|
||||
cors := splitAndTrim(c.GlobalString(utils.RPCCORSDomainFlag.Name))
|
||||
|
||||
// start http server
|
||||
httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
|
||||
listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts, rpc.DefaultHTTPTimeouts)
|
||||
if err != nil {
|
||||
utils.Fatalf("Could not start RPC api: %v", err)
|
||||
}
|
||||
extapiURL = fmt.Sprintf("http://%s", httpEndpoint)
|
||||
log.Info("HTTP endpoint opened", "url", extapiURL)
|
||||
|
||||
defer func() {
|
||||
listener.Close()
|
||||
log.Info("HTTP endpoint closed", "url", httpEndpoint)
|
||||
}()
|
||||
|
||||
}
|
||||
if !c.GlobalBool(utils.IPCDisabledFlag.Name) {
|
||||
if c.IsSet(utils.IPCPathFlag.Name) {
|
||||
ipcapiURL = c.GlobalString(utils.IPCPathFlag.Name)
|
||||
} else {
|
||||
ipcapiURL = filepath.Join(configDir, "clef.ipc")
|
||||
}
|
||||
|
||||
listener, _, err := rpc.StartIPCEndpoint(ipcapiURL, rpcAPI)
|
||||
if err != nil {
|
||||
utils.Fatalf("Could not start IPC api: %v", err)
|
||||
}
|
||||
log.Info("IPC endpoint opened", "url", ipcapiURL)
|
||||
defer func() {
|
||||
listener.Close()
|
||||
log.Info("IPC endpoint closed", "url", ipcapiURL)
|
||||
}()
|
||||
|
||||
}
|
||||
|
||||
if c.GlobalBool(testFlag.Name) {
|
||||
log.Info("Performing UI test")
|
||||
go testExternalUI(apiImpl)
|
||||
}
|
||||
ui.OnSignerStartup(core.StartupInfo{
|
||||
Info: map[string]interface{}{
|
||||
"extapi_version": ExternalAPIVersion,
|
||||
"intapi_version": InternalAPIVersion,
|
||||
"extapi_http": extapiURL,
|
||||
"extapi_ipc": ipcapiURL,
|
||||
},
|
||||
})
|
||||
|
||||
abortChan := make(chan os.Signal, 1) // buffered, so a signal delivered before the receive is not dropped
|
||||
signal.Notify(abortChan, os.Interrupt)
|
||||
|
||||
sig := <-abortChan
|
||||
log.Info("Exiting...", "signal", sig)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// splitAndTrim splits input separated by a comma
|
||||
// and trims excessive white space from the substrings.
|
||||
func splitAndTrim(input string) []string {
|
||||
result := strings.Split(input, ",")
|
||||
for i, r := range result {
|
||||
result[i] = strings.TrimSpace(r)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// DefaultConfigDir is the default config directory to use for the vaults and other
|
||||
// persistence requirements.
|
||||
func DefaultConfigDir() string {
|
||||
// Try to place the data folder in the user's home dir
|
||||
home := homeDir()
|
||||
if home != "" {
|
||||
if runtime.GOOS == "darwin" {
|
||||
return filepath.Join(home, "Library", "Signer")
|
||||
} else if runtime.GOOS == "windows" {
|
||||
return filepath.Join(home, "AppData", "Roaming", "Signer")
|
||||
} else {
|
||||
return filepath.Join(home, ".clef")
|
||||
}
|
||||
}
|
||||
// As we cannot guess a stable location, return empty and handle later
|
||||
return ""
|
||||
}
|
||||
|
||||
func homeDir() string {
|
||||
if home := os.Getenv("HOME"); home != "" {
|
||||
return home
|
||||
}
|
||||
if usr, err := user.Current(); err == nil {
|
||||
return usr.HomeDir
|
||||
}
|
||||
return ""
|
||||
}
|
||||
func readMasterKey(ctx *cli.Context, ui core.SignerUI) ([]byte, error) {
|
||||
var (
|
||||
file string
|
||||
configDir = ctx.GlobalString(configdirFlag.Name)
|
||||
)
|
||||
if ctx.GlobalIsSet(signerSecretFlag.Name) {
|
||||
file = ctx.GlobalString(signerSecretFlag.Name)
|
||||
} else {
|
||||
file = filepath.Join(configDir, "masterseed.json")
|
||||
}
|
||||
if err := checkFile(file); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cipherKey, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var password string
|
||||
// If ui is not nil, get the password from ui.
|
||||
if ui != nil {
|
||||
resp, err := ui.OnInputRequired(core.UserInputRequest{
|
||||
Title: "Master Password",
|
||||
Prompt: "Please enter the password to decrypt the master seed",
|
||||
IsPassword: true})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
password = resp.Text
|
||||
} else {
|
||||
password = getPassPhrase("Decrypt master seed of clef", false)
|
||||
}
|
||||
masterSeed, err := decryptSeed(cipherKey, password)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decrypt the master seed of clef")
|
||||
}
|
||||
if len(masterSeed) < 256 {
|
||||
return nil, fmt.Errorf("master seed of insufficient length, expected >255 bytes, got %d", len(masterSeed))
|
||||
}
|
||||
|
||||
// Create vault location
|
||||
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), masterSeed)[:10]))
|
||||
err = os.Mkdir(vaultLocation, 0700)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
return masterSeed, nil
|
||||
}
|
||||
|
||||
// checkFile is a convenience function to check if a file
|
||||
// * exists
|
||||
// * is mode 0400
|
||||
func checkFile(filename string) error {
|
||||
info, err := os.Stat(filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed stat on %s: %v", filename, err)
|
||||
}
|
||||
// Check the unix permission bits
|
||||
if info.Mode().Perm()&0377 != 0 {
|
||||
return fmt.Errorf("file (%v) has insecure file permissions (%v)", filename, info.Mode().String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// confirm displays a text and asks for user confirmation
|
||||
func confirm(text string) bool {
|
||||
fmt.Print(text)
|
||||
fmt.Printf("\nEnter 'ok' to proceed:\n>")
|
||||
|
||||
text, err := bufio.NewReader(os.Stdin).ReadString('\n')
|
||||
if err != nil {
|
||||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
|
||||
if text := strings.TrimSpace(text); text == "ok" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func testExternalUI(api *core.SignerAPI) {
|
||||
|
||||
ctx := context.WithValue(context.Background(), "remote", "clef binary")
|
||||
ctx = context.WithValue(ctx, "scheme", "in-proc")
|
||||
ctx = context.WithValue(ctx, "local", "main")
|
||||
|
||||
errs := make([]string, 0)
|
||||
|
||||
api.UI.ShowInfo("Testing 'ShowInfo'")
|
||||
api.UI.ShowError("Testing 'ShowError'")
|
||||
|
||||
checkErr := func(method string, err error) {
|
||||
if err != nil && err != core.ErrRequestDenied {
|
||||
errs = append(errs, fmt.Sprintf("%v: %v", method, err.Error()))
|
||||
}
|
||||
}
|
||||
var err error
|
||||
|
||||
_, err = api.SignTransaction(ctx, core.SendTxArgs{From: common.MixedcaseAddress{}}, nil)
|
||||
checkErr("SignTransaction", err)
|
||||
_, err = api.Sign(ctx, common.MixedcaseAddress{}, common.Hex2Bytes("01020304"))
|
||||
checkErr("Sign", err)
|
||||
_, err = api.List(ctx)
|
||||
checkErr("List", err)
|
||||
_, err = api.New(ctx)
|
||||
checkErr("New", err)
|
||||
_, err = api.Export(ctx, common.Address{})
|
||||
checkErr("Export", err)
|
||||
_, err = api.Import(ctx, json.RawMessage{})
|
||||
checkErr("Import", err)
|
||||
|
||||
api.UI.ShowInfo("Tests completed")
|
||||
|
||||
if len(errs) > 0 {
|
||||
log.Error("Got errors")
|
||||
for _, e := range errs {
|
||||
log.Error(e)
|
||||
}
|
||||
} else {
|
||||
log.Info("No errors")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// getPassPhrase retrieves the password associated with clef, either fetched
|
||||
// from a list of preloaded passphrases, or requested interactively from the user.
|
||||
// TODO: there are many `getPassPhrase` functions, it will be better to abstract them into one.
|
||||
func getPassPhrase(prompt string, confirmation bool) string {
|
||||
fmt.Println(prompt)
|
||||
password, err := console.Stdin.PromptPassword("Passphrase: ")
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read passphrase: %v", err)
|
||||
}
|
||||
if confirmation {
|
||||
confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read passphrase confirmation: %v", err)
|
||||
}
|
||||
if password != confirm {
|
||||
utils.Fatalf("Passphrases do not match")
|
||||
}
|
||||
}
|
||||
return password
|
||||
}
|
||||
|
||||
type encryptedSeedStorage struct {
|
||||
Description string `json:"description"`
|
||||
Version int `json:"version"`
|
||||
Params keystore.CryptoJSON `json:"params"`
|
||||
}
|
||||
|
||||
// encryptSeed uses a similar scheme as the keystore uses, but with a different wrapping,
|
||||
// to encrypt the master seed
|
||||
func encryptSeed(seed []byte, auth []byte, scryptN, scryptP int) ([]byte, error) {
|
||||
cryptoStruct, err := keystore.EncryptDataV3(seed, auth, scryptN, scryptP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.Marshal(&encryptedSeedStorage{"Clef seed", 1, cryptoStruct})
|
||||
}
|
||||
|
||||
// decryptSeed decrypts the master seed
|
||||
func decryptSeed(keyjson []byte, auth string) ([]byte, error) {
|
||||
var encSeed encryptedSeedStorage
|
||||
if err := json.Unmarshal(keyjson, &encSeed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if encSeed.Version != 1 {
|
||||
log.Warn(fmt.Sprintf("unsupported encryption format of seed: %d, operation will likely fail", encSeed.Version))
|
||||
}
|
||||
seed, err := keystore.DecryptDataV3(encSeed.Params, auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return seed, err
|
||||
}
|
||||
|
||||
/**
|
||||
//Create Account
|
||||
|
||||
curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_new","params":["test"],"id":67}' localhost:8550
|
||||
|
||||
// List accounts
|
||||
|
||||
curl -i -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_list","params":[""],"id":67}' http://localhost:8550/
|
||||
|
||||
// Make Transaction
|
||||
// safeSend(0x12)
|
||||
// 4401a6e40000000000000000000000000000000000000000000000000000000000000012
|
||||
|
||||
// supplied abi
|
||||
curl -i -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x82A2A876D39022B3019932D30Cd9c97ad5616813","gas":"0x333","gasPrice":"0x123","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x10", "data":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"},"test"],"id":67}' http://localhost:8550/
|
||||
|
||||
// Not supplied
|
||||
curl -i -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x82A2A876D39022B3019932D30Cd9c97ad5616813","gas":"0x333","gasPrice":"0x123","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x10", "data":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"}],"id":67}' http://localhost:8550/
|
||||
|
||||
// Sign data
|
||||
|
||||
curl -i -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_sign","params":["0x694267f14675d7e1b9494fd8d72fefe1755710fa","bazonk gaz baz"],"id":67}' http://localhost:8550/
|
||||
|
||||
|
||||
**/
|
179 vendor/github.com/ethereum/go-ethereum/cmd/clef/pythonsigner.py generated vendored Normal file
@@ -0,0 +1,179 @@
|
||||
import os,sys, subprocess
|
||||
from tinyrpc.transports import ServerTransport
|
||||
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
|
||||
from tinyrpc.dispatch import public,RPCDispatcher
|
||||
from tinyrpc.server import RPCServer
|
||||
|
||||
""" This is a POC example of how to write a custom UI for Clef. The UI starts the
|
||||
clef process with the '--stdio-ui' option, and communicates with clef using standard input / output.
|
||||
|
||||
The standard input/output is a relatively secure way to communicate, as it does not require opening any ports
|
||||
or IPC files. Needless to say, it does not protect against memory inspection mechanisms where an attacker
|
||||
can access process memory."""
|
||||
|
||||
try:
|
||||
import urllib.parse as urlparse
|
||||
except ImportError:
|
||||
import urllib as urlparse
|
||||
|
||||
class StdIOTransport(ServerTransport):
|
||||
""" Uses std input/output for RPC """
|
||||
def receive_message(self):
|
||||
return None, urlparse.unquote(sys.stdin.readline())
|
||||
|
||||
def send_reply(self, context, reply):
|
||||
print(reply)
|
||||
|
||||
class PipeTransport(ServerTransport):
|
||||
""" Uses std a pipe for RPC """
|
||||
|
||||
def __init__(self,input, output):
|
||||
self.input = input
|
||||
self.output = output
|
||||
|
||||
def receive_message(self):
|
||||
data = self.input.readline()
|
||||
print(">> {}".format( data))
|
||||
return None, urlparse.unquote(data)
|
||||
|
||||
def send_reply(self, context, reply):
|
||||
print("<< {}".format( reply))
|
||||
self.output.write(reply)
|
||||
self.output.write("\n")
|
||||
|
||||
class StdIOHandler():
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
@public
|
||||
def ApproveTx(self,req):
|
||||
"""
|
||||
Example request:
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "ApproveTx",
|
||||
"params": [{
|
||||
"transaction": {
|
||||
"to": "0xae967917c465db8578ca9024c205720b1a3651A9",
|
||||
"gas": "0x333",
|
||||
"gasPrice": "0x123",
|
||||
"value": "0x10",
|
||||
"data": "0xd7a5865800000000000000000000000000000000000000000000000000000000000000ff",
|
||||
"nonce": "0x0"
|
||||
},
|
||||
"from": "0xAe967917c465db8578ca9024c205720b1a3651A9",
|
||||
"call_info": "Warning! Could not validate ABI-data against calldata\nSupplied ABI spec does not contain method signature in data: 0xd7a58658",
|
||||
"meta": {
|
||||
"remote": "127.0.0.1:34572",
|
||||
"local": "localhost:8550",
|
||||
"scheme": "HTTP/1.1"
|
||||
}
|
||||
}],
|
||||
"id": 1
|
||||
}
|
||||
|
||||
:param transaction: transaction info
|
||||
:param call_info: info about the call, e.g. if ABI info could not be
|
||||
:param meta: metadata about the request, e.g. where the call comes from
|
||||
:return:
|
||||
"""
|
||||
transaction = req.get('transaction')
|
||||
_from = req.get('from')
|
||||
call_info = req.get('call_info')
|
||||
meta = req.get('meta')
|
||||
|
||||
return {
|
||||
"approved" : False,
|
||||
#"transaction" : transaction,
|
||||
# "from" : _from,
|
||||
# "password" : None,
|
||||
}
|
||||
|
||||
@public
|
||||
def ApproveSignData(self, req):
|
||||
""" Example request
|
||||
|
||||
"""
|
||||
return {"approved": False, "password" : None}
|
||||
|
||||
@public
|
||||
def ApproveExport(self, req):
|
||||
""" Example request
|
||||
|
||||
"""
|
||||
return {"approved" : False}
|
||||
|
||||
@public
|
||||
def ApproveImport(self, req):
|
||||
""" Example request
|
||||
|
||||
"""
|
||||
return { "approved" : False, "old_password": "", "new_password": ""}
|
||||
|
||||
@public
|
||||
def ApproveListing(self, req):
|
||||
""" Example request
|
||||
|
||||
"""
|
||||
return {'accounts': []}
|
||||
|
||||
@public
|
||||
def ApproveNewAccount(self, req):
|
||||
"""
|
||||
Example request
|
||||
|
||||
:return:
|
||||
"""
|
||||
return {"approved": False,
|
||||
#"password": ""
|
||||
}
|
||||
|
||||
@public
|
||||
def ShowError(self,message = {}):
|
||||
"""
|
||||
Example request:
|
||||
|
||||
{"jsonrpc":"2.0","method":"ShowInfo","params":{"message":"Testing 'ShowError'"},"id":1}
|
||||
|
||||
:param message: to show
|
||||
:return: nothing
|
||||
"""
|
||||
if 'text' in message.keys():
|
||||
sys.stderr.write("Error: {}\n".format( message['text']))
|
||||
return
|
||||
|
||||
@public
|
||||
def ShowInfo(self,message = {}):
|
||||
"""
|
||||
Example request
|
||||
{"jsonrpc":"2.0","method":"ShowInfo","params":{"message":"Testing 'ShowInfo'"},"id":0}
|
||||
|
||||
:param message: to display
|
||||
:return:nothing
|
||||
"""
|
||||
|
||||
if 'text' in message.keys():
|
||||
sys.stdout.write("Error: {}\n".format( message['text']))
|
||||
return
|
||||
|
||||
def main(args):
|
||||
|
||||
cmd = ["./clef", "--stdio-ui"]
|
||||
if len(args) > 0 and args[0] == "test":
|
||||
cmd.extend(["--stdio-ui-test"])
|
||||
print("cmd: {}".format(" ".join(cmd)))
|
||||
dispatcher = RPCDispatcher()
|
||||
dispatcher.register_instance(StdIOHandler(), '')
|
||||
# line buffered
|
||||
p = subprocess.Popen(cmd, bufsize=1, universal_newlines=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
|
||||
|
||||
rpc_server = RPCServer(
|
||||
PipeTransport(p.stdout, p.stdin),
|
||||
JSONRPCProtocol(),
|
||||
dispatcher
|
||||
)
|
||||
rpc_server.serve_forever()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main(sys.argv[1:])
|
236 vendor/github.com/ethereum/go-ethereum/cmd/clef/rules.md generated vendored Normal file
@@ -0,0 +1,236 @@
|
||||
# Rules
|
||||
|
||||
The `signer` binary contains a ruleset engine, implemented with [OttoVM](https://github.com/robertkrimen/otto)
|
||||
|
||||
It enables use cases like the following:
|
||||
|
||||
* I want to auto-approve transactions to contract `CasinoDapp`, with up to `0.05 ether` in value per transaction, up to a maximum of `1 ether` per 24h period
* I want to auto-approve transactions to contract `EthAlarmClock` with `data`=`0xdeadbeef`, if `value=0`, `gas < 44k` and `gasPrice < 40Gwei`
|
||||
|
||||
The two main features that are required for this to work well are:

1. Rule implementation: how to create, manage and interpret rules in a flexible but secure manner.
2. Credential management: how to provide auto-unlock without exposing keys unnecessarily.
|
||||
|
||||
The sections below deal with both of them.
|
||||
|
||||
## Rule Implementation
|
||||
|
||||
A ruleset file is implemented as a `js` file. Under the hood, the ruleset-engine is a `SignerUI`, implementing the same methods as the `json-rpc` methods
|
||||
defined in the UI protocol. Example:
|
||||
|
||||
```javascript
|
||||
|
||||
function asBig(str){
|
||||
if(str.slice(0,2) == "0x"){ return new BigNumber(str.slice(2),16)}
|
||||
return new BigNumber(str)
|
||||
}
|
||||
|
||||
// Approve transactions to a certain contract if value is below a certain limit
|
||||
function ApproveTx(req){
|
||||
|
||||
var limit = big.Newint("0xb1a2bc2ec50000")
|
||||
var value = asBig(req.transaction.value);
|
||||
|
||||
if(req.transaction.to.toLowerCase()=="0xae967917c465db8578ca9024c205720b1a3651a9")
|
||||
&& value.lt(limit) ){
|
||||
return "Approve"
|
||||
}
|
||||
// If we return "Reject", it will be rejected.
|
||||
// By not returning anything, it will be passed to the next UI, for manual processing
|
||||
}
|
||||
|
||||
//Approve listings if request made from IPC
|
||||
function ApproveListing(req){
|
||||
if (req.metadata.scheme == "ipc"){ return "Approve"}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Whenever the external API is called (and the ruleset is enabled), the `signer` calls the UI, which is an instance of a ruleset-engine. The ruleset-engine
|
||||
invokes the corresponding method. In doing so, there are three possible outcomes (a Go sketch of this dispatch follows the list below):
|
||||
|
||||
1. JS returns "Approve"
|
||||
* Auto-approve request
|
||||
2. JS returns "Reject"
|
||||
* Auto-reject request
|
||||
3. Error occurs, or something else is returned
|
||||
* Pass on to `next` ui: the regular UI channel.
|
||||
|
||||
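As a rough illustration of the outcome handling described above (this is not the actual geth implementation; the `Action` type and `decide` function are made up for the sketch), the mapping could look like this:

```go
package main

import "fmt"

// Action is a hypothetical enum for what the ruleset engine decides.
type Action int

const (
	PassToNextUI Action = iota // hand over to the regular UI for manual processing
	AutoApprove
	AutoReject
)

// decide maps the value returned by the rule script (plus any evaluation
// error) onto one of the three outcomes listed above.
func decide(jsResult string, evalErr error) Action {
	if evalErr != nil {
		return PassToNextUI // 3. error occurs: fall through to manual processing
	}
	switch jsResult {
	case "Approve":
		return AutoApprove // 1. JS returns "Approve"
	case "Reject":
		return AutoReject // 2. JS returns "Reject"
	default:
		return PassToNextUI // 3. anything else: fall through to manual processing
	}
}

func main() {
	fmt.Println(decide("Approve", nil) == AutoApprove) // true
	fmt.Println(decide("Nope", nil) == PassToNextUI)   // true
}
```

The key point is that only the exact strings "Approve" and "Reject" are terminal; everything else, including errors, is deferred to the next UI.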
A more advanced example can be found below, "Example 1: ruleset for a rate-limited window", using `storage` to `Put` and `Get` `string`s by key.
|
||||
|
||||
* At the time of writing, storage only exists as an ephemeral unencrypted implementation, to be used during testing.
|
||||
|
||||
### Things to note
|
||||
|
||||
The Otto vm has a few [caveats](https://github.com/robertkrimen/otto):
|
||||
|
||||
* "use strict" will parse, but does nothing.
|
||||
* The regular expression engine (re2/regexp) is not fully compatible with the ECMA5 specification.
|
||||
* Otto targets ES5. ES6 features (eg: Typed Arrays) are not supported.
|
||||
|
||||
Additionally, a few more have been added
|
||||
|
||||
* The rule execution cannot load external javascript files.
|
||||
* The only preloaded library is [`bignumber.js`](https://github.com/MikeMcl/bignumber.js) version `2.0.3`. This version is fairly old, and is not aligned with the documentation at the github repository.
|
||||
* Each invocation is made in a fresh virtual machine. This means that you cannot store data in global variables between invocations. This is a deliberate choice -- if you want to store data, use the disk-backed `storage`, since rules should not rely on ephemeral data.
|
||||
* Javascript API parameters are _always_ an object. This is also a design choice, to ensure that parameters are accessed by _key_ and not by order. This is to prevent mistakes due to missing parameters or parameter changes.
|
||||
* The JS engine has access to `storage` and `console`.
|
||||
|
||||
#### Security considerations
|
||||
|
||||
##### Security of ruleset
|
||||
|
||||
Some security precautions can be taken, such as the following (a verification sketch in Go follows this list):
|
||||
|
||||
* Never load `ruleset.js` unless the file is `readonly` (`r-??-??-?`). If the user wishes to modify the ruleset, he must make it writeable and then set it back to readonly.
|
||||
* This is to prevent attacks where files are dropped on the user's disk.
|
||||
* Since we're going to need some form of secure storage (not defined in this section), we could also store the `sha3` of the `ruleset.js` file in there.
|
||||
* If the user wishes to modify the ruleset, he'd then have to perform e.g. `signer --attest /path/to/ruleset --credential <creds>`
|
||||
|
||||
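A minimal sketch of the attestation check this list motivates, mirroring what `cmd/clef/main.go` (shown earlier in this diff) does when loading a rule file. The function name and the `attestedHex` value are illustrative; in clef the attested hash is read from the encrypted config storage:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"log"
)

// rulesetMatchesAttestation hashes the rule file and compares it against the
// previously attested sha256 (stored via `clef attest <sha256sum>`).
func rulesetMatchesAttestation(path, attestedHex string) (bool, error) {
	js, err := ioutil.ReadFile(path)
	if err != nil {
		return false, err
	}
	sum := sha256.Sum256(js)
	return hex.EncodeToString(sum[:]) == attestedHex, nil
}

func main() {
	// attestedHex is a placeholder; clef fetches it from the encrypted config storage.
	ok, err := rulesetMatchesAttestation("rules.js",
		"0000000000000000000000000000000000000000000000000000000000000000")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("rules enabled:", ok)
}
```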
##### Security of implementation
|
||||
|
||||
The drawback of this very flexible solution is that the `signer` needs to contain a javascript engine. This is pretty simple to implement, since it's already
implemented for `geth`. There are no known security vulnerabilities in it, nor have we had any security problems with it so far.
|
||||
|
||||
The javascript engine would be an added attack surface; but if the validation of `rulesets` is done well (with hash-based attestation), the actual javascript cannot be considered
an attack surface -- if an attacker can control the ruleset, a much simpler attack would be to implement an "always-approve" rule instead of exploiting the js vm. The only benefit
to be gained from attacking the actual `signer` process from the `js` side would be if it could somehow extract cryptographic keys from memory.
|
||||
|
||||
##### Security in usability
|
||||
|
||||
Javascript is flexible, but also easy to get wrong, especially when users assume that `js` can handle large integers natively. Typical errors
include trying to multiply `gasCost` with `gas` without using bigints.
|
||||
|
||||
It's unclear whether any other DSL would be more secure, since there's always the possibility of erroneously implementing a rule.
|
||||
|
||||
|
||||
## Credential management
|
||||
|
||||
The ability to auto-approve transactions means that the signer needs to have the necessary credentials to decrypt keyfiles. These passwords are hereafter called `ksp` (keystore pass).
|
||||
|
||||
### Example implementation
|
||||
|
||||
Upon startup, the signer is given a switch: `--seed <path/to/masterseed>`
|
||||
The `seed` contains a blob of bytes, which is the master seed for the `signer`.
|
||||
|
||||
The `signer` uses the `seed` to do the following (a derivation sketch in Go follows these lists):
|
||||
|
||||
* Generate the `path` where the settings are stored.
|
||||
* `./settings/1df094eb-c2b1-4689-90dd-790046d38025/vault.dat`
|
||||
* `./settings/1df094eb-c2b1-4689-90dd-790046d38025/rules.js`
|
||||
* Generate the encryption password for `vault.dat`.
|
||||
|
||||
The `vault.dat` would be an encrypted container storing the following information:
|
||||
|
||||
* `ksp` entries
|
||||
* `sha256` hash of `rules.js`
|
||||
* Information about paired callers (not yet specified)
|
||||
|
||||
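The derivation described above can be sketched directly from the clef code shown earlier in this diff: the vault directory name and the per-store encryption keys are Keccak256 digests of a constant label concatenated with the decrypted master seed. The `configDir` and `masterSeed` values below are placeholder inputs:

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	configDir := "/home/user/.clef" // placeholder: whatever --configdir resolves to
	masterSeed := make([]byte, 256) // placeholder: the decrypted masterseed.json payload

	// Vault directory: first 10 bytes of Keccak256("vault" || seed), hex encoded.
	vaultLocation := filepath.Join(configDir,
		common.Bytes2Hex(crypto.Keccak256([]byte("vault"), masterSeed)[:10]))

	// Domain-separated keys for the encrypted JSON stores inside the vault.
	pwKey := crypto.Keccak256([]byte("credentials"), masterSeed) // credentials.json
	jsKey := crypto.Keccak256([]byte("jsstorage"), masterSeed)   // jsstorage.json
	confKey := crypto.Keccak256([]byte("config"), masterSeed)    // config.json

	fmt.Println(vaultLocation, len(pwKey), len(jsKey), len(confKey))
}
```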
### Security considerations
|
||||
|
||||
This would leave it up to the user to ensure that the `path/to/masterseed` is handled in a secure way. It's difficult to get around this, although one could
|
||||
imagine leveraging OS-level keychains where supported. The setup is however in general similar to how ssh-keys are stored in `.ssh/`.
|
||||
|
||||
|
||||
# Implementation status
|
||||
|
||||
This is now implemented (with ephemeral non-encrypted storage for now, so not yet enabled).
|
||||
|
||||
## Example 1: ruleset for a rate-limited window
|
||||
|
||||
|
||||
```javascript
|
||||
|
||||
function big(str){
|
||||
if(str.slice(0,2) == "0x"){ return new BigNumber(str.slice(2),16)}
|
||||
return new BigNumber(str)
|
||||
}
|
||||
|
||||
// Time window: 1 week
|
||||
var window = 1000* 3600*24*7;
|
||||
|
||||
// Limit : 1 ether
|
||||
var limit = new BigNumber("1e18");
|
||||
|
||||
function isLimitOk(transaction){
|
||||
var value = big(transaction.value)
|
||||
// Start of our window function
|
||||
var windowstart = new Date().getTime() - window;
|
||||
|
||||
var txs = [];
|
||||
var stored = storage.Get('txs');
|
||||
|
||||
if(stored != ""){
|
||||
txs = JSON.parse(stored)
|
||||
}
|
||||
// First, remove all that have passed out of the time-window
|
||||
var newtxs = txs.filter(function(tx){return tx.tstamp > windowstart});
|
||||
console.log(txs, newtxs.length);
|
||||
|
||||
// Secondly, aggregate the current sum
|
||||
sum = new BigNumber(0)
|
||||
|
||||
sum = newtxs.reduce(function(agg, tx){ return big(tx.value).plus(agg)}, sum);
|
||||
console.log("ApproveTx > Sum so far", sum);
|
||||
console.log("ApproveTx > Requested", value.toNumber());
|
||||
|
||||
// Would we exceed weekly limit ?
|
||||
return sum.plus(value).lt(limit)
|
||||
|
||||
}
|
||||
function ApproveTx(r){
|
||||
if (isLimitOk(r.transaction)){
|
||||
return "Approve"
|
||||
}
|
||||
return "Nope"
|
||||
}
|
||||
|
||||
/**
|
||||
* OnApprovedTx(str) is called when a transaction has been approved and signed. The parameter
|
||||
* 'response_str' contains the return value that will be sent to the external caller.
|
||||
* The return value from this method is ignored - the reason for having this callback is to allow the
|
||||
* ruleset to keep track of approved transactions.
|
||||
*
|
||||
* When implementing rate-limited rules, this callback should be used.
|
||||
* If a rule responds with neither 'Approve' nor 'Reject' - the tx goes to manual processing. If the user
|
||||
* then accepts the transaction, this method will be called.
|
||||
*
|
||||
* TLDR; Use this method to keep track of signed transactions, instead of using the data in ApproveTx.
|
||||
*/
|
||||
function OnApprovedTx(resp){
|
||||
var value = big(resp.tx.value)
|
||||
var txs = []
|
||||
// Load stored transactions
|
||||
var stored = storage.Get('txs');
|
||||
if(stored != ""){
|
||||
txs = JSON.parse(stored)
|
||||
}
|
||||
// Add this to the storage
|
||||
txs.push({tstamp: new Date().getTime(), value: value});
|
||||
storage.Put("txs", JSON.stringify(txs));
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## Example 2: allow destination
|
||||
|
||||
```javascript
|
||||
|
||||
function ApproveTx(r){
|
||||
if(r.transaction.from.toLowerCase()=="0x0000000000000000000000000000000000001337"){ return "Approve"}
|
||||
if(r.transaction.from.toLowerCase()=="0x000000000000000000000000000000000000dead"){ return "Reject"}
|
||||
// Otherwise goes to manual processing
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## Example 3: Allow listing
|
||||
|
||||
```javascript
|
||||
|
||||
function ApproveListing(){
|
||||
return "Approve"
|
||||
}
|
||||
|
||||
```
|
Some files were not shown because too many files have changed in this diff.