Write state diffs directly to Postgres #30
@@ -159,7 +159,22 @@ func makeFullNode(ctx *cli.Context) *node.Node {
 	utils.RegisterEthService(stack, &cfg.Eth)

 	if ctx.GlobalBool(utils.StateDiffFlag.Name) {
-		utils.RegisterStateDiffService(stack)
+		var dbParams *[3]string
+		if ctx.GlobalIsSet(utils.StateDiffDBFlag.Name) {
+			dbParams = new([3]string)
+			dbParams[0] = ctx.GlobalString(utils.StateDiffDBFlag.Name)
+			if ctx.GlobalIsSet(utils.StateDiffDBNodeIDFlag.Name) {
+				dbParams[1] = ctx.GlobalString(utils.StateDiffDBNodeIDFlag.Name)
+			} else {
+				utils.Fatalf("Must specify node ID for statediff DB output")
+			}
+			if ctx.GlobalIsSet(utils.StateDiffDBClientNameFlag.Name) {
+				dbParams[2] = ctx.GlobalString(utils.StateDiffDBClientNameFlag.Name)
+			} else {
+				utils.Fatalf("Must specify client name for statediff DB output")
+			}
+		}
+		utils.RegisterStateDiffService(stack, dbParams, ctx.GlobalBool(utils.StateDiffWritingFlag.Name))
 	}

 	// Whisper must be explicitly enabled by specifying at least 1 whisper flag or in dev mode
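In short: --statediff alone still registers the service, but when --statediff.db is set, the node ID and client name flags become mandatory and the three values are handed to the service as a *[3]string (connection string, node ID, client name). Without --statediff.db, dbParams stays nil, so the service is presumably registered without a database backend.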
@@ -147,6 +147,10 @@ var (
 		utils.EWASMInterpreterFlag,
 		utils.EVMInterpreterFlag,
 		utils.StateDiffFlag,
+		utils.StateDiffDBFlag,
+		utils.StateDiffDBNodeIDFlag,
+		utils.StateDiffDBClientNameFlag,
+		utils.StateDiffWritingFlag,
 		configFileFlag,
 	}
@@ -255,6 +255,10 @@ var AppHelpFlagGroups = []flagGroup{
 		Name: "STATE DIFF",
 		Flags: []cli.Flag{
 			utils.StateDiffFlag,
+			utils.StateDiffDBFlag,
+			utils.StateDiffDBNodeIDFlag,
+			utils.StateDiffDBClientNameFlag,
+			utils.StateDiffWritingFlag,
 		},
 	},
 	{
@@ -761,6 +761,22 @@ var (
 		Name:  "statediff",
 		Usage: "Enables the processing of state diffs between each block",
 	}
+	StateDiffDBFlag = cli.StringFlag{
+		Name:  "statediff.db",
+		Usage: "PostgreSQL database connection string for writing state diffs",
+	}
+	StateDiffDBNodeIDFlag = cli.StringFlag{
+		Name:  "statediff.dbnodeid",
+		Usage: "Node ID to use when writing state diffs to database",
+	}
+	StateDiffDBClientNameFlag = cli.StringFlag{
+		Name:  "statediff.dbclientname",
+		Usage: "Client name to use when writing state diffs to database",
+	}
+	StateDiffWritingFlag = cli.BoolFlag{
+		Name:  "statediff.writing",
+		Usage: "Activates progressive writing of state diffs to database as new blocks are synced",
+	}
 )

 // MakeDataDir retrieves the currently requested data directory, terminating
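Assuming a reachable Postgres instance, the five flags defined above would be combined on the command line roughly like this (the connection string and identifiers are illustrative placeholders, not values from this PR):

geth --statediff --statediff.writing \
     --statediff.db "postgres://user:password@localhost:5432/statediffs?sslmode=disable" \
     --statediff.dbnodeid mynode --statediff.dbclientname go-ethereum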
@@ -1633,12 +1649,15 @@ func RegisterGraphQLService(stack *node.Node, endpoint string, cors, vhosts []st
 }

 // RegisterStateDiffService configures and registers a service to stream state diff data over RPC
-func RegisterStateDiffService(stack *node.Node) {
+// dbParams are: Postgres connection URI, Node ID, client name
+func RegisterStateDiffService(stack *node.Node, dbParams *[3]string, startWriteLoop bool) {
 	if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
 		var ethServ *eth.Ethereum
-		ctx.Service(&ethServ)
-		blockChain := ethServ.BlockChain()
-		return statediff.NewStateDiffService(blockChain)
+		err := ctx.Service(&ethServ)
+		if err != nil {
+			return nil, err
+		}
+		return statediff.NewStateDiffService(ethServ, dbParams, startWriteLoop)
 	}); err != nil {
 		Fatalf("Failed to register State Diff Service", err)
 	}
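For context on how the three dbParams values could end up as a Postgres handle inside the statediff service: the go.mod changes below pull in github.com/jmoiron/sqlx and github.com/lib/pq, so the connection step might look roughly like the following. This is a sketch under that assumption, not code from this PR; NewPostgresDB and its error messages are hypothetical.

package statediff

import (
	"fmt"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // registers the "postgres" driver
)

// NewPostgresDB is a hypothetical helper showing how the dbParams triple
// (connection URI, node ID, client name) could be turned into a live handle.
func NewPostgresDB(dbParams *[3]string) (*sqlx.DB, error) {
	if dbParams == nil {
		return nil, fmt.Errorf("statediff: no database parameters provided")
	}
	connStr, nodeID, clientName := dbParams[0], dbParams[1], dbParams[2]
	db, err := sqlx.Connect("postgres", connStr) // opens and pings the database
	if err != nil {
		return nil, fmt.Errorf("statediff: connecting as node %s (%s): %w", nodeID, clientName, err)
	}
	return db, nil
}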
go.mod (36 lines changed)
@@ -21,36 +21,48 @@ require (
	github.com/dop251/goja v0.0.0-20200106141417-aaec0e7bde29
	github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c
	github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa
	github.com/fatih/color v1.3.0
	github.com/fatih/color v1.7.0
	github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc
	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
	github.com/go-ole/go-ole v1.2.1 // indirect
	github.com/go-sourcemap/sourcemap v2.1.2+incompatible // indirect
	github.com/go-stack/stack v1.8.0
	github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c
	github.com/golang/protobuf v1.3.2
	github.com/golang/snappy v0.0.1
	github.com/google/go-cmp v0.3.1 // indirect
	github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989
	github.com/gorilla/websocket v1.4.2
	github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277
	github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad
	github.com/hashicorp/golang-lru v0.5.4
	github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3
	github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883
	github.com/ipfs/go-block-format v0.0.2
	github.com/ipfs/go-cid v0.0.7
	github.com/ipfs/go-ipfs-blockstore v1.0.1
	github.com/ipfs/go-ipfs-ds-help v1.0.0
	github.com/ipfs/go-ipld-format v0.2.0
	github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458
	github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21
	github.com/jmoiron/sqlx v1.2.0
	github.com/julienschmidt/httprouter v1.2.0
	github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356
	github.com/kr/pretty v0.1.0 // indirect
	github.com/kylelemons/godebug v1.1.0 // indirect
	github.com/mattn/go-colorable v0.1.0
	github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035
	github.com/lib/pq v1.8.0
	github.com/mattn/go-colorable v0.1.1
	github.com/mattn/go-isatty v0.0.5
	github.com/multiformats/go-multihash v0.0.14
	github.com/naoina/go-stringutil v0.1.0 // indirect
	github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416
	github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c
	github.com/onsi/ginkgo v1.7.0
	github.com/onsi/gomega v1.4.3
	github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222
	github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7
	github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150
	github.com/prometheus/client_golang v0.9.3
	github.com/prometheus/tsdb v0.7.1
	github.com/rjeczalik/notify v0.9.1
	github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00
	github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 // indirect
	github.com/sirupsen/logrus v1.7.0
	github.com/spf13/viper v1.7.1
	github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4
	github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570
	github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 // indirect
@@ -58,10 +70,10 @@ require (
	github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d
	github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef
	github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208
	golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
	golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8
	golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect
	golang.org/x/sync v0.0.0-20181108010431-42b317875d0f
	golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7
	golang.org/x/sync v0.0.0-20190423024810-112230192c58
	golang.org/x/sys v0.0.0-20191026070338-33540a1f6037
	golang.org/x/text v0.3.2
	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
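Among these go.mod changes, github.com/jmoiron/sqlx and github.com/lib/pq appear to be the additions that support the Postgres writing itself, alongside several github.com/ipfs/* and github.com/multiformats/* packages and routine version bumps (fatih/color, golang/protobuf, gorilla/websocket, hashicorp/golang-lru, prometheus/tsdb, golang.org/x/*).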
go.sum (338 lines changed: module checksum entries updated to match the go.mod dependency changes above)
@ -1,3 +1,16 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
|
||||
github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
|
||||
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
|
||||
@ -20,6 +33,7 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L
|
||||
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=
|
||||
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
|
||||
@ -33,9 +47,16 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847 h1:rtI0fD4oG/8eVokGVPYJEW1F88p1ZNgXiEIs9thEE4A=
|
||||
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/aws/aws-sdk-go v1.25.48 h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk=
|
||||
github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6 h1:Eey/GGQ/E5Xp1P2Lyx1qj007hLZfbi0+CoVeJruGCtI=
|
||||
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
|
||||
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
|
||||
@ -46,8 +67,14 @@ github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18 h1:pl4eWIqvFe/
|
||||
github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9 h1:J82+/8rub3qSy0HxEnoYD8cs+HDlHWYrqYXe2Vqxluk=
|
||||
github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
@ -71,50 +98,146 @@ github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa h1:XKAhUk/dtp+CV
|
||||
github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs=
|
||||
github.com/fatih/color v1.3.0 h1:YehCCcyeQ6Km0D6+IapqPinWBK6y+0eB5umvZXK9WPs=
|
||||
github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c=
|
||||
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
|
||||
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
|
||||
github.com/go-sourcemap/sourcemap v2.1.2+incompatible h1:0b/xya7BKGhXuqFESKM4oIiRo9WOt2ebz7KxfreD6ug=
|
||||
github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c h1:zqAKixg3cTcIasAMJV+EcfVbWwLpOZ7LeoWJvcuD/5Q=
|
||||
github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989 h1:giknQ4mEuDFmmHSrGcbargOuLHQGtywqo4mheITex54=
|
||||
github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277 h1:E0whKxgp2ojts0FDgUA8dl62bmH0LxKanMoBr6MDTDM=
|
||||
github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
|
||||
github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
|
||||
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
||||
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad h1:eMxs9EL0PvIGS9TTtxg4R+JxuPGav82J8rA+GFnY7po=
|
||||
github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
|
||||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3 h1:DqD8eigqlUm0+znmx7zhL0xvTW3+e1jCekJMfBUADWI=
|
||||
github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag=
|
||||
github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883 h1:FSeK4fZCo8u40n2JMnyAsd6x7+SbvoOMHvQOU/n10P4=
|
||||
github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
|
||||
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
|
||||
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
|
||||
github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE=
|
||||
github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY=
|
||||
github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
|
||||
github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
|
||||
github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
|
||||
github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY=
|
||||
github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
|
||||
github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
|
||||
github.com/ipfs/go-datastore v0.4.2 h1:h8/n7WPzhp239kkLws+epN3Ic7YtcBPgcaXfEfdVDWM=
|
||||
github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
|
||||
github.com/ipfs/go-ipfs-blockstore v1.0.1 h1:fnuVj4XdZp4yExhd0CnUwAiMNJHiPnfInhiuwz4lW1w=
|
||||
github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE=
|
||||
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
|
||||
github.com/ipfs/go-ipfs-ds-help v1.0.0 h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g=
|
||||
github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE=
|
||||
github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50=
|
||||
github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
|
||||
github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA=
|
||||
github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs=
|
||||
github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc=
|
||||
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
|
||||
github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg=
|
||||
github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw=
|
||||
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
|
||||
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21 h1:F/iKcka0K2LgnKy/fgSBf235AETtm1n1TvBzqu40LE0=
|
||||
github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356 h1:I/yrLt2WilKxlQKCM52clh5rGzTKpVctGT1lH4Dc8Jw=
|
||||
github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
@ -124,17 +247,64 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg=
|
||||
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
|
||||
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.0 h1:v2XXALHHh6zHfYTJ+cSkwtyffnaOyR1MXaA91mTrb8o=
|
||||
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
|
||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035 h1:USWjF42jDCSEeikX/G1g40ZWnsPXN5WkZ4jMHZWyBK4=
|
||||
github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
|
||||
github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
|
||||
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo=
|
||||
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
|
||||
github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc=
|
||||
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI=
|
||||
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
|
||||
github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4=
|
||||
github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
|
||||
github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
|
||||
github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk=
|
||||
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
|
||||
github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
|
||||
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
|
||||
github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I=
|
||||
github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
|
||||
github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg=
|
||||
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=
|
||||
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
|
||||
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0=
|
||||
@ -148,10 +318,14 @@ github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222 h1:goeTyGkArOZIVOMA0dQbyuPWGNQJZGPwPu/QS9GlpnA=
|
||||
github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
|
||||
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@ -159,23 +333,56 @@ github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150 h1:ZeU+auZj1iNzN8iVhff6M38Mfu73FQiJve/GEXYJBjE=
|
||||
github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
|
||||
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00 h1:8DPul/X0IT/1TNMIxoKLwdemEOBBHDC/K4EB16Cw5WE=
|
||||
github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 h1:3hxavr+IHMsQBrYUPQM5v0CgENFktkkbg1sfpgM3h20=
|
||||
github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9 h1:5Cp3cVwpQP4aCQ6jx6dNLP3IarbYiuStmIzYu+BjQwY=
|
||||
github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
|
||||
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
|
||||
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
|
||||
@ -183,54 +390,185 @@ github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8
|
||||
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=
|
||||
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
|
||||
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo=
|
||||
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM=
|
||||
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 h1:1cngl9mPEoITZG8s8cVcUy5CeIBYhEESkOB7m6Gmkrk=
|
||||
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
|
||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7 h1:LepdCS8Gf/MVejFIt8lsiexZATdoGVyp5bcyS+rYoUI=
|
||||
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772 h1:hhsSf/5z74Ck/DJYc+R8zpq8KGm7uJvpdLRQED/IedA=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/sourcemap.v1 v1.0.5 h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI=
|
||||
gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
|
||||
@ -21,6 +21,7 @@ import (

"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
. "github.com/ethereum/go-ethereum/statediff/types"
)

// APIName is the namespace used for the state diffing service API
@ -131,3 +132,8 @@ func (api *PublicStateDiffAPI) StreamCodeAndCodeHash(ctx context.Context, blockN

return rpcSub, nil
}

// WriteStateDiffAt writes a state diff object directly to the DB at the specified block height
func (api *PublicStateDiffAPI) WriteStateDiffAt(ctx context.Context, blockNumber uint64, params Params) error {
return api.sds.WriteStateDiffAt(blockNumber, params)
}
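
For reference, the new endpoint can be exercised over geth's JSON-RPC interface. The snippet below is a hedged sketch rather than part of this diff: it assumes the method is exposed under the statediff namespace as statediff_writeStateDiffAt (following geth's usual method-name mangling of the APIName above), and it reuses the Params fields that appear elsewhere in this PR.

package main

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/statediff"
)

func main() {
	// Connect to a node started with --statediff plus the new statediff.db flags.
	client, err := rpc.Dial("ws://127.0.0.1:8546")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	params := statediff.Params{
		IntermediateStateNodes:   true,
		IntermediateStorageNodes: true,
	}
	// Method name is assumed from the statediff namespace; ask the service to
	// build the diff for block 1,000,000 and write it straight to Postgres.
	if err := client.CallContext(context.Background(), nil, "statediff_writeStateDiffAt", uint64(1000000), params); err != nil {
		log.Fatal(err)
	}
}
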
@ -29,6 +29,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
. "github.com/ethereum/go-ethereum/statediff/types"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
)
|
||||
|
||||
@ -43,12 +44,55 @@ var (
|
||||
type Builder interface {
|
||||
BuildStateDiffObject(args Args, params Params) (StateObject, error)
|
||||
BuildStateTrieObject(current *types.Block) (StateObject, error)
|
||||
WriteStateDiffObject(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error
|
||||
}
|
||||
|
||||
type builder struct {
|
||||
stateCache state.Database
|
||||
}
|
||||
|
||||
func resolveNode(it trie.NodeIterator, trieDB *trie.Database) (StateNode, []interface{}, error) {
|
||||
nodePath := make([]byte, len(it.Path()))
|
||||
copy(nodePath, it.Path())
|
||||
node, err := trieDB.Node(it.Hash())
|
||||
if err != nil {
|
||||
return StateNode{}, nil, err
|
||||
}
|
||||
var nodeElements []interface{}
|
||||
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
|
||||
return StateNode{}, nil, err
|
||||
}
|
||||
ty, err := CheckKeyType(nodeElements)
|
||||
if err != nil {
|
||||
return StateNode{}, nil, err
|
||||
}
|
||||
return StateNode{
|
||||
NodeType: ty,
|
||||
Path: nodePath,
|
||||
NodeValue: node,
|
||||
}, nodeElements, nil
|
||||
}
|
||||
|
||||
// convenience appenders that collect the emitted nodes and code mappings into in-memory slices
|
||||
func stateNodeAppender(nodes *[]StateNode) StateNodeSink {
|
||||
return func(node StateNode) error {
|
||||
*nodes = append(*nodes, node)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
func storageNodeAppender(nodes *[]StorageNode) StorageNodeSink {
|
||||
return func(node StorageNode) error {
|
||||
*nodes = append(*nodes, node)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
func codeMappingAppender(codeAndCodeHashes *[]CodeAndCodeHash) CodeSink {
|
||||
return func(c CodeAndCodeHash) error {
|
||||
*codeAndCodeHashes = append(*codeAndCodeHashes, c)
|
||||
return nil
|
||||
}
|
||||
}
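
These appenders exist so the slice-returning entry points can reuse the new streaming interface. As a hedged illustration (not part of this diff), any function with the StateNodeSink signature can be supplied instead, for example one that counts emitted nodes without buffering them; it assumes the dot-imported types from statediff/types used throughout this file.

// countingStateNodeSink is a hypothetical sink: nodes are tallied and dropped
// as the builder emits them, instead of being appended to an in-memory slice.
func countingStateNodeSink(count *int) StateNodeSink {
	return func(node StateNode) error {
		*count++
		return nil
	}
}
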
|
||||
|
||||
// NewBuilder is used to create a statediff builder
|
||||
func NewBuilder(stateCache state.Database) Builder {
|
||||
return &builder{
|
||||
@ -80,44 +124,27 @@ func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]StateNode, []CodeAnd
|
||||
codeAndCodeHashes := make([]CodeAndCodeHash, 0)
|
||||
for it.Next(true) {
|
||||
// skip value nodes
|
||||
if it.Leaf() {
|
||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||
continue
|
||||
}
|
||||
if bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||
continue
|
||||
}
|
||||
nodePath := make([]byte, len(it.Path()))
|
||||
copy(nodePath, it.Path())
|
||||
node, err := sdb.stateCache.TrieDB().Node(it.Hash())
|
||||
node, nodeElements, err := resolveNode(it, sdb.stateCache.TrieDB())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
var nodeElements []interface{}
|
||||
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ty, err := CheckKeyType(nodeElements)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
switch ty {
|
||||
switch node.NodeType {
|
||||
case Leaf:
|
||||
var account state.Account
|
||||
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
|
||||
return nil, nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", nodePath, err)
|
||||
return nil, nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", node.Path, err)
|
||||
}
|
||||
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
|
||||
valueNodePath := append(nodePath, partialPath...)
|
||||
valueNodePath := append(node.Path, partialPath...)
|
||||
encodedPath := trie.HexToCompact(valueNodePath)
|
||||
leafKey := encodedPath[1:]
|
||||
node := StateNode{
|
||||
NodeType: ty,
|
||||
Path: nodePath,
|
||||
LeafKey: leafKey,
|
||||
NodeValue: node,
|
||||
}
|
||||
node.LeafKey = leafKey
|
||||
if !bytes.Equal(account.CodeHash, nullCodeHash) {
|
||||
storageNodes, err := sdb.buildStorageNodesEventual(account.Root, nil, true)
|
||||
var storageNodes []StorageNode
|
||||
err := sdb.buildStorageNodesEventual(account.Root, nil, true, storageNodeAppender(&storageNodes))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed building eventual storage diffs for account %+v\r\nerror: %v", account, err)
|
||||
}
|
||||
@ -135,13 +162,9 @@ func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]StateNode, []CodeAnd
|
||||
}
|
||||
stateNodes = append(stateNodes, node)
|
||||
case Extension, Branch:
|
||||
stateNodes = append(stateNodes, StateNode{
|
||||
NodeType: ty,
|
||||
Path: nodePath,
|
||||
NodeValue: node,
|
||||
})
|
||||
stateNodes = append(stateNodes, node)
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("unexpected node type %s", ty)
|
||||
return nil, nil, fmt.Errorf("unexpected node type %s", node.NodeType)
|
||||
}
|
||||
}
|
||||
return stateNodes, codeAndCodeHashes, it.Error()
|
||||
@ -149,36 +172,60 @@ func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]StateNode, []CodeAnd
|
||||
|
||||
// BuildStateDiffObject builds a statediff object from two blocks and the provided parameters
|
||||
func (sdb *builder) BuildStateDiffObject(args Args, params Params) (StateObject, error) {
|
||||
if !params.IntermediateStateNodes || len(params.WatchedAddresses) > 0 { // if we are watching only specific accounts then we are only diffing leaf nodes
|
||||
return sdb.buildStateDiffWithoutIntermediateStateNodes(args, params)
|
||||
var stateNodes []StateNode
|
||||
var codeAndCodeHashes []CodeAndCodeHash
|
||||
err := sdb.WriteStateDiffObject(
|
||||
StateRoots{OldStateRoot: args.OldStateRoot, NewStateRoot: args.NewStateRoot},
|
||||
params, stateNodeAppender(&stateNodes), codeMappingAppender(&codeAndCodeHashes))
|
||||
if err != nil {
|
||||
return StateObject{}, err
|
||||
}
|
||||
return sdb.buildStateDiffWithIntermediateStateNodes(args, params)
|
||||
return StateObject{
|
||||
BlockHash: args.BlockHash,
|
||||
BlockNumber: args.BlockNumber,
|
||||
Nodes: stateNodes,
|
||||
CodeAndCodeHashes: codeAndCodeHashes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args Args, params Params) (StateObject, error) {
|
||||
// WriteStateDiffObject writes a statediff object to the provided output sinks
|
||||
func (sdb *builder) WriteStateDiffObject(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error {
|
||||
if !params.IntermediateStateNodes || len(params.WatchedAddresses) > 0 {
|
||||
// if we are watching only specific accounts then we are only diffing leaf nodes
|
||||
return sdb.buildStateDiffWithoutIntermediateStateNodes(args, params, output, codeOutput)
|
||||
} else {
|
||||
return sdb.buildStateDiffWithIntermediateStateNodes(args, params, output, codeOutput)
|
||||
}
|
||||
}
|
||||
|
||||
func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error {
|
||||
// Load tries for old and new states
|
||||
oldTrie, err := sdb.stateCache.OpenTrie(args.OldStateRoot)
|
||||
if err != nil {
|
||||
return StateObject{}, fmt.Errorf("error creating trie for oldStateRoot: %v", err)
|
||||
return fmt.Errorf("error creating trie for oldStateRoot: %v", err)
|
||||
}
|
||||
newTrie, err := sdb.stateCache.OpenTrie(args.NewStateRoot)
|
||||
if err != nil {
|
||||
return StateObject{}, fmt.Errorf("error creating trie for newStateRoot: %v", err)
|
||||
return fmt.Errorf("error creating trie for newStateRoot: %v", err)
|
||||
}
|
||||
|
||||
// collect a slice of all the intermediate nodes that were touched and exist at B
|
||||
// a map of their leafkey to all the accounts that were touched and exist at B
|
||||
// and a slice of all the paths for the nodes in both of the above sets
|
||||
createdOrUpdatedIntermediateNodes, diffAccountsAtB, diffPathsAtB, err := sdb.createdAndUpdatedStateWithIntermediateNodes(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}))
|
||||
diffAccountsAtB, diffPathsAtB, err := sdb.createdAndUpdatedStateWithIntermediateNodes(
|
||||
oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
|
||||
output)
|
||||
if err != nil {
|
||||
return StateObject{}, fmt.Errorf("error collecting createdAndUpdatedNodes: %v", err)
|
||||
return fmt.Errorf("error collecting createdAndUpdatedNodes: %v", err)
|
||||
}
|
||||
|
||||
// collect a slice of all the nodes that existed at a path in A that doesn't exist in B
|
||||
// a map of their leafkey to all the accounts that were touched and exist at A
|
||||
emptiedPaths, diffAccountsAtA, err := sdb.deletedOrUpdatedState(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}), diffPathsAtB)
|
||||
diffAccountsAtA, err := sdb.deletedOrUpdatedState(
|
||||
oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
|
||||
diffPathsAtB, output)
|
||||
if err != nil {
|
||||
return StateObject{}, fmt.Errorf("error collecting deletedOrUpdatedNodes: %v", err)
|
||||
return fmt.Errorf("error collecting deletedOrUpdatedNodes: %v", err)
|
||||
}
|
||||
|
||||
// collect and sort the leafkey keys for both account mappings into a slice
|
||||
@ -192,48 +239,47 @@ func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args Args, params P
|
||||
updatedKeys := findIntersection(createKeys, deleteKeys)
|
||||
|
||||
// build the diff nodes for the updated accounts using the mappings at both A and B as directed by the keys found as the intersection of the two
|
||||
updatedAccounts, err := sdb.buildAccountUpdates(diffAccountsAtB, diffAccountsAtA, updatedKeys, params.WatchedStorageSlots, params.IntermediateStorageNodes)
|
||||
err = sdb.buildAccountUpdates(
|
||||
diffAccountsAtB, diffAccountsAtA, updatedKeys,
|
||||
params.WatchedStorageSlots, params.IntermediateStorageNodes, output)
|
||||
if err != nil {
|
||||
return StateObject{}, fmt.Errorf("error building diff for updated accounts: %v", err)
|
||||
return fmt.Errorf("error building diff for updated accounts: %v", err)
|
||||
}
|
||||
// build the diff nodes for created accounts
|
||||
createdAccounts, codeAndCodeHashes, err := sdb.buildAccountCreations(diffAccountsAtB, params.WatchedStorageSlots, params.IntermediateStorageNodes)
|
||||
err = sdb.buildAccountCreations(diffAccountsAtB, params.WatchedStorageSlots, params.IntermediateStorageNodes, output, codeOutput)
|
||||
if err != nil {
|
||||
return StateObject{}, fmt.Errorf("error building diff for created accounts: %v", err)
|
||||
return fmt.Errorf("error building diff for created accounts: %v", err)
|
||||
}
|
||||
|
||||
// assemble all of the nodes into the statediff object, including the intermediate nodes
|
||||
return StateObject{
|
||||
BlockNumber: args.BlockNumber,
|
||||
BlockHash: args.BlockHash,
|
||||
Nodes: append(append(append(updatedAccounts, createdAccounts...), createdOrUpdatedIntermediateNodes...), emptiedPaths...),
|
||||
CodeAndCodeHashes: codeAndCodeHashes,
|
||||
}, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args Args, params Params) (StateObject, error) {
|
||||
func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error {
|
||||
// Load tries for old (A) and new (B) states
|
||||
oldTrie, err := sdb.stateCache.OpenTrie(args.OldStateRoot)
|
||||
if err != nil {
|
||||
return StateObject{}, fmt.Errorf("error creating trie for oldStateRoot: %v", err)
|
||||
return fmt.Errorf("error creating trie for oldStateRoot: %v", err)
|
||||
}
|
||||
newTrie, err := sdb.stateCache.OpenTrie(args.NewStateRoot)
|
||||
if err != nil {
|
||||
return StateObject{}, fmt.Errorf("error creating trie for newStateRoot: %v", err)
|
||||
return fmt.Errorf("error creating trie for newStateRoot: %v", err)
|
||||
}
|
||||
|
||||
// collect a map of their leafkey to all the accounts that were touched and exist at B
|
||||
// and a slice of all the paths for the nodes in both of the above sets
|
||||
diffAccountsAtB, diffPathsAtB, err := sdb.createdAndUpdatedState(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}), params.WatchedAddresses)
|
||||
diffAccountsAtB, diffPathsAtB, err := sdb.createdAndUpdatedState(
|
||||
oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
|
||||
params.WatchedAddresses)
|
||||
if err != nil {
|
||||
return StateObject{}, fmt.Errorf("error collecting createdAndUpdatedNodes: %v", err)
|
||||
return fmt.Errorf("error collecting createdAndUpdatedNodes: %v", err)
|
||||
}
|
||||
|
||||
// collect a slice of all the nodes that existed at a path in A that doesn't exist in B
|
||||
// a map of their leafkey to all the accounts that were touched and exist at A
|
||||
emptiedPaths, diffAccountsAtA, err := sdb.deletedOrUpdatedState(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}), diffPathsAtB)
|
||||
diffAccountsAtA, err := sdb.deletedOrUpdatedState(
|
||||
oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
|
||||
diffPathsAtB, output)
|
||||
if err != nil {
|
||||
return StateObject{}, fmt.Errorf("error collecting deletedOrUpdatedNodes: %v", err)
|
||||
return fmt.Errorf("error collecting deletedOrUpdatedNodes: %v", err)
|
||||
}
|
||||
|
||||
// collect and sort the leafkeys for both account mappings into a slice
|
||||
@ -247,23 +293,18 @@ func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args Args, param
|
||||
updatedKeys := findIntersection(createKeys, deleteKeys)
|
||||
|
||||
// build the diff nodes for the updated accounts using the mappings at both A and B as directed by the keys found as the intersection of the two
|
||||
updatedAccounts, err := sdb.buildAccountUpdates(diffAccountsAtB, diffAccountsAtA, updatedKeys, params.WatchedStorageSlots, params.IntermediateStorageNodes)
|
||||
err = sdb.buildAccountUpdates(
|
||||
diffAccountsAtB, diffAccountsAtA, updatedKeys,
|
||||
params.WatchedStorageSlots, params.IntermediateStorageNodes, output)
|
||||
if err != nil {
|
||||
return StateObject{}, fmt.Errorf("error building diff for updated accounts: %v", err)
|
||||
return fmt.Errorf("error building diff for updated accounts: %v", err)
|
||||
}
|
||||
// build the diff nodes for created accounts
|
||||
createdAccounts, codeAndCodeHashes, err := sdb.buildAccountCreations(diffAccountsAtB, params.WatchedStorageSlots, params.IntermediateStorageNodes)
|
||||
err = sdb.buildAccountCreations(diffAccountsAtB, params.WatchedStorageSlots, params.IntermediateStorageNodes, output, codeOutput)
|
||||
if err != nil {
|
||||
return StateObject{}, fmt.Errorf("error building diff for created accounts: %v", err)
|
||||
return fmt.Errorf("error building diff for created accounts: %v", err)
|
||||
}
|
||||
|
||||
// assemble all of the nodes into the statediff object
|
||||
return StateObject{
|
||||
BlockNumber: args.BlockNumber,
|
||||
BlockHash: args.BlockHash,
|
||||
Nodes: append(append(updatedAccounts, createdAccounts...), emptiedPaths...),
|
||||
CodeAndCodeHashes: codeAndCodeHashes,
|
||||
}, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// createdAndUpdatedState returns
|
||||
@ -275,49 +316,36 @@ func (sdb *builder) createdAndUpdatedState(a, b trie.NodeIterator, watchedAddres
|
||||
it, _ := trie.NewDifferenceIterator(a, b)
|
||||
for it.Next(true) {
|
||||
// skip value nodes
|
||||
if it.Leaf() {
|
||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||
continue
|
||||
}
|
||||
if bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||
continue
|
||||
}
|
||||
nodePath := make([]byte, len(it.Path()))
|
||||
copy(nodePath, it.Path())
|
||||
node, err := sdb.stateCache.TrieDB().Node(it.Hash())
|
||||
node, nodeElements, err := resolveNode(it, sdb.stateCache.TrieDB())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
var nodeElements []interface{}
|
||||
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ty, err := CheckKeyType(nodeElements)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if ty == Leaf {
|
||||
if node.NodeType == Leaf {
|
||||
// created vs updated is important for leaf nodes since we need to diff their storage
|
||||
// so we need to map all changed accounts at B to their leafkey, since account can change paths but not leafkey
|
||||
var account state.Account
|
||||
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
|
||||
return nil, nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", nodePath, err)
|
||||
return nil, nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", node.Path, err)
|
||||
}
|
||||
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
|
||||
valueNodePath := append(nodePath, partialPath...)
|
||||
valueNodePath := append(node.Path, partialPath...)
|
||||
encodedPath := trie.HexToCompact(valueNodePath)
|
||||
leafKey := encodedPath[1:]
|
||||
if isWatchedAddress(watchedAddresses, leafKey) {
|
||||
diffAcountsAtB[common.Bytes2Hex(leafKey)] = accountWrapper{
|
||||
NodeType: ty,
|
||||
Path: nodePath,
|
||||
NodeValue: node,
|
||||
NodeType: node.NodeType,
|
||||
Path: node.Path,
|
||||
NodeValue: node.NodeValue,
|
||||
LeafKey: leafKey,
|
||||
Account: &account,
|
||||
}
|
||||
}
|
||||
}
|
||||
// add both intermediate and leaf node paths to the list of diffPathsAtB
|
||||
diffPathsAtB[common.Bytes2Hex(nodePath)] = true
|
||||
diffPathsAtB[common.Bytes2Hex(node.Path)] = true
|
||||
}
|
||||
return diffAcountsAtB, diffPathsAtB, it.Error()
|
||||
}
|
||||
@ -326,140 +354,116 @@ func (sdb *builder) createdAndUpdatedState(a, b trie.NodeIterator, watchedAddres
|
||||
// a slice of all the intermediate nodes that exist in a different state at B than A
|
||||
// a mapping of their leafkeys to all the accounts that exist in a different state at B than A
|
||||
// and a slice of the paths for all of the nodes included in both
|
||||
func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIterator) ([]StateNode, AccountMap, map[string]bool, error) {
|
||||
createdOrUpdatedIntermediateNodes := make([]StateNode, 0)
|
||||
func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIterator, output StateNodeSink) (AccountMap, map[string]bool, error) {
|
||||
diffPathsAtB := make(map[string]bool)
|
||||
diffAcountsAtB := make(AccountMap)
|
||||
it, _ := trie.NewDifferenceIterator(a, b)
|
||||
for it.Next(true) {
|
||||
// skip value nodes
|
||||
if it.Leaf() {
|
||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||
continue
|
||||
}
|
||||
if bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||
continue
|
||||
}
|
||||
nodePath := make([]byte, len(it.Path()))
|
||||
copy(nodePath, it.Path())
|
||||
node, err := sdb.stateCache.TrieDB().Node(it.Hash())
|
||||
node, nodeElements, err := resolveNode(it, sdb.stateCache.TrieDB())
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
var nodeElements []interface{}
|
||||
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
ty, err := CheckKeyType(nodeElements)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
switch ty {
|
||||
switch node.NodeType {
|
||||
case Leaf:
|
||||
// created vs updated is important for leaf nodes since we need to diff their storage
|
||||
// so we need to map all changed accounts at B to their leafkey, since account can change pathes but not leafkey
|
||||
// so we need to map all changed accounts at B to their leafkey, since account can change paths but not leafkey
|
||||
var account state.Account
|
||||
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", nodePath, err)
|
||||
return nil, nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", node.Path, err)
|
||||
}
|
||||
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
|
||||
valueNodePath := append(nodePath, partialPath...)
|
||||
valueNodePath := append(node.Path, partialPath...)
|
||||
encodedPath := trie.HexToCompact(valueNodePath)
|
||||
leafKey := encodedPath[1:]
|
||||
diffAcountsAtB[common.Bytes2Hex(leafKey)] = accountWrapper{
|
||||
NodeType: ty,
|
||||
Path: nodePath,
|
||||
NodeValue: node,
|
||||
NodeType: node.NodeType,
|
||||
Path: node.Path,
|
||||
NodeValue: node.NodeValue,
|
||||
LeafKey: leafKey,
|
||||
Account: &account,
|
||||
}
|
||||
case Extension, Branch:
|
||||
// create a diff for any intermediate node that has changed at b
|
||||
// created vs updated makes no difference for intermediate nodes since we do not need to diff storage
|
||||
createdOrUpdatedIntermediateNodes = append(createdOrUpdatedIntermediateNodes, StateNode{
|
||||
NodeType: ty,
|
||||
Path: nodePath,
|
||||
NodeValue: node,
|
||||
})
|
||||
if err := output(StateNode{
|
||||
NodeType: node.NodeType,
|
||||
Path: node.Path,
|
||||
NodeValue: node.NodeValue,
|
||||
}); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
default:
|
||||
return nil, nil, nil, fmt.Errorf("unexpected node type %s", ty)
|
||||
return nil, nil, fmt.Errorf("unexpected node type %s", node.NodeType)
|
||||
}
|
||||
// add both intermediate and leaf node paths to the list of diffPathsAtB
|
||||
diffPathsAtB[common.Bytes2Hex(nodePath)] = true
|
||||
diffPathsAtB[common.Bytes2Hex(node.Path)] = true
|
||||
}
|
||||
return createdOrUpdatedIntermediateNodes, diffAcountsAtB, diffPathsAtB, it.Error()
|
||||
return diffAcountsAtB, diffPathsAtB, it.Error()
|
||||
}
|
||||
|
||||
// deletedOrUpdatedState returns a slice of all the paths that are emptied at B
|
||||
// and a mapping of their leafkeys to all the accounts that exist in a different state at A than B
|
||||
func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB map[string]bool) ([]StateNode, AccountMap, error) {
|
||||
emptiedPaths := make([]StateNode, 0)
|
||||
func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB map[string]bool, output StateNodeSink) (AccountMap, error) {
|
||||
diffAccountAtA := make(AccountMap)
|
||||
it, _ := trie.NewDifferenceIterator(b, a)
|
||||
for it.Next(true) {
|
||||
// skip value nodes
|
||||
if it.Leaf() {
|
||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||
continue
|
||||
}
|
||||
if bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||
continue
|
||||
node, nodeElements, err := resolveNode(it, sdb.stateCache.TrieDB())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodePath := make([]byte, len(it.Path()))
|
||||
copy(nodePath, it.Path())
|
||||
// if this nodePath did not show up in diffPathsAtB
|
||||
// if this node's path did not show up in diffPathsAtB
|
||||
// that means the node at this path was deleted (or moved) in B
|
||||
// emit an empty "removed" diff to signify as such
|
||||
if _, ok := diffPathsAtB[common.Bytes2Hex(nodePath)]; !ok {
|
||||
emptiedPaths = append(emptiedPaths, StateNode{
|
||||
Path: nodePath,
|
||||
if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok {
|
||||
if err := output(StateNode{
|
||||
Path: node.Path,
|
||||
NodeValue: []byte{},
|
||||
NodeType: Removed,
|
||||
})
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
node, err := sdb.stateCache.TrieDB().Node(it.Hash())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
var nodeElements []interface{}
|
||||
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ty, err := CheckKeyType(nodeElements)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
switch ty {
|
||||
switch node.NodeType {
|
||||
case Leaf:
|
||||
// map all different accounts at A to their leafkey
|
||||
var account state.Account
|
||||
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
|
||||
return nil, nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", nodePath, err)
|
||||
return nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", node.Path, err)
|
||||
}
|
||||
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
|
||||
valueNodePath := append(nodePath, partialPath...)
|
||||
valueNodePath := append(node.Path, partialPath...)
|
||||
encodedPath := trie.HexToCompact(valueNodePath)
|
||||
leafKey := encodedPath[1:]
|
||||
diffAccountAtA[common.Bytes2Hex(leafKey)] = accountWrapper{
|
||||
NodeType: ty,
|
||||
Path: nodePath,
|
||||
NodeValue: node,
|
||||
NodeType: node.NodeType,
|
||||
Path: node.Path,
|
||||
NodeValue: node.NodeValue,
|
||||
LeafKey: leafKey,
|
||||
Account: &account,
|
||||
}
|
||||
case Extension, Branch:
|
||||
// fall through, we did everything we need to do with these node types
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("unexpected node type %s", ty)
|
||||
return nil, fmt.Errorf("unexpected node type %s", node.NodeType)
|
||||
}
|
||||
}
|
||||
return emptiedPaths, diffAccountAtA, it.Error()
|
||||
return diffAccountAtA, it.Error()
|
||||
}
|
||||
|
||||
// buildAccountUpdates uses the account diffs maps for A => B and B => A and the known intersection of their leafkeys
|
||||
// to generate the statediff node objects for all of the accounts that existed at both A and B but in different states
|
||||
// needs to be called before building account creations and deletions as this mutates
|
||||
// those account maps to remove the accounts which were updated
|
||||
func (sdb *builder) buildAccountUpdates(creations, deletions AccountMap, updatedKeys []string, watchedStorageKeys []common.Hash, intermediateStorageNodes bool) ([]StateNode, error) {
|
||||
updatedAccounts := make([]StateNode, 0, len(updatedKeys))
|
||||
func (sdb *builder) buildAccountUpdates(creations, deletions AccountMap, updatedKeys []string,
|
||||
watchedStorageKeys []common.Hash, intermediateStorageNodes bool, output StateNodeSink) error {
|
||||
var err error
|
||||
for _, key := range updatedKeys {
|
||||
createdAcc := creations[key]
|
||||
@ -468,30 +472,32 @@ func (sdb *builder) buildAccountUpdates(creations, deletions AccountMap, updated
|
||||
if deletedAcc.Account != nil && createdAcc.Account != nil {
|
||||
oldSR := deletedAcc.Account.Root
|
||||
newSR := createdAcc.Account.Root
|
||||
storageDiffs, err = sdb.buildStorageNodesIncremental(oldSR, newSR, watchedStorageKeys, intermediateStorageNodes)
|
||||
err = sdb.buildStorageNodesIncremental(
|
||||
oldSR, newSR, watchedStorageKeys, intermediateStorageNodes,
|
||||
storageNodeAppender(&storageDiffs))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed building incremental storage diffs for account with leafkey %s\r\nerror: %v", key, err)
|
||||
return fmt.Errorf("failed building incremental storage diffs for account with leafkey %s\r\nerror: %v", key, err)
|
||||
}
|
||||
}
|
||||
updatedAccounts = append(updatedAccounts, StateNode{
|
||||
if err = output(StateNode{
|
||||
NodeType: createdAcc.NodeType,
|
||||
Path: createdAcc.Path,
|
||||
NodeValue: createdAcc.NodeValue,
|
||||
LeafKey: createdAcc.LeafKey,
|
||||
StorageNodes: storageDiffs,
|
||||
})
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
delete(creations, key)
|
||||
delete(deletions, key)
|
||||
}
|
||||
|
||||
return updatedAccounts, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildAccountCreations returns the statediff node objects for all the accounts that exist at B but not at A
|
||||
// it also returns the code and codehash for created contract accounts
|
||||
func (sdb *builder) buildAccountCreations(accounts AccountMap, watchedStorageKeys []common.Hash, intermediateStorageNodes bool) ([]StateNode, []CodeAndCodeHash, error) {
|
||||
accountDiffs := make([]StateNode, 0, len(accounts))
|
||||
codeAndCodeHashes := make([]CodeAndCodeHash, 0)
|
||||
func (sdb *builder) buildAccountCreations(accounts AccountMap, watchedStorageKeys []common.Hash, intermediateStorageNodes bool, output StateNodeSink, codeOutput CodeSink) error {
|
||||
for _, val := range accounts {
|
||||
diff := StateNode{
|
||||
NodeType: val.NodeType,
|
||||
@ -501,239 +507,221 @@ func (sdb *builder) buildAccountCreations(accounts AccountMap, watchedStorageKey
|
||||
}
|
||||
if !bytes.Equal(val.Account.CodeHash, nullCodeHash) {
|
||||
// For contract creations, any storage node contained is a diff
|
||||
storageDiffs, err := sdb.buildStorageNodesEventual(val.Account.Root, watchedStorageKeys, intermediateStorageNodes)
|
||||
var storageDiffs []StorageNode
|
||||
err := sdb.buildStorageNodesEventual(val.Account.Root, watchedStorageKeys, intermediateStorageNodes, storageNodeAppender(&storageDiffs))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed building eventual storage diffs for node %x\r\nerror: %v", val.Path, err)
|
||||
return fmt.Errorf("failed building eventual storage diffs for node %x\r\nerror: %v", val.Path, err)
|
||||
}
|
||||
diff.StorageNodes = storageDiffs
|
||||
// emit codehash => code mappings for code
|
||||
codeHash := common.BytesToHash(val.Account.CodeHash)
|
||||
code, err := sdb.stateCache.ContractCode(common.Hash{}, codeHash)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to retrieve code for codehash %s\r\n error: %v", codeHash.String(), err)
|
||||
return fmt.Errorf("failed to retrieve code for codehash %s\r\n error: %v", codeHash.String(), err)
|
||||
}
|
||||
codeAndCodeHashes = append(codeAndCodeHashes, CodeAndCodeHash{
|
||||
if err := codeOutput(CodeAndCodeHash{
|
||||
Hash: codeHash,
|
||||
Code: code,
|
||||
})
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := output(diff); err != nil {
|
||||
return err
|
||||
}
|
||||
accountDiffs = append(accountDiffs, diff)
|
||||
}
|
||||
|
||||
return accountDiffs, codeAndCodeHashes, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildStorageNodesEventual builds the storage diff node objects for a created account
|
||||
// i.e. it returns all the storage nodes at this state, since there is no previous state
|
||||
func (sdb *builder) buildStorageNodesEventual(sr common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool) ([]StorageNode, error) {
|
||||
func (sdb *builder) buildStorageNodesEventual(sr common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error {
|
||||
if bytes.Equal(sr.Bytes(), emptyContractRoot.Bytes()) {
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
log.Debug("Storage Root For Eventual Diff", "root", sr.Hex())
|
||||
sTrie, err := sdb.stateCache.OpenTrie(sr)
|
||||
if err != nil {
|
||||
log.Info("error in build storage diff eventual", "error", err)
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
it := sTrie.NodeIterator(make([]byte, 0))
|
||||
return sdb.buildStorageNodesFromTrie(it, watchedStorageKeys, intermediateNodes)
|
||||
err = sdb.buildStorageNodesFromTrie(it, watchedStorageKeys, intermediateNodes, output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildStorageNodesFromTrie returns all the storage diff node objects in the provided node iterator
|
||||
// if any storage keys are provided it will only return those leaf nodes
|
||||
// including intermediate nodes can be turned on or off
|
||||
func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, watchedStorageKeys []common.Hash, intermediateNodes bool) ([]StorageNode, error) {
|
||||
storageDiffs := make([]StorageNode, 0)
|
||||
func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, watchedStorageKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error {
|
||||
for it.Next(true) {
|
||||
// skip value nodes
|
||||
if it.Leaf() {
|
||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||
continue
|
||||
}
|
||||
if bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||
continue
|
||||
}
|
||||
nodePath := make([]byte, len(it.Path()))
|
||||
copy(nodePath, it.Path())
|
||||
node, err := sdb.stateCache.TrieDB().Node(it.Hash())
|
||||
node, nodeElements, err := resolveNode(it, sdb.stateCache.TrieDB())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
var nodeElements []interface{}
|
||||
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ty, err := CheckKeyType(nodeElements)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch ty {
|
||||
switch node.NodeType {
|
||||
case Leaf:
|
||||
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
|
||||
valueNodePath := append(nodePath, partialPath...)
|
||||
valueNodePath := append(node.Path, partialPath...)
|
||||
encodedPath := trie.HexToCompact(valueNodePath)
|
||||
leafKey := encodedPath[1:]
|
||||
if isWatchedStorageKey(watchedStorageKeys, leafKey) {
|
||||
storageDiffs = append(storageDiffs, StorageNode{
|
||||
NodeType: ty,
|
||||
Path: nodePath,
|
||||
NodeValue: node,
|
||||
if err := output(StorageNode{
|
||||
NodeType: node.NodeType,
|
||||
Path: node.Path,
|
||||
NodeValue: node.NodeValue,
|
||||
LeafKey: leafKey,
|
||||
})
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case Extension, Branch:
|
||||
if intermediateNodes {
|
||||
storageDiffs = append(storageDiffs, StorageNode{
|
||||
NodeType: ty,
|
||||
Path: nodePath,
|
||||
NodeValue: node,
|
||||
})
|
||||
if err := output(StorageNode{
|
||||
NodeType: node.NodeType,
|
||||
Path: node.Path,
|
||||
NodeValue: node.NodeValue,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected node type %s", ty)
|
||||
return fmt.Errorf("unexpected node type %s", node.NodeType)
|
||||
}
|
||||
}
|
||||
return storageDiffs, it.Error()
|
||||
return it.Error()
|
||||
}
|
||||
|
||||
// buildStorageNodesIncremental builds the storage diff node objects for all nodes that exist in a different state at B than A
|
||||
func (sdb *builder) buildStorageNodesIncremental(oldSR common.Hash, newSR common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool) ([]StorageNode, error) {
|
||||
func (sdb *builder) buildStorageNodesIncremental(oldSR common.Hash, newSR common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error {
|
||||
if bytes.Equal(newSR.Bytes(), oldSR.Bytes()) {
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
log.Debug("Storage Roots for Incremental Diff", "old", oldSR.Hex(), "new", newSR.Hex())
|
||||
oldTrie, err := sdb.stateCache.OpenTrie(oldSR)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
newTrie, err := sdb.stateCache.OpenTrie(newSR)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
createdOrUpdatedStorage, diffPathsAtB, err := sdb.createdAndUpdatedStorage(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}), watchedStorageKeys, intermediateNodes)
|
||||
diffPathsAtB, err := sdb.createdAndUpdatedStorage(
|
||||
oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
|
||||
watchedStorageKeys, intermediateNodes, output)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
deletedStorage, err := sdb.deletedOrUpdatedStorage(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}), diffPathsAtB, watchedStorageKeys, intermediateNodes)
|
||||
err = sdb.deletedOrUpdatedStorage(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
|
||||
diffPathsAtB, watchedStorageKeys, intermediateNodes, output)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
return append(createdOrUpdatedStorage, deletedStorage...), nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys []common.Hash, intermediateNodes bool) ([]StorageNode, map[string]bool, error) {
|
||||
createdOrUpdatedStorage := make([]StorageNode, 0)
|
||||
func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) (map[string]bool, error) {
|
||||
diffPathsAtB := make(map[string]bool)
|
||||
it, _ := trie.NewDifferenceIterator(a, b)
|
||||
for it.Next(true) {
|
||||
// skip value nodes
|
||||
if it.Leaf() {
|
||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||
continue
|
||||
}
|
||||
if bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||
continue
|
||||
}
|
||||
nodePath := make([]byte, len(it.Path()))
|
||||
copy(nodePath, it.Path())
|
||||
node, err := sdb.stateCache.TrieDB().Node(it.Hash())
|
||||
node, nodeElements, err := resolveNode(it, sdb.stateCache.TrieDB())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
var nodeElements []interface{}
|
||||
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ty, err := CheckKeyType(nodeElements)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
switch ty {
|
||||
switch node.NodeType {
|
||||
case Leaf:
|
||||
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
|
||||
valueNodePath := append(nodePath, partialPath...)
|
||||
valueNodePath := append(node.Path, partialPath...)
|
||||
encodedPath := trie.HexToCompact(valueNodePath)
|
||||
leafKey := encodedPath[1:]
|
||||
if isWatchedStorageKey(watchedKeys, leafKey) {
|
||||
createdOrUpdatedStorage = append(createdOrUpdatedStorage, StorageNode{
|
||||
NodeType: ty,
|
||||
Path: nodePath,
|
||||
NodeValue: node,
|
||||
if err := output(StorageNode{
|
||||
NodeType: node.NodeType,
|
||||
Path: node.Path,
|
||||
NodeValue: node.NodeValue,
|
||||
LeafKey: leafKey,
|
||||
})
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
case Extension, Branch:
|
||||
if intermediateNodes {
|
||||
createdOrUpdatedStorage = append(createdOrUpdatedStorage, StorageNode{
|
||||
NodeType: ty,
|
||||
Path: nodePath,
|
||||
NodeValue: node,
|
||||
})
|
||||
if err := output(StorageNode{
|
||||
NodeType: node.NodeType,
|
||||
Path: node.Path,
|
||||
NodeValue: node.NodeValue,
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("unexpected node type %s", ty)
|
||||
return nil, fmt.Errorf("unexpected node type %s", node.NodeType)
|
||||
}
|
||||
diffPathsAtB[common.Bytes2Hex(nodePath)] = true
|
||||
diffPathsAtB[common.Bytes2Hex(node.Path)] = true
|
||||
}
|
||||
return createdOrUpdatedStorage, diffPathsAtB, it.Error()
|
||||
return diffPathsAtB, it.Error()
|
||||
}
|
||||
|
||||
func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB map[string]bool, watchedKeys []common.Hash, intermediateNodes bool) ([]StorageNode, error) {
|
||||
deletedStorage := make([]StorageNode, 0)
|
||||
func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB map[string]bool, watchedKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error {
|
||||
it, _ := trie.NewDifferenceIterator(b, a)
|
||||
for it.Next(true) {
|
||||
// skip value nodes
|
||||
if it.Leaf() {
|
||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||
continue
|
||||
}
|
||||
if bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||
continue
|
||||
node, nodeElements, err := resolveNode(it, sdb.stateCache.TrieDB())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nodePath := make([]byte, len(it.Path()))
|
||||
copy(nodePath, it.Path())
|
||||
// if this node path showed up in diffPathsAtB
|
||||
// that means this node was updated at B and we already have the updated diff for it
|
||||
// otherwise that means this node was deleted in B and we need to add a "removed" diff to represent that event
|
||||
if _, ok := diffPathsAtB[common.Bytes2Hex(nodePath)]; ok {
|
||||
if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; ok {
|
||||
continue
|
||||
}
|
||||
node, err := sdb.stateCache.TrieDB().Node(it.Hash())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var nodeElements []interface{}
|
||||
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ty, err := CheckKeyType(nodeElements)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch ty {
|
||||
switch node.NodeType {
|
||||
case Leaf:
|
||||
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
|
||||
valueNodePath := append(nodePath, partialPath...)
|
||||
valueNodePath := append(node.Path, partialPath...)
|
||||
encodedPath := trie.HexToCompact(valueNodePath)
|
||||
leafKey := encodedPath[1:]
|
||||
if isWatchedStorageKey(watchedKeys, leafKey) {
|
||||
deletedStorage = append(deletedStorage, StorageNode{
|
||||
if err := output(StorageNode{
|
||||
NodeType: Removed,
|
||||
Path: nodePath,
|
||||
Path: node.Path,
|
||||
NodeValue: []byte{},
|
||||
})
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case Extension, Branch:
|
||||
if intermediateNodes {
|
||||
deletedStorage = append(deletedStorage, StorageNode{
|
||||
if err := output(StorageNode{
|
||||
NodeType: Removed,
|
||||
Path: nodePath,
|
||||
Path: node.Path,
|
||||
NodeValue: []byte{},
|
||||
})
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected node type %s", ty)
|
||||
return fmt.Errorf("unexpected node type %s", node.NodeType)
|
||||
}
|
||||
}
|
||||
return deletedStorage, it.Error()
|
||||
return it.Error()
|
||||
}
|
||||
|
||||
// isWatchedAddress is used to check if a state account corresponds to one of the addresses the builder is configured to watch
|
||||
|
||||
@ -23,6 +23,8 @@ import (
"fmt"
"sort"
"strings"

sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)

func sortKeys(data AccountMap) []string {
@ -74,23 +76,23 @@ func findIntersection(a, b []string) []string {
}

// CheckKeyType checks what type of key we have
func CheckKeyType(elements []interface{}) (NodeType, error) {
func CheckKeyType(elements []interface{}) (sdtypes.NodeType, error) {
if len(elements) > 2 {
return Branch, nil
return sdtypes.Branch, nil
}
if len(elements) < 2 {
return Unknown, fmt.Errorf("node cannot be less than two elements in length")
return sdtypes.Unknown, fmt.Errorf("node cannot be less than two elements in length")
}
switch elements[0].([]byte)[0] / 16 {
case '\x00':
return Extension, nil
return sdtypes.Extension, nil
case '\x01':
return Extension, nil
return sdtypes.Extension, nil
case '\x02':
return Leaf, nil
return sdtypes.Leaf, nil
case '\x03':
return Leaf, nil
return sdtypes.Leaf, nil
default:
return Unknown, fmt.Errorf("unknown hex prefix")
return sdtypes.Unknown, fmt.Errorf("unknown hex prefix")
}
}
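
CheckKeyType classifies a decoded trie node by its shape: more than two elements means a branch, while two-element nodes are split by the high nibble of the hex-prefix encoding (0 or 1 for extensions, 2 or 3 for leaves). A hedged usage sketch, assuming the rlp package is imported as it is elsewhere in the statediff package and that rawNode was fetched via TrieDB().Node(hash):

// classifyRawNode is illustrative only; it is not part of this PR.
func classifyRawNode(rawNode []byte) (sdtypes.NodeType, error) {
	var elements []interface{}
	if err := rlp.DecodeBytes(rawNode, &elements); err != nil {
		return sdtypes.Unknown, err
	}
	// For leaves, elements[0] holds the compact-encoded partial path and
	// elements[1] the RLP-encoded value (an account, for the state trie).
	return CheckKeyType(elements)
}
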
55
statediff/indexer/helpers.go
Normal file
@ -0,0 +1,55 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package indexer

import (
"fmt"

"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/statediff/types"
)

func ResolveFromNodeType(nodeType types.NodeType) int {
switch nodeType {
case types.Branch:
return 0
case types.Extension:
return 1
case types.Leaf:
return 2
case types.Removed:
return 3
default:
return -1
}
}

// ChainConfig returns the appropriate ethereum chain config for the provided chain id
func ChainConfig(chainID uint64) (*params.ChainConfig, error) {
switch chainID {
case 1:
return params.MainnetChainConfig, nil
case 3:
return params.TestnetChainConfig, nil // Ropsten
case 4:
return params.RinkebyChainConfig, nil
case 5:
return params.GoerliChainConfig, nil
default:
return nil, fmt.Errorf("chain config for chainid %d not available", chainID)
}
}
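
A rough sketch of how these helpers are intended to be used together (illustrative, not from the diff): node types are flattened to the integers above before being written to Postgres, and the chain config is resolved once from the node's chain ID.

func exampleHelperUsage(chainID uint64, nt types.NodeType) error {
	// e.g. chainID 1 resolves to params.MainnetChainConfig.
	cfg, err := ChainConfig(chainID)
	if err != nil {
		return err
	}
	_ = cfg // would be handed to the IPLD/publishing code

	// Branch=0, Extension=1, Leaf=2, Removed=3; -1 flags an unsupported type.
	if ResolveFromNodeType(nt) < 0 {
		return fmt.Errorf("unsupported node type %v", nt)
	}
	return nil
}
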
395
statediff/indexer/indexer.go
Normal file
@ -0,0 +1,395 @@

i-norden commented: We should change this to the Debug level

// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package indexer

import (
"fmt"
"math/big"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"

node "github.com/ipfs/go-ipld-format"
"github.com/jmoiron/sqlx"
"github.com/multiformats/go-multihash"

"github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/prom"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)

// Indexer interface to allow substitution of mocks for testing
type Indexer interface {
PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (*BlockTx, error)
PushStateNode(tx *BlockTx, stateNode sdtypes.StateNode) error
PushCodeAndCodeHash(tx *BlockTx, codeAndCodeHash sdtypes.CodeAndCodeHash) error
}
|
||||
|
i-norden
commented
We should change this to the Debug level We should change this to the Debug level
|
||||
// StateDiffIndexer satisfies the Indexer interface for ethereum statediff objects
|
||||
|
i-norden
commented
We should change this to the Debug level We should change this to the Debug level
|
||||
type StateDiffIndexer struct {
|
||||
|
i-norden
commented
We should change this to the Debug level We should change this to the Debug level
|
||||
chainConfig *params.ChainConfig
|
||||
|
i-norden
commented
We should change this to the Debug level We should change this to the Debug level
|
||||
dbWriter *PostgresCIDWriter
|
||||
|
i-norden
commented
We should change this to the Debug level We should change this to the Debug level
|
||||
}
|
||||
|
i-norden
commented
We should change this to the Debug level We should change this to the Debug level
|
||||
|
||||
|
i-norden
commented
We should change this to the Debug level We should change this to the Debug level
|
||||
// NewStateDiffIndexer creates a pointer to a new StateDiffIndexer which satisfies the Indexer interface
func NewStateDiffIndexer(chainConfig *params.ChainConfig, db *postgres.DB) *StateDiffIndexer {
	return &StateDiffIndexer{
		chainConfig: chainConfig,
		dbWriter:    NewPostgresCIDWriter(db),
	}
}

type BlockTx struct {
	dbtx        *sqlx.Tx
	BlockNumber uint64
	headerID    int64
	err         error
	Close       func() error
}

// PushBlock pushes and indexes block data in the database, except state & storage nodes (includes header, uncles, transactions & receipts)
// Returns an initiated DB transaction which must be Closed via defer to commit or rollback
func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (*BlockTx, error) {
	start, t := time.Now(), time.Now()
	blockHash := block.Hash()
	blockHashStr := blockHash.String()
	height := block.NumberU64()
	traceMsg := fmt.Sprintf("indexer stats for statediff at %d with hash %s:\r\n", height, blockHashStr)
	transactions := block.Transactions()
	// Derive any missing fields
	if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, transactions); err != nil {
		return nil, err
	}
	// Generate the block iplds
	headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, err := ipld.FromBlockAndReceipts(block, receipts)
	if err != nil {
		return nil, err
	}
	if len(txNodes) != len(txTrieNodes) || len(rctNodes) != len(rctTrieNodes) || len(txNodes) != len(rctNodes) {
		return nil, fmt.Errorf("expected number of transactions (%d), transaction trie nodes (%d), receipts (%d), and receipt trie nodes (%d) to be equal", len(txNodes), len(txTrieNodes), len(rctNodes), len(rctTrieNodes))
	}
	// Calculate reward
	reward := CalcEthBlockReward(block.Header(), block.Uncles(), block.Transactions(), receipts)
	t = time.Now()
	// Begin new db tx for everything
	tx, err := sdi.dbWriter.db.Beginx()
	if err != nil {
		return nil, err
	}
	blocktx := BlockTx{
		dbtx: tx,
		// handle transaction commit or rollback for any return case
		Close: func() error {
			var err error
			if p := recover(); p != nil {
				shared.Rollback(tx)
				panic(p)
			} else {
				tDiff := time.Now().Sub(t)
				prom.SetTimeMetric("t_state_store_code_processing", tDiff)
				traceMsg += fmt.Sprintf("state, storage, and code storage processing time: %s\r\n", tDiff.String())
				t = time.Now()
				err = tx.Commit()
				tDiff = time.Now().Sub(t)
				prom.SetTimeMetric("t_postgres_commit", tDiff)
				traceMsg += fmt.Sprintf("postgres transaction commit duration: %s\r\n", tDiff.String())
			}
			traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Now().Sub(start).String())
			log.Debug(traceMsg)
i-norden commented:
We should change this to the Debug level
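A minimal sketch of the suggested change, assuming the comment refers to this per-block trace summary (the hunk above already shows the Debug call):

	// Before (assumed earlier revision): per-block indexer stats logged at Info level
	log.Info(traceMsg)
	// After: emit the same summary at Debug level so it only appears with verbose logging
	log.Debug(traceMsg)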
			return err
		},
	}
	tDiff := time.Now().Sub(t)
	prom.SetTimeMetric("t_free_postgres", tDiff)
	traceMsg += fmt.Sprintf("time spent waiting for free postgres tx: %s:\r\n", tDiff.String())
	t = time.Now()

	// Publish and index header, collect headerID
	headerID, err := sdi.processHeader(tx, block.Header(), headerNode, reward, totalDifficulty)
	if err != nil {
		return nil, err
	}
	tDiff = time.Now().Sub(t)
	prom.SetTimeMetric("t_header_processing", tDiff)
	traceMsg += fmt.Sprintf("header processing time: %s\r\n", tDiff.String())
	t = time.Now()
	// Publish and index uncles
	if err := sdi.processUncles(tx, headerID, height, uncleNodes); err != nil {
		return nil, err
	}
	tDiff = time.Now().Sub(t)
	prom.SetTimeMetric("t_uncle_processing", tDiff)
	traceMsg += fmt.Sprintf("uncle processing time: %s\r\n", tDiff.String())
	t = time.Now()
	// Publish and index receipts and txs
	if err := sdi.processReceiptsAndTxs(tx, processArgs{
		headerID:     headerID,
		blockNumber:  block.Number(),
		receipts:     receipts,
		txs:          transactions,
		rctNodes:     rctNodes,
		rctTrieNodes: rctTrieNodes,
		txNodes:      txNodes,
		txTrieNodes:  txTrieNodes,
	}); err != nil {
		return nil, err
	}
	tDiff = time.Now().Sub(t)
	prom.SetTimeMetric("t_tx_receipt_processing", tDiff)
	traceMsg += fmt.Sprintf("tx and receipt processing time: %s\r\n", tDiff.String())
	t = time.Now()

	blocktx.BlockNumber = height
	blocktx.headerID = headerID
	return &blocktx, err
}

// processHeader publishes and indexes a header IPLD in Postgres
// it returns the headerID
func (sdi *StateDiffIndexer) processHeader(tx *sqlx.Tx, header *types.Header, headerNode node.Node, reward, td *big.Int) (int64, error) {
	// publish header
	if err := shared.PublishIPLD(tx, headerNode); err != nil {
		return 0, err
	}
	// index header
	return sdi.dbWriter.upsertHeaderCID(tx, models.HeaderModel{
		CID:             headerNode.Cid().String(),
		MhKey:           shared.MultihashKeyFromCID(headerNode.Cid()),
		ParentHash:      header.ParentHash.String(),
		BlockNumber:     header.Number.String(),
		BlockHash:       header.Hash().String(),
		TotalDifficulty: td.String(),
		Reward:          reward.String(),
		Bloom:           header.Bloom.Bytes(),
		StateRoot:       header.Root.String(),
		RctRoot:         header.ReceiptHash.String(),
		TxRoot:          header.TxHash.String(),
		UncleRoot:       header.UncleHash.String(),
		Timestamp:       header.Time,
	})
}

func (sdi *StateDiffIndexer) processUncles(tx *sqlx.Tx, headerID int64, blockNumber uint64, uncleNodes []*ipld.EthHeader) error {
	// publish and index uncles
	for _, uncleNode := range uncleNodes {
		if err := shared.PublishIPLD(tx, uncleNode); err != nil {
			return err
		}
		uncleReward := CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64())
		uncle := models.UncleModel{
			CID:        uncleNode.Cid().String(),
			MhKey:      shared.MultihashKeyFromCID(uncleNode.Cid()),
			ParentHash: uncleNode.ParentHash.String(),
			BlockHash:  uncleNode.Hash().String(),
			Reward:     uncleReward.String(),
		}
		if err := sdi.dbWriter.upsertUncleCID(tx, uncle, headerID); err != nil {
			return err
		}
	}
	return nil
}

// processArgs bundles arguments to processReceiptsAndTxs
type processArgs struct {
	headerID     int64
	blockNumber  *big.Int
	receipts     types.Receipts
	txs          types.Transactions
	rctNodes     []*ipld.EthReceipt
	rctTrieNodes []*ipld.EthRctTrie
	txNodes      []*ipld.EthTx
	txTrieNodes  []*ipld.EthTxTrie
}

// processReceiptsAndTxs publishes and indexes receipt and transaction IPLDs in Postgres
func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *sqlx.Tx, args processArgs) error {
	// Process receipts and txs
	signer := types.MakeSigner(sdi.chainConfig, args.blockNumber)
	for i, receipt := range args.receipts {
		// tx that corresponds with this receipt
		trx := args.txs[i]
		from, err := types.Sender(signer, trx)
		if err != nil {
			return err
		}

		// Publishing
		// publish trie nodes, these aren't indexed directly
		if err := shared.PublishIPLD(tx, args.txTrieNodes[i]); err != nil {
			return err
		}
		if err := shared.PublishIPLD(tx, args.rctTrieNodes[i]); err != nil {
			return err
		}
		// publish the txs and receipts
		txNode, rctNode := args.txNodes[i], args.rctNodes[i]
		if err := shared.PublishIPLD(tx, txNode); err != nil {
			return err
		}
		if err := shared.PublishIPLD(tx, rctNode); err != nil {
			return err
		}

		// Indexing
		// extract topic and contract data from the receipt for indexing
		topicSets := make([][]string, 4)
		mappedContracts := make(map[string]bool) // use map to avoid duplicate addresses
		for _, log := range receipt.Logs {
			for i, topic := range log.Topics {
				topicSets[i] = append(topicSets[i], topic.Hex())
			}
			mappedContracts[log.Address.String()] = true
		}
		// these are the contracts seen in the logs
		logContracts := make([]string, 0, len(mappedContracts))
		for addr := range mappedContracts {
			logContracts = append(logContracts, addr)
		}
		// this is the contract address if this receipt is for a contract creation tx
		contract := shared.HandleZeroAddr(receipt.ContractAddress)
		var contractHash string
		isDeployment := contract != ""
		if isDeployment {
			contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
			// if tx is a contract deployment, publish the data (code)
			// codec doesn't matter in this case since we are not interested in the cid and the db key is multihash-derived
			// TODO: THE DATA IS NOT DIRECTLY THE CONTRACT CODE; THERE IS A MISSING PROCESSING STEP HERE
			// the contractHash => contract code is not currently correct
			if _, err := shared.PublishRaw(tx, ipld.MEthStorageTrie, multihash.KECCAK_256, trx.Data()); err != nil {
				return err
			}
		}
		// index tx first so that the receipt can reference it by FK
		txModel := models.TxModel{
			Dst:        shared.HandleZeroAddrPointer(trx.To()),
			Src:        shared.HandleZeroAddr(from),
			TxHash:     trx.Hash().String(),
			Index:      int64(i),
			Data:       trx.Data(),
			Deployment: isDeployment,
			CID:        txNode.Cid().String(),
			MhKey:      shared.MultihashKeyFromCID(txNode.Cid()),
		}
		txID, err := sdi.dbWriter.upsertTransactionCID(tx, txModel, args.headerID)
		if err != nil {
			return err
		}
		// index the receipt
		rctModel := models.ReceiptModel{
			Topic0s:      topicSets[0],
			Topic1s:      topicSets[1],
			Topic2s:      topicSets[2],
			Topic3s:      topicSets[3],
			Contract:     contract,
			ContractHash: contractHash,
			LogContracts: logContracts,
			CID:          rctNode.Cid().String(),
			MhKey:        shared.MultihashKeyFromCID(rctNode.Cid()),
		}
		if err := sdi.dbWriter.upsertReceiptCID(tx, rctModel, txID); err != nil {
			return err
		}
	}
	return nil
}

func (sdi *StateDiffIndexer) PushStateNode(tx *BlockTx, stateNode sdtypes.StateNode) error {
	// publish the state node
	stateCIDStr, err := shared.PublishRaw(tx.dbtx, ipld.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
	if err != nil {
		return err
	}
	mhKey, _ := shared.MultihashKeyFromCIDString(stateCIDStr)
	stateModel := models.StateNodeModel{
		Path:     stateNode.Path,
		StateKey: common.BytesToHash(stateNode.LeafKey).String(),
		CID:      stateCIDStr,
		MhKey:    mhKey,
		NodeType: ResolveFromNodeType(stateNode.NodeType),
	}
	// index the state node, collect the stateID to reference by FK
	stateID, err := sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel, tx.headerID)
	if err != nil {
		return err
	}
	// if we have a leaf, decode and index the account data
	if stateNode.NodeType == sdtypes.Leaf {
		var i []interface{}
		if err := rlp.DecodeBytes(stateNode.NodeValue, &i); err != nil {
			return fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
		}
		if len(i) != 2 {
			return fmt.Errorf("eth IPLDPublisher expected state leaf node rlp to decode into two elements")
		}
		var account state.Account
		if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
			return fmt.Errorf("error decoding state account rlp: %s", err.Error())
		}
		accountModel := models.StateAccountModel{
			Balance:     account.Balance.String(),
			Nonce:       account.Nonce,
			CodeHash:    account.CodeHash,
			StorageRoot: account.Root.String(),
		}
		if err := sdi.dbWriter.upsertStateAccount(tx.dbtx, accountModel, stateID); err != nil {
			return err
		}
	}
	// if there are any storage nodes associated with this node, publish and index them
	for _, storageNode := range stateNode.StorageNodes {
		storageCIDStr, err := shared.PublishRaw(tx.dbtx, ipld.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue)
		if err != nil {
			return err
		}
		mhKey, _ := shared.MultihashKeyFromCIDString(storageCIDStr)
		storageModel := models.StorageNodeModel{
			Path:       storageNode.Path,
			StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
			CID:        storageCIDStr,
			MhKey:      mhKey,
			NodeType:   ResolveFromNodeType(storageNode.NodeType),
		}
		if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel, stateID); err != nil {
			return err
		}
	}

	return nil
}

// PushCodeAndCodeHash publishes code and codehash pairs to the ipld database
func (sdi *StateDiffIndexer) PushCodeAndCodeHash(tx *BlockTx, codeAndCodeHash sdtypes.CodeAndCodeHash) error {
	// codec doesn't matter since db key is multihash-based
	mhKey, err := shared.MultihashKeyFromKeccak256(codeAndCodeHash.Hash)
	if err != nil {
		return err
	}
	if err := shared.PublishDirect(tx.dbtx, mhKey, codeAndCodeHash.Code); err != nil {
		return err
	}
	return nil
}
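A minimal sketch of how a caller is expected to drive the indexer, mirroring the test setup below; the connected *postgres.DB and the block, receipts, totalDifficulty, and stateNodes values are assumed to come from elsewhere:

	// Index one block and its state diff inside a single Postgres transaction.
	ind := indexer.NewStateDiffIndexer(params.MainnetChainConfig, db) // db: an already-connected *postgres.DB
	blockTx, err := ind.PushBlock(block, receipts, totalDifficulty)
	if err != nil {
		return err
	}
	// Close commits the underlying tx (or rolls it back on panic), so defer it right away.
	defer blockTx.Close()
	for _, stateNode := range stateNodes { // stateNodes: []sdtypes.StateNode from the statediff builder (assumed)
		if err := ind.PushStateNode(blockTx, stateNode); err != nil {
			return err
		}
	}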
statediff/indexer/indexer_test.go (new file, 292 lines)
@ -0,0 +1,292 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package indexer_test

import (
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-ipfs-blockstore"
	"github.com/ipfs/go-ipfs-ds-help"

	ind "github.com/ethereum/go-ethereum/statediff/indexer"
	"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
	eth "github.com/ethereum/go-ethereum/statediff/indexer/models"
	"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
	"github.com/ethereum/go-ethereum/statediff/indexer/shared"
)

var (
	db        *postgres.DB
	err       error
	indexer   *ind.StateDiffIndexer
	ipfsPgGet = `SELECT data FROM public.blocks
					WHERE key = $1`
)

func expectTrue(t *testing.T, value bool) {
|
||||
if !value {
|
||||
t.Fatalf("Assertion failed")
|
||||
}
|
||||
}
|
||||
|
||||
func setup(t *testing.T) {
|
||||
db, err = shared.SetupDB()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
indexer = ind.NewStateDiffIndexer(params.MainnetChainConfig, db)
|
||||
var tx *ind.BlockTx
|
||||
tx, err = indexer.PushBlock(
|
||||
mocks.MockBlock,
|
||||
mocks.MockReceipts,
|
||||
mocks.MockBlock.Difficulty())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer tx.Close()
|
||||
for _, node := range mocks.StateDiffs {
|
||||
err = indexer.PushStateNode(tx, node)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
shared.ExpectEqual(t, tx.BlockNumber, mocks.BlockNumber.Uint64())
|
||||
}
|
||||
|
||||
func tearDown(t *testing.T) {
|
||||
ind.TearDownDB(t, db)
|
||||
}
|
||||
|
||||
func TestPublishAndIndexer(t *testing.T) {
|
||||
t.Run("Publish and index header IPLDs in a single tx", func(t *testing.T) {
|
||||
setup(t)
|
||||
defer tearDown(t)
|
||||
pgStr := `SELECT cid, td, reward, id
|
||||
FROM eth.header_cids
|
||||
WHERE block_number = $1`
|
||||
// check header was properly indexed
|
||||
type res struct {
|
||||
CID string
|
||||
TD string
|
||||
Reward string
|
||||
ID int
|
||||
}
|
||||
header := new(res)
|
||||
err = db.QueryRowx(pgStr, 1).StructScan(header)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
shared.ExpectEqual(t, header.CID, mocks.HeaderCID.String())
|
||||
shared.ExpectEqual(t, header.TD, mocks.MockBlock.Difficulty().String())
|
||||
shared.ExpectEqual(t, header.Reward, "5000000000000011250")
|
||||
dc, err := cid.Decode(header.CID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mhKey := dshelp.MultihashToDsKey(dc.Hash())
|
||||
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
|
||||
var data []byte
|
||||
err = db.Get(&data, ipfsPgGet, prefixedKey)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
shared.ExpectEqual(t, data, mocks.MockHeaderRlp)
|
||||
})
|
||||
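The CID-to-database-key conversion above (decode the CID, take its multihash, prepend blockstore.BlockPrefix) is repeated in every subtest; a small helper along these lines could factor it out. This is a sketch only, not part of the diff, and uses exactly the imports already present in this test file.

package indexer_test

import (
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-ipfs-blockstore"
	"github.com/ipfs/go-ipfs-ds-help"
)

// blockKey converts a CID string into the key used by public.blocks,
// mirroring the inline lookups in each subtest.
func blockKey(c string) (string, error) {
	dc, err := cid.Decode(c)
	if err != nil {
		return "", err
	}
	mhKey := dshelp.MultihashToDsKey(dc.Hash())
	return blockstore.BlockPrefix.String() + mhKey.String(), nil
}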
|
||||
t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) {
|
||||
setup(t)
|
||||
defer tearDown(t)
|
||||
// check that txs were properly indexed
|
||||
trxs := make([]string, 0)
|
||||
pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
|
||||
WHERE header_cids.block_number = $1`
|
||||
err = db.Select(&trxs, pgStr, 1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
shared.ExpectEqual(t, len(trxs), 3)
|
||||
expectTrue(t, shared.ListContainsString(trxs, mocks.Trx1CID.String()))
|
||||
expectTrue(t, shared.ListContainsString(trxs, mocks.Trx2CID.String()))
|
||||
expectTrue(t, shared.ListContainsString(trxs, mocks.Trx3CID.String()))
|
||||
// and published
|
||||
for _, c := range trxs {
|
||||
dc, err := cid.Decode(c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mhKey := dshelp.MultihashToDsKey(dc.Hash())
|
||||
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
|
||||
var data []byte
|
||||
err = db.Get(&data, ipfsPgGet, prefixedKey)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
switch c {
|
||||
case mocks.Trx1CID.String():
|
||||
shared.ExpectEqual(t, data, mocks.MockTransactions.GetRlp(0))
|
||||
case mocks.Trx2CID.String():
|
||||
shared.ExpectEqual(t, data, mocks.MockTransactions.GetRlp(1))
|
||||
case mocks.Trx3CID.String():
|
||||
shared.ExpectEqual(t, data, mocks.MockTransactions.GetRlp(2))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Publish and index receipt IPLDs in a single tx", func(t *testing.T) {
|
||||
setup(t)
|
||||
defer tearDown(t)
|
||||
// check receipts were properly indexed
|
||||
rcts := make([]string, 0)
|
||||
pgStr := `SELECT receipt_cids.cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
||||
WHERE receipt_cids.tx_id = transaction_cids.id
|
||||
AND transaction_cids.header_id = header_cids.id
|
||||
AND header_cids.block_number = $1`
|
||||
err = db.Select(&rcts, pgStr, 1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
shared.ExpectEqual(t, len(rcts), 3)
|
||||
expectTrue(t, shared.ListContainsString(rcts, mocks.Rct1CID.String()))
|
||||
expectTrue(t, shared.ListContainsString(rcts, mocks.Rct2CID.String()))
|
||||
expectTrue(t, shared.ListContainsString(rcts, mocks.Rct3CID.String()))
|
||||
// and published
|
||||
for _, c := range rcts {
|
||||
dc, err := cid.Decode(c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mhKey := dshelp.MultihashToDsKey(dc.Hash())
|
||||
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
|
||||
var data []byte
|
||||
err = db.Get(&data, ipfsPgGet, prefixedKey)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
switch c {
|
||||
case mocks.Rct1CID.String():
|
||||
shared.ExpectEqual(t, data, mocks.MockReceipts.GetRlp(0))
|
||||
case mocks.Rct2CID.String():
|
||||
shared.ExpectEqual(t, data, mocks.MockReceipts.GetRlp(1))
|
||||
case mocks.Rct3CID.String():
|
||||
shared.ExpectEqual(t, data, mocks.MockReceipts.GetRlp(2))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) {
|
||||
setup(t)
|
||||
defer tearDown(t)
|
||||
// check that state nodes were properly indexed and published
|
||||
stateNodes := make([]eth.StateNodeModel, 0)
|
||||
pgStr := `SELECT state_cids.id, state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
|
||||
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
|
||||
WHERE header_cids.block_number = $1`
|
||||
err = db.Select(&stateNodes, pgStr, 1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
shared.ExpectEqual(t, len(stateNodes), 2)
|
||||
for _, stateNode := range stateNodes {
|
||||
var data []byte
|
||||
dc, err := cid.Decode(stateNode.CID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mhKey := dshelp.MultihashToDsKey(dc.Hash())
|
||||
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
|
||||
err = db.Get(&data, ipfsPgGet, prefixedKey)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
pgStr = `SELECT * from eth.state_accounts WHERE state_id = $1`
|
||||
var account eth.StateAccountModel
|
||||
err = db.Get(&account, pgStr, stateNode.ID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if stateNode.CID == mocks.State1CID.String() {
|
||||
shared.ExpectEqual(t, stateNode.NodeType, 2)
|
||||
shared.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex())
|
||||
shared.ExpectEqual(t, stateNode.Path, []byte{'\x06'})
|
||||
shared.ExpectEqual(t, data, mocks.ContractLeafNode)
|
||||
shared.ExpectEqual(t, account, eth.StateAccountModel{
|
||||
ID: account.ID,
|
||||
StateID: stateNode.ID,
|
||||
Balance: "0",
|
||||
CodeHash: mocks.ContractCodeHash.Bytes(),
|
||||
StorageRoot: mocks.ContractRoot,
|
||||
Nonce: 1,
|
||||
})
|
||||
}
|
||||
if stateNode.CID == mocks.State2CID.String() {
|
||||
shared.ExpectEqual(t, stateNode.NodeType, 2)
|
||||
shared.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex())
|
||||
shared.ExpectEqual(t, stateNode.Path, []byte{'\x0c'})
|
||||
shared.ExpectEqual(t, data, mocks.AccountLeafNode)
|
||||
shared.ExpectEqual(t, account, eth.StateAccountModel{
|
||||
ID: account.ID,
|
||||
StateID: stateNode.ID,
|
||||
Balance: "1000",
|
||||
CodeHash: mocks.AccountCodeHash.Bytes(),
|
||||
StorageRoot: mocks.AccountRoot,
|
||||
Nonce: 0,
|
||||
})
|
||||
}
|
||||
}
|
||||
pgStr = `SELECT * from eth.state_accounts WHERE state_id = $1`
|
||||
})
|
||||
|
||||
t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) {
|
||||
setup(t)
|
||||
defer tearDown(t)
|
||||
// check that storage nodes were properly indexed
|
||||
storageNodes := make([]eth.StorageNodeWithStateKeyModel, 0)
|
||||
pgStr := `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
|
||||
FROM eth.storage_cids, eth.state_cids, eth.header_cids
|
||||
WHERE storage_cids.state_id = state_cids.id
|
||||
AND state_cids.header_id = header_cids.id
|
||||
AND header_cids.block_number = $1`
|
||||
err = db.Select(&storageNodes, pgStr, 1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
shared.ExpectEqual(t, len(storageNodes), 1)
|
||||
shared.ExpectEqual(t, storageNodes[0], eth.StorageNodeWithStateKeyModel{
|
||||
CID: mocks.StorageCID.String(),
|
||||
NodeType: 2,
|
||||
StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(),
|
||||
StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
|
||||
Path: []byte{},
|
||||
})
|
||||
var data []byte
|
||||
dc, err := cid.Decode(storageNodes[0].CID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mhKey := dshelp.MultihashToDsKey(dc.Hash())
|
||||
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
|
||||
err = db.Get(&data, ipfsPgGet, prefixedKey)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
shared.ExpectEqual(t, data, mocks.StorageLeafNode)
|
||||
})
|
||||
}
|
||||
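Outside the test suite, the same schema can be read back directly. A hedged sketch against the eth.header_cids table used above; the package name is made up, and it assumes postgres.DB exposes the sqlx-style Get used throughout these tests.

package client

import "github.com/ethereum/go-ethereum/statediff/indexer/postgres"

// headerCIDByNumber reads back the indexed header CID for a block number
// from the eth.header_cids table populated by the indexer.
func headerCIDByNumber(db *postgres.DB, number uint64) (string, error) {
	var c string
	err := db.Get(&c, `SELECT cid FROM eth.header_cids WHERE block_number = $1`, number)
	return c, err
}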
statediff/indexer/ipfs/ipld/eth_account.go  (new file, 175 lines)
@@ -0,0 +1,175 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
node "github.com/ipfs/go-ipld-format"
|
||||
)
|
||||
|
||||
// EthAccountSnapshot (eth-account-snapshot codec 0x97)
|
||||
// represents an ethereum account, i.e. a wallet address or
|
||||
// a smart contract
|
||||
type EthAccountSnapshot struct {
|
||||
*EthAccount
|
||||
|
||||
cid cid.Cid
|
||||
rawdata []byte
|
||||
}
|
||||
|
||||
// EthAccount is the building block of EthAccountSnapshot.
|
||||
// Or, is the former stripped of its cid and rawdata components.
|
||||
type EthAccount struct {
|
||||
Nonce uint64
|
||||
Balance *big.Int
|
||||
Root []byte // This is the storage root trie
|
||||
CodeHash []byte // This is the hash of the EVM code
|
||||
}
|
||||
|
||||
// Static (compile time) check that EthAccountSnapshot satisfies the
|
||||
// node.Node interface.
|
||||
var _ node.Node = (*EthAccountSnapshot)(nil)
|
||||
|
||||
/*
|
||||
INPUT
|
||||
*/
|
||||
|
||||
// Input should be managed by EthStateTrie
|
||||
|
||||
/*
|
||||
OUTPUT
|
||||
*/
|
||||
|
||||
// Output should be managed by EthStateTrie
|
||||
|
||||
/*
|
||||
Block INTERFACE
|
||||
*/
|
||||
|
||||
// RawData returns the binary of the RLP encode of the account snapshot.
|
||||
func (as *EthAccountSnapshot) RawData() []byte {
|
||||
return as.rawdata
|
||||
}
|
||||
|
||||
// Cid returns the cid of the transaction.
|
||||
func (as *EthAccountSnapshot) Cid() cid.Cid {
|
||||
return as.cid
|
||||
}
|
||||
|
||||
// String is a helper for output
|
||||
func (as *EthAccountSnapshot) String() string {
|
||||
return fmt.Sprintf("<EthereumAccountSnapshot %s>", as.cid)
|
||||
}
|
||||
|
||||
// Loggable returns in a map the type of IPLD Link.
|
||||
func (as *EthAccountSnapshot) Loggable() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"type": "eth-account-snapshot",
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Node INTERFACE
|
||||
*/
|
||||
|
||||
// Resolve resolves a path through this node, stopping at any link boundary
|
||||
// and returning the object found as well as the remaining path to traverse
|
||||
func (as *EthAccountSnapshot) Resolve(p []string) (interface{}, []string, error) {
|
||||
if len(p) == 0 {
|
||||
return as, nil, nil
|
||||
}
|
||||
|
||||
if len(p) > 1 {
|
||||
return nil, nil, fmt.Errorf("unexpected path elements past %s", p[0])
|
||||
}
|
||||
|
||||
switch p[0] {
|
||||
case "balance":
|
||||
return as.Balance, nil, nil
|
||||
case "codeHash":
|
||||
return &node.Link{Cid: keccak256ToCid(RawBinary, as.CodeHash)}, nil, nil
|
||||
case "nonce":
|
||||
return as.Nonce, nil, nil
|
||||
case "root":
|
||||
return &node.Link{Cid: keccak256ToCid(MEthStorageTrie, as.Root)}, nil, nil
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("no such link")
|
||||
}
|
||||
}
|
||||
|
||||
// Tree lists all paths within the object under 'path', and up to the given depth.
|
||||
// To list the entire object (similar to `find .`) pass "" and -1
|
||||
func (as *EthAccountSnapshot) Tree(p string, depth int) []string {
|
||||
if p != "" || depth == 0 {
|
||||
return nil
|
||||
}
|
||||
return []string{"balance", "codeHash", "nonce", "root"}
|
||||
}
|
||||
|
||||
// ResolveLink is a helper function that calls resolve and asserts the
|
||||
// output is a link
|
||||
func (as *EthAccountSnapshot) ResolveLink(p []string) (*node.Link, []string, error) {
|
||||
obj, rest, err := as.Resolve(p)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if lnk, ok := obj.(*node.Link); ok {
|
||||
return lnk, rest, nil
|
||||
}
|
||||
|
||||
return nil, nil, fmt.Errorf("resolved item was not a link")
|
||||
}
|
||||
|
||||
// Copy will go away. It is here to comply with the interface.
|
||||
func (as *EthAccountSnapshot) Copy() node.Node {
|
||||
panic("dont use this yet")
|
||||
}
|
||||
|
||||
// Links is a helper function that returns all links within this object
|
||||
func (as *EthAccountSnapshot) Links() []*node.Link {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stat will go away. It is here to comply with the interface.
|
||||
func (as *EthAccountSnapshot) Stat() (*node.NodeStat, error) {
|
||||
return &node.NodeStat{}, nil
|
||||
}
|
||||
|
||||
// Size will go away. It is here to comply with the interface.
|
||||
func (as *EthAccountSnapshot) Size() (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
/*
|
||||
EthAccountSnapshot functions
|
||||
*/
|
||||
|
||||
// MarshalJSON processes the transaction into readable JSON format.
|
||||
func (as *EthAccountSnapshot) MarshalJSON() ([]byte, error) {
|
||||
out := map[string]interface{}{
|
||||
"balance": as.Balance,
|
||||
"codeHash": keccak256ToCid(RawBinary, as.CodeHash),
|
||||
"nonce": as.Nonce,
|
||||
"root": keccak256ToCid(MEthStorageTrie, as.Root),
|
||||
}
|
||||
return json.Marshal(out)
|
||||
}
|
||||
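A brief, hypothetical consumer of the interface above: walking Tree and resolving each path of an already-decoded snapshot. It uses only methods defined in this file; the function name is illustrative.

package ipld

import "fmt"

// printAccount walks every path the snapshot exposes and resolves it,
// exercising the node.Node methods implemented above.
func printAccount(as *EthAccountSnapshot) {
	for _, path := range as.Tree("", -1) {
		v, _, err := as.Resolve([]string{path})
		if err != nil {
			fmt.Printf("%s: error: %v\n", path, err)
			continue
		}
		fmt.Printf("%s: %v\n", path, v)
	}
}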
statediff/indexer/ipfs/ipld/eth_header.go  (new file, 256 lines)
@@ -0,0 +1,256 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ipfs/go-cid"
|
||||
node "github.com/ipfs/go-ipld-format"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
// EthHeader (eth-block, codec 0x90), represents an ethereum block header
|
||||
type EthHeader struct {
|
||||
*types.Header
|
||||
|
||||
cid cid.Cid
|
||||
rawdata []byte
|
||||
}
|
||||
|
||||
// Static (compile time) check that EthHeader satisfies the node.Node interface.
|
||||
var _ node.Node = (*EthHeader)(nil)
|
||||
|
||||
/*
|
||||
INPUT
|
||||
*/
|
||||
|
||||
// NewEthHeader converts a *types.Header into an EthHeader IPLD node
|
||||
func NewEthHeader(header *types.Header) (*EthHeader, error) {
|
||||
headerRLP, err := rlp.EncodeToBytes(header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := RawdataToCid(MEthHeader, headerRLP, mh.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &EthHeader{
|
||||
Header: header,
|
||||
cid: c,
|
||||
rawdata: headerRLP,
|
||||
}, nil
|
||||
}
|
||||
|
||||
/*
|
||||
OUTPUT
|
||||
*/
|
||||
|
||||
// DecodeEthHeader takes a cid and its raw binary data
|
||||
// from IPFS and returns an EthTx object for further processing.
|
||||
func DecodeEthHeader(c cid.Cid, b []byte) (*EthHeader, error) {
|
||||
h := new(types.Header)
|
||||
if err := rlp.DecodeBytes(b, h); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &EthHeader{
|
||||
Header: h,
|
||||
cid: c,
|
||||
rawdata: b,
|
||||
}, nil
|
||||
}
|
||||
|
||||
/*
|
||||
Block INTERFACE
|
||||
*/
|
||||
|
||||
// RawData returns the binary of the RLP encode of the block header.
|
||||
func (b *EthHeader) RawData() []byte {
|
||||
return b.rawdata
|
||||
}
|
||||
|
||||
// Cid returns the cid of the block header.
|
||||
func (b *EthHeader) Cid() cid.Cid {
|
||||
return b.cid
|
||||
}
|
||||
|
||||
// String is a helper for output
|
||||
func (b *EthHeader) String() string {
|
||||
return fmt.Sprintf("<EthHeader %s>", b.cid)
|
||||
}
|
||||
|
||||
// Loggable returns in a map the type of IPLD Link.
|
||||
func (b *EthHeader) Loggable() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"type": "eth-block",
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Node INTERFACE
|
||||
*/
|
||||
|
||||
// Resolve resolves a path through this node, stopping at any link boundary
|
||||
// and returning the object found as well as the remaining path to traverse
|
||||
func (b *EthHeader) Resolve(p []string) (interface{}, []string, error) {
|
||||
if len(p) == 0 {
|
||||
return b, nil, nil
|
||||
}
|
||||
|
||||
first, rest := p[0], p[1:]
|
||||
|
||||
switch first {
|
||||
case "parent":
|
||||
return &node.Link{Cid: commonHashToCid(MEthHeader, b.ParentHash)}, rest, nil
|
||||
case "receipts":
|
||||
return &node.Link{Cid: commonHashToCid(MEthTxReceiptTrie, b.ReceiptHash)}, rest, nil
|
||||
case "root":
|
||||
return &node.Link{Cid: commonHashToCid(MEthStateTrie, b.Root)}, rest, nil
|
||||
case "tx":
|
||||
return &node.Link{Cid: commonHashToCid(MEthTxTrie, b.TxHash)}, rest, nil
|
||||
case "uncles":
|
||||
return &node.Link{Cid: commonHashToCid(MEthHeaderList, b.UncleHash)}, rest, nil
|
||||
}
|
||||
|
||||
if len(p) != 1 {
|
||||
return nil, nil, fmt.Errorf("unexpected path elements past %s", first)
|
||||
}
|
||||
|
||||
switch first {
|
||||
case "bloom":
|
||||
return b.Bloom, nil, nil
|
||||
case "coinbase":
|
||||
return b.Coinbase, nil, nil
|
||||
case "difficulty":
|
||||
return b.Difficulty, nil, nil
|
||||
case "extra":
|
||||
// This is a []byte. By default they are marshalled into Base64.
|
||||
return fmt.Sprintf("0x%x", b.Extra), nil, nil
|
||||
case "gaslimit":
|
||||
return b.GasLimit, nil, nil
|
||||
case "gasused":
|
||||
return b.GasUsed, nil, nil
|
||||
case "mixdigest":
|
||||
return b.MixDigest, nil, nil
|
||||
case "nonce":
|
||||
return b.Nonce, nil, nil
|
||||
case "number":
|
||||
return b.Number, nil, nil
|
||||
case "time":
|
||||
return b.Time, nil, nil
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("no such link")
|
||||
}
|
||||
}
|
||||
|
||||
// Tree lists all paths within the object under 'path', and up to the given depth.
|
||||
// To list the entire object (similar to `find .`) pass "" and -1
|
||||
func (b *EthHeader) Tree(p string, depth int) []string {
|
||||
if p != "" || depth == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return []string{
|
||||
"time",
|
||||
"bloom",
|
||||
"coinbase",
|
||||
"difficulty",
|
||||
"extra",
|
||||
"gaslimit",
|
||||
"gasused",
|
||||
"mixdigest",
|
||||
"nonce",
|
||||
"number",
|
||||
"parent",
|
||||
"receipts",
|
||||
"root",
|
||||
"tx",
|
||||
"uncles",
|
||||
}
|
||||
}
|
||||
|
||||
// ResolveLink is a helper function that allows easier traversal of links through blocks
|
||||
func (b *EthHeader) ResolveLink(p []string) (*node.Link, []string, error) {
|
||||
obj, rest, err := b.Resolve(p)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if lnk, ok := obj.(*node.Link); ok {
|
||||
return lnk, rest, nil
|
||||
}
|
||||
|
||||
return nil, nil, fmt.Errorf("resolved item was not a link")
|
||||
}
|
||||
|
||||
// Copy will go away. It is here to comply with the Node interface.
|
||||
func (b *EthHeader) Copy() node.Node {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
// Links is a helper function that returns all links within this object
|
||||
// HINT: Use `ipfs refs <cid>`
|
||||
func (b *EthHeader) Links() []*node.Link {
|
||||
return []*node.Link{
|
||||
{Cid: commonHashToCid(MEthHeader, b.ParentHash)},
|
||||
{Cid: commonHashToCid(MEthTxReceiptTrie, b.ReceiptHash)},
|
||||
{Cid: commonHashToCid(MEthStateTrie, b.Root)},
|
||||
{Cid: commonHashToCid(MEthTxTrie, b.TxHash)},
|
||||
{Cid: commonHashToCid(MEthHeaderList, b.UncleHash)},
|
||||
}
|
||||
}
|
||||
|
||||
// Stat will go away. It is here to comply with the Node interface.
|
||||
func (b *EthHeader) Stat() (*node.NodeStat, error) {
|
||||
return &node.NodeStat{}, nil
|
||||
}
|
||||
|
||||
// Size will go away. It is here to comply with the Node interface.
|
||||
func (b *EthHeader) Size() (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
/*
|
||||
EthHeader functions
|
||||
*/
|
||||
|
||||
// MarshalJSON processes the block header into readable JSON format,
|
||||
// converting the right links into their cids, and keeping the original
|
||||
// hex hash, allowing the user to simplify external queries.
|
||||
func (b *EthHeader) MarshalJSON() ([]byte, error) {
|
||||
out := map[string]interface{}{
|
||||
"time": b.Time,
|
||||
"bloom": b.Bloom,
|
||||
"coinbase": b.Coinbase,
|
||||
"difficulty": b.Difficulty,
|
||||
"extra": fmt.Sprintf("0x%x", b.Extra),
|
||||
"gaslimit": b.GasLimit,
|
||||
"gasused": b.GasUsed,
|
||||
"mixdigest": b.MixDigest,
|
||||
"nonce": b.Nonce,
|
||||
"number": b.Number,
|
||||
"parent": commonHashToCid(MEthHeader, b.ParentHash),
|
||||
"receipts": commonHashToCid(MEthTxReceiptTrie, b.ReceiptHash),
|
||||
"root": commonHashToCid(MEthStateTrie, b.Root),
|
||||
"tx": commonHashToCid(MEthTxTrie, b.TxHash),
|
||||
"uncles": commonHashToCid(MEthHeaderList, b.UncleHash),
|
||||
}
|
||||
return json.Marshal(out)
|
||||
}
|
||||
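As a usage sketch with made-up header values, wrapping a header and following its "parent" link looks like this; only the constructor and methods defined above are used.

package ipld

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

// exampleHeaderNode wraps a (made-up) header, prints its CID, and follows
// the "parent" link through ResolveLink.
func exampleHeaderNode() error {
	header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(131072)}
	headerNode, err := NewEthHeader(header)
	if err != nil {
		return err
	}
	fmt.Println("header cid:", headerNode.Cid())
	parentLink, _, err := headerNode.ResolveLink([]string{"parent"})
	if err != nil {
		return err
	}
	fmt.Println("parent cid:", parentLink.Cid)
	return nil
}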
statediff/indexer/ipfs/ipld/eth_parser.go  (new file, 97 lines)
@@ -0,0 +1,97 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// FromBlockAndReceipts takes a block and processes it
|
||||
// to return it a set of IPLD nodes for further processing.
|
||||
func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) (*EthHeader, []*EthHeader, []*EthTx, []*EthTxTrie, []*EthReceipt, []*EthRctTrie, error) {
|
||||
// Process the header
|
||||
headerNode, err := NewEthHeader(block.Header())
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, nil, nil, err
|
||||
}
|
||||
// Process the uncles
|
||||
uncleNodes := make([]*EthHeader, len(block.Uncles()))
|
||||
for i, uncle := range block.Uncles() {
|
||||
uncleNode, err := NewEthHeader(uncle)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, nil, nil, err
|
||||
}
|
||||
uncleNodes[i] = uncleNode
|
||||
}
|
||||
// Process the txs
|
||||
ethTxNodes, ethTxTrieNodes, err := processTransactions(block.Transactions(),
|
||||
block.Header().TxHash[:])
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, nil, nil, err
|
||||
}
|
||||
// Process the receipts
|
||||
ethRctNodes, ethRctTrieNodes, err := processReceipts(receipts,
|
||||
block.Header().ReceiptHash[:])
|
||||
return headerNode, uncleNodes, ethTxNodes, ethTxTrieNodes, ethRctNodes, ethRctTrieNodes, err
|
||||
}
|
||||
|
||||
// processTransactions will take the found transactions in a parsed block body
|
||||
// to return IPLD node slices for eth-tx and eth-tx-trie
|
||||
func processTransactions(txs []*types.Transaction, expectedTxRoot []byte) ([]*EthTx, []*EthTxTrie, error) {
|
||||
var ethTxNodes []*EthTx
|
||||
transactionTrie := newTxTrie()
|
||||
|
||||
for idx, tx := range txs {
|
||||
ethTx, err := NewEthTx(tx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ethTxNodes = append(ethTxNodes, ethTx)
|
||||
transactionTrie.add(idx, ethTx.RawData())
|
||||
}
|
||||
|
||||
if !bytes.Equal(transactionTrie.rootHash(), expectedTxRoot) {
|
||||
return nil, nil, fmt.Errorf("wrong transaction hash computed")
|
||||
}
|
||||
|
||||
return ethTxNodes, transactionTrie.getNodes(), nil
|
||||
}
|
||||
|
||||
// processReceipts will take in receipts
|
||||
// to return IPLD node slices for eth-rct and eth-rct-trie
|
||||
func processReceipts(rcts []*types.Receipt, expectedRctRoot []byte) ([]*EthReceipt, []*EthRctTrie, error) {
|
||||
var ethRctNodes []*EthReceipt
|
||||
receiptTrie := newRctTrie()
|
||||
|
||||
for idx, rct := range rcts {
|
||||
ethRct, err := NewReceipt(rct)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ethRctNodes = append(ethRctNodes, ethRct)
|
||||
receiptTrie.add(idx, ethRct.RawData())
|
||||
}
|
||||
|
||||
if !bytes.Equal(receiptTrie.rootHash(), expectedRctRoot) {
|
||||
return nil, nil, fmt.Errorf("wrong receipt hash computed")
|
||||
}
|
||||
|
||||
return ethRctNodes, receiptTrie.getNodes(), nil
|
||||
}
|
||||
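A short sketch of how a caller might consume FromBlockAndReceipts, collecting the CIDs of every derived node; the helper name is made up for illustration.

package ipld

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// collectCIDs derives every IPLD node for a block and gathers their CIDs.
func collectCIDs(block *types.Block, receipts []*types.Receipt) ([]string, error) {
	header, uncles, txs, txTrie, rcts, rctTrie, err := FromBlockAndReceipts(block, receipts)
	if err != nil {
		return nil, err
	}
	cids := []string{header.Cid().String()}
	for _, u := range uncles {
		cids = append(cids, u.Cid().String())
	}
	for _, t := range txs {
		cids = append(cids, t.Cid().String())
	}
	for _, t := range txTrie {
		cids = append(cids, t.Cid().String())
	}
	for _, r := range rcts {
		cids = append(cids, r.Cid().String())
	}
	for _, r := range rctTrie {
		cids = append(cids, r.Cid().String())
	}
	fmt.Printf("derived %d IPLD nodes for block %d\n", len(cids), block.NumberU64())
	return cids, nil
}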
statediff/indexer/ipfs/ipld/eth_receipt.go  (new file, 199 lines)
@@ -0,0 +1,199 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ipfs/go-cid"
|
||||
node "github.com/ipfs/go-ipld-format"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
type EthReceipt struct {
|
||||
*types.Receipt
|
||||
|
||||
rawdata []byte
|
||||
cid cid.Cid
|
||||
}
|
||||
|
||||
// Static (compile time) check that EthReceipt satisfies the node.Node interface.
|
||||
var _ node.Node = (*EthReceipt)(nil)
|
||||
|
||||
/*
|
||||
INPUT
|
||||
*/
|
||||
|
||||
// NewReceipt converts a types.ReceiptForStorage to an EthReceipt IPLD node
|
||||
func NewReceipt(receipt *types.Receipt) (*EthReceipt, error) {
|
||||
receiptRLP, err := rlp.EncodeToBytes(receipt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := RawdataToCid(MEthTxReceipt, receiptRLP, mh.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &EthReceipt{
|
||||
Receipt: receipt,
|
||||
cid: c,
|
||||
rawdata: receiptRLP,
|
||||
}, nil
|
||||
}
|
||||
|
||||
/*
|
||||
OUTPUT
|
||||
*/
|
||||
|
||||
// DecodeEthReceipt takes a cid and its raw binary data
|
||||
// from IPFS and returns an EthTx object for further processing.
|
||||
func DecodeEthReceipt(c cid.Cid, b []byte) (*EthReceipt, error) {
|
||||
r := new(types.Receipt)
|
||||
if err := rlp.DecodeBytes(b, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &EthReceipt{
|
||||
Receipt: r,
|
||||
cid: c,
|
||||
rawdata: b,
|
||||
}, nil
|
||||
}
|
||||
|
||||
/*
|
||||
Block INTERFACE
|
||||
*/
|
||||
|
||||
func (node *EthReceipt) RawData() []byte {
|
||||
return node.rawdata
|
||||
}
|
||||
|
||||
func (node *EthReceipt) Cid() cid.Cid {
|
||||
return node.cid
|
||||
}
|
||||
|
||||
// String is a helper for output
|
||||
func (r *EthReceipt) String() string {
|
||||
return fmt.Sprintf("<EthereumReceipt %s>", r.cid)
|
||||
}
|
||||
|
||||
// Loggable returns in a map the type of IPLD Link.
|
||||
func (r *EthReceipt) Loggable() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"type": "eth-receipt",
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve resolves a path through this node, stopping at any link boundary
|
||||
// and returning the object found as well as the remaining path to traverse
|
||||
func (r *EthReceipt) Resolve(p []string) (interface{}, []string, error) {
|
||||
if len(p) == 0 {
|
||||
return r, nil, nil
|
||||
}
|
||||
|
||||
if len(p) > 1 {
|
||||
return nil, nil, fmt.Errorf("unexpected path elements past %s", p[0])
|
||||
}
|
||||
|
||||
switch p[0] {
|
||||
|
||||
case "root":
|
||||
return r.PostState, nil, nil
|
||||
case "status":
|
||||
return r.Status, nil, nil
|
||||
case "cumulativeGasUsed":
|
||||
return r.CumulativeGasUsed, nil, nil
|
||||
case "logsBloom":
|
||||
return r.Bloom, nil, nil
|
||||
case "logs":
|
||||
return r.Logs, nil, nil
|
||||
case "transactionHash":
|
||||
return r.TxHash, nil, nil
|
||||
case "contractAddress":
|
||||
return r.ContractAddress, nil, nil
|
||||
case "gasUsed":
|
||||
return r.GasUsed, nil, nil
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("no such link")
|
||||
}
|
||||
}
|
||||
|
||||
// Tree lists all paths within the object under 'path', and up to the given depth.
|
||||
// To list the entire object (similar to `find .`) pass "" and -1
|
||||
func (r *EthReceipt) Tree(p string, depth int) []string {
|
||||
if p != "" || depth == 0 {
|
||||
return nil
|
||||
}
|
||||
return []string{"root", "status", "cumulativeGasUsed", "logsBloom", "logs", "transactionHash", "contractAddress", "gasUsed"}
|
||||
}
|
||||
|
||||
// ResolveLink is a helper function that calls resolve and asserts the
|
||||
// output is a link
|
||||
func (r *EthReceipt) ResolveLink(p []string) (*node.Link, []string, error) {
|
||||
obj, rest, err := r.Resolve(p)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if lnk, ok := obj.(*node.Link); ok {
|
||||
return lnk, rest, nil
|
||||
}
|
||||
|
||||
return nil, nil, fmt.Errorf("resolved item was not a link")
|
||||
}
|
||||
|
||||
// Copy will go away. It is here to comply with the Node interface.
|
||||
func (*EthReceipt) Copy() node.Node {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
// Links is a helper function that returns all links within this object
|
||||
func (*EthReceipt) Links() []*node.Link {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stat will go away. It is here to comply with the interface.
|
||||
func (r *EthReceipt) Stat() (*node.NodeStat, error) {
|
||||
return &node.NodeStat{}, nil
|
||||
}
|
||||
|
||||
// Size will go away. It is here to comply with the interface.
|
||||
func (r *EthReceipt) Size() (uint64, error) {
|
||||
return strconv.ParseUint(r.Receipt.Size().String(), 10, 64)
|
||||
}
|
||||
|
||||
/*
|
||||
EthReceipt functions
|
||||
*/
|
||||
|
||||
// MarshalJSON processes the receipt into readable JSON format.
|
||||
func (r *EthReceipt) MarshalJSON() ([]byte, error) {
|
||||
out := map[string]interface{}{
|
||||
"root": r.PostState,
|
||||
"status": r.Status,
|
||||
"cumulativeGasUsed": r.CumulativeGasUsed,
|
||||
"logsBloom": r.Bloom,
|
||||
"logs": r.Logs,
|
||||
"transactionHash": r.TxHash,
|
||||
"contractAddress": r.ContractAddress,
|
||||
"gasUsed": r.GasUsed,
|
||||
}
|
||||
return json.Marshal(out)
|
||||
}
|
||||
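A minimal round-trip sketch using only NewReceipt and DecodeEthReceipt; the helper is illustrative and assumes a fully populated receipt.

package ipld

import "github.com/ethereum/go-ethereum/core/types"

// receiptRoundTrip encodes a receipt into its IPLD form, decodes it back
// from the raw RLP, and reports whether a consensus field survived.
func receiptRoundTrip(rct *types.Receipt) (bool, error) {
	encoded, err := NewReceipt(rct)
	if err != nil {
		return false, err
	}
	decoded, err := DecodeEthReceipt(encoded.Cid(), encoded.RawData())
	if err != nil {
		return false, err
	}
	return decoded.CumulativeGasUsed == rct.CumulativeGasUsed, nil
}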
statediff/indexer/ipfs/ipld/eth_receipt_trie.go  (new file, 152 lines)
@@ -0,0 +1,152 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
node "github.com/ipfs/go-ipld-format"
|
||||
"github.com/multiformats/go-multihash"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// EthRctTrie (eth-tx-receipt-trie codec 0x94) represents
|
||||
// a node from the receipt trie in ethereum.
|
||||
type EthRctTrie struct {
|
||||
*TrieNode
|
||||
}
|
||||
|
||||
// Static (compile time) check that EthRctTrie satisfies the node.Node interface.
|
||||
var _ node.Node = (*EthRctTrie)(nil)
|
||||
|
||||
/*
|
||||
INPUT
|
||||
*/
|
||||
|
||||
// To create a proper trie of the eth-tx-trie objects, it is required
|
||||
// to input all transactions belonging to a forest in a single step.
|
||||
// We are adding the transactions, and creating its trie on
|
||||
// block body parsing time.
|
||||
|
||||
/*
|
||||
OUTPUT
|
||||
*/
|
||||
|
||||
// DecodeEthRctTrie returns an EthRctTrie object from its cid and rawdata.
|
||||
func DecodeEthRctTrie(c cid.Cid, b []byte) (*EthRctTrie, error) {
|
||||
tn, err := decodeTrieNode(c, b, decodeEthRctTrieLeaf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &EthRctTrie{TrieNode: tn}, nil
|
||||
}
|
||||
|
||||
// decodeEthRctTrieLeaf parses a eth-rct-trie leaf
|
||||
// from decoded RLP elements
|
||||
func decodeEthRctTrieLeaf(i []interface{}) ([]interface{}, error) {
|
||||
var r types.Receipt
|
||||
err := rlp.DecodeBytes(i[1].([]byte), &r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := RawdataToCid(MEthTxReceipt, i[1].([]byte), multihash.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []interface{}{
|
||||
i[0].([]byte),
|
||||
&EthReceipt{
|
||||
Receipt: &r,
|
||||
cid: c,
|
||||
rawdata: i[1].([]byte),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
/*
|
||||
Block INTERFACE
|
||||
*/
|
||||
|
||||
// RawData returns the binary of the RLP encode of the transaction.
|
||||
func (t *EthRctTrie) RawData() []byte {
|
||||
return t.rawdata
|
||||
}
|
||||
|
||||
// Cid returns the cid of the transaction.
|
||||
func (t *EthRctTrie) Cid() cid.Cid {
|
||||
return t.cid
|
||||
}
|
||||
|
||||
// String is a helper for output
|
||||
func (t *EthRctTrie) String() string {
|
||||
return fmt.Sprintf("<EthereumRctTrie %s>", t.cid)
|
||||
}
|
||||
|
||||
// Loggable returns in a map the type of IPLD Link.
|
||||
func (t *EthRctTrie) Loggable() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"type": "eth-rct-trie",
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
EthRctTrie functions
|
||||
*/
|
||||
|
||||
// rctTrie wraps a localTrie for use on the receipt trie.
|
||||
type rctTrie struct {
|
||||
*localTrie
|
||||
}
|
||||
|
||||
// newRctTrie initializes and returns a rctTrie.
|
||||
func newRctTrie() *rctTrie {
|
||||
return &rctTrie{
|
||||
localTrie: newLocalTrie(),
|
||||
}
|
||||
}
|
||||
|
||||
// getNodes invokes the localTrie, which computes the root hash of the
|
||||
// receipt trie and returns its database keys, to return a slice
|
||||
// of EthRctTrie nodes.
|
||||
func (rt *rctTrie) getNodes() []*EthRctTrie {
|
||||
keys := rt.getKeys()
|
||||
var out []*EthRctTrie
|
||||
it := rt.trie.NodeIterator([]byte{})
|
||||
for it.Next(true) {
|
||||
|
||||
}
|
||||
for _, k := range keys {
|
||||
rawdata, err := rt.db.Get(k)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
c, err := RawdataToCid(MEthTxReceiptTrie, rawdata, multihash.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
tn := &TrieNode{
|
||||
cid: c,
|
||||
rawdata: rawdata,
|
||||
}
|
||||
out = append(out, &EthRctTrie{TrieNode: tn})
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
statediff/indexer/ipfs/ipld/eth_state.go  (new file, 114 lines)
@@ -0,0 +1,114 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
node "github.com/ipfs/go-ipld-format"
|
||||
"github.com/multiformats/go-multihash"
|
||||
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// EthStateTrie (eth-state-trie, codec 0x96), represents
|
||||
// a node from the state trie in ethereum.
|
||||
type EthStateTrie struct {
|
||||
*TrieNode
|
||||
}
|
||||
|
||||
// Static (compile time) check that EthStateTrie satisfies the node.Node interface.
|
||||
var _ node.Node = (*EthStateTrie)(nil)
|
||||
|
||||
/*
|
||||
INPUT
|
||||
*/
|
||||
|
||||
// FromStateTrieRLP takes the RLP representation of an ethereum
|
||||
// state trie node to return it as an IPLD node for further processing.
|
||||
func FromStateTrieRLP(raw []byte) (*EthStateTrie, error) {
|
||||
c, err := RawdataToCid(MEthStateTrie, raw, multihash.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Let's run the whole mile and process the nodeKind and
|
||||
// its elements, in case somebody would need this function
|
||||
// to parse an RLP element from the filesystem
|
||||
return DecodeEthStateTrie(c, raw)
|
||||
}
|
||||
|
||||
/*
|
||||
OUTPUT
|
||||
*/
|
||||
|
||||
// DecodeEthStateTrie returns an EthStateTrie object from its cid and rawdata.
|
||||
func DecodeEthStateTrie(c cid.Cid, b []byte) (*EthStateTrie, error) {
|
||||
tn, err := decodeTrieNode(c, b, decodeEthStateTrieLeaf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &EthStateTrie{TrieNode: tn}, nil
|
||||
}
|
||||
|
||||
// decodeEthStateTrieLeaf parses an eth-state-trie leaf
|
||||
// from decoded RLP elements
|
||||
func decodeEthStateTrieLeaf(i []interface{}) ([]interface{}, error) {
|
||||
var account EthAccount
|
||||
err := rlp.DecodeBytes(i[1].([]byte), &account)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := RawdataToCid(MEthAccountSnapshot, i[1].([]byte), multihash.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []interface{}{
|
||||
i[0].([]byte),
|
||||
&EthAccountSnapshot{
|
||||
EthAccount: &account,
|
||||
cid: c,
|
||||
rawdata: i[1].([]byte),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
/*
|
||||
Block INTERFACE
|
||||
*/
|
||||
|
||||
// RawData returns the binary of the RLP encode of the state trie node.
|
||||
func (st *EthStateTrie) RawData() []byte {
|
||||
return st.rawdata
|
||||
}
|
||||
|
||||
// Cid returns the cid of the state trie node.
|
||||
func (st *EthStateTrie) Cid() cid.Cid {
|
||||
return st.cid
|
||||
}
|
||||
|
||||
// String is a helper for output
|
||||
func (st *EthStateTrie) String() string {
|
||||
return fmt.Sprintf("<EthereumStateTrie %s>", st.cid)
|
||||
}
|
||||
|
||||
// Loggable returns in a map the type of IPLD Link.
|
||||
func (st *EthStateTrie) Loggable() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"type": "eth-state-trie",
|
||||
}
|
||||
}
|
||||
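A small sketch of the intended entry point: feeding a raw state trie node (as carried in the state diff payload) through FromStateTrieRLP. The helper name is made up.

package ipld

import "fmt"

// describeStateNode turns a raw state trie node into its IPLD form and
// reports its CID and size.
func describeStateNode(raw []byte) error {
	stateNode, err := FromStateTrieRLP(raw)
	if err != nil {
		return err
	}
	fmt.Printf("state trie node %s (%d bytes)\n", stateNode.Cid(), len(stateNode.RawData()))
	return nil
}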
statediff/indexer/ipfs/ipld/eth_storage.go  (new file, 100 lines)
@@ -0,0 +1,100 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
node "github.com/ipfs/go-ipld-format"
|
||||
"github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
// EthStorageTrie (eth-storage-trie, codec 0x98), represents
|
||||
// a node from the storage trie in ethereum.
|
||||
type EthStorageTrie struct {
|
||||
*TrieNode
|
||||
}
|
||||
|
||||
// Static (compile time) check that EthStorageTrie satisfies the node.Node interface.
|
||||
var _ node.Node = (*EthStorageTrie)(nil)
|
||||
|
||||
/*
|
||||
INPUT
|
||||
*/
|
||||
|
||||
// FromStorageTrieRLP takes the RLP representation of an ethereum
|
||||
// storage trie node to return it as an IPLD node for further processing.
|
||||
func FromStorageTrieRLP(raw []byte) (*EthStorageTrie, error) {
|
||||
c, err := RawdataToCid(MEthStorageTrie, raw, multihash.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Let's run the whole mile and process the nodeKind and
|
||||
// its elements, in case somebody would need this function
|
||||
// to parse an RLP element from the filesystem
|
||||
return DecodeEthStorageTrie(c, raw)
|
||||
}
|
||||
|
||||
/*
|
||||
OUTPUT
|
||||
*/
|
||||
|
||||
// DecodeEthStorageTrie returns an EthStorageTrie object from its cid and rawdata.
|
||||
func DecodeEthStorageTrie(c cid.Cid, b []byte) (*EthStorageTrie, error) {
|
||||
tn, err := decodeTrieNode(c, b, decodeEthStorageTrieLeaf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &EthStorageTrie{TrieNode: tn}, nil
|
||||
}
|
||||
|
||||
// decodeEthStorageTrieLeaf parses an eth-storage-trie leaf
|
||||
// from decoded RLP elements
|
||||
func decodeEthStorageTrieLeaf(i []interface{}) ([]interface{}, error) {
|
||||
return []interface{}{
|
||||
i[0].([]byte),
|
||||
i[1].([]byte),
|
||||
}, nil
|
||||
}
|
||||
|
||||
/*
|
||||
Block INTERFACE
|
||||
*/
|
||||
|
||||
// RawData returns the binary of the RLP encode of the storage trie node.
|
||||
func (st *EthStorageTrie) RawData() []byte {
|
||||
return st.rawdata
|
||||
}
|
||||
|
||||
// Cid returns the cid of the storage trie node.
|
||||
func (st *EthStorageTrie) Cid() cid.Cid {
|
||||
return st.cid
|
||||
}
|
||||
|
||||
// String is a helper for output
|
||||
func (st *EthStorageTrie) String() string {
|
||||
return fmt.Sprintf("<EthereumStorageTrie %s>", st.cid)
|
||||
}
|
||||
|
||||
// Loggable returns in a map the type of IPLD Link.
|
||||
func (st *EthStorageTrie) Loggable() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"type": "eth-storage-trie",
|
||||
}
|
||||
}
|
||||
statediff/indexer/ipfs/ipld/eth_tx.go  (new file, 215 lines)
@@ -0,0 +1,215 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ipfs/go-cid"
|
||||
node "github.com/ipfs/go-ipld-format"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
// EthTx (eth-tx codec 0x93) represents an ethereum transaction
|
||||
type EthTx struct {
|
||||
*types.Transaction
|
||||
|
||||
cid cid.Cid
|
||||
rawdata []byte
|
||||
}
|
||||
|
||||
// Static (compile time) check that EthTx satisfies the node.Node interface.
|
||||
var _ node.Node = (*EthTx)(nil)
|
||||
|
||||
/*
|
||||
INPUT
|
||||
*/
|
||||
|
||||
// NewEthTx converts a *types.Transaction to an EthTx IPLD node
|
||||
func NewEthTx(tx *types.Transaction) (*EthTx, error) {
|
||||
txRLP, err := rlp.EncodeToBytes(tx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := RawdataToCid(MEthTx, txRLP, mh.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &EthTx{
|
||||
Transaction: tx,
|
||||
cid: c,
|
||||
rawdata: txRLP,
|
||||
}, nil
|
||||
}
|
||||
|
||||
/*
|
||||
OUTPUT
|
||||
*/
|
||||
|
||||
// DecodeEthTx takes a cid and its raw binary data
|
||||
// from IPFS and returns an EthTx object for further processing.
|
||||
func DecodeEthTx(c cid.Cid, b []byte) (*EthTx, error) {
|
||||
t := new(types.Transaction)
|
||||
if err := rlp.DecodeBytes(b, t); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &EthTx{
|
||||
Transaction: t,
|
||||
cid: c,
|
||||
rawdata: b,
|
||||
}, nil
|
||||
}
|
||||
|
||||
/*
|
||||
Block INTERFACE
|
||||
*/
|
||||
|
||||
// RawData returns the binary of the RLP encode of the transaction.
|
||||
func (t *EthTx) RawData() []byte {
|
||||
return t.rawdata
|
||||
}
|
||||
|
||||
// Cid returns the cid of the transaction.
|
||||
func (t *EthTx) Cid() cid.Cid {
|
||||
return t.cid
|
||||
}
|
||||
|
||||
// String is a helper for output
|
||||
func (t *EthTx) String() string {
|
||||
return fmt.Sprintf("<EthereumTx %s>", t.cid)
|
||||
}
|
||||
|
||||
// Loggable returns in a map the type of IPLD Link.
|
||||
func (t *EthTx) Loggable() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"type": "eth-tx",
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Node INTERFACE
|
||||
*/
|
||||
|
||||
// Resolve resolves a path through this node, stopping at any link boundary
|
||||
// and returning the object found as well as the remaining path to traverse
|
||||
func (t *EthTx) Resolve(p []string) (interface{}, []string, error) {
|
||||
if len(p) == 0 {
|
||||
return t, nil, nil
|
||||
}
|
||||
|
||||
if len(p) > 1 {
|
||||
return nil, nil, fmt.Errorf("unexpected path elements past %s", p[0])
|
||||
}
|
||||
|
||||
switch p[0] {
|
||||
|
||||
case "gas":
|
||||
return t.Gas(), nil, nil
|
||||
case "gasPrice":
|
||||
return t.GasPrice(), nil, nil
|
||||
case "input":
|
||||
return fmt.Sprintf("%x", t.Data()), nil, nil
|
||||
case "nonce":
|
||||
return t.Nonce(), nil, nil
|
||||
case "r":
|
||||
_, r, _ := t.RawSignatureValues()
|
||||
return hexutil.EncodeBig(r), nil, nil
|
||||
case "s":
|
||||
_, _, s := t.RawSignatureValues()
|
||||
return hexutil.EncodeBig(s), nil, nil
|
||||
case "toAddress":
|
||||
return t.To(), nil, nil
|
||||
case "v":
|
||||
v, _, _ := t.RawSignatureValues()
|
||||
return hexutil.EncodeBig(v), nil, nil
|
||||
case "value":
|
||||
return hexutil.EncodeBig(t.Value()), nil, nil
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("no such link")
|
||||
}
|
||||
}
|
||||
|
||||
// Tree lists all paths within the object under 'path', and up to the given depth.
|
||||
// To list the entire object (similar to `find .`) pass "" and -1
|
||||
func (t *EthTx) Tree(p string, depth int) []string {
|
||||
if p != "" || depth == 0 {
|
||||
return nil
|
||||
}
|
||||
return []string{"gas", "gasPrice", "input", "nonce", "r", "s", "toAddress", "v", "value"}
|
||||
}
|
||||
|
||||
// ResolveLink is a helper function that calls resolve and asserts the
|
||||
// output is a link
|
||||
func (t *EthTx) ResolveLink(p []string) (*node.Link, []string, error) {
|
||||
obj, rest, err := t.Resolve(p)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if lnk, ok := obj.(*node.Link); ok {
|
||||
return lnk, rest, nil
|
||||
}
|
||||
|
||||
return nil, nil, fmt.Errorf("resolved item was not a link")
|
||||
}
|
||||
|
||||
// Copy will go away. It is here to comply with the interface.
|
||||
func (t *EthTx) Copy() node.Node {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
// Links is a helper function that returns all links within this object
|
||||
func (t *EthTx) Links() []*node.Link {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stat will go away. It is here to comply with the interface.
|
||||
func (t *EthTx) Stat() (*node.NodeStat, error) {
|
||||
return &node.NodeStat{}, nil
|
||||
}
|
||||
|
||||
// Size will go away. It is here to comply with the interface.
|
||||
func (t *EthTx) Size() (uint64, error) {
|
||||
return strconv.ParseUint(t.Transaction.Size().String(), 10, 64)
|
||||
}
|
||||
|
||||
/*
|
||||
EthTx functions
|
||||
*/
|
||||
|
||||
// MarshalJSON processes the transaction into readable JSON format.
|
||||
func (t *EthTx) MarshalJSON() ([]byte, error) {
|
||||
v, r, s := t.RawSignatureValues()
|
||||
|
||||
out := map[string]interface{}{
|
||||
"gas": t.Gas(),
|
||||
"gasPrice": hexutil.EncodeBig(t.GasPrice()),
|
||||
"input": fmt.Sprintf("%x", t.Data()),
|
||||
"nonce": t.Nonce(),
|
||||
"r": hexutil.EncodeBig(r),
|
||||
"s": hexutil.EncodeBig(s),
|
||||
"toAddress": t.To(),
|
||||
"v": hexutil.EncodeBig(v),
|
||||
"value": hexutil.EncodeBig(t.Value()),
|
||||
}
|
||||
return json.Marshal(out)
|
||||
}
|
||||
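A usage sketch with made-up transaction values, exercising NewEthTx and a few of the Resolve paths listed in Tree; the address and amounts are placeholders.

package ipld

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// exampleTxNode wraps an unsigned, made-up transaction and resolves a few
// of its paths through the node interface defined above.
func exampleTxNode() error {
	to := common.HexToAddress("0x0000000000000000000000000000000000000001")
	tx := types.NewTransaction(0, to, big.NewInt(1), 21000, big.NewInt(1), nil)
	txNode, err := NewEthTx(tx)
	if err != nil {
		return err
	}
	fmt.Println("tx cid:", txNode.Cid())
	for _, path := range []string{"gas", "value", "toAddress"} {
		v, _, err := txNode.Resolve([]string{path})
		if err != nil {
			return err
		}
		fmt.Printf("%s: %v\n", path, v)
	}
	return nil
}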
statediff/indexer/ipfs/ipld/eth_tx_trie.go  (new file, 152 lines)
@@ -0,0 +1,152 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
node "github.com/ipfs/go-ipld-format"
|
||||
"github.com/multiformats/go-multihash"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// EthTxTrie (eth-tx-trie codec 0x92) represents
|
||||
// a node from the transaction trie in ethereum.
|
||||
type EthTxTrie struct {
|
||||
*TrieNode
|
||||
}
|
||||
|
||||
// Static (compile time) check that EthTxTrie satisfies the node.Node interface.
|
||||
var _ node.Node = (*EthTxTrie)(nil)
|
||||
|
||||
/*
|
||||
INPUT
|
||||
*/
|
||||
|
||||
// To create a proper trie of the eth-tx-trie objects, it is required
|
||||
// to input all transactions belonging to a forest in a single step.
|
||||
// We are adding the transactions, and creating its trie on
|
||||
// block body parsing time.
|
||||
|
||||
/*
|
||||
OUTPUT
|
||||
*/
|
||||
|
||||
// DecodeEthTxTrie returns an EthTxTrie object from its cid and rawdata.
|
||||
func DecodeEthTxTrie(c cid.Cid, b []byte) (*EthTxTrie, error) {
|
||||
tn, err := decodeTrieNode(c, b, decodeEthTxTrieLeaf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &EthTxTrie{TrieNode: tn}, nil
|
||||
}
|
||||
|
||||
// decodeEthTxTrieLeaf parses an eth-tx-trie leaf
|
||||
//from decoded RLP elements
|
||||
func decodeEthTxTrieLeaf(i []interface{}) ([]interface{}, error) {
|
||||
var t types.Transaction
|
||||
err := rlp.DecodeBytes(i[1].([]byte), &t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := RawdataToCid(MEthTx, i[1].([]byte), multihash.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []interface{}{
|
||||
i[0].([]byte),
|
||||
&EthTx{
|
||||
Transaction: &t,
|
||||
cid: c,
|
||||
rawdata: i[1].([]byte),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
/*
|
||||
Block INTERFACE
|
||||
*/
|
||||
|
||||
// RawData returns the binary of the RLP encode of the transaction.
|
||||
func (t *EthTxTrie) RawData() []byte {
|
||||
return t.rawdata
|
||||
}
|
||||
|
||||
// Cid returns the cid of the transaction.
|
||||
func (t *EthTxTrie) Cid() cid.Cid {
|
||||
return t.cid
|
||||
}
|
||||
|
||||
// String is a helper for output
|
||||
func (t *EthTxTrie) String() string {
|
||||
return fmt.Sprintf("<EthereumTxTrie %s>", t.cid)
|
||||
}
|
||||
|
||||
// Loggable returns in a map the type of IPLD Link.
|
||||
func (t *EthTxTrie) Loggable() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"type": "eth-tx-trie",
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
EthTxTrie functions
|
||||
*/
|
||||
|
||||
// txTrie wraps a localTrie for use on the transaction trie.
|
||||
type txTrie struct {
|
||||
*localTrie
|
||||
}
|
||||
|
||||
// newTxTrie initializes and returns a txTrie.
|
||||
func newTxTrie() *txTrie {
|
||||
return &txTrie{
|
||||
localTrie: newLocalTrie(),
|
||||
}
|
||||
}
|
||||
|
||||
// getNodes invokes the localTrie, which computes the root hash of the
|
||||
// transaction trie and returns its database keys, to return a slice
|
||||
// of EthTxTrie nodes.
|
||||
func (tt *txTrie) getNodes() []*EthTxTrie {
|
||||
keys := tt.getKeys()
|
||||
var out []*EthTxTrie
|
||||
it := tt.trie.NodeIterator([]byte{})
|
||||
for it.Next(true) {
|
||||
|
||||
}
|
||||
for _, k := range keys {
|
||||
rawdata, err := tt.db.Get(k)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
c, err := RawdataToCid(MEthTxTrie, rawdata, multihash.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
tn := &TrieNode{
|
||||
cid: c,
|
||||
rawdata: rawdata,
|
||||
}
|
||||
out = append(out, &EthTxTrie{TrieNode: tn})
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
statediff/indexer/ipfs/ipld/shared.go (new file, 126 lines)
@ -0,0 +1,126 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"github.com/ipfs/go-cid"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
// IPLD Codecs for Ethereum
|
||||
// See the authoritative document:
|
||||
// https://github.com/multiformats/multicodec/blob/master/table.csv
|
||||
const (
|
||||
RawBinary = 0x55
|
||||
MEthHeader = 0x90
|
||||
MEthHeaderList = 0x91
|
||||
MEthTxTrie = 0x92
|
||||
MEthTx = 0x93
|
||||
MEthTxReceiptTrie = 0x94
|
||||
MEthTxReceipt = 0x95
|
||||
MEthStateTrie = 0x96
|
||||
MEthAccountSnapshot = 0x97
|
||||
MEthStorageTrie = 0x98
|
||||
)
|
||||
|
||||
// RawdataToCid takes the desired codec and a slice of bytes
|
||||
// and returns the proper cid of the object.
|
||||
func RawdataToCid(codec uint64, rawdata []byte, multiHash uint64) (cid.Cid, error) {
|
||||
c, err := cid.Prefix{
|
||||
Codec: codec,
|
||||
Version: 1,
|
||||
MhType: multiHash,
|
||||
MhLength: -1,
|
||||
}.Sum(rawdata)
|
||||
if err != nil {
|
||||
return cid.Cid{}, err
|
||||
}
|
||||
return c, nil
|
||||
}
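// Illustrative usage only (fmt and the tx variable are assumptions, not part of
// this file): wrapping an RLP-encoded transaction in its CID.
//
//	txRLP, _ := rlp.EncodeToBytes(tx) // tx assumed to be a *types.Transaction
//	c, err := RawdataToCid(MEthTx, txRLP, mh.KECCAK_256)
//	if err != nil {
//		return err
//	}
//	fmt.Println(c.String()) // a CIDv1 with the eth-tx (0x93) codec and a keccak-256 multihash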
|
||||
|
||||
// keccak256ToCid takes a keccak256 hash and returns its cid based on
|
||||
// the codec given.
|
||||
func keccak256ToCid(codec uint64, h []byte) cid.Cid {
|
||||
buf, err := mh.Encode(h, mh.KECCAK_256)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return cid.NewCidV1(codec, mh.Multihash(buf))
|
||||
}
|
||||
|
||||
// commonHashToCid takes a go-ethereum common.Hash and returns its
|
||||
// cid based on the codec given,
|
||||
func commonHashToCid(codec uint64, h common.Hash) cid.Cid {
|
||||
mhash, err := mh.Encode(h[:], mh.KECCAK_256)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return cid.NewCidV1(codec, mhash)
|
||||
}
|
||||
|
||||
// localTrie wraps a go-ethereum trie and its underlying memory db.
|
||||
// It contributes to the creation of the trie node objects.
|
||||
type localTrie struct {
|
||||
keys [][]byte
|
||||
db ethdb.Database
|
||||
trie *trie.Trie
|
||||
}
|
||||
|
||||
// newLocalTrie initializes and returns a localTrie object
|
||||
func newLocalTrie() *localTrie {
|
||||
var err error
|
||||
lt := &localTrie{}
|
||||
lt.db = rawdb.NewMemoryDatabase()
|
||||
lt.trie, err = trie.New(common.Hash{}, trie.NewDatabase(lt.db))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return lt
|
||||
}
|
||||
|
||||
// add receives the index of an object and its rawdata value
|
||||
// and includes it into the localTrie
|
||||
func (lt *localTrie) add(idx int, rawdata []byte) {
|
||||
key, err := rlp.EncodeToBytes(uint(idx))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
lt.keys = append(lt.keys, key)
|
||||
if err := lt.db.Put(key, rawdata); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
lt.trie.Update(key, rawdata)
|
||||
}
|
||||
|
||||
// rootHash returns the computed trie root.
|
||||
// Useful for sanity checks on parsed data.
|
||||
func (lt *localTrie) rootHash() []byte {
|
||||
return lt.trie.Hash().Bytes()
|
||||
}
|
||||
|
||||
// getKeys returns the stored keys of the memory database
|
||||
// of the localTrie for further processing.
|
||||
func (lt *localTrie) getKeys() [][]byte {
|
||||
return lt.keys
|
||||
}
|
||||
statediff/indexer/ipfs/ipld/trie_node.go (new file, 444 lines)
@ -0,0 +1,444 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ipfs/go-cid"
|
||||
node "github.com/ipfs/go-ipld-format"
|
||||
)
|
||||
|
||||
// TrieNode is the general abstraction for
// ethereum IPLD trie nodes.
|
||||
type TrieNode struct {
|
||||
// leaf, extension or branch
|
||||
nodeKind string
|
||||
|
||||
// If leaf or extension: [0] is key, [1] is val.
|
||||
// If branch: [0] - [16] are children.
|
||||
elements []interface{}
|
||||
|
||||
// IPLD block information
|
||||
cid cid.Cid
|
||||
rawdata []byte
|
||||
}
|
||||
|
||||
/*
|
||||
OUTPUT
|
||||
*/
|
||||
|
||||
type trieNodeLeafDecoder func([]interface{}) ([]interface{}, error)
|
||||
|
||||
// decodeTrieNode returns a TrieNode object from an IPLD block's
|
||||
// cid and rawdata.
|
||||
func decodeTrieNode(c cid.Cid, b []byte,
|
||||
leafDecoder trieNodeLeafDecoder) (*TrieNode, error) {
|
||||
var (
|
||||
i, decoded, elements []interface{}
|
||||
nodeKind string
|
||||
err error
|
||||
)
|
||||
|
||||
if err = rlp.DecodeBytes(b, &i); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
codec := c.Type()
|
||||
switch len(i) {
|
||||
case 2:
|
||||
nodeKind, decoded, err = decodeCompactKey(i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if nodeKind == "extension" {
|
||||
elements, err = parseTrieNodeExtension(decoded, codec)
|
||||
}
|
||||
if nodeKind == "leaf" {
|
||||
elements, err = leafDecoder(decoded)
|
||||
}
|
||||
if nodeKind != "extension" && nodeKind != "leaf" {
|
||||
return nil, fmt.Errorf("unexpected nodeKind returned from decoder")
|
||||
}
|
||||
case 17:
|
||||
nodeKind = "branch"
|
||||
elements, err = parseTrieNodeBranch(i, codec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown trie node type")
|
||||
}
|
||||
|
||||
return &TrieNode{
|
||||
nodeKind: nodeKind,
|
||||
elements: elements,
|
||||
rawdata: b,
|
||||
cid: c,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// decodeCompactKey takes a compact key, and returns its nodeKind and value.
|
||||
func decodeCompactKey(i []interface{}) (string, []interface{}, error) {
|
||||
first := i[0].([]byte)
|
||||
last := i[1].([]byte)
|
||||
|
||||
switch first[0] / 16 {
|
||||
case '\x00':
|
||||
return "extension", []interface{}{
|
||||
nibbleToByte(first)[2:],
|
||||
last,
|
||||
}, nil
|
||||
case '\x01':
|
||||
return "extension", []interface{}{
|
||||
nibbleToByte(first)[1:],
|
||||
last,
|
||||
}, nil
|
||||
case '\x02':
|
||||
return "leaf", []interface{}{
|
||||
nibbleToByte(first)[2:],
|
||||
last,
|
||||
}, nil
|
||||
case '\x03':
|
||||
return "leaf", []interface{}{
|
||||
nibbleToByte(first)[1:],
|
||||
last,
|
||||
}, nil
|
||||
default:
|
||||
return "", nil, fmt.Errorf("unknown hex prefix")
|
||||
}
|
||||
}
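// Worked example (illustrative only): the compact key {0x20, 0xab} has high
// nibble 2, so it marks a leaf with an even-length path; nibbleToByte expands
// it to {2, 0, 10, 11} and dropping the two prefix nibbles leaves {0xa, 0xb}.
//
//	kind, decoded, err := decodeCompactKey([]interface{}{
//		[]byte{0x20, 0xab}, // hex-prefix flag 2 => leaf, even-length path
//		[]byte{0x01},       // value
//	})
//	// kind == "leaf", decoded[0] == []byte{0x0a, 0x0b}, decoded[1] == []byte{0x01}, err == nil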
|
||||
|
||||
// parseTrieNodeExtension helper improves readability
|
||||
func parseTrieNodeExtension(i []interface{}, codec uint64) ([]interface{}, error) {
|
||||
return []interface{}{
|
||||
i[0].([]byte),
|
||||
keccak256ToCid(codec, i[1].([]byte)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseTrieNodeBranch helper improves readability
|
||||
func parseTrieNodeBranch(i []interface{}, codec uint64) ([]interface{}, error) {
|
||||
var out []interface{}
|
||||
|
||||
for i, vi := range i {
|
||||
v, ok := vi.([]byte)
|
||||
// Sometimes this throws "panic: interface conversion: interface {} is []interface {}, not []uint8"
|
||||
// Figure out why, and if it is okay to continue
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unable to decode branch node entry into []byte at position: %d value: %+v", i, vi)
|
||||
}
|
||||
|
||||
switch len(v) {
|
||||
case 0:
|
||||
out = append(out, nil)
|
||||
case 32:
|
||||
out = append(out, keccak256ToCid(codec, v))
|
||||
default:
|
||||
return nil, fmt.Errorf("unrecognized object: %v", v)
|
||||
}
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
/*
|
||||
Node INTERFACE
|
||||
*/
|
||||
|
||||
// Resolve resolves a path through this node, stopping at any link boundary
|
||||
// and returning the object found as well as the remaining path to traverse
|
||||
func (t *TrieNode) Resolve(p []string) (interface{}, []string, error) {
|
||||
switch t.nodeKind {
|
||||
case "extension":
|
||||
return t.resolveTrieNodeExtension(p)
|
||||
case "leaf":
|
||||
return t.resolveTrieNodeLeaf(p)
|
||||
case "branch":
|
||||
return t.resolveTrieNodeBranch(p)
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("nodeKind case not implemented")
|
||||
}
|
||||
}
|
||||
|
||||
// Tree lists all paths within the object under 'path', and up to the given depth.
|
||||
// To list the entire object (similar to `find .`) pass "" and -1
|
||||
func (t *TrieNode) Tree(p string, depth int) []string {
|
||||
if p != "" || depth == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var out []string
|
||||
|
||||
switch t.nodeKind {
|
||||
case "extension":
|
||||
var val string
|
||||
for _, e := range t.elements[0].([]byte) {
|
||||
val += fmt.Sprintf("%x", e)
|
||||
}
|
||||
return []string{val}
|
||||
case "branch":
|
||||
for i, elem := range t.elements {
|
||||
if _, ok := elem.(*cid.Cid); ok {
|
||||
out = append(out, fmt.Sprintf("%x", i))
|
||||
}
|
||||
}
|
||||
return out
|
||||
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ResolveLink is a helper function that calls resolve and asserts the
|
||||
// output is a link
|
||||
func (t *TrieNode) ResolveLink(p []string) (*node.Link, []string, error) {
|
||||
obj, rest, err := t.Resolve(p)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
lnk, ok := obj.(*node.Link)
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("was not a link")
|
||||
}
|
||||
|
||||
return lnk, rest, nil
|
||||
}
|
||||
|
||||
// Copy will go away. It is here to comply with the interface.
|
||||
func (t *TrieNode) Copy() node.Node {
|
||||
panic("dont use this yet")
|
||||
}
|
||||
|
||||
// Links is a helper function that returns all links within this object
|
||||
func (t *TrieNode) Links() []*node.Link {
|
||||
var out []*node.Link
|
||||
|
||||
for _, i := range t.elements {
|
||||
c, ok := i.(cid.Cid)
|
||||
if ok {
|
||||
out = append(out, &node.Link{Cid: c})
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// Stat will go away. It is here to comply with the interface.
|
||||
func (t *TrieNode) Stat() (*node.NodeStat, error) {
|
||||
return &node.NodeStat{}, nil
|
||||
}
|
||||
|
||||
// Size will go away. It is here to comply with the interface.
|
||||
func (t *TrieNode) Size() (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
/*
|
||||
TrieNode functions
|
||||
*/
|
||||
|
||||
// MarshalJSON processes the trie node into readable JSON format.
|
||||
func (t *TrieNode) MarshalJSON() ([]byte, error) {
|
||||
var out map[string]interface{}
|
||||
|
||||
switch t.nodeKind {
|
||||
case "extension":
|
||||
fallthrough
|
||||
case "leaf":
|
||||
var hexPrefix string
|
||||
for _, e := range t.elements[0].([]byte) {
|
||||
hexPrefix += fmt.Sprintf("%x", e)
|
||||
}
|
||||
|
||||
// if we got a byte slice we need to do this cast, otherwise
// it will be marshaled to a base64 encoded value
|
||||
if _, ok := t.elements[1].([]byte); ok {
|
||||
var hexVal string
|
||||
for _, e := range t.elements[1].([]byte) {
|
||||
hexVal += fmt.Sprintf("%x", e)
|
||||
}
|
||||
|
||||
t.elements[1] = hexVal
|
||||
}
|
||||
|
||||
out = map[string]interface{}{
|
||||
"type": t.nodeKind,
|
||||
hexPrefix: t.elements[1],
|
||||
}
|
||||
|
||||
case "branch":
|
||||
out = map[string]interface{}{
|
||||
"type": "branch",
|
||||
"0": t.elements[0],
|
||||
"1": t.elements[1],
|
||||
"2": t.elements[2],
|
||||
"3": t.elements[3],
|
||||
"4": t.elements[4],
|
||||
"5": t.elements[5],
|
||||
"6": t.elements[6],
|
||||
"7": t.elements[7],
|
||||
"8": t.elements[8],
|
||||
"9": t.elements[9],
|
||||
"a": t.elements[10],
|
||||
"b": t.elements[11],
|
||||
"c": t.elements[12],
|
||||
"d": t.elements[13],
|
||||
"e": t.elements[14],
|
||||
"f": t.elements[15],
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("nodeKind %s not supported", t.nodeKind)
|
||||
}
|
||||
|
||||
return json.Marshal(out)
|
||||
}
|
||||
|
||||
// nibbleToByte expands the nibbles of a byte slice into their own bytes.
|
||||
func nibbleToByte(k []byte) []byte {
|
||||
var out []byte
|
||||
|
||||
for _, b := range k {
|
||||
out = append(out, b/16)
|
||||
out = append(out, b%16)
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// Resolve reading conveniences
|
||||
func (t *TrieNode) resolveTrieNodeExtension(p []string) (interface{}, []string, error) {
|
||||
nibbles := t.elements[0].([]byte)
|
||||
idx, rest := shiftFromPath(p, len(nibbles))
|
||||
if len(idx) < len(nibbles) {
|
||||
return nil, nil, fmt.Errorf("not enough nibbles to traverse this extension")
|
||||
}
|
||||
|
||||
for _, i := range idx {
|
||||
if getHexIndex(string(i)) == -1 {
|
||||
return nil, nil, fmt.Errorf("invalid path element")
|
||||
}
|
||||
}
|
||||
|
||||
for i, n := range nibbles {
|
||||
if string(idx[i]) != fmt.Sprintf("%x", n) {
|
||||
return nil, nil, fmt.Errorf("no such link in this extension")
|
||||
}
|
||||
}
|
||||
|
||||
return &node.Link{Cid: t.elements[1].(cid.Cid)}, rest, nil
|
||||
}
|
||||
|
||||
func (t *TrieNode) resolveTrieNodeLeaf(p []string) (interface{}, []string, error) {
|
||||
nibbles := t.elements[0].([]byte)
|
||||
|
||||
if len(nibbles) != 0 {
|
||||
idx, rest := shiftFromPath(p, len(nibbles))
|
||||
if len(idx) < len(nibbles) {
|
||||
return nil, nil, fmt.Errorf("not enough nibbles to traverse this leaf")
|
||||
}
|
||||
|
||||
for _, i := range idx {
|
||||
if getHexIndex(string(i)) == -1 {
|
||||
return nil, nil, fmt.Errorf("invalid path element")
|
||||
}
|
||||
}
|
||||
|
||||
for i, n := range nibbles {
|
||||
if string(idx[i]) != fmt.Sprintf("%x", n) {
|
||||
return nil, nil, fmt.Errorf("no such link in this extension")
|
||||
}
|
||||
}
|
||||
|
||||
p = rest
|
||||
}
|
||||
|
||||
link, ok := t.elements[1].(node.Node)
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("leaf children is not an IPLD node")
|
||||
}
|
||||
|
||||
return link.Resolve(p)
|
||||
}
|
||||
|
||||
func (t *TrieNode) resolveTrieNodeBranch(p []string) (interface{}, []string, error) {
|
||||
idx, rest := shiftFromPath(p, 1)
|
||||
hidx := getHexIndex(idx)
|
||||
if hidx == -1 {
|
||||
return nil, nil, fmt.Errorf("incorrect path")
|
||||
}
|
||||
|
||||
child := t.elements[hidx]
|
||||
if child != nil {
|
||||
return &node.Link{Cid: child.(cid.Cid)}, rest, nil
|
||||
}
|
||||
return nil, nil, fmt.Errorf("no such link in this branch")
|
||||
}
|
||||
|
||||
// shiftFromPath extracts from a given path (as a slice of strings)
|
||||
// the given number of elements as a single string, returning whatever
|
||||
// it has not taken.
|
||||
//
|
||||
// Examples:
|
||||
// ["0", "a", "something"] and 1 -> "0" and ["a", "something"]
|
||||
// ["ab", "c", "d", "1"] and 2 -> "ab" and ["c", "d", "1"]
|
||||
// ["abc", "d", "1"] and 2 -> "ab" and ["c", "d", "1"]
|
||||
func shiftFromPath(p []string, i int) (string, []string) {
|
||||
var (
|
||||
out string
|
||||
rest []string
|
||||
)
|
||||
|
||||
for _, pe := range p {
|
||||
re := ""
|
||||
for _, c := range pe {
|
||||
if len(out) < i {
|
||||
out += string(c)
|
||||
} else {
|
||||
re += string(c)
|
||||
}
|
||||
}
|
||||
|
||||
if len(out) == i && re != "" {
|
||||
rest = append(rest, re)
|
||||
}
|
||||
}
|
||||
|
||||
return out, rest
|
||||
}
|
||||
|
||||
// getHexIndex returns to you the integer 0 - 15 equivalent to your
|
||||
// string character if applicable, or -1 otherwise.
|
||||
func getHexIndex(s string) int {
|
||||
if len(s) != 1 {
|
||||
return -1
|
||||
}
|
||||
|
||||
c := byte(s[0])
|
||||
switch {
|
||||
case '0' <= c && c <= '9':
|
||||
return int(c - '0')
|
||||
case 'a' <= c && c <= 'f':
|
||||
return int(c - 'a' + 10)
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
statediff/indexer/ipfs/models.go (new file, 22 lines)
@ -0,0 +1,22 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipfs
|
||||
|
||||
type BlockModel struct {
|
||||
CID string `db:"key"`
|
||||
Data []byte `db:"data"`
|
||||
}
|
||||
statediff/indexer/mocks/test_data.go (new file, 455 lines)
@ -0,0 +1,455 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ipfs/go-block-format"
|
||||
"github.com/multiformats/go-multihash"
|
||||
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
|
||||
"github.com/ethereum/go-ethereum/statediff/testhelpers"
|
||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||
)
|
||||
|
||||
// Test variables
|
||||
var (
|
||||
// block data
|
||||
BlockNumber = big.NewInt(1)
|
||||
MockHeader = types.Header{
|
||||
Time: 0,
|
||||
Number: new(big.Int).Set(BlockNumber),
|
||||
Root: common.HexToHash("0x0"),
|
||||
TxHash: common.HexToHash("0x0"),
|
||||
ReceiptHash: common.HexToHash("0x0"),
|
||||
Difficulty: big.NewInt(5000000),
|
||||
Extra: []byte{},
|
||||
}
|
||||
MockTransactions, MockReceipts, SenderAddr = createTransactionsAndReceipts()
|
||||
ReceiptsRlp, _ = rlp.EncodeToBytes(MockReceipts)
|
||||
MockBlock = types.NewBlock(&MockHeader, MockTransactions, nil, MockReceipts)
|
||||
MockBlockRlp, _ = rlp.EncodeToBytes(MockBlock)
|
||||
MockHeaderRlp, _ = rlp.EncodeToBytes(MockBlock.Header())
|
||||
Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
|
||||
AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
|
||||
ContractAddress = crypto.CreateAddress(SenderAddr, MockTransactions[2].Nonce())
|
||||
ContractHash = crypto.Keccak256Hash(ContractAddress.Bytes()).String()
|
||||
MockContractByteCode = []byte{0, 1, 2, 3, 4, 5}
|
||||
mockTopic11 = common.HexToHash("0x04")
|
||||
mockTopic12 = common.HexToHash("0x06")
|
||||
mockTopic21 = common.HexToHash("0x05")
|
||||
mockTopic22 = common.HexToHash("0x07")
|
||||
MockLog1 = &types.Log{
|
||||
Address: Address,
|
||||
Topics: []common.Hash{mockTopic11, mockTopic12},
|
||||
Data: []byte{},
|
||||
}
|
||||
MockLog2 = &types.Log{
|
||||
Address: AnotherAddress,
|
||||
Topics: []common.Hash{mockTopic21, mockTopic22},
|
||||
Data: []byte{},
|
||||
}
|
||||
HeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256)
|
||||
HeaderMhKey = shared.MultihashKeyFromCID(HeaderCID)
|
||||
Trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(0), multihash.KECCAK_256)
|
||||
Trx1MhKey = shared.MultihashKeyFromCID(Trx1CID)
|
||||
Trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(1), multihash.KECCAK_256)
|
||||
Trx2MhKey = shared.MultihashKeyFromCID(Trx2CID)
|
||||
Trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(2), multihash.KECCAK_256)
|
||||
Trx3MhKey = shared.MultihashKeyFromCID(Trx3CID)
|
||||
Rct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(0), multihash.KECCAK_256)
|
||||
Rct1MhKey = shared.MultihashKeyFromCID(Rct1CID)
|
||||
Rct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(1), multihash.KECCAK_256)
|
||||
Rct2MhKey = shared.MultihashKeyFromCID(Rct2CID)
|
||||
Rct3CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(2), multihash.KECCAK_256)
|
||||
Rct3MhKey = shared.MultihashKeyFromCID(Rct3CID)
|
||||
State1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, ContractLeafNode, multihash.KECCAK_256)
|
||||
State1MhKey = shared.MultihashKeyFromCID(State1CID)
|
||||
State2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, AccountLeafNode, multihash.KECCAK_256)
|
||||
State2MhKey = shared.MultihashKeyFromCID(State2CID)
|
||||
StorageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageLeafNode, multihash.KECCAK_256)
|
||||
StorageMhKey = shared.MultihashKeyFromCID(StorageCID)
|
||||
MockTrxMeta = []models.TxModel{
|
||||
{
|
||||
CID: "", // This is empty until we go to publish to ipfs
|
||||
MhKey: "",
|
||||
Src: SenderAddr.Hex(),
|
||||
Dst: Address.String(),
|
||||
Index: 0,
|
||||
TxHash: MockTransactions[0].Hash().String(),
|
||||
Data: []byte{},
|
||||
Deployment: false,
|
||||
},
|
||||
{
|
||||
CID: "",
|
||||
MhKey: "",
|
||||
Src: SenderAddr.Hex(),
|
||||
Dst: AnotherAddress.String(),
|
||||
Index: 1,
|
||||
TxHash: MockTransactions[1].Hash().String(),
|
||||
Data: []byte{},
|
||||
Deployment: false,
|
||||
},
|
||||
{
|
||||
CID: "",
|
||||
MhKey: "",
|
||||
Src: SenderAddr.Hex(),
|
||||
Dst: "",
|
||||
Index: 2,
|
||||
TxHash: MockTransactions[2].Hash().String(),
|
||||
Data: MockContractByteCode,
|
||||
Deployment: true,
|
||||
},
|
||||
}
|
||||
MockTrxMetaPostPublsh = []models.TxModel{
|
||||
{
|
||||
CID: Trx1CID.String(), // CID is populated post-publish
|
||||
MhKey: Trx1MhKey,
|
||||
Src: SenderAddr.Hex(),
|
||||
Dst: Address.String(),
|
||||
Index: 0,
|
||||
TxHash: MockTransactions[0].Hash().String(),
|
||||
Data: []byte{},
|
||||
Deployment: false,
|
||||
},
|
||||
{
|
||||
CID: Trx2CID.String(),
|
||||
MhKey: Trx2MhKey,
|
||||
Src: SenderAddr.Hex(),
|
||||
Dst: AnotherAddress.String(),
|
||||
Index: 1,
|
||||
TxHash: MockTransactions[1].Hash().String(),
|
||||
Data: []byte{},
|
||||
Deployment: false,
|
||||
},
|
||||
{
|
||||
CID: Trx3CID.String(),
|
||||
MhKey: Trx3MhKey,
|
||||
Src: SenderAddr.Hex(),
|
||||
Dst: "",
|
||||
Index: 2,
|
||||
TxHash: MockTransactions[2].Hash().String(),
|
||||
Data: MockContractByteCode,
|
||||
Deployment: true,
|
||||
},
|
||||
}
|
||||
MockRctMeta = []models.ReceiptModel{
|
||||
{
|
||||
CID: "",
|
||||
MhKey: "",
|
||||
Topic0s: []string{
|
||||
mockTopic11.String(),
|
||||
},
|
||||
Topic1s: []string{
|
||||
mockTopic12.String(),
|
||||
},
|
||||
Contract: "",
|
||||
ContractHash: "",
|
||||
LogContracts: []string{
|
||||
Address.String(),
|
||||
},
|
||||
},
|
||||
{
|
||||
CID: "",
|
||||
MhKey: "",
|
||||
Topic0s: []string{
|
||||
mockTopic21.String(),
|
||||
},
|
||||
Topic1s: []string{
|
||||
mockTopic22.String(),
|
||||
},
|
||||
Contract: "",
|
||||
ContractHash: "",
|
||||
LogContracts: []string{
|
||||
AnotherAddress.String(),
|
||||
},
|
||||
},
|
||||
{
|
||||
CID: "",
|
||||
MhKey: "",
|
||||
Contract: ContractAddress.String(),
|
||||
ContractHash: ContractHash,
|
||||
LogContracts: []string{},
|
||||
},
|
||||
}
|
||||
MockRctMetaPostPublish = []models.ReceiptModel{
|
||||
{
|
||||
CID: Rct1CID.String(),
|
||||
MhKey: Rct1MhKey,
|
||||
Topic0s: []string{
|
||||
mockTopic11.String(),
|
||||
},
|
||||
Topic1s: []string{
|
||||
mockTopic12.String(),
|
||||
},
|
||||
Contract: "",
|
||||
ContractHash: "",
|
||||
LogContracts: []string{
|
||||
Address.String(),
|
||||
},
|
||||
},
|
||||
{
|
||||
CID: Rct2CID.String(),
|
||||
MhKey: Rct2MhKey,
|
||||
Topic0s: []string{
|
||||
mockTopic21.String(),
|
||||
},
|
||||
Topic1s: []string{
|
||||
mockTopic22.String(),
|
||||
},
|
||||
Contract: "",
|
||||
ContractHash: "",
|
||||
LogContracts: []string{
|
||||
AnotherAddress.String(),
|
||||
},
|
||||
},
|
||||
{
|
||||
CID: Rct3CID.String(),
|
||||
MhKey: Rct3MhKey,
|
||||
Contract: ContractAddress.String(),
|
||||
ContractHash: ContractHash,
|
||||
LogContracts: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
// statediff data
|
||||
storageLocation = common.HexToHash("0")
|
||||
StorageLeafKey = crypto.Keccak256Hash(storageLocation[:]).Bytes()
|
||||
StorageValue = common.Hex2Bytes("01")
|
||||
StoragePartialPath = common.Hex2Bytes("20290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
|
||||
StorageLeafNode, _ = rlp.EncodeToBytes([]interface{}{
|
||||
StoragePartialPath,
|
||||
StorageValue,
|
||||
})
|
||||
|
||||
nonce1 = uint64(1)
|
||||
ContractRoot = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0"
|
||||
ContractCodeHash = common.HexToHash("0x753f98a8d4328b15636e46f66f2cb4bc860100aa17967cc145fcd17d1d4710ea")
|
||||
contractPath = common.Bytes2Hex([]byte{'\x06'})
|
||||
ContractLeafKey = testhelpers.AddressToLeafKey(ContractAddress)
|
||||
ContractAccount, _ = rlp.EncodeToBytes(state.Account{
|
||||
Nonce: nonce1,
|
||||
Balance: big.NewInt(0),
|
||||
CodeHash: ContractCodeHash.Bytes(),
|
||||
Root: common.HexToHash(ContractRoot),
|
||||
})
|
||||
ContractPartialPath = common.Hex2Bytes("3114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45")
|
||||
ContractLeafNode, _ = rlp.EncodeToBytes([]interface{}{
|
||||
ContractPartialPath,
|
||||
ContractAccount,
|
||||
})
|
||||
|
||||
nonce0 = uint64(0)
|
||||
AccountRoot = "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
|
||||
AccountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
|
||||
accountPath = common.Bytes2Hex([]byte{'\x0c'})
|
||||
AccountAddresss = common.HexToAddress("0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e")
|
||||
AccountLeafKey = testhelpers.Account2LeafKey
|
||||
Account, _ = rlp.EncodeToBytes(state.Account{
|
||||
Nonce: nonce0,
|
||||
Balance: big.NewInt(1000),
|
||||
CodeHash: AccountCodeHash.Bytes(),
|
||||
Root: common.HexToHash(AccountRoot),
|
||||
})
|
||||
AccountPartialPath = common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45")
|
||||
AccountLeafNode, _ = rlp.EncodeToBytes([]interface{}{
|
||||
AccountPartialPath,
|
||||
Account,
|
||||
})
|
||||
|
||||
StateDiffs = []sdtypes.StateNode{
|
||||
{
|
||||
Path: []byte{'\x06'},
|
||||
NodeType: sdtypes.Leaf,
|
||||
LeafKey: ContractLeafKey,
|
||||
NodeValue: ContractLeafNode,
|
||||
StorageNodes: []sdtypes.StorageNode{
|
||||
{
|
||||
Path: []byte{},
|
||||
NodeType: sdtypes.Leaf,
|
||||
LeafKey: StorageLeafKey,
|
||||
NodeValue: StorageLeafNode,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: []byte{'\x0c'},
|
||||
NodeType: sdtypes.Leaf,
|
||||
LeafKey: AccountLeafKey,
|
||||
NodeValue: AccountLeafNode,
|
||||
StorageNodes: []sdtypes.StorageNode{},
|
||||
},
|
||||
}
|
||||
|
||||
MockStateNodes = []shared.TrieNode{
|
||||
{
|
||||
LeafKey: common.BytesToHash(ContractLeafKey),
|
||||
Path: []byte{'\x06'},
|
||||
Value: ContractLeafNode,
|
||||
Type: sdtypes.Leaf,
|
||||
},
|
||||
{
|
||||
LeafKey: common.BytesToHash(AccountLeafKey),
|
||||
Path: []byte{'\x0c'},
|
||||
Value: AccountLeafNode,
|
||||
Type: sdtypes.Leaf,
|
||||
},
|
||||
}
|
||||
MockStateMetaPostPublish = []models.StateNodeModel{
|
||||
{
|
||||
CID: State1CID.String(),
|
||||
MhKey: State1MhKey,
|
||||
Path: []byte{'\x06'},
|
||||
NodeType: 2,
|
||||
StateKey: common.BytesToHash(ContractLeafKey).Hex(),
|
||||
},
|
||||
{
|
||||
CID: State2CID.String(),
|
||||
MhKey: State2MhKey,
|
||||
Path: []byte{'\x0c'},
|
||||
NodeType: 2,
|
||||
StateKey: common.BytesToHash(AccountLeafKey).Hex(),
|
||||
},
|
||||
}
|
||||
MockStorageNodes = map[string][]shared.TrieNode{
|
||||
contractPath: {
|
||||
{
|
||||
LeafKey: common.BytesToHash(StorageLeafKey),
|
||||
Value: StorageLeafNode,
|
||||
Type: sdtypes.Leaf,
|
||||
Path: []byte{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// aggregate payloads
|
||||
MockCIDPayload = shared.CIDPayload{
|
||||
HeaderCID: models.HeaderModel{
|
||||
BlockHash: MockBlock.Hash().String(),
|
||||
BlockNumber: MockBlock.Number().String(),
|
||||
CID: HeaderCID.String(),
|
||||
MhKey: HeaderMhKey,
|
||||
ParentHash: MockBlock.ParentHash().String(),
|
||||
TotalDifficulty: MockBlock.Difficulty().String(),
|
||||
Reward: "5000000000000000000",
|
||||
StateRoot: MockBlock.Root().String(),
|
||||
RctRoot: MockBlock.ReceiptHash().String(),
|
||||
TxRoot: MockBlock.TxHash().String(),
|
||||
UncleRoot: MockBlock.UncleHash().String(),
|
||||
Bloom: MockBlock.Bloom().Bytes(),
|
||||
Timestamp: MockBlock.Time(),
|
||||
},
|
||||
UncleCIDs: []models.UncleModel{},
|
||||
TransactionCIDs: MockTrxMetaPostPublsh,
|
||||
ReceiptCIDs: map[common.Hash]models.ReceiptModel{
|
||||
MockTransactions[0].Hash(): MockRctMetaPostPublish[0],
|
||||
MockTransactions[1].Hash(): MockRctMetaPostPublish[1],
|
||||
MockTransactions[2].Hash(): MockRctMetaPostPublish[2],
|
||||
},
|
||||
StateNodeCIDs: MockStateMetaPostPublish,
|
||||
StorageNodeCIDs: map[string][]models.StorageNodeModel{
|
||||
contractPath: {
|
||||
{
|
||||
CID: StorageCID.String(),
|
||||
MhKey: StorageMhKey,
|
||||
Path: []byte{},
|
||||
StorageKey: common.BytesToHash(StorageLeafKey).Hex(),
|
||||
NodeType: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
StateAccounts: map[string]models.StateAccountModel{
|
||||
contractPath: {
|
||||
Balance: big.NewInt(0).String(),
|
||||
Nonce: nonce1,
|
||||
CodeHash: ContractCodeHash.Bytes(),
|
||||
StorageRoot: common.HexToHash(ContractRoot).String(),
|
||||
},
|
||||
accountPath: {
|
||||
Balance: big.NewInt(1000).String(),
|
||||
Nonce: nonce0,
|
||||
CodeHash: AccountCodeHash.Bytes(),
|
||||
StorageRoot: common.HexToHash(AccountRoot).String(),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
HeaderIPLD, _ = blocks.NewBlockWithCid(MockHeaderRlp, HeaderCID)
|
||||
Trx1IPLD, _ = blocks.NewBlockWithCid(MockTransactions.GetRlp(0), Trx1CID)
|
||||
Trx2IPLD, _ = blocks.NewBlockWithCid(MockTransactions.GetRlp(1), Trx2CID)
|
||||
Trx3IPLD, _ = blocks.NewBlockWithCid(MockTransactions.GetRlp(2), Trx3CID)
|
||||
Rct1IPLD, _ = blocks.NewBlockWithCid(MockReceipts.GetRlp(0), Rct1CID)
|
||||
Rct2IPLD, _ = blocks.NewBlockWithCid(MockReceipts.GetRlp(1), Rct2CID)
|
||||
Rct3IPLD, _ = blocks.NewBlockWithCid(MockReceipts.GetRlp(2), Rct3CID)
|
||||
State1IPLD, _ = blocks.NewBlockWithCid(ContractLeafNode, State1CID)
|
||||
State2IPLD, _ = blocks.NewBlockWithCid(AccountLeafNode, State2CID)
|
||||
StorageIPLD, _ = blocks.NewBlockWithCid(StorageLeafNode, StorageCID)
|
||||
)
|
||||
|
||||
// createTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs
|
||||
func createTransactionsAndReceipts() (types.Transactions, types.Receipts, common.Address) {
|
||||
// make transactions
|
||||
trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), []byte{})
|
||||
trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), []byte{})
|
||||
trx3 := types.NewContractCreation(2, big.NewInt(1500), 75, big.NewInt(150), MockContractByteCode)
|
||||
transactionSigner := types.MakeSigner(params.MainnetChainConfig, new(big.Int).Set(BlockNumber))
|
||||
mockCurve := elliptic.P256()
|
||||
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
|
||||
if err != nil {
|
||||
log.Crit(err.Error())
|
||||
}
|
||||
signedTrx1, err := types.SignTx(trx1, transactionSigner, mockPrvKey)
|
||||
if err != nil {
|
||||
log.Crit(err.Error())
|
||||
}
|
||||
signedTrx2, err := types.SignTx(trx2, transactionSigner, mockPrvKey)
|
||||
if err != nil {
|
||||
log.Crit(err.Error())
|
||||
}
|
||||
signedTrx3, err := types.SignTx(trx3, transactionSigner, mockPrvKey)
|
||||
if err != nil {
|
||||
log.Crit(err.Error())
|
||||
}
|
||||
SenderAddr, err := types.Sender(transactionSigner, signedTrx1) // same sender for all three transactions
|
||||
if err != nil {
|
||||
log.Crit(err.Error())
|
||||
}
|
||||
// make receipts
|
||||
mockReceipt1 := types.NewReceipt(common.HexToHash("0x0").Bytes(), false, 50)
|
||||
mockReceipt1.Logs = []*types.Log{MockLog1}
|
||||
mockReceipt1.TxHash = signedTrx1.Hash()
|
||||
mockReceipt2 := types.NewReceipt(common.HexToHash("0x1").Bytes(), false, 100)
|
||||
mockReceipt2.Logs = []*types.Log{MockLog2}
|
||||
mockReceipt2.TxHash = signedTrx2.Hash()
|
||||
mockReceipt3 := types.NewReceipt(common.HexToHash("0x2").Bytes(), false, 75)
|
||||
mockReceipt3.Logs = []*types.Log{}
|
||||
mockReceipt3.TxHash = signedTrx3.Hash()
|
||||
return types.Transactions{signedTrx1, signedTrx2, signedTrx3}, types.Receipts{mockReceipt1, mockReceipt2, mockReceipt3}, SenderAddr
|
||||
}
|
||||
statediff/indexer/models/models.go (new file, 126 lines)
@ -0,0 +1,126 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package models
|
||||
|
||||
import "github.com/lib/pq"
|
||||
|
||||
// HeaderModel is the db model for eth.header_cids
|
||||
type HeaderModel struct {
|
||||
ID int64 `db:"id"`
|
||||
BlockNumber string `db:"block_number"`
|
||||
BlockHash string `db:"block_hash"`
|
||||
ParentHash string `db:"parent_hash"`
|
||||
CID string `db:"cid"`
|
||||
MhKey string `db:"mh_key"`
|
||||
TotalDifficulty string `db:"td"`
|
||||
NodeID int64 `db:"node_id"`
|
||||
Reward string `db:"reward"`
|
||||
StateRoot string `db:"state_root"`
|
||||
UncleRoot string `db:"uncle_root"`
|
||||
TxRoot string `db:"tx_root"`
|
||||
RctRoot string `db:"receipt_root"`
|
||||
Bloom []byte `db:"bloom"`
|
||||
Timestamp uint64 `db:"timestamp"`
|
||||
TimesValidated int64 `db:"times_validated"`
|
||||
}
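// Hedged sketch (column names are read off the db tags above and the
// eth.header_cids table named in the comment; this is not the PR's actual SQL):
// a HeaderModel could be persisted with sqlx named parameters.
//
//	_, err := db.NamedExec(`INSERT INTO eth.header_cids
//		(block_number, block_hash, parent_hash, cid, mh_key, td, node_id, reward,
//		 state_root, uncle_root, tx_root, receipt_root, bloom, timestamp)
//		VALUES (:block_number, :block_hash, :parent_hash, :cid, :mh_key, :td, :node_id, :reward,
//		 :state_root, :uncle_root, :tx_root, :receipt_root, :bloom, :timestamp)`, header)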
|
||||
|
||||
// UncleModel is the db model for eth.uncle_cids
|
||||
type UncleModel struct {
|
||||
ID int64 `db:"id"`
|
||||
HeaderID int64 `db:"header_id"`
|
||||
BlockHash string `db:"block_hash"`
|
||||
ParentHash string `db:"parent_hash"`
|
||||
CID string `db:"cid"`
|
||||
MhKey string `db:"mh_key"`
|
||||
Reward string `db:"reward"`
|
||||
}
|
||||
|
||||
// TxModel is the db model for eth.transaction_cids
|
||||
type TxModel struct {
|
||||
ID int64 `db:"id"`
|
||||
HeaderID int64 `db:"header_id"`
|
||||
Index int64 `db:"index"`
|
||||
TxHash string `db:"tx_hash"`
|
||||
CID string `db:"cid"`
|
||||
MhKey string `db:"mh_key"`
|
||||
Dst string `db:"dst"`
|
||||
Src string `db:"src"`
|
||||
Data []byte `db:"tx_data"`
|
||||
Deployment bool `db:"deployment"`
|
||||
}
|
||||
|
||||
// ReceiptModel is the db model for eth.receipt_cids
|
||||
type ReceiptModel struct {
|
||||
ID int64 `db:"id"`
|
||||
TxID int64 `db:"tx_id"`
|
||||
CID string `db:"cid"`
|
||||
MhKey string `db:"mh_key"`
|
||||
Contract string `db:"contract"`
|
||||
ContractHash string `db:"contract_hash"`
|
||||
LogContracts pq.StringArray `db:"log_contracts"`
|
||||
Topic0s pq.StringArray `db:"topic0s"`
|
||||
Topic1s pq.StringArray `db:"topic1s"`
|
||||
Topic2s pq.StringArray `db:"topic2s"`
|
||||
Topic3s pq.StringArray `db:"topic3s"`
|
||||
}
|
||||
|
||||
// StateNodeModel is the db model for eth.state_cids
|
||||
type StateNodeModel struct {
|
||||
ID int64 `db:"id"`
|
||||
HeaderID int64 `db:"header_id"`
|
||||
Path []byte `db:"state_path"`
|
||||
StateKey string `db:"state_leaf_key"`
|
||||
NodeType int `db:"node_type"`
|
||||
CID string `db:"cid"`
|
||||
MhKey string `db:"mh_key"`
|
||||
Diff bool `db:"diff"`
|
||||
}
|
||||
|
||||
// StorageNodeModel is the db model for eth.storage_cids
|
||||
type StorageNodeModel struct {
|
||||
ID int64 `db:"id"`
|
||||
StateID int64 `db:"state_id"`
|
||||
Path []byte `db:"storage_path"`
|
||||
StorageKey string `db:"storage_leaf_key"`
|
||||
NodeType int `db:"node_type"`
|
||||
CID string `db:"cid"`
|
||||
MhKey string `db:"mh_key"`
|
||||
Diff bool `db:"diff"`
|
||||
}
|
||||
|
||||
// StorageNodeWithStateKeyModel is a db model for eth.storage_cids + eth.state_cids.state_key
|
||||
type StorageNodeWithStateKeyModel struct {
|
||||
ID int64 `db:"id"`
|
||||
StateID int64 `db:"state_id"`
|
||||
Path []byte `db:"storage_path"`
|
||||
StateKey string `db:"state_leaf_key"`
|
||||
StorageKey string `db:"storage_leaf_key"`
|
||||
NodeType int `db:"node_type"`
|
||||
CID string `db:"cid"`
|
||||
MhKey string `db:"mh_key"`
|
||||
Diff bool `db:"diff"`
|
||||
}
|
||||
|
||||
// StateAccountModel is a db model for an eth state account (decoded value of state leaf node)
|
||||
type StateAccountModel struct {
|
||||
ID int64 `db:"id"`
|
||||
StateID int64 `db:"state_id"`
|
||||
Balance string `db:"balance"`
|
||||
Nonce uint64 `db:"nonce"`
|
||||
CodeHash []byte `db:"code_hash"`
|
||||
StorageRoot string `db:"storage_root"`
|
||||
}
|
||||
statediff/indexer/node/node.go (new file, 25 lines)
@ -0,0 +1,25 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package node
|
||||
|
||||
type Info struct {
|
||||
GenesisBlock string
|
||||
NetworkID string
|
||||
ChainID uint64
|
||||
ID string
|
||||
ClientName string
|
||||
}
|
||||
statediff/indexer/postgres/config.go (new file, 59 lines)
@ -0,0 +1,59 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Env variables
|
||||
const (
|
||||
DATABASE_NAME = "DATABASE_NAME"
|
||||
|
i-norden commented: Ideally we would pass individual db connection string components in by CLI flag in a manner that is overridable by these env variables (a possible override pattern is sketched after this const block).
i-norden commented: Note: this doesn't need to be resolved now.
|
||||
DATABASE_HOSTNAME = "DATABASE_HOSTNAME"
|
||||
DATABASE_PORT = "DATABASE_PORT"
|
||||
DATABASE_USER = "DATABASE_USER"
|
||||
DATABASE_PASSWORD = "DATABASE_PASSWORD"
|
||||
DATABASE_MAX_IDLE_CONNECTIONS = "DATABASE_MAX_IDLE_CONNECTIONS"
|
||||
DATABASE_MAX_OPEN_CONNECTIONS = "DATABASE_MAX_OPEN_CONNECTIONS"
|
||||
DATABASE_MAX_CONN_LIFETIME = "DATABASE_MAX_CONN_LIFETIME"
|
||||
)
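// Hedged sketch of the override pattern suggested in the review above (the
// helper name is hypothetical; assumes "os" is imported and a urfave/cli
// context is available to supply the flag value):
//
//	// An environment variable, when set, overrides the CLI-provided value.
//	func overrideFromEnv(cliValue, envKey string) string {
//		if env := os.Getenv(envKey); env != "" {
//			return env
//		}
//		return cliValue
//	}
//
//	// e.g. params.Hostname = overrideFromEnv(ctx.GlobalString(dbHostFlag.Name), DATABASE_HOSTNAME)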
|
||||
|
||||
type ConnectionParams struct {
|
||||
Hostname string
|
||||
Name string
|
||||
User string
|
||||
Password string
|
||||
Port int
|
||||
}
|
||||
|
||||
type ConnectionConfig struct {
|
||||
MaxIdle int
|
||||
MaxOpen int
|
||||
MaxLifetime int
|
||||
}
|
||||
|
||||
func DbConnectionString(params ConnectionParams) string {
|
||||
if len(params.User) > 0 && len(params.Password) > 0 {
|
||||
return fmt.Sprintf("postgresql://%s:%s@%s:%d/%s?sslmode=disable",
|
||||
params.User, params.Password, params.Hostname, params.Port, params.Name)
|
||||
}
|
||||
if len(params.User) > 0 && len(params.Password) == 0 {
|
||||
return fmt.Sprintf("postgresql://%s@%s:%d/%s?sslmode=disable",
|
||||
params.User, params.Hostname, params.Port, params.Name)
|
||||
}
|
||||
return fmt.Sprintf("postgresql://%s:%d/%s?sslmode=disable", params.Hostname, params.Port, params.Name)
|
||||
}
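// For instance (all values below are made up for illustration), a fully
// specified ConnectionParams yields a URI of the form:
//
//	params := ConnectionParams{
//		Hostname: "localhost",
//		Port:     5432,
//		Name:     "vulcanize_public",
//		User:     "postgres",
//		Password: "secret",
//	}
//	fmt.Println(DbConnectionString(params))
//	// postgresql://postgres:secret@localhost:5432/vulcanize_public?sslmode=disable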
|
||||
statediff/indexer/postgres/errors.go (new file, 53 lines)
@ -0,0 +1,53 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const (
|
||||
BeginTransactionFailedMsg = "failed to begin transaction"
|
||||
DbConnectionFailedMsg = "db connection failed"
|
||||
DeleteQueryFailedMsg = "delete query failed"
|
||||
InsertQueryFailedMsg = "insert query failed"
|
||||
SettingNodeFailedMsg = "unable to set db node"
|
||||
)
|
||||
|
||||
func ErrBeginTransactionFailed(beginErr error) error {
|
||||
return formatError(BeginTransactionFailedMsg, beginErr.Error())
|
||||
}
|
||||
|
||||
func ErrDBConnectionFailed(connectErr error) error {
|
||||
return formatError(DbConnectionFailedMsg, connectErr.Error())
|
||||
}
|
||||
|
||||
func ErrDBDeleteFailed(deleteErr error) error {
|
||||
return formatError(DeleteQueryFailedMsg, deleteErr.Error())
|
||||
}
|
||||
|
||||
func ErrDBInsertFailed(insertErr error) error {
|
||||
return formatError(InsertQueryFailedMsg, insertErr.Error())
|
||||
}
|
||||
|
||||
func ErrUnableToSetNode(setErr error) error {
|
||||
return formatError(SettingNodeFailedMsg, setErr.Error())
|
||||
}
|
||||
|
||||
func formatError(msg, err string) error {
|
||||
return fmt.Errorf("%s: %s", msg, err)
|
||||
}
|
||||
statediff/indexer/postgres/postgres.go (new file, 76 lines)
@ -0,0 +1,76 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
_ "github.com/lib/pq" //postgres driver
|
||||
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/node"
|
||||
)
|
||||
|
||||
type DB struct {
|
||||
*sqlx.DB
|
||||
Node node.Info
|
||||
NodeID int64
|
||||
}
|
||||
|
||||
func NewDB(connectString string, config ConnectionConfig, node node.Info) (*DB, error) {
|
||||
db, connectErr := sqlx.Connect("postgres", connectString)
|
||||
if connectErr != nil {
|
||||
return &DB{}, ErrDBConnectionFailed(connectErr)
|
||||
}
|
||||
if config.MaxOpen > 0 {
|
||||
db.SetMaxOpenConns(config.MaxOpen)
|
||||
}
|
||||
if config.MaxIdle > 0 {
|
||||
db.SetMaxIdleConns(config.MaxIdle)
|
||||
}
|
||||
if config.MaxLifetime > 0 {
|
||||
lifetime := time.Duration(config.MaxLifetime) * time.Second
|
||||
db.SetConnMaxLifetime(lifetime)
|
||||
}
|
||||
pg := DB{DB: db, Node: node}
|
||||
nodeErr := pg.CreateNode(&node)
|
||||
if nodeErr != nil {
|
||||
return &DB{}, ErrUnableToSetNode(nodeErr)
|
||||
}
|
||||
return &pg, nil
|
||||
}
|
||||
|
||||
func (db *DB) CreateNode(node *node.Info) error {
|
||||
var nodeID int64
|
||||
err := db.QueryRow(
|
||||
`INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id)
|
||||
VALUES ($1, $2, $3, $4, $5)
|
||||
ON CONFLICT (genesis_block, network_id, node_id, chain_id)
|
||||
DO UPDATE
|
||||
SET genesis_block = $1,
|
||||
network_id = $2,
|
||||
node_id = $3,
|
||||
client_name = $4,
|
||||
chain_id = $5
|
||||
RETURNING id`,
|
||||
node.GenesisBlock, node.NetworkID, node.ID, node.ClientName, node.ChainID).Scan(&nodeID)
|
||||
if err != nil {
|
||||
return ErrUnableToSetNode(err)
|
||||
}
|
||||
db.NodeID = nodeID
|
||||
return nil
|
||||
}
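// Hedged usage sketch tying the pieces above together (connection values and
// the node.Info fields are placeholders):
//
//	info := node.Info{GenesisBlock: "0x0", NetworkID: "1", ChainID: 1, ID: "node123", ClientName: "geth"}
//	connStr := DbConnectionString(ConnectionParams{Hostname: "localhost", Port: 5432, Name: "vulcanize_public", User: "postgres"})
//	db, err := NewDB(connStr, ConnectionConfig{MaxOpen: 10, MaxIdle: 5, MaxLifetime: 60}, info)
//	if err != nil {
//		return err
//	}
//	defer db.Close()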
|
||||
statediff/indexer/postgres/postgres_suite_test.go (new file, 25 lines)
@ -0,0 +1,25 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package postgres_test
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
log.Root().SetHandler(log.DiscardHandler())
|
||||
}
|
||||
statediff/indexer/postgres/postgres_test.go (new file, 131 lines)
@ -0,0 +1,131 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package postgres_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"math/big"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
_ "github.com/lib/pq"
|
||||
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/node"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
|
||||
)
|
||||
|
||||
var DBParams postgres.ConnectionParams
|
||||
|
||||
func expectContainsSubstring(t *testing.T, full string, sub string) {
|
||||
if !strings.Contains(full, sub) {
|
||||
t.Fatalf("Expected \"%v\" to contain substring \"%v\"\n", full, sub)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPostgresDB(t *testing.T) {
|
||||
var sqlxdb *sqlx.DB
|
||||
|
||||
t.Run("connects to the database", func(t *testing.T) {
|
||||
var err error
|
||||
pgConfig := postgres.DbConnectionString(DBParams)
|
||||
|
||||
sqlxdb, err = sqlx.Connect("postgres", pgConfig)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if sqlxdb == nil {
|
||||
t.Fatal("DB is nil")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("serializes big.Int to db", func(t *testing.T) {
|
||||
// The postgres driver doesn't support Go's big.Int type, and casting to
// uint64/int64 can overflow for transaction values (in wei), even though
// postgres NUMERIC can handle arbitrarily sized integers. So we use the
// string representation of big.Int and cast on insert.
|
||||
|
||||
pgConnectString := postgres.DbConnectionString(DBParams)
|
||||
db, err := sqlx.Connect("postgres", pgConnectString)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
||||
bi := new(big.Int)
|
||||
bi.SetString("34940183920000000000", 10)
|
||||
shared.ExpectEqual(t, bi.String(), "34940183920000000000")
|
||||
|
||||
defer db.Exec(`DROP TABLE IF EXISTS example`)
|
||||
_, err = db.Exec("CREATE TABLE example ( id INTEGER, data NUMERIC )")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sqlStatement := `
|
||||
INSERT INTO example (id, data)
|
||||
VALUES (1, cast($1 AS NUMERIC))`
|
||||
_, err = db.Exec(sqlStatement, bi.String())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var data string
|
||||
err = db.QueryRow(`SELECT data FROM example WHERE id = 1`).Scan(&data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
shared.ExpectEqual(t, bi.String(), data)
|
||||
actual := new(big.Int)
|
||||
actual.SetString(data, 10)
|
||||
shared.ExpectEqual(t, actual, bi)
|
||||
})
|
||||
|
||||
t.Run("throws error when can't connect to the database", func(t *testing.T) {
|
||||
invalidDatabase := postgres.ConnectionParams{}
|
||||
node := node.Info{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"}
|
||||
|
||||
_, err := postgres.NewDB(postgres.DbConnectionString(invalidDatabase),
|
||||
postgres.ConnectionConfig{}, node)
|
||||
|
||||
if err == nil {
|
||||
t.Fatal("Expected an error")
|
||||
}
|
||||
|
||||
expectContainsSubstring(t, err.Error(), postgres.DbConnectionFailedMsg)
|
||||
})
|
||||
|
||||
t.Run("throws error when can't create node", func(t *testing.T) {
|
||||
badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100))
|
||||
node := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"}
|
||||
|
||||
_, err := postgres.NewDB(postgres.DbConnectionString(DBParams), postgres.ConnectionConfig{}, node)
|
||||
|
||||
if err == nil {
|
||||
t.Fatal("Expected an error")
|
||||
}
|
||||
expectContainsSubstring(t, err.Error(), postgres.SettingNodeFailedMsg)
|
||||
})
|
||||
}
|
||||
statediff/indexer/prom/db_stats_collector.go (new file, 146 lines)
@ -0,0 +1,146 @@
package prom

import (
	"database/sql"

	"github.com/prometheus/client_golang/prometheus"
)

const (
	namespace = "ipld_eth_indexer"
	subsystem = "connections"
)

// DBStatsGetter is an interface that gets sql.DBStats.
type DBStatsGetter interface {
	Stats() sql.DBStats
}

// DBStatsCollector implements the prometheus.Collector interface.
type DBStatsCollector struct {
	sg DBStatsGetter

	// descriptions of exported metrics
	maxOpenDesc           *prometheus.Desc
	openDesc              *prometheus.Desc
	inUseDesc             *prometheus.Desc
	idleDesc              *prometheus.Desc
	waitedForDesc         *prometheus.Desc
	blockedSecondsDesc    *prometheus.Desc
	closedMaxIdleDesc     *prometheus.Desc
	closedMaxLifetimeDesc *prometheus.Desc
}

// NewDBStatsCollector creates a new DBStatsCollector.
func NewDBStatsCollector(dbName string, sg DBStatsGetter) *DBStatsCollector {
	labels := prometheus.Labels{"db_name": dbName}
	return &DBStatsCollector{
		sg: sg,
		maxOpenDesc: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, subsystem, "max_open"),
			"Maximum number of open connections to the database.",
			nil,
			labels,
		),
		openDesc: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, subsystem, "open"),
			"The number of established connections both in use and idle.",
			nil,
			labels,
		),
		inUseDesc: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, subsystem, "in_use"),
			"The number of connections currently in use.",
			nil,
			labels,
		),
		idleDesc: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, subsystem, "idle"),
			"The number of idle connections.",
			nil,
			labels,
		),
		waitedForDesc: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, subsystem, "waited_for"),
			"The total number of connections waited for.",
			nil,
			labels,
		),
		blockedSecondsDesc: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, subsystem, "blocked_seconds"),
			"The total time blocked waiting for a new connection.",
			nil,
			labels,
		),
		closedMaxIdleDesc: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, subsystem, "closed_max_idle"),
			"The total number of connections closed due to SetMaxIdleConns.",
			nil,
			labels,
		),
		closedMaxLifetimeDesc: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, subsystem, "closed_max_lifetime"),
			"The total number of connections closed due to SetConnMaxLifetime.",
			nil,
			labels,
		),
	}
}

// Describe implements the prometheus.Collector interface.
func (c DBStatsCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.maxOpenDesc
	ch <- c.openDesc
	ch <- c.inUseDesc
	ch <- c.idleDesc
	ch <- c.waitedForDesc
	ch <- c.blockedSecondsDesc
	ch <- c.closedMaxIdleDesc
	ch <- c.closedMaxLifetimeDesc
}

// Collect implements the prometheus.Collector interface.
func (c DBStatsCollector) Collect(ch chan<- prometheus.Metric) {
	stats := c.sg.Stats()

	ch <- prometheus.MustNewConstMetric(
		c.maxOpenDesc,
		prometheus.GaugeValue,
		float64(stats.MaxOpenConnections),
	)
	ch <- prometheus.MustNewConstMetric(
		c.openDesc,
		prometheus.GaugeValue,
		float64(stats.OpenConnections),
	)
	ch <- prometheus.MustNewConstMetric(
		c.inUseDesc,
		prometheus.GaugeValue,
		float64(stats.InUse),
	)
	ch <- prometheus.MustNewConstMetric(
		c.idleDesc,
		prometheus.GaugeValue,
		float64(stats.Idle),
	)
	ch <- prometheus.MustNewConstMetric(
		c.waitedForDesc,
		prometheus.CounterValue,
		float64(stats.WaitCount),
	)
	ch <- prometheus.MustNewConstMetric(
		c.blockedSecondsDesc,
		prometheus.CounterValue,
		stats.WaitDuration.Seconds(),
	)
	ch <- prometheus.MustNewConstMetric(
		c.closedMaxIdleDesc,
		prometheus.CounterValue,
		float64(stats.MaxIdleClosed),
	)
	ch <- prometheus.MustNewConstMetric(
		c.closedMaxLifetimeDesc,
		prometheus.CounterValue,
		float64(stats.MaxLifetimeClosed),
	)
}
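
For orientation, DBStatsCollector is a plain prometheus.Collector, so it can be exercised against any registry. The sketch below is illustrative only: it assumes the package is importable at the path implied by the file location, and fakeDB is a hypothetical stand-in for a real *sqlx.DB.

package main

import (
	"database/sql"
	"fmt"

	"github.com/ethereum/go-ethereum/statediff/indexer/prom"
	"github.com/prometheus/client_golang/prometheus"
)

// fakeDB is a hypothetical DBStatsGetter used purely for illustration.
type fakeDB struct{}

func (fakeDB) Stats() sql.DBStats {
	return sql.DBStats{MaxOpenConnections: 10, OpenConnections: 3, InUse: 2, Idle: 1}
}

func main() {
	reg := prometheus.NewRegistry()
	// Every Gather/scrape calls Collect, which snapshots sql.DBStats at that moment
	// rather than maintaining its own counters.
	reg.MustRegister(prom.NewDBStatsCollector("vulcanize_public", fakeDB{}))

	families, err := reg.Gather()
	if err != nil {
		fmt.Println("gather failed:", err)
		return
	}
	for _, mf := range families {
		fmt.Println(mf.GetName()) // e.g. ipld_eth_indexer_connections_open
	}
}
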
151
statediff/indexer/prom/prom.go
Normal file
@ -0,0 +1,151 @@
package prom

import (
	"time"

	"github.com/jmoiron/sqlx"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

const statsSubsystem = "stats"

var (
	metrics bool

	receipts     prometheus.Counter
	transactions prometheus.Counter
	blocks       prometheus.Counter

	lenPayloadChan prometheus.Gauge

	tPayloadDecode             prometheus.Histogram
	tFreePostgres              prometheus.Histogram
	tPostgresCommit            prometheus.Histogram
	tHeaderProcessing          prometheus.Histogram
	tUncleProcessing           prometheus.Histogram
	tTxAndRecProcessing        prometheus.Histogram
	tStateAndStoreProcessing   prometheus.Histogram
	tCodeAndCodeHashProcessing prometheus.Histogram
)

// Init module initialization
func Init() {
	metrics = true

	blocks = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "blocks",
		Help:      "The total number of processed blocks",
	})
	transactions = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "transactions",
		Help:      "The total number of processed transactions",
	})
	receipts = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Name:      "receipts",
		Help:      "The total number of processed receipts",
	})

	lenPayloadChan = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: namespace,
		Name:      "len_payload_chan",
		Help:      "Current length of publishPayload",
	})

	tFreePostgres = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: namespace,
		Subsystem: statsSubsystem,
		Name:      "t_free_postgres",
		Help:      "Time spent waiting for free postgres tx",
	})
	tPostgresCommit = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: namespace,
		Subsystem: statsSubsystem,
		Name:      "t_postgres_commit",
		Help:      "Postgres transaction commit duration",
	})
	tHeaderProcessing = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: namespace,
		Subsystem: statsSubsystem,
		Name:      "t_header_processing",
		Help:      "Header processing time",
	})
	tUncleProcessing = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: namespace,
		Subsystem: statsSubsystem,
		Name:      "t_uncle_processing",
		Help:      "Uncle processing time",
	})
	tTxAndRecProcessing = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: namespace,
		Subsystem: statsSubsystem,
		Name:      "t_tx_receipt_processing",
		Help:      "Tx and receipt processing time",
	})
	tStateAndStoreProcessing = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: namespace,
		Subsystem: statsSubsystem,
		Name:      "t_state_store_code_processing",
		Help:      "State, storage, and code combined processing time",
	})
}

// RegisterDBCollector creates a metric collector for the given connection
func RegisterDBCollector(name string, db *sqlx.DB) {
	if metrics {
		prometheus.Register(NewDBStatsCollector(name, db))
	}
}

// BlockInc block counter increment
func BlockInc() {
	if metrics {
		blocks.Inc()
	}
}

// TransactionInc transaction counter increment
func TransactionInc() {
	if metrics {
		transactions.Inc()
	}
}

// ReceiptInc receipt counter increment
func ReceiptInc() {
	if metrics {
		receipts.Inc()
	}
}

// SetLenPayloadChan set chan length
func SetLenPayloadChan(ln int) {
	if metrics {
		lenPayloadChan.Set(float64(ln))
	}
}

// SetTimeMetric time metric observation
func SetTimeMetric(name string, t time.Duration) {
	if !metrics {
		return
	}
	tAsF64 := t.Seconds()
	switch name {
	case "t_free_postgres":
		tFreePostgres.Observe(tAsF64)
	case "t_postgres_commit":
		tPostgresCommit.Observe(tAsF64)
	case "t_header_processing":
		tHeaderProcessing.Observe(tAsF64)
	case "t_uncle_processing":
		tUncleProcessing.Observe(tAsF64)
	case "t_tx_receipt_processing":
		tTxAndRecProcessing.Observe(tAsF64)
	case "t_state_store_code_processing":
		tStateAndStoreProcessing.Observe(tAsF64)
	}
}
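
The helpers above are all no-ops until Init flips the package-level metrics flag, so callers opt in explicitly. A rough sketch of the intended call pattern follows; the connection string, driver import, and surrounding loop are illustrative assumptions, not code from this PR.

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/statediff/indexer/prom"
	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // Postgres driver, assumed here for the example
)

func main() {
	prom.Init() // without this, every counter/gauge/histogram helper silently does nothing

	db, err := sqlx.Connect("postgres", "postgres://localhost:5432/vulcanize_public?sslmode=disable")
	if err != nil {
		panic(err)
	}
	prom.RegisterDBCollector("vulcanize_public", db)

	// Inside the indexing loop, timings and counts would be reported roughly like this:
	start := time.Now()
	// ... process a block's header ...
	prom.SetTimeMetric("t_header_processing", time.Since(start))
	prom.BlockInc()
	prom.TransactionInc()
	prom.ReceiptInc()
}
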
76
statediff/indexer/reward.go
Normal file
@ -0,0 +1,76 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package indexer

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

func CalcEthBlockReward(header *types.Header, uncles []*types.Header, txs types.Transactions, receipts types.Receipts) *big.Int {
	staticBlockReward := staticRewardByBlockNumber(header.Number.Uint64())
	transactionFees := calcEthTransactionFees(txs, receipts)
	uncleInclusionRewards := calcEthUncleInclusionRewards(header, uncles)
	tmp := transactionFees.Add(transactionFees, uncleInclusionRewards)
	return tmp.Add(tmp, staticBlockReward)
}

func CalcUncleMinerReward(blockNumber, uncleBlockNumber uint64) *big.Int {
	staticBlockReward := staticRewardByBlockNumber(blockNumber)
	rewardDiv8 := staticBlockReward.Div(staticBlockReward, big.NewInt(8))
	mainBlock := new(big.Int).SetUint64(blockNumber)
	uncleBlock := new(big.Int).SetUint64(uncleBlockNumber)
	uncleBlockPlus8 := uncleBlock.Add(uncleBlock, big.NewInt(8))
	uncleBlockPlus8MinusMainBlock := uncleBlockPlus8.Sub(uncleBlockPlus8, mainBlock)
	return rewardDiv8.Mul(rewardDiv8, uncleBlockPlus8MinusMainBlock)
}

func staticRewardByBlockNumber(blockNumber uint64) *big.Int {
	staticBlockReward := new(big.Int)
	// https://blog.ethereum.org/2017/10/12/byzantium-hf-announcement/
	if blockNumber >= 7280000 {
		staticBlockReward.SetString("2000000000000000000", 10)
	} else if blockNumber >= 4370000 {
		staticBlockReward.SetString("3000000000000000000", 10)
	} else {
		staticBlockReward.SetString("5000000000000000000", 10)
	}
	return staticBlockReward
}

func calcEthTransactionFees(txs types.Transactions, receipts types.Receipts) *big.Int {
	transactionFees := new(big.Int)
	for i, transaction := range txs {
		receipt := receipts[i]
		gasPrice := big.NewInt(transaction.GasPrice().Int64())
		gasUsed := big.NewInt(int64(receipt.GasUsed))
		transactionFee := gasPrice.Mul(gasPrice, gasUsed)
		transactionFees = transactionFees.Add(transactionFees, transactionFee)
	}
	return transactionFees
}

func calcEthUncleInclusionRewards(header *types.Header, uncles []*types.Header) *big.Int {
	uncleInclusionRewards := new(big.Int)
	for range uncles {
		staticBlockReward := staticRewardByBlockNumber(header.Number.Uint64())
		staticBlockReward.Div(staticBlockReward, big.NewInt(32))
		uncleInclusionRewards.Add(uncleInclusionRewards, staticBlockReward)
	}
	return uncleInclusionRewards
}
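
As a quick sanity check of the formulas above: post-Constantinople the static reward is 2 ETH, so an uncle mined one block behind the block that includes it earns 2/8 * 7 = 1.75 ETH, and the including miner earns an extra 2/32 = 0.0625 ETH per uncle. A minimal sketch, assuming the indexer package is importable at the path implied by its location:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/statediff/indexer"
)

func main() {
	// Uncle at 7,280,000 referenced by block 7,280,001: (2 ETH / 8) * (7280000 + 8 - 7280001)
	fmt.Println(indexer.CalcUncleMinerReward(7280001, 7280000)) // 1750000000000000000 wei = 1.75 ETH

	// With no transactions and no uncles, the block reward is just the static reward.
	header := &types.Header{Number: big.NewInt(7280001)}
	fmt.Println(indexer.CalcEthBlockReward(header, nil, nil, nil)) // 2000000000000000000 wei = 2 ETH
}
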
78
statediff/indexer/shared/chain_type.go
Normal file
@ -0,0 +1,78 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package shared

import (
	"errors"
	"strings"
)

// ChainType enum for specifying blockchain
type ChainType int

const (
	UnknownChain ChainType = iota
	Ethereum
	Bitcoin
	Omni
	EthereumClassic
)

func (c ChainType) String() string {
	switch c {
	case Ethereum:
		return "Ethereum"
	case Bitcoin:
		return "Bitcoin"
	case Omni:
		return "Omni"
	case EthereumClassic:
		return "EthereumClassic"
	default:
		return ""
	}
}

func (c ChainType) API() string {
	switch c {
	case Ethereum:
		return "eth"
	case Bitcoin:
		return "btc"
	case Omni:
		return "omni"
	case EthereumClassic:
		return "etc"
	default:
		return ""
	}
}

func NewChainType(name string) (ChainType, error) {
	switch strings.ToLower(name) {
	case "ethereum", "eth":
		return Ethereum, nil
	case "bitcoin", "btc", "xbt":
		return Bitcoin, nil
	case "omni":
		return Omni, nil
	case "classic", "etc":
		return EthereumClassic, nil
	default:
		return UnknownChain, errors.New("invalid name for chain")
	}
}
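
A small illustrative round trip through the ChainType helpers above, assuming the shared package is importable at the path implied by its location:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/statediff/indexer/shared"
)

func main() {
	chain, err := shared.NewChainType("ETH") // matching is case-insensitive and accepts aliases
	if err != nil {
		panic(err)
	}
	fmt.Println(chain.String(), chain.API()) // Ethereum eth

	if _, err := shared.NewChainType("dogecoin"); err != nil {
		fmt.Println(err) // invalid name for chain
	}
}
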
22
statediff/indexer/shared/constants.go
Normal file
@ -0,0 +1,22 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package shared

const (
	DefaultMaxBatchSize   uint64 = 100
	DefaultMaxBatchNumber int64  = 50
)
101
statediff/indexer/shared/data_type.go
Normal file
@ -0,0 +1,101 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package shared

import (
	"fmt"
	"strings"
)

// DataType is an enum to loosely represent type of chain data
type DataType int

const (
	UnknownDataType DataType = iota - 1
	Full
	Headers
	Uncles
	Transactions
	Receipts
	State
	Storage
)

// String method to resolve the DataType enum
func (r DataType) String() string {
	switch r {
	case Full:
		return "full"
	case Headers:
		return "headers"
	case Uncles:
		return "uncles"
	case Transactions:
		return "transactions"
	case Receipts:
		return "receipts"
	case State:
		return "state"
	case Storage:
		return "storage"
	default:
		return "unknown"
	}
}

// GenerateDataTypeFromString resolves a DataType from its string alias
func GenerateDataTypeFromString(str string) (DataType, error) {
	switch strings.ToLower(str) {
	case "full", "f":
		return Full, nil
	case "headers", "header", "h":
		return Headers, nil
	case "uncles", "u":
		return Uncles, nil
	case "transactions", "transaction", "trxs", "txs", "trx", "tx", "t":
		return Transactions, nil
	case "receipts", "receipt", "rcts", "rct", "r":
		return Receipts, nil
	case "state":
		return State, nil
	case "storage":
		return Storage, nil
	default:
		return UnknownDataType, fmt.Errorf("unrecognized resync type: %s", str)
	}
}

func SupportedDataType(d DataType) (bool, error) {
	switch d {
	case Full:
		return true, nil
	case Headers:
		return true, nil
	case Uncles:
		return true, nil
	case Transactions:
		return true, nil
	case Receipts:
		return true, nil
	case State:
		return true, nil
	case Storage:
		return true, nil
	default:
		return true, nil
	}
}
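
Likewise, a brief sketch of how the DataType helpers resolve the CLI-style aliases above (same import-path assumption as before):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/statediff/indexer/shared"
)

func main() {
	dt, err := shared.GenerateDataTypeFromString("rcts") // any receipt alias resolves to Receipts
	if err != nil {
		panic(err)
	}
	fmt.Println(dt) // receipts

	supported, _ := shared.SupportedDataType(dt)
	fmt.Println(supported) // true; note the default branch also returns true in this revision
}
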
124
statediff/indexer/shared/functions.go
Normal file
@ -0,0 +1,124 @@
i-norden
commented
We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginkgo/gomega
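
As a rough before/after for the second suggestion, the kind of conversion implied would look something like the following; somethingUnderTest is a hypothetical stand-in, not code from this PR.

package shared_test

import "testing"

// somethingUnderTest is a hypothetical function standing in for whatever a ported test exercises.
func somethingUnderTest() (int, error) { return 42, nil }

func TestSomethingUnderTest(t *testing.T) {
	got, err := somethingUnderTest()
	if err != nil {
		t.Fatalf("unexpected error: %v", err) // gomega equivalent: Expect(err).ToNot(HaveOccurred())
	}
	if got != 42 {
		t.Errorf("got %d, want 42", got) // gomega equivalent: Expect(got).To(Equal(42))
	}
}
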
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package shared

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"

	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-ipfs-blockstore"
	"github.com/ipfs/go-ipfs-ds-help"
	node "github.com/ipfs/go-ipld-format"
	"github.com/jmoiron/sqlx"
	"github.com/multiformats/go-multihash"
)

// HandleZeroAddrPointer will return an empty string for a nil address pointer
func HandleZeroAddrPointer(to *common.Address) string {
	if to == nil {
		return ""
	}
	return to.Hex()
}

// HandleZeroAddr will return an empty string for a 0 value address
func HandleZeroAddr(to common.Address) string {
	if to.Hex() == "0x0000000000000000000000000000000000000000" {
		return ""
	}
	return to.Hex()
}

// Rollback sql transaction and log any error
func Rollback(tx *sqlx.Tx) {
	if err := tx.Rollback(); err != nil {
		log.Error(err.Error())
	}
}

// PublishIPLD is used to insert an IPLD into Postgres blockstore with the provided tx
func PublishIPLD(tx *sqlx.Tx, i node.Node) error {
	dbKey := dshelp.MultihashToDsKey(i.Cid().Hash())
	prefixedKey := blockstore.BlockPrefix.String() + dbKey.String()
	raw := i.RawData()
	_, err := tx.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`, prefixedKey, raw)
	return err
}

// FetchIPLD is used to retrieve an ipld from Postgres blockstore with the provided tx and cid string
func FetchIPLD(tx *sqlx.Tx, cid string) ([]byte, error) {
	mhKey, err := MultihashKeyFromCIDString(cid)
	if err != nil {
		return nil, err
	}
	pgStr := `SELECT data FROM public.blocks WHERE key = $1`
	var block []byte
	return block, tx.Get(&block, pgStr, mhKey)
}

// FetchIPLDByMhKey is used to retrieve an ipld from Postgres blockstore with the provided tx and mhkey string
func FetchIPLDByMhKey(tx *sqlx.Tx, mhKey string) ([]byte, error) {
	pgStr := `SELECT data FROM public.blocks WHERE key = $1`
	var block []byte
	return block, tx.Get(&block, pgStr, mhKey)
}

// MultihashKeyFromCID converts a cid into a blockstore-prefixed multihash db key string
func MultihashKeyFromCID(c cid.Cid) string {
	dbKey := dshelp.MultihashToDsKey(c.Hash())
	return blockstore.BlockPrefix.String() + dbKey.String()
}
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
// MultihashKeyFromCIDString converts a cid string into a blockstore-prefixed multihash db key string
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
func MultihashKeyFromCIDString(c string) (string, error) {
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
dc, err := cid.Decode(c)
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
if err != nil {
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
return "", err
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
}
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
dbKey := dshelp.MultihashToDsKey(dc.Hash())
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
return blockstore.BlockPrefix.String() + dbKey.String(), nil
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
}
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
// PublishRaw derives a cid from raw bytes and provided codec and multihash type, and writes it to the db tx
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
func PublishRaw(tx *sqlx.Tx, codec, mh uint64, raw []byte) (string, error) {
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
c, err := ipld.RawdataToCid(codec, raw, mh)
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
if err != nil {
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
return "", err
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
}
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
dbKey := dshelp.MultihashToDsKey(c.Hash())
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
prefixedKey := blockstore.BlockPrefix.String() + dbKey.String()
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
_, err = tx.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`, prefixedKey, raw)
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
return c.String(), err
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
}
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
// MultihashKeyFromKeccak256 converts keccak256 hash bytes into a blockstore-prefixed multihash db key string
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
func MultihashKeyFromKeccak256(hash common.Hash) (string, error) {
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
mh, err := multihash.Encode(hash.Bytes(), multihash.KECCAK_256)
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
if err != nil {
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
return "", err
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
}
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
dbKey := dshelp.MultihashToDsKey(mh)
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
return blockstore.BlockPrefix.String() + dbKey.String(), nil
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
}
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
|
||||
|
i-norden
commented
We should update all the ported packages to use Geth's log package We should update all the ported packages to use Geth's log package
i-norden
commented
Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega Similarly, we should update any tests we ported over to use the default test pkg that the rest of geth uses instead of ginko/gomega
|
||||
// PublishDirect directly writes a previously derived mhkey => value pair to the ipld database
func PublishDirect(tx *sqlx.Tx, key string, value []byte) error {
    _, err := tx.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`, key, value)
    return err
}
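Picking up the reviewer's note above about moving the ported packages onto Geth's log package, a minimal sketch of what that swap might look like follows. This is illustrative only: the helper name and message are hypothetical, and it assumes these publish/fetch helpers live in the indexer's shared package; only the log import and its structured key/value call style come from go-ethereum itself.

package shared

import "github.com/ethereum/go-ethereum/log"

// logPublishFailure is a hypothetical helper showing geth's structured,
// key/value logging style in place of a ported logger.
func logPublishFailure(mhKey string, err error) {
    log.Error("failed to publish IPLD block to Postgres", "mhKey", mhKey, "err", err)
}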
statediff/indexer/shared/test_helpers.go (new file)
@ -0,0 +1,74 @@

// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package shared

import (
    "reflect"
    "testing"

    "github.com/ipfs/go-cid"
    "github.com/multiformats/go-multihash"

    "github.com/ethereum/go-ethereum/statediff/indexer/node"
    "github.com/ethereum/go-ethereum/statediff/indexer/postgres"
)

func ExpectEqual(t *testing.T, got interface{}, want interface{}) {
    if !reflect.DeepEqual(got, want) {
        t.Fatalf("Expected: %v\nActual: %v", want, got)
    }
}

// SetupDB is used to set up a db for watcher tests
func SetupDB() (*postgres.DB, error) {
    uri := postgres.DbConnectionString(postgres.ConnectionParams{
        User:     "vulcanize",
        Password: "libertad",
        Hostname: "localhost",
        Name:     "vulcanize_testing",
        Port:     5432,
    })
    return postgres.NewDB(uri, postgres.ConnectionConfig{}, node.Info{})
}

// ListContainsString used to check if a list of strings contains a particular string
func ListContainsString(sss []string, s string) bool {
    for _, str := range sss {
        if s == str {
            return true
        }
    }
    return false
}

// TestCID creates a basic CID for testing purposes
func TestCID(b []byte) cid.Cid {
    pref := cid.Prefix{
        Version:  1,
        Codec:    cid.Raw,
        MhType:   multihash.KECCAK_256,
        MhLength: -1,
    }
    c, _ := pref.Sum(b)
    return c
}

// PublishMockIPLD writes a mhkey-data pair to the public.blocks table so that test data can FK reference the mhkey
func PublishMockIPLD(db *postgres.DB, mhKey string, mockData []byte) error {
    _, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`, mhKey, mockData)
    return err
}
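In line with the reviewer's suggestion to use the standard testing package rather than ginkgo/gomega, a hypothetical test built on the helpers above might look like the sketch below. It assumes a reachable vulcanize_testing database as configured in SetupDB, and that the publish/multihash helpers shown earlier live in the same shared package; the test name and mock data are illustrative.

package shared

import "testing"

// TestPublishMockIPLD is a hypothetical example test, not part of the PR.
func TestPublishMockIPLD(t *testing.T) {
    db, err := SetupDB()
    if err != nil {
        t.Fatal(err)
    }
    mockData := []byte("mock ipld data")
    mhKey := MultihashKeyFromCID(TestCID(mockData))
    if err := PublishMockIPLD(db, mhKey, mockData); err != nil {
        t.Fatal(err)
    }
}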
statediff/indexer/shared/types.go (new file)
@ -0,0 +1,42 @@

// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package shared

import "github.com/ethereum/go-ethereum/common"
import "github.com/ethereum/go-ethereum/statediff/types"
import "github.com/ethereum/go-ethereum/statediff/indexer/models"

// TrieNode struct used to flag whether a trie node is a leaf or not
type TrieNode struct {
    Path    []byte
    LeafKey common.Hash
    Value   []byte
    Type    types.NodeType
}

// CIDPayload is a struct to hold all the CIDs and their associated metadata for indexing in Postgres
// Returned by IPLDPublisher
// Passed to CIDIndexer
type CIDPayload struct {
    HeaderCID       models.HeaderModel
    UncleCIDs       []models.UncleModel
    TransactionCIDs []models.TxModel
    ReceiptCIDs     map[common.Hash]models.ReceiptModel
    StateNodeCIDs   []models.StateNodeModel
    StateAccounts   map[string]models.StateAccountModel
    StorageNodeCIDs map[string][]models.StorageNodeModel
}
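To make the publisher-to-indexer handoff concrete, here is a minimal, purely illustrative CIDPayload construction. All values are placeholders and the model field types are assumed; only the CIDPayload field names and the CID/MhKey fields on the models come from the code above and the writer below.

package example

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/statediff/indexer/models"
    "github.com/ethereum/go-ethereum/statediff/indexer/shared"
)

// mockCIDPayload returns a minimal illustrative payload with one header and one tx CID.
func mockCIDPayload() shared.CIDPayload {
    return shared.CIDPayload{
        HeaderCID:       models.HeaderModel{CID: "bagiacgza...", MhKey: "/blocks/..."},
        TransactionCIDs: []models.TxModel{{CID: "bagjqcgza...", MhKey: "/blocks/..."}},
        ReceiptCIDs:     map[common.Hash]models.ReceiptModel{},
        StateNodeCIDs:   []models.StateNodeModel{},
        StateAccounts:   map[string]models.StateAccountModel{},
        StorageNodeCIDs: map[string][]models.StorageNodeModel{},
    }
}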
statediff/indexer/test_helpers.go (new file)
@ -0,0 +1,81 @@

// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package indexer

import (
    "testing"

    "github.com/ethereum/go-ethereum/statediff/indexer/models"
    "github.com/ethereum/go-ethereum/statediff/indexer/postgres"
)

// TearDownDB is used to tear down the watcher dbs after tests
func TearDownDB(t *testing.T, db *postgres.DB) {
    tx, err := db.Beginx()
    if err != nil {
        t.Fatal(err)
    }

    _, err = tx.Exec(`DELETE FROM eth.header_cids`)
    if err != nil {
        t.Fatal(err)
    }
    _, err = tx.Exec(`DELETE FROM eth.transaction_cids`)
    if err != nil {
        t.Fatal(err)
    }
    _, err = tx.Exec(`DELETE FROM eth.receipt_cids`)
    if err != nil {
        t.Fatal(err)
    }
    _, err = tx.Exec(`DELETE FROM eth.state_cids`)
    if err != nil {
        t.Fatal(err)
    }
    _, err = tx.Exec(`DELETE FROM eth.storage_cids`)
    if err != nil {
        t.Fatal(err)
    }
    _, err = tx.Exec(`DELETE FROM blocks`)
    if err != nil {
        t.Fatal(err)
    }
    err = tx.Commit()
    if err != nil {
        t.Fatal(err)
    }
}

// TxModelsContainsCID used to check if a list of TxModels contains a specific cid string
func TxModelsContainsCID(txs []models.TxModel, cid string) bool {
    for _, tx := range txs {
        if tx.CID == cid {
            return true
        }
    }
    return false
}

// ReceiptModelsContainsCID used to check if a list of ReceiptModels contains a specific cid string
func ReceiptModelsContainsCID(rcts []models.ReceiptModel, cid string) bool {
    for _, rct := range rcts {
        if rct.CID == cid {
            return true
        }
    }
    return false
}
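A plausible way these helpers combine in an indexer test is sketched below, assuming shared.SetupDB points at a reachable test database; the test name and body are hypothetical and only illustrate the setup/teardown pattern.

package indexer

import (
    "testing"

    "github.com/ethereum/go-ethereum/statediff/indexer/shared"
)

// TestIndexerRoundTrip is an illustrative sketch, not code from the PR.
func TestIndexerRoundTrip(t *testing.T) {
    db, err := shared.SetupDB()
    if err != nil {
        t.Fatal(err)
    }
    // Clear the eth.* tables and public.blocks when the test exits.
    defer TearDownDB(t, db)

    // ... index a block here, then assert on the rows written to Postgres,
    // e.g. with TxModelsContainsCID(fetchedTxs, expectedCID).
}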
statediff/indexer/writer.go (new file)
@ -0,0 +1,138 @@

// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package indexer

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/jmoiron/sqlx"

    "github.com/ethereum/go-ethereum/statediff/indexer/models"
    "github.com/ethereum/go-ethereum/statediff/indexer/postgres"
    "github.com/ethereum/go-ethereum/statediff/indexer/prom"
    "github.com/ethereum/go-ethereum/statediff/indexer/shared"
)

var (
    nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")
)

// PostgresCIDWriter handles the writing of indexed CIDs and their associated metadata to Postgres
type PostgresCIDWriter struct {
    db *postgres.DB
}

// NewPostgresCIDWriter creates a new pointer to a PostgresCIDWriter
func NewPostgresCIDWriter(db *postgres.DB) *PostgresCIDWriter {
    return &PostgresCIDWriter{
        db: db,
    }
}

func (in *PostgresCIDWriter) upsertHeaderCID(tx *sqlx.Tx, header models.HeaderModel) (int64, error) {
    var headerID int64
    err := tx.QueryRowx(`INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated)
        VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
        ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1)
        RETURNING id`,
        header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, in.db.NodeID, header.Reward, header.StateRoot, header.TxRoot,
        header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, header.MhKey, 1).Scan(&headerID)
    if err == nil {
        prom.BlockInc()
    }
    return headerID, err
}

func (in *PostgresCIDWriter) upsertUncleCID(tx *sqlx.Tx, uncle models.UncleModel, headerID int64) error {
    _, err := tx.Exec(`INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6)
        ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward, mh_key) = ($3, $4, $5, $6)`,
        uncle.BlockHash, headerID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey)
    return err
}

func (in *PostgresCIDWriter) upsertTransactionAndReceiptCIDs(tx *sqlx.Tx, payload shared.CIDPayload, headerID int64) error {
    for _, trxCidMeta := range payload.TransactionCIDs {
        var txID int64
        err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
            ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, tx_data) = ($3, $4, $5, $6, $7, $8)
            RETURNING id`,
            headerID, trxCidMeta.TxHash, trxCidMeta.CID, trxCidMeta.Dst, trxCidMeta.Src, trxCidMeta.Index, trxCidMeta.MhKey, trxCidMeta.Data).Scan(&txID)
        if err != nil {
            return err
        }
        prom.TransactionInc()
        receiptCidMeta, ok := payload.ReceiptCIDs[common.HexToHash(trxCidMeta.TxHash)]
        if ok {
            if err := in.upsertReceiptCID(tx, receiptCidMeta, txID); err != nil {
                return err
            }
        }
    }
    return nil
}

func (in *PostgresCIDWriter) upsertTransactionCID(tx *sqlx.Tx, transaction models.TxModel, headerID int64) (int64, error) {
    var txID int64
    err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
        ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, tx_data) = ($3, $4, $5, $6, $7, $8)
        RETURNING id`,
        headerID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, transaction.MhKey, transaction.Data).Scan(&txID)
    if err == nil {
        prom.TransactionInc()
    }
    return txID, err
}

func (in *PostgresCIDWriter) upsertReceiptCID(tx *sqlx.Tx, rct models.ReceiptModel, txID int64) error {
    _, err := tx.Exec(`INSERT INTO eth.receipt_cids (tx_id, cid, contract, contract_hash, topic0s, topic1s, topic2s, topic3s, log_contracts, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
        ON CONFLICT (tx_id) DO UPDATE SET (cid, contract, contract_hash, topic0s, topic1s, topic2s, topic3s, log_contracts, mh_key) = ($2, $3, $4, $5, $6, $7, $8, $9, $10)`,
        txID, rct.CID, rct.Contract, rct.ContractHash, rct.Topic0s, rct.Topic1s, rct.Topic2s, rct.Topic3s, rct.LogContracts, rct.MhKey)
    if err == nil {
        prom.ReceiptInc()
    }
    return err
}

func (in *PostgresCIDWriter) upsertStateCID(tx *sqlx.Tx, stateNode models.StateNodeModel, headerID int64) (int64, error) {
    var stateID int64
    var stateKey string
    if stateNode.StateKey != nullHash.String() {
        stateKey = stateNode.StateKey
    }
    err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
        ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)
        RETURNING id`,
        headerID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey).Scan(&stateID)
    return stateID, err
}

func (in *PostgresCIDWriter) upsertStateAccount(tx *sqlx.Tx, stateAccount models.StateAccountModel, stateID int64) error {
    _, err := tx.Exec(`INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5)
        ON CONFLICT (state_id) DO UPDATE SET (balance, nonce, code_hash, storage_root) = ($2, $3, $4, $5)`,
        stateID, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot)
    return err
}

func (in *PostgresCIDWriter) upsertStorageCID(tx *sqlx.Tx, storageCID models.StorageNodeModel, stateID int64) error {
    var storageKey string
    if storageCID.StorageKey != nullHash.String() {
        storageKey = storageCID.StorageKey
    }
    _, err := tx.Exec(`INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
        ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)`,
        stateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, true, storageCID.MhKey)
    return err
}
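All of the upsert helpers above are written against a single sqlx transaction, so a caller inside this package would be expected to compose them roughly as sketched below. This is an illustrative sequence under the assumption that the surrounding indexer owns the transaction lifecycle; it is not an excerpt from the PR.

package indexer

import "github.com/ethereum/go-ethereum/statediff/indexer/models"

// writeHeaderWithUncle is an illustrative helper: write a header row, then
// attach an uncle row to the returned header ID, all in one transaction.
func writeHeaderWithUncle(in *PostgresCIDWriter, header models.HeaderModel, uncle models.UncleModel) error {
    tx, err := in.db.Beginx()
    if err != nil {
        return err
    }
    headerID, err := in.upsertHeaderCID(tx, header)
    if err != nil {
        tx.Rollback()
        return err
    }
    if err := in.upsertUncleCID(tx, uncle, headerID); err != nil {
        tx.Rollback()
        return err
    }
    return tx.Commit()
}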
@ -38,6 +38,7 @@ import (
    "github.com/ethereum/go-ethereum/rlp"
    "github.com/ethereum/go-ethereum/statediff"
    "github.com/ethereum/go-ethereum/statediff/testhelpers"
    sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)

var (
@ -46,7 +47,7 @@ var (
    block1CoinbaseAddr, block2CoinbaseAddr, block3CoinbaseAddr common.Address
    block1CoinbaseHash, block2CoinbaseHash, block3CoinbaseHash common.Hash
    builder statediff.Builder
    emptyStorage = make([]statediff.StorageNode, 0)
    emptyStorage = make([]sdtypes.StorageNode, 0)

    // block 1 data
    block1CoinbaseAccount, _ = rlp.EncodeToBytes(state.Account{
@ -491,28 +492,28 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
            &statediff.StateObject{
                BlockNumber: block1.Number(),
                BlockHash:   block1.Hash(),
                Nodes: []statediff.StateNode{
                Nodes: []sdtypes.StateNode{
                    {
                        Path:         []byte{},
                        NodeType:     statediff.Branch,
                        NodeType:     sdtypes.Branch,
                        StorageNodes: emptyStorage,
                        NodeValue:    block1RootBranchNode,
                    },
                    {
                        Path:         []byte{'\x04'},
                        NodeType:     statediff.Branch,
                        NodeType:     sdtypes.Branch,
                        StorageNodes: emptyStorage,
                        NodeValue:    block1x04BranchNode,
                    },
                    {
                        Path:         []byte{'\x04', '\x0b'},
                        NodeType:     statediff.Branch,
                        NodeType:     sdtypes.Branch,
                        StorageNodes: emptyStorage,
                        NodeValue:    block1x040bBranchNode,
                    },
                    {
                        Path:         []byte{'\x04', '\x0b', '\x0e'},
                        NodeType:     statediff.Leaf,
                        NodeType:     sdtypes.Leaf,
                        LeafKey:      block1CoinbaseHash.Bytes(),
                        NodeValue:    block1CoinbaseLeafNode,
                        StorageNodes: emptyStorage,
@ -534,28 +535,28 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
            &statediff.StateObject{
                BlockNumber: block2.Number(),
                BlockHash:   block2.Hash(),
                Nodes: []statediff.StateNode{
                Nodes: []sdtypes.StateNode{
                    {
                        Path:         []byte{},
                        NodeType:     statediff.Branch,
                        NodeType:     sdtypes.Branch,
                        StorageNodes: emptyStorage,
                        NodeValue:    block2RootBranchNode,
                    },
                    {
                        Path:         []byte{'\x00'},
                        NodeType:     statediff.Branch,
                        NodeType:     sdtypes.Branch,
                        StorageNodes: emptyStorage,
                        NodeValue:    block2x00BranchNode,
                    },
                    {
                        Path:         []byte{'\x00', '\x08'},
                        NodeType:     statediff.Branch,
                        NodeType:     sdtypes.Branch,
                        StorageNodes: emptyStorage,
                        NodeValue:    block2x0008BranchNode,
                    },
                    {
                        Path:         []byte{'\x00', '\x08', '\x0d'},
                        NodeType:     statediff.Branch,
                        NodeType:     sdtypes.Branch,
                        StorageNodes: emptyStorage,
                        NodeValue:    block2x00080dBranchNode,
                    },
@ -564,14 +565,14 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
                    // which necessitates we create a branch at x00 x08 x0d (as shown in the below UpdateAccounts)
                    {
                        Path:         []byte{'\x00', '\x08', '\x0d', '\x00'},
                        NodeType:     statediff.Leaf,
                        NodeType:     sdtypes.Leaf,
                        StorageNodes: emptyStorage,
                        LeafKey:      common.HexToHash("08d0f2e24db7943eab4415f99e109698863b0fecca1cf9ffc500f38cefbbe29e").Bytes(),
                        NodeValue:    block2MovedPremineLeafNode,
                    },
                    {
                        Path:         []byte{'\x00', '\x08', '\x0d', '\x04'},
                        NodeType:     statediff.Leaf,
                        NodeType:     sdtypes.Leaf,
                        StorageNodes: emptyStorage,
                        LeafKey:      block2CoinbaseHash.Bytes(),
                        NodeValue:    block2CoinbaseLeafNode,
@ -592,66 +593,66 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
            &statediff.StateObject{
                BlockNumber: block3.Number(),
                BlockHash:   block3.Hash(),
                Nodes: []statediff.StateNode{
                Nodes: []sdtypes.StateNode{
                    {
                        Path:         []byte{},
                        NodeType:     statediff.Branch,
                        NodeType:     sdtypes.Branch,
                        StorageNodes: emptyStorage,
                        NodeValue:    block3RootBranchNode,
                    },
                    {
                        Path:         []byte{'\x06'},
                        NodeType:     statediff.Branch,
                        NodeType:     sdtypes.Branch,
                        StorageNodes: emptyStorage,
                        NodeValue:    block3x06BranchNode,
                    },
                    {
                        Path:         []byte{'\x06', '\x0e'},
                        NodeType:     statediff.Branch,
                        NodeType:     sdtypes.Branch,
                        StorageNodes: emptyStorage,
                        NodeValue:    block3x060eBranchNode,
                    },
                    {
                        Path:         []byte{'\x0c'},
                        NodeType:     statediff.Branch,
                        NodeType:     sdtypes.Branch,
                        StorageNodes: emptyStorage,
                        NodeValue:    block3x0cBranchNode,
                    },
                    {
                        Path:         []byte{'\x0c', '\x0e'},
                        NodeType:     statediff.Branch,
                        NodeType:     sdtypes.Branch,
                        StorageNodes: emptyStorage,
                        NodeValue:    block3x0c0eBranchNode,
                    },
                    {
                        Path:         []byte{'\x0c', '\x0e', '\x05'},
                        NodeType:     statediff.Branch,
                        NodeType:     sdtypes.Branch,
                        StorageNodes: emptyStorage,
                        NodeValue:    block3x0c0e05BranchNode,
                    },
                    {
                        Path:         []byte{'\x0c', '\x0e', '\x05', '\x07'},
                        NodeType:     statediff.Branch,
                        NodeType:     sdtypes.Branch,
                        StorageNodes: emptyStorage,
                        NodeValue:    block3x0c0e0507BranchNode,
                    },
                    { // How was this account created???
                        Path:         []byte{'\x0c', '\x0e', '\x05', '\x07', '\x03'},
                        NodeType:     statediff.Leaf,
                        NodeType:     sdtypes.Leaf,
                        StorageNodes: emptyStorage,
                        LeafKey:      common.HexToHash("ce573ced93917e658d10e2d9009470dad72b63c898d173721194a12f2ae5e190").Bytes(),
                        NodeValue:    block3MovedPremineLeafNode1,
                    },
                    { // This account (leaf) used to be at 0c 0e 05 07, likely moves because of the new account above
                        Path:         []byte{'\x0c', '\x0e', '\x05', '\x07', '\x08'},
                        NodeType:     statediff.Leaf,
                        NodeType:     sdtypes.Leaf,
                        StorageNodes: emptyStorage,
                        LeafKey:      common.HexToHash("ce5783bc1e69eedf90f402e11f6862da14ed8e50156635a04d6393bbae154012").Bytes(),
                        NodeValue:    block3MovedPremineLeafNode2,
                    },
                    { // this is the new account created due to the coinbase mining a block, it's creation shouldn't affect 0x 0e 05 07
                        Path:         []byte{'\x06', '\x0e', '\x0f'},
                        NodeType:     statediff.Leaf,
                        NodeType:     sdtypes.Leaf,
                        StorageNodes: emptyStorage,
                        LeafKey:      block3CoinbaseHash.Bytes(),
                        NodeValue:    block3CoinbaseLeafNode,
@ -20,6 +20,7 @@ import (
    "bytes"
    "fmt"
    "math/big"
    "strconv"
    "sync"
    "sync/atomic"

@ -28,6 +29,7 @@ import (
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/eth"
    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/node"
@ -35,10 +37,25 @@ import (
    "github.com/ethereum/go-ethereum/rlp"
    "github.com/ethereum/go-ethereum/rpc"
    "github.com/ethereum/go-ethereum/trie"

    ind "github.com/ethereum/go-ethereum/statediff/indexer"
    nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
    "github.com/ethereum/go-ethereum/statediff/indexer/postgres"
    "github.com/ethereum/go-ethereum/statediff/indexer/prom"
    . "github.com/ethereum/go-ethereum/statediff/types"
)

const chainEventChanSize = 20000

var writeLoopParams = Params{
    IntermediateStateNodes:   true,
    IntermediateStorageNodes: true,
    IncludeBlock:             true,
    IncludeReceipts:          true,
    IncludeTD:                true,
    IncludeCode:              true,
}

type blockChain interface {
    SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
    GetBlockByHash(hash common.Hash) *types.Block
@ -65,6 +82,10 @@ type IService interface {
    StateTrieAt(blockNumber uint64, params Params) (*Payload, error)
    // Method to stream out all code and codehash pairs
    StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- CodeAndCodeHash, quitChan chan<- bool)
    // Method to write state diff object directly to DB
    WriteStateDiffAt(blockNumber uint64, params Params) error
    // Event loop for progressively processing and writing diffs directly to DB
    WriteLoop(chainEventCh chan core.ChainEvent)
}

// Service is the underlying struct for the state diffing service
@ -82,13 +103,44 @@ type Service struct {
    // A mapping of subscription params rlp hash to the corresponding subscription params
    SubscriptionTypes map[common.Hash]Params
    // Cache the last block so that we can avoid having to lookup the next block's parent
    lastBlock *types.Block
    lastBlock lastBlockCache
    // Whether or not we have any subscribers; only if we do, do we process state diffs
    subscribers int32
    // Interface for publishing statediffs as PG-IPLD objects
    indexer ind.Indexer
    // Whether to enable writing state diffs directly to track blockchain head
    enableWriteLoop bool
}

// Wrap the cached last block for safe access from different service loops
type lastBlockCache struct {
    sync.Mutex
    block *types.Block
}

// NewStateDiffService creates a new statediff.Service
func NewStateDiffService(blockChain *core.BlockChain) (*Service, error) {
func NewStateDiffService(ethServ *eth.Ethereum, dbParams *[3]string, enableWriteLoop bool) (*Service, error) {
    blockChain := ethServ.BlockChain()
    var indexer ind.Indexer
    if dbParams != nil {
        info := nodeinfo.Info{
            GenesisBlock: blockChain.Genesis().Hash().Hex(),
            NetworkID:    strconv.FormatUint(ethServ.NetVersion(), 10),
            // ChainID: blockChain.Config().ChainID.String(),
            ChainID:    blockChain.Config().ChainID.Uint64(),
            ID:         dbParams[1],
            ClientName: dbParams[2],
        }

        // TODO: pass max idle, open, lifetime?
        db, err := postgres.NewDB(dbParams[0], postgres.ConnectionConfig{}, info)
        if err != nil {
            return nil, err
        }
        indexer = ind.NewStateDiffIndexer(blockChain.Config(), db)
    }
    prom.Init()

    return &Service{
        Mutex:      sync.Mutex{},
        BlockChain: blockChain,
@ -96,6 +148,8 @@ func NewStateDiffService(blockChain *core.BlockChain) (*Service, error) {
        QuitChan:          make(chan bool),
        Subscriptions:     make(map[common.Hash]map[rpc.ID]Subscription),
        SubscriptionTypes: make(map[common.Hash]Params),
        indexer:           indexer,
        enableWriteLoop:   enableWriteLoop,
    }, nil
}

@ -116,6 +170,52 @@ func (sds *Service) APIs() []rpc.API {
    }
}
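For clarity on the new constructor arguments, the sketch below shows how a caller might wire it up. The ordering of the dbParams array (Postgres connection string, node ID, client name) follows the code above; the connection URI and names themselves are placeholders, and the wrapper function is hypothetical.

package main

import (
    "github.com/ethereum/go-ethereum/eth"
    "github.com/ethereum/go-ethereum/statediff"
)

// registerStateDiff is an illustrative caller of the new constructor.
func registerStateDiff(ethServ *eth.Ethereum) (*statediff.Service, error) {
    dbParams := [3]string{
        "postgres://vulcanize:password@localhost:5432/vulcanize_public?sslmode=disable", // connection string
        "mynodeid",       // node ID
        "my-client-name", // client name
    }
    // The final argument enables the progressive DB write loop.
    return statediff.NewStateDiffService(ethServ, &dbParams, true)
}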
func (lbc *lastBlockCache) replace(currentBlock *types.Block, bc blockChain) *types.Block {
    lbc.Lock()
    parentHash := currentBlock.ParentHash()
    var parentBlock *types.Block
    if lbc.block != nil && bytes.Equal(lbc.block.Hash().Bytes(), parentHash.Bytes()) {
        parentBlock = lbc.block
    } else {
        parentBlock = bc.GetBlockByHash(parentHash)
    }
    lbc.block = currentBlock
    lbc.Unlock()
    return parentBlock
}

func (sds *Service) WriteLoop(chainEventCh chan core.ChainEvent) {
    chainEventSub := sds.BlockChain.SubscribeChainEvent(chainEventCh)
    defer chainEventSub.Unsubscribe()
    errCh := chainEventSub.Err()
    for {
        select {
        // Notify chain event channel of events
        case chainEvent := <-chainEventCh:
            log.Debug("(WriteLoop) Event received from chainEventCh", "event", chainEvent)
            currentBlock := chainEvent.Block
            parentBlock := sds.lastBlock.replace(currentBlock, sds.BlockChain)
            if parentBlock == nil {
                log.Error(fmt.Sprintf("Parent block is nil, skipping this block (%d)", currentBlock.Number()))
                continue
            }
            err := sds.writeStateDiff(currentBlock, parentBlock.Root(), writeLoopParams)
            if err != nil {
                log.Error(fmt.Sprintf("statediff (DB write) processing error at blockheight %d: err: %s", currentBlock.Number().Uint64(), err.Error()))
                continue
            }
        case err := <-errCh:
            log.Warn("Error from chain event subscription", "error", err)
            sds.close()
            return
        case <-sds.QuitChan:
            log.Info("Quitting the statediff writing process")
            sds.close()
            return
        }
    }
}

// Loop is the main processing method
func (sds *Service) Loop(chainEventCh chan core.ChainEvent) {
    chainEventSub := sds.BlockChain.SubscribeChainEvent(chainEventCh)
@ -132,14 +232,7 @@ func (sds *Service) Loop(chainEventCh chan core.ChainEvent) {
                continue
            }
            currentBlock := chainEvent.Block
            parentHash := currentBlock.ParentHash()
            var parentBlock *types.Block
            if sds.lastBlock != nil && bytes.Equal(sds.lastBlock.Hash().Bytes(), currentBlock.ParentHash().Bytes()) {
                parentBlock = sds.lastBlock
            } else {
                parentBlock = sds.BlockChain.GetBlockByHash(parentHash)
            }
            sds.lastBlock = currentBlock
            parentBlock := sds.lastBlock.replace(currentBlock, sds.BlockChain)
            if parentBlock == nil {
                log.Error(fmt.Sprintf("Parent block is nil, skipping this block (%d)", currentBlock.Number()))
                continue
@ -318,6 +411,11 @@ func (sds *Service) Start(*p2p.Server) error {
    chainEventCh := make(chan core.ChainEvent, chainEventChanSize)
    go sds.Loop(chainEventCh)

    if sds.enableWriteLoop {
        log.Info("Starting statediff DB write loop", writeLoopParams)
        go sds.WriteLoop(make(chan core.ChainEvent, chainEventChanSize))
    }

    return nil
}

@ -405,3 +503,52 @@ func (sds *Service) StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- Cod
        }
    }()
}

// WriteStateDiffAt writes a state diff at the specified block height directly to the database
// This operation cannot be performed back past the point of db pruning; it requires an archival node
// for historical data
func (sds *Service) WriteStateDiffAt(blockNumber uint64, params Params) error {
    log.Info(fmt.Sprintf("writing state diff at block %d", blockNumber))
    currentBlock := sds.BlockChain.GetBlockByNumber(blockNumber)
    parentRoot := common.Hash{}
    if blockNumber != 0 {
        parentBlock := sds.BlockChain.GetBlockByHash(currentBlock.ParentHash())
        parentRoot = parentBlock.Root()
    }
    return sds.writeStateDiff(currentBlock, parentRoot, params)
}

// Writes a state diff from the current block, parent state root, and provided params
func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, params Params) error {
    var totalDifficulty *big.Int
    var receipts types.Receipts
    if params.IncludeTD {
        totalDifficulty = sds.BlockChain.GetTdByHash(block.Hash())
    }
    if params.IncludeReceipts {
        receipts = sds.BlockChain.GetReceiptsByHash(block.Hash())
    }
    tx, err := sds.indexer.PushBlock(block, receipts, totalDifficulty)
    if err != nil {
        return err
    }
    // defer handling of commit/rollback for any return case
    defer tx.Close()
    output := func(node StateNode) error {
        return sds.indexer.PushStateNode(tx, node)
    }
    codeOutput := func(c CodeAndCodeHash) error {
        return sds.indexer.PushCodeAndCodeHash(tx, c)
    }
    err = sds.Builder.WriteStateDiffObject(StateRoots{
        NewStateRoot: block.Root(),
        OldStateRoot: parentRoot,
    }, params, output, codeOutput)

    // allow dereferencing of parent, keep current locked as it should be the next parent
    sds.BlockChain.UnlockTrie(parentRoot)
    if err != nil {
        return err
    }
    return nil
}
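As a usage illustration of the new write path (not code from the PR), a caller holding the service could request a one-off diff write for a historical block with explicit Params. The block number and parameter choices below are arbitrary examples; the Params fields come from writeLoopParams above.

package main

import (
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/statediff"
)

// writeHistoricalDiff is an illustrative caller of WriteStateDiffAt.
func writeHistoricalDiff(service *statediff.Service) {
    params := statediff.Params{
        IncludeBlock:    true,
        IncludeReceipts: true,
        IncludeTD:       true,
    }
    if err := service.WriteStateDiffAt(1000000, params); err != nil {
        log.Error("statediff write failed", "block", 1000000, "err", err)
    }
}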
@ -29,7 +29,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/statediff"
|
||||
statediff "github.com/ethereum/go-ethereum/statediff"
|
||||
"github.com/ethereum/go-ethereum/statediff/testhelpers/mocks"
|
||||
)
|
||||
|
||||
|
||||
@ -19,12 +19,14 @@ package mocks
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/statediff"
|
||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||
)
|
||||
|
||||
// Builder is a mock state diff builder
|
||||
type Builder struct {
|
||||
Args statediff.Args
|
||||
Params statediff.Params
|
||||
StateRoots statediff.StateRoots
|
||||
stateDiff statediff.StateObject
|
||||
block *types.Block
|
||||
stateTrie statediff.StateObject
|
||||
@ -39,6 +41,14 @@ func (builder *Builder) BuildStateDiffObject(args statediff.Args, params statedi
|
||||
return builder.stateDiff, builder.builderError
|
||||
}
|
||||
|
||||
// BuildStateDiffObject mock method
|
||||
func (builder *Builder) WriteStateDiffObject(args statediff.StateRoots, params statediff.Params, output sdtypes.StateNodeSink, codeOutput sdtypes.CodeSink) error {
|
||||
builder.StateRoots = args
|
||||
builder.Params = params
|
||||
|
||||
return builder.builderError
|
||||
}
|
||||
|
||||
// BuildStateTrieObject mock method
|
||||
func (builder *Builder) BuildStateTrieObject(block *types.Block) (statediff.StateObject, error) {
|
||||
builder.block = block
|
||||
|
||||
@ -32,6 +32,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/statediff"
|
||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||
)
|
||||
|
||||
// MockStateDiffService is a mock state diff service
|
||||
@ -172,6 +173,37 @@ func (sds *MockStateDiffService) newPayload(stateObject []byte, block *types.Blo
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
// WriteStateDiffAt mock method
|
||||
func (sds *MockStateDiffService) WriteStateDiffAt(blockNumber uint64, params statediff.Params) error {
|
||||
// TODO: something useful here
|
||||
return nil
|
||||
}
|
||||
|
||||
// Loop mock method
|
||||
func (sds *MockStateDiffService) WriteLoop(chan core.ChainEvent) {
|
||||
//loop through chain events until no more
|
||||
for {
|
||||
select {
|
||||
case block := <-sds.BlockChan:
|
||||
currentBlock := block
|
||||
parentBlock := <-sds.ParentBlockChan
|
||||
parentHash := parentBlock.Hash()
|
||||
if parentBlock == nil {
|
||||
log.Error("Parent block is nil, skipping this block",
|
||||
"parent block hash", parentHash.String(),
|
||||
"current block number", currentBlock.Number())
|
||||
continue
|
||||
}
|
||||
// TODO:
|
||||
// sds.writeStateDiff(currentBlock, parentBlock.Root(), statediff.Params{})
|
||||
case <-sds.QuitChan:
|
||||
log.Debug("Quitting the statediff block channel")
|
||||
sds.close()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StateTrieAt mock method
|
||||
func (sds *MockStateDiffService) StateTrieAt(blockNumber uint64, params statediff.Params) (*statediff.Payload, error) {
|
||||
currentBlock := sds.BlockChain.GetBlockByNumber(blockNumber)
|
||||
@ -276,7 +308,7 @@ func (sds *MockStateDiffService) closeType(subType common.Hash) {
|
||||
delete(sds.SubscriptionTypes, subType)
|
||||
}
|
||||
|
||||
func (sds *MockStateDiffService) StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- statediff.CodeAndCodeHash, quitChan chan<- bool) {
|
||||
func (sds *MockStateDiffService) StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- sdtypes.CodeAndCodeHash, quitChan chan<- bool) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
|
||||
@ -30,10 +30,11 @@ import (
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/statediff"
	"github.com/ethereum/go-ethereum/statediff/testhelpers"
	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)

var (
	emptyStorage = make([]statediff.StorageNode, 0)
	emptyStorage = make([]sdtypes.StorageNode, 0)
	block0, block1 *types.Block
	minerLeafKey = testhelpers.AddressToLeafKey(common.HexToAddress("0x0"))
	account1, _ = rlp.EncodeToBytes(state.Account{
@ -94,24 +95,24 @@ func testSubscriptionAPI(t *testing.T) {
	expectedStateDiff := statediff.StateObject{
		BlockNumber: block1.Number(),
		BlockHash: block1.Hash(),
		Nodes: []statediff.StateNode{
		Nodes: []sdtypes.StateNode{
			{
				Path: []byte{'\x05'},
				NodeType: statediff.Leaf,
				NodeType: sdtypes.Leaf,
				LeafKey: minerLeafKey,
				NodeValue: minerAccountLeafNode,
				StorageNodes: emptyStorage,
			},
			{
				Path: []byte{'\x0e'},
				NodeType: statediff.Leaf,
				NodeType: sdtypes.Leaf,
				LeafKey: testhelpers.Account1LeafKey,
				NodeValue: account1LeafNode,
				StorageNodes: emptyStorage,
			},
			{
				Path: []byte{'\x00'},
				NodeType: statediff.Leaf,
				NodeType: sdtypes.Leaf,
				LeafKey: testhelpers.BankLeafKey,
				NodeValue: bankAccountLeafNode,
				StorageNodes: emptyStorage,
@ -178,24 +179,24 @@ func testHTTPAPI(t *testing.T) {
	expectedStateDiff := statediff.StateObject{
		BlockNumber: block1.Number(),
		BlockHash: block1.Hash(),
		Nodes: []statediff.StateNode{
		Nodes: []sdtypes.StateNode{
			{
				Path: []byte{'\x05'},
				NodeType: statediff.Leaf,
				NodeType: sdtypes.Leaf,
				LeafKey: minerLeafKey,
				NodeValue: minerAccountLeafNode,
				StorageNodes: emptyStorage,
			},
			{
				Path: []byte{'\x0e'},
				NodeType: statediff.Leaf,
				NodeType: sdtypes.Leaf,
				LeafKey: testhelpers.Account1LeafKey,
				NodeValue: account1LeafNode,
				StorageNodes: emptyStorage,
			},
			{
				Path: []byte{'\x00'},
				NodeType: statediff.Leaf,
				NodeType: sdtypes.Leaf,
				LeafKey: testhelpers.BankLeafKey,
				NodeValue: bankAccountLeafNode,
				StorageNodes: emptyStorage,

@ -23,9 +23,9 @@ import (
	"encoding/json"
	"math/big"

	"github.com/ethereum/go-ethereum/core/state"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/statediff/types"
)

// Subscription struct holds our subscription channels
@ -52,6 +52,10 @@ type Args struct {
	BlockNumber *big.Int
}

type StateRoots struct {
	OldStateRoot, NewStateRoot common.Hash
}

// Payload packages the data to send to statediff subscriptions
type Payload struct {
	BlockRlp []byte `json:"blockRlp"`
@ -83,34 +87,10 @@ func (sd *Payload) Encode() ([]byte, error) {

// StateObject is the final output structure from the builder
type StateObject struct {
	BlockNumber *big.Int `json:"blockNumber" gencodec:"required"`
	BlockHash common.Hash `json:"blockHash" gencodec:"required"`
	Nodes []StateNode `json:"nodes" gencodec:"required"`
	CodeAndCodeHashes []CodeAndCodeHash `json:"codeMapping"`
}

// CodeAndCodeHash struct for holding codehash => code mappings
// we can't use an actual map because they are not rlp serializable
type CodeAndCodeHash struct {
	Hash common.Hash `json:"codeHash"`
	Code []byte `json:"code"`
}

// StateNode holds the data for a single state diff node
type StateNode struct {
	NodeType NodeType `json:"nodeType" gencodec:"required"`
	Path []byte `json:"path" gencodec:"required"`
	NodeValue []byte `json:"value" gencodec:"required"`
	StorageNodes []StorageNode `json:"storage"`
	LeafKey []byte `json:"leafKey"`
}

// StorageNode holds the data for a single storage diff node
type StorageNode struct {
	NodeType NodeType `json:"nodeType" gencodec:"required"`
	Path []byte `json:"path" gencodec:"required"`
	NodeValue []byte `json:"value" gencodec:"required"`
	LeafKey []byte `json:"leafKey"`
	BlockNumber *big.Int `json:"blockNumber" gencodec:"required"`
	BlockHash common.Hash `json:"blockHash" gencodec:"required"`
	Nodes []types.StateNode `json:"nodes" gencodec:"required"`
	CodeAndCodeHashes []types.CodeAndCodeHash `json:"codeMapping"`
}

// AccountMap is a mapping of hex encoded path => account wrapper
@ -119,19 +99,8 @@ type AccountMap map[string]accountWrapper
// accountWrapper is used to temporary associate the unpacked node with its raw values
type accountWrapper struct {
	Account *state.Account
	NodeType NodeType
	NodeType types.NodeType
	Path []byte
	NodeValue []byte
	LeafKey []byte
}

// NodeType for explicitly setting type of node
type NodeType string

const (
	Unknown NodeType = "Unknown"
	Leaf NodeType = "Leaf"
	Extension NodeType = "Extension"
	Branch NodeType = "Branch"
	Removed NodeType = "Removed" // used to represent pathes which have been emptied
)

statediff/types/types.go (new file, 61 lines)
@ -0,0 +1,61 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package types holds the shared type declarations used by the statediff
// builder and service to describe state diff nodes.
package types

import "github.com/ethereum/go-ethereum/common"

// NodeType for explicitly setting type of node
type NodeType string

const (
	Unknown   NodeType = "Unknown"
	Leaf      NodeType = "Leaf"
	Extension NodeType = "Extension"
	Branch    NodeType = "Branch"
	Removed   NodeType = "Removed" // used to represent paths which have been emptied
)

// StateNode holds the data for a single state diff node
type StateNode struct {
	NodeType     NodeType      `json:"nodeType" gencodec:"required"`
	Path         []byte        `json:"path" gencodec:"required"`
	NodeValue    []byte        `json:"value" gencodec:"required"`
	StorageNodes []StorageNode `json:"storage"`
	LeafKey      []byte        `json:"leafKey"`
}

// StorageNode holds the data for a single storage diff node
type StorageNode struct {
	NodeType  NodeType `json:"nodeType" gencodec:"required"`
	Path      []byte   `json:"path" gencodec:"required"`
	NodeValue []byte   `json:"value" gencodec:"required"`
	LeafKey   []byte   `json:"leafKey"`
}

// CodeAndCodeHash struct for holding codehash => code mappings
// we can't use an actual map because they are not RLP serializable
type CodeAndCodeHash struct {
	Hash common.Hash `json:"codeHash"`
	Code []byte      `json:"code"`
}

type StateNodeSink func(StateNode) error
type StorageNodeSink func(StorageNode) error
type CodeSink func(CodeAndCodeHash) error

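A minimal sketch of how the new sink types might be consumed, assuming only the declarations added in statediff/types/types.go above; the collector helper and the hand-built leaf node are illustrative, not part of the diff:

package main

import (
	"fmt"

	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)

// collectStateNodes returns a StateNodeSink that appends every node it
// receives to the provided slice, which is one way a caller could gather a
// builder's streamed output into memory.
func collectStateNodes(dst *[]sdtypes.StateNode) sdtypes.StateNodeSink {
	return func(node sdtypes.StateNode) error {
		*dst = append(*dst, node)
		return nil
	}
}

func main() {
	var nodes []sdtypes.StateNode
	sink := collectStateNodes(&nodes)

	// Feed the sink a hand-built leaf node, as a builder would.
	if err := sink(sdtypes.StateNode{
		NodeType: sdtypes.Leaf,
		Path:     []byte{0x05},
		LeafKey:  []byte{0xaa},
	}); err != nil {
		fmt.Println("sink error:", err)
	}
	fmt.Println("collected", len(nodes), "node(s)")
}
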
vendor/github.com/onsi/gomega/CHANGELOG.md (generated, vendored): 125 lines deleted, @ -1,125 +0,0 @@
vendor/github.com/onsi/gomega/CONTRIBUTING.md (generated, vendored): 14 lines deleted, @ -1,14 +0,0 @@
vendor/github.com/onsi/gomega/LICENSE (generated, vendored): 20 lines deleted, @ -1,20 +0,0 @@
vendor/github.com/onsi/gomega/Makefile (generated, vendored): 6 lines deleted, @ -1,6 +0,0 @@
vendor/github.com/onsi/gomega/README.md (generated, vendored): 21 lines deleted, @ -1,21 +0,0 @@
vendor/github.com/onsi/gomega/RELEASING.md (generated, vendored): 12 lines deleted, @ -1,12 +0,0 @@
vendor/github.com/onsi/gomega/format/format.go (generated, vendored): 382 lines deleted, @ -1,382 +0,0 @@
vendor/github.com/onsi/gomega/go.mod (generated, vendored): 15 lines deleted, @ -1,15 +0,0 @@
vendor/github.com/onsi/gomega/go.sum (generated, vendored): 24 lines deleted, @ -1,24 +0,0 @@
vendor/github.com/onsi/gomega/gomega_dsl.go (generated, vendored): 429 lines deleted, @ -1,429 +0,0 @@
vendor/github.com/onsi/gomega/internal/assertion/assertion.go (generated, vendored): 105 lines deleted, @ -1,105 +0,0 @@
194  vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go  generated vendored
@ -1,194 +0,0 @@
This file was deleted.
25  vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go  generated vendored
@ -1,25 +0,0 @@
This file was deleted.
60  vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go  generated vendored
@ -1,60 +0,0 @@
This file was deleted.
427  vendor/github.com/onsi/gomega/matchers.go  generated vendored
@ -1,427 +0,0 @@
This file was deleted.
63  vendor/github.com/onsi/gomega/matchers/and.go  generated vendored
@ -1,63 +0,0 @@
This file was deleted.
35  vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go  generated vendored
@ -1,35 +0,0 @@
This file was deleted.
14  vendor/github.com/onsi/gomega/matchers/attributes_slice.go  generated vendored
@ -1,14 +0,0 @@
This file was deleted.
54  vendor/github.com/onsi/gomega/matchers/be_a_directory.go  generated vendored
@ -1,54 +0,0 @@
This file was deleted.
54  vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go  generated vendored
@ -1,54 +0,0 @@
This file was deleted.
38  vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go  generated vendored
@ -1,38 +0,0 @@
This file was deleted.
46  vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go  generated vendored
@ -1,46 +0,0 @@
This file was deleted.
27  vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go  generated vendored
@ -1,27 +0,0 @@
This file was deleted.
34  vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go  generated vendored
@ -1,34 +0,0 @@
This file was deleted.
26  vendor/github.com/onsi/gomega/matchers/be_false_matcher.go  generated vendored
@ -1,26 +0,0 @@
This file was deleted.
37  vendor/github.com/onsi/gomega/matchers/be_identical_to.go  generated vendored
@ -1,37 +0,0 @@
This file was deleted.
18  vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go  generated vendored
@ -1,18 +0,0 @@
This file was deleted.
132  vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go  generated vendored
@ -1,132 +0,0 @@
This file was deleted.
71  vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go  generated vendored
@ -1,71 +0,0 @@
This file was deleted.
66  vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go  generated vendored
@ -1,66 +0,0 @@
This file was deleted.
vendor/github.com/onsi/gomega/matchers/be_true_matcher.go (generated, vendored; 26 lines deleted)
@ -1,26 +0,0 @@
package matchers

import (
	"fmt"

	"github.com/onsi/gomega/format"
)

type BeTrueMatcher struct {
}

func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) {
	if !isBool(actual) {
		return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1))
	}

	return actual.(bool), nil
}

func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to be true")
}

func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to be true")
}
vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go (generated, vendored; 28 lines deleted)
@ -1,28 +0,0 @@
package matchers

import (
	"reflect"

	"github.com/onsi/gomega/format"
)

type BeZeroMatcher struct {
}

func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) {
	if actual == nil {
		return true, nil
	}
	zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface()

	return reflect.DeepEqual(zeroValue, actual), nil

}

func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to be zero-valued")
}

func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to be zero-valued")
}
vendor/github.com/onsi/gomega/matchers/consist_of.go (generated, vendored; 80 lines deleted)
@ -1,80 +0,0 @@
package matchers

import (
	"fmt"
	"reflect"

	"github.com/onsi/gomega/format"
	"github.com/onsi/gomega/matchers/support/goraph/bipartitegraph"
)

type ConsistOfMatcher struct {
	Elements []interface{}
}

func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) {
	if !isArrayOrSlice(actual) && !isMap(actual) {
		return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
	}

	elements := matcher.Elements
	if len(matcher.Elements) == 1 && isArrayOrSlice(matcher.Elements[0]) {
		elements = []interface{}{}
		value := reflect.ValueOf(matcher.Elements[0])
		for i := 0; i < value.Len(); i++ {
			elements = append(elements, value.Index(i).Interface())
		}
	}

	matchers := []interface{}{}
	for _, element := range elements {
		matcher, isMatcher := element.(omegaMatcher)
		if !isMatcher {
			matcher = &EqualMatcher{Expected: element}
		}
		matchers = append(matchers, matcher)
	}

	values := matcher.valuesOf(actual)

	if len(values) != len(matchers) {
		return false, nil
	}

	neighbours := func(v, m interface{}) (bool, error) {
		match, err := m.(omegaMatcher).Match(v)
		return match && err == nil, nil
	}

	bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours)
	if err != nil {
		return false, err
	}

	return len(bipartiteGraph.LargestMatching()) == len(values), nil
}

func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} {
	value := reflect.ValueOf(actual)
	values := []interface{}{}
	if isMap(actual) {
		keys := value.MapKeys()
		for i := 0; i < value.Len(); i++ {
			values = append(values, value.MapIndex(keys[i]).Interface())
		}
	} else {
		for i := 0; i < value.Len(); i++ {
			values = append(values, value.Index(i).Interface())
		}
	}

	return values
}

func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to consist of", matcher.Elements)
}

func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to consist of", matcher.Elements)
}
vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go (generated, vendored; 56 lines deleted)
@ -1,56 +0,0 @@
package matchers

import (
	"fmt"
	"reflect"

	"github.com/onsi/gomega/format"
)

type ContainElementMatcher struct {
	Element interface{}
}

func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) {
	if !isArrayOrSlice(actual) && !isMap(actual) {
		return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
	}

	elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
	if !elementIsMatcher {
		elemMatcher = &EqualMatcher{Expected: matcher.Element}
	}

	value := reflect.ValueOf(actual)
	var keys []reflect.Value
	if isMap(actual) {
		keys = value.MapKeys()
	}
	var lastError error
	for i := 0; i < value.Len(); i++ {
		var success bool
		var err error
		if isMap(actual) {
			success, err = elemMatcher.Match(value.MapIndex(keys[i]).Interface())
		} else {
			success, err = elemMatcher.Match(value.Index(i).Interface())
		}
		if err != nil {
			lastError = err
			continue
		}
		if success {
			return true, nil
		}
	}

	return false, lastError
}

func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to contain element matching", matcher.Element)
}

func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to contain element matching", matcher.Element)
}
vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go (generated, vendored; 38 lines deleted)
@ -1,38 +0,0 @@
package matchers

import (
	"fmt"
	"strings"

	"github.com/onsi/gomega/format"
)

type ContainSubstringMatcher struct {
	Substr string
	Args   []interface{}
}

func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) {
	actualString, ok := toString(actual)
	if !ok {
		return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
	}

	return strings.Contains(actualString, matcher.stringToMatch()), nil
}

func (matcher *ContainSubstringMatcher) stringToMatch() string {
	stringToMatch := matcher.Substr
	if len(matcher.Args) > 0 {
		stringToMatch = fmt.Sprintf(matcher.Substr, matcher.Args...)
	}
	return stringToMatch
}

func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to contain substring", matcher.stringToMatch())
}

func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to contain substring", matcher.stringToMatch())
}
vendor/github.com/onsi/gomega/matchers/equal_matcher.go (generated, vendored; 42 lines deleted)
@ -1,42 +0,0 @@
package matchers

import (
	"bytes"
	"fmt"
	"reflect"

	"github.com/onsi/gomega/format"
)

type EqualMatcher struct {
	Expected interface{}
}

func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) {
	if actual == nil && matcher.Expected == nil {
		return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
	}
	// Shortcut for byte slices.
	// Comparing long byte slices with reflect.DeepEqual is very slow,
	// so use bytes.Equal if actual and expected are both byte slices.
	if actualByteSlice, ok := actual.([]byte); ok {
		if expectedByteSlice, ok := matcher.Expected.([]byte); ok {
			return bytes.Equal(actualByteSlice, expectedByteSlice), nil
		}
	}
	return reflect.DeepEqual(actual, matcher.Expected), nil
}

func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) {
	actualString, actualOK := actual.(string)
	expectedString, expectedOK := matcher.Expected.(string)
	if actualOK && expectedOK {
		return format.MessageWithDiff(actualString, "to equal", expectedString)
	}

	return format.Message(actual, "to equal", matcher.Expected)
}

func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to equal", matcher.Expected)
}
vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go (generated, vendored; 28 lines deleted)
@ -1,28 +0,0 @@
package matchers

import (
	"fmt"

	"github.com/onsi/gomega/format"
)

type HaveCapMatcher struct {
	Count int
}

func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err error) {
	length, ok := capOf(actual)
	if !ok {
		return false, fmt.Errorf("HaveCap matcher expects a array/channel/slice. Got:\n%s", format.Object(actual, 1))
	}

	return length == matcher.Count, nil
}

func (matcher *HaveCapMatcher) FailureMessage(actual interface{}) (message string) {
	return fmt.Sprintf("Expected\n%s\nto have capacity %d", format.Object(actual, 1), matcher.Count)
}

func (matcher *HaveCapMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return fmt.Sprintf("Expected\n%s\nnot to have capacity %d", format.Object(actual, 1), matcher.Count)
}
vendor/github.com/onsi/gomega/matchers/have_key_matcher.go (generated, vendored; 54 lines deleted)
@ -1,54 +0,0 @@
package matchers

import (
	"fmt"
	"reflect"

	"github.com/onsi/gomega/format"
)

type HaveKeyMatcher struct {
	Key interface{}
}

func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) {
	if !isMap(actual) {
		return false, fmt.Errorf("HaveKey matcher expects a map. Got:%s", format.Object(actual, 1))
	}

	keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher)
	if !keyIsMatcher {
		keyMatcher = &EqualMatcher{Expected: matcher.Key}
	}

	keys := reflect.ValueOf(actual).MapKeys()
	for i := 0; i < len(keys); i++ {
		success, err := keyMatcher.Match(keys[i].Interface())
		if err != nil {
			return false, fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error())
		}
		if success {
			return true, nil
		}
	}

	return false, nil
}

func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) {
	switch matcher.Key.(type) {
	case omegaMatcher:
		return format.Message(actual, "to have key matching", matcher.Key)
	default:
		return format.Message(actual, "to have key", matcher.Key)
	}
}

func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	switch matcher.Key.(type) {
	case omegaMatcher:
		return format.Message(actual, "not to have key matching", matcher.Key)
	default:
		return format.Message(actual, "not to have key", matcher.Key)
	}
}
vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go (generated, vendored; 74 lines deleted)
@ -1,74 +0,0 @@
package matchers

import (
	"fmt"
	"reflect"

	"github.com/onsi/gomega/format"
)

type HaveKeyWithValueMatcher struct {
	Key   interface{}
	Value interface{}
}

func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) {
	if !isMap(actual) {
		return false, fmt.Errorf("HaveKeyWithValue matcher expects a map. Got:%s", format.Object(actual, 1))
	}

	keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher)
	if !keyIsMatcher {
		keyMatcher = &EqualMatcher{Expected: matcher.Key}
	}

	valueMatcher, valueIsMatcher := matcher.Value.(omegaMatcher)
	if !valueIsMatcher {
		valueMatcher = &EqualMatcher{Expected: matcher.Value}
	}

	keys := reflect.ValueOf(actual).MapKeys()
	for i := 0; i < len(keys); i++ {
		success, err := keyMatcher.Match(keys[i].Interface())
		if err != nil {
			return false, fmt.Errorf("HaveKeyWithValue's key matcher failed with:\n%s%s", format.Indent, err.Error())
		}
		if success {
			actualValue := reflect.ValueOf(actual).MapIndex(keys[i])
			success, err := valueMatcher.Match(actualValue.Interface())
			if err != nil {
				return false, fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error())
			}
			return success, nil
		}
	}

	return false, nil
}

func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) {
	str := "to have {key: value}"
	if _, ok := matcher.Key.(omegaMatcher); ok {
		str += " matching"
	} else if _, ok := matcher.Value.(omegaMatcher); ok {
		str += " matching"
	}

	expect := make(map[interface{}]interface{}, 1)
	expect[matcher.Key] = matcher.Value
	return format.Message(actual, str, expect)
}

func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	kStr := "not to have key"
	if _, ok := matcher.Key.(omegaMatcher); ok {
		kStr = "not to have key matching"
	}

	vStr := "or that key's value not be"
	if _, ok := matcher.Value.(omegaMatcher); ok {
		vStr = "or to have that key's value not matching"
	}

	return format.Message(actual, kStr, matcher.Key, vStr, matcher.Value)
}
vendor/github.com/onsi/gomega/matchers/have_len_matcher.go (generated, vendored; 28 lines deleted)
@ -1,28 +0,0 @@
package matchers

import (
	"fmt"

	"github.com/onsi/gomega/format"
)

type HaveLenMatcher struct {
	Count int
}

func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) {
	length, ok := lengthOf(actual)
	if !ok {
		return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1))
	}

	return length == matcher.Count, nil
}

func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) {
	return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count)
}

func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count)
}
vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go (generated, vendored; 33 lines deleted)
@ -1,33 +0,0 @@
package matchers

import (
	"fmt"

	"github.com/onsi/gomega/format"
)

type HaveOccurredMatcher struct {
}

func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) {
	// is purely nil?
	if actual == nil {
		return false, nil
	}

	// must be an 'error' type
	if !isError(actual) {
		return false, fmt.Errorf("Expected an error-type. Got:\n%s", format.Object(actual, 1))
	}

	// must be non-nil (or a pointer to a non-nil)
	return !isNil(actual), nil
}

func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) {
	return fmt.Sprintf("Expected an error to have occurred. Got:\n%s", format.Object(actual, 1))
}

func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return fmt.Sprintf("Unexpected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "occurred")
}
vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go (generated, vendored; 36 lines deleted)
@ -1,36 +0,0 @@
package matchers

import (
	"fmt"

	"github.com/onsi/gomega/format"
)

type HavePrefixMatcher struct {
	Prefix string
	Args   []interface{}
}

func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) {
	actualString, ok := toString(actual)
	if !ok {
		return false, fmt.Errorf("HavePrefix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
	}
	prefix := matcher.prefix()
	return len(actualString) >= len(prefix) && actualString[0:len(prefix)] == prefix, nil
}

func (matcher *HavePrefixMatcher) prefix() string {
	if len(matcher.Args) > 0 {
		return fmt.Sprintf(matcher.Prefix, matcher.Args...)
	}
	return matcher.Prefix
}

func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to have prefix", matcher.prefix())
}

func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to have prefix", matcher.prefix())
}
vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go (generated, vendored; 36 lines deleted)
@ -1,36 +0,0 @@
package matchers

import (
	"fmt"

	"github.com/onsi/gomega/format"
)

type HaveSuffixMatcher struct {
	Suffix string
	Args   []interface{}
}

func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) {
	actualString, ok := toString(actual)
	if !ok {
		return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
	}
	suffix := matcher.suffix()
	return len(actualString) >= len(suffix) && actualString[len(actualString)-len(suffix):] == suffix, nil
}

func (matcher *HaveSuffixMatcher) suffix() string {
	if len(matcher.Args) > 0 {
		return fmt.Sprintf(matcher.Suffix, matcher.Args...)
	}
	return matcher.Suffix
}

func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to have suffix", matcher.suffix())
}

func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to have suffix", matcher.suffix())
}
vendor/github.com/onsi/gomega/matchers/match_error_matcher.go (generated, vendored; 51 lines deleted)
@ -1,51 +0,0 @@
package matchers

import (
	"fmt"
	"reflect"

	"github.com/onsi/gomega/format"
)

type MatchErrorMatcher struct {
	Expected interface{}
}

func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err error) {
	if isNil(actual) {
		return false, fmt.Errorf("Expected an error, got nil")
	}

	if !isError(actual) {
		return false, fmt.Errorf("Expected an error. Got:\n%s", format.Object(actual, 1))
	}

	actualErr := actual.(error)

	if isError(matcher.Expected) {
		return reflect.DeepEqual(actualErr, matcher.Expected), nil
	}

	if isString(matcher.Expected) {
		return actualErr.Error() == matcher.Expected, nil
	}

	var subMatcher omegaMatcher
	var hasSubMatcher bool
	if matcher.Expected != nil {
		subMatcher, hasSubMatcher = (matcher.Expected).(omegaMatcher)
		if hasSubMatcher {
			return subMatcher.Match(actualErr.Error())
		}
	}

	return false, fmt.Errorf("MatchError must be passed an error, string, or Matcher that can match on strings. Got:\n%s", format.Object(matcher.Expected, 1))
}

func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to match error", matcher.Expected)
}

func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to match error", matcher.Expected)
}
vendor/github.com/onsi/gomega/matchers/match_json_matcher.go (generated, vendored; 65 lines deleted)
@ -1,65 +0,0 @@
package matchers

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/onsi/gomega/format"
)

type MatchJSONMatcher struct {
	JSONToMatch      interface{}
	firstFailurePath []interface{}
}

func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) {
	actualString, expectedString, err := matcher.prettyPrint(actual)
	if err != nil {
		return false, err
	}

	var aval interface{}
	var eval interface{}

	// this is guarded by prettyPrint
	json.Unmarshal([]byte(actualString), &aval)
	json.Unmarshal([]byte(expectedString), &eval)
	var equal bool
	equal, matcher.firstFailurePath = deepEqual(aval, eval)
	return equal, nil
}

func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) {
	actualString, expectedString, _ := matcher.prettyPrint(actual)
	return formattedMessage(format.Message(actualString, "to match JSON of", expectedString), matcher.firstFailurePath)
}

func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	actualString, expectedString, _ := matcher.prettyPrint(actual)
	return formattedMessage(format.Message(actualString, "not to match JSON of", expectedString), matcher.firstFailurePath)
}

func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
	actualString, ok := toString(actual)
	if !ok {
		return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1))
	}
	expectedString, ok := toString(matcher.JSONToMatch)
	if !ok {
		return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got expected:\n%s", format.Object(matcher.JSONToMatch, 1))
	}

	abuf := new(bytes.Buffer)
	ebuf := new(bytes.Buffer)

	if err := json.Indent(abuf, []byte(actualString), "", "  "); err != nil {
		return "", "", fmt.Errorf("Actual '%s' should be valid JSON, but it is not.\nUnderlying error:%s", actualString, err)
	}

	if err := json.Indent(ebuf, []byte(expectedString), "", "  "); err != nil {
		return "", "", fmt.Errorf("Expected '%s' should be valid JSON, but it is not.\nUnderlying error:%s", expectedString, err)
	}

	return abuf.String(), ebuf.String(), nil
}
vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go (generated, vendored; 43 lines deleted)
@ -1,43 +0,0 @@
package matchers

import (
	"fmt"
	"regexp"

	"github.com/onsi/gomega/format"
)

type MatchRegexpMatcher struct {
	Regexp string
	Args   []interface{}
}

func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) {
	actualString, ok := toString(actual)
	if !ok {
		return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1))
	}

	match, err := regexp.Match(matcher.regexp(), []byte(actualString))
	if err != nil {
		return false, fmt.Errorf("RegExp match failed to compile with error:\n\t%s", err.Error())
	}

	return match, nil
}

func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to match regular expression", matcher.regexp())
}

func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to match regular expression", matcher.regexp())
}

func (matcher *MatchRegexpMatcher) regexp() string {
	re := matcher.Regexp
	if len(matcher.Args) > 0 {
		re = fmt.Sprintf(matcher.Regexp, matcher.Args...)
	}
	return re
}
vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go (generated, vendored; 134 lines deleted)
@ -1,134 +0,0 @@
package matchers

import (
	"bytes"
	"encoding/xml"
	"errors"
	"fmt"
	"io"
	"reflect"
	"sort"
	"strings"

	"github.com/onsi/gomega/format"
	"golang.org/x/net/html/charset"
)

type MatchXMLMatcher struct {
	XMLToMatch interface{}
}

func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err error) {
	actualString, expectedString, err := matcher.formattedPrint(actual)
	if err != nil {
		return false, err
	}

	aval, err := parseXmlContent(actualString)
	if err != nil {
		return false, fmt.Errorf("Actual '%s' should be valid XML, but it is not.\nUnderlying error:%s", actualString, err)
	}

	eval, err := parseXmlContent(expectedString)
	if err != nil {
		return false, fmt.Errorf("Expected '%s' should be valid XML, but it is not.\nUnderlying error:%s", expectedString, err)
	}

	return reflect.DeepEqual(aval, eval), nil
}

func (matcher *MatchXMLMatcher) FailureMessage(actual interface{}) (message string) {
	actualString, expectedString, _ := matcher.formattedPrint(actual)
	return fmt.Sprintf("Expected\n%s\nto match XML of\n%s", actualString, expectedString)
}

func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	actualString, expectedString, _ := matcher.formattedPrint(actual)
	return fmt.Sprintf("Expected\n%s\nnot to match XML of\n%s", actualString, expectedString)
}

func (matcher *MatchXMLMatcher) formattedPrint(actual interface{}) (actualString, expectedString string, err error) {
	var ok bool
	actualString, ok = toString(actual)
	if !ok {
		return "", "", fmt.Errorf("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1))
	}
	expectedString, ok = toString(matcher.XMLToMatch)
	if !ok {
		return "", "", fmt.Errorf("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n%s", format.Object(matcher.XMLToMatch, 1))
	}
	return actualString, expectedString, nil
}

func parseXmlContent(content string) (*xmlNode, error) {
	allNodes := []*xmlNode{}

	dec := newXmlDecoder(strings.NewReader(content))
	for {
		tok, err := dec.Token()
		if err != nil {
			if err == io.EOF {
				break
			}
			return nil, fmt.Errorf("failed to decode next token: %v", err)
		}

		lastNodeIndex := len(allNodes) - 1
		var lastNode *xmlNode
		if len(allNodes) > 0 {
			lastNode = allNodes[lastNodeIndex]
		} else {
			lastNode = &xmlNode{}
		}

		switch tok := tok.(type) {
		case xml.StartElement:
			attrs := attributesSlice(tok.Attr)
			sort.Sort(attrs)
			allNodes = append(allNodes, &xmlNode{XMLName: tok.Name, XMLAttr: tok.Attr})
		case xml.EndElement:
			if len(allNodes) > 1 {
				allNodes[lastNodeIndex-1].Nodes = append(allNodes[lastNodeIndex-1].Nodes, lastNode)
				allNodes = allNodes[:lastNodeIndex]
			}
		case xml.CharData:
			lastNode.Content = append(lastNode.Content, tok.Copy()...)
		case xml.Comment:
			lastNode.Comments = append(lastNode.Comments, tok.Copy())
		case xml.ProcInst:
			lastNode.ProcInsts = append(lastNode.ProcInsts, tok.Copy())
		}
	}

	if len(allNodes) == 0 {
		return nil, errors.New("found no nodes")
	}
	firstNode := allNodes[0]
	trimParentNodesContentSpaces(firstNode)

	return firstNode, nil
}

func newXmlDecoder(reader io.Reader) *xml.Decoder {
	dec := xml.NewDecoder(reader)
	dec.CharsetReader = charset.NewReaderLabel
	return dec
}

func trimParentNodesContentSpaces(node *xmlNode) {
	if len(node.Nodes) > 0 {
		node.Content = bytes.TrimSpace(node.Content)
		for _, childNode := range node.Nodes {
			trimParentNodesContentSpaces(childNode)
		}
	}
}

type xmlNode struct {
	XMLName   xml.Name
	Comments  []xml.Comment
	ProcInsts []xml.ProcInst
	XMLAttr   []xml.Attr
	Content   []byte
	Nodes     []*xmlNode
}
vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go (generated, vendored; 76 lines deleted)
@ -1,76 +0,0 @@
package matchers

import (
	"fmt"
	"strings"

	"github.com/onsi/gomega/format"
	"gopkg.in/yaml.v2"
)

type MatchYAMLMatcher struct {
	YAMLToMatch      interface{}
	firstFailurePath []interface{}
}

func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err error) {
	actualString, expectedString, err := matcher.toStrings(actual)
	if err != nil {
		return false, err
	}

	var aval interface{}
	var eval interface{}

	if err := yaml.Unmarshal([]byte(actualString), &aval); err != nil {
		return false, fmt.Errorf("Actual '%s' should be valid YAML, but it is not.\nUnderlying error:%s", actualString, err)
	}
	if err := yaml.Unmarshal([]byte(expectedString), &eval); err != nil {
		return false, fmt.Errorf("Expected '%s' should be valid YAML, but it is not.\nUnderlying error:%s", expectedString, err)
	}

	var equal bool
	equal, matcher.firstFailurePath = deepEqual(aval, eval)
	return equal, nil
}

func (matcher *MatchYAMLMatcher) FailureMessage(actual interface{}) (message string) {
	actualString, expectedString, _ := matcher.toNormalisedStrings(actual)
	return formattedMessage(format.Message(actualString, "to match YAML of", expectedString), matcher.firstFailurePath)
}

func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	actualString, expectedString, _ := matcher.toNormalisedStrings(actual)
	return formattedMessage(format.Message(actualString, "not to match YAML of", expectedString), matcher.firstFailurePath)
}

func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
	actualString, expectedString, err := matcher.toStrings(actual)
	return normalise(actualString), normalise(expectedString), err
}

func normalise(input string) string {
	var val interface{}
	err := yaml.Unmarshal([]byte(input), &val)
	if err != nil {
		panic(err) // unreachable since Match already calls Unmarshal
	}
	output, err := yaml.Marshal(val)
	if err != nil {
		panic(err) // untested section, unreachable since we Unmarshal above
	}
	return strings.TrimSpace(string(output))
}

func (matcher *MatchYAMLMatcher) toStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
	actualString, ok := toString(actual)
	if !ok {
		return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1))
	}
	expectedString, ok := toString(matcher.YAMLToMatch)
	if !ok {
		return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got expected:\n%s", format.Object(matcher.YAMLToMatch, 1))
	}

	return actualString, expectedString, nil
}
We should change this to the Debug level
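For context, this comment is asking that the log statement it is attached to be demoted from Info to Debug, so that per-block statediff output does not flood geth's default log level. The exact call site is not visible in this excerpt, so the function and message below are purely illustrative; the sketch only shows the general shape of the change using go-ethereum's leveled log package.

package main

import "github.com/ethereum/go-ethereum/log"

// reportStateDiff is a hypothetical stand-in for the statediff code path the
// reviewer is commenting on; the real function and message may differ.
func reportStateDiff(blockNumber uint64) {
	// Before the suggested change (emitted for every synced block):
	// log.Info("writing state diff", "block height", blockNumber)

	// After the suggested change, the message only appears at debug
	// verbosity (e.g. running geth with --verbosity 4 or higher):
	log.Debug("writing state diff", "block height", blockNumber)
}

func main() {
	reportStateDiff(0)
}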