commit fbd4a5ca6a

@@ -19,6 +19,7 @@ import (
    log "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
    "github.com/spf13/viper"
    "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"

    "github.com/vulcanize/vulcanizedb/pkg/ipfs"
    "github.com/vulcanize/vulcanizedb/pkg/super_node/resync"
@@ -45,9 +46,11 @@ func rsyncCmdCommand() {
        logWithCommand.Fatal(err)
    }
    logWithCommand.Infof("resync config: %+v", rConfig)
    if rConfig.IPFSMode == shared.LocalInterface {
        if err := ipfs.InitIPFSPlugins(); err != nil {
            logWithCommand.Fatal(err)
        }
    }
    logWithCommand.Debug("initializing new resync service")
    rService, err := resync.NewResyncService(rConfig)
    if err != nil {
@@ -18,11 +18,10 @@ package cmd
import (
    "sync"

    "github.com/spf13/viper"

    "github.com/ethereum/go-ethereum/rpc"
    log "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
    "github.com/spf13/viper"

    "github.com/vulcanize/vulcanizedb/pkg/ipfs"
    "github.com/vulcanize/vulcanizedb/pkg/super_node"
@@ -60,9 +59,11 @@ func superNode() {
        logWithCommand.Fatal(err)
    }
    logWithCommand.Infof("super node config: %+v", superNodeConfig)
    if superNodeConfig.IPFSMode == shared.LocalInterface {
        if err := ipfs.InitIPFSPlugins(); err != nil {
            logWithCommand.Fatal(err)
        }
    }
    wg := &sync.WaitGroup{}
    logWithCommand.Debug("initializing new super node service")
    superNode, err := super_node.NewSuperNode(superNodeConfig)
cmd/watch.go
@@ -1,43 +0,0 @@
// Copyright © 2020 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
    "fmt"

    "github.com/spf13/cobra"
)

// watchCmd represents the watch command
var watchCmd = &cobra.Command{
    Use:   "watch",
    Short: "Watch and transform data from a chain source",
    Long: `This command allows one to configure a set of wasm functions and SQL trigger functions
that call them to watch and transform data from the specified chain source.

A watcher is composed of four parts:
1) Go execution engine- this command- which fetches raw chain data and adds it to the Postres queued ready data tables
2) TOML config file which specifies what subset of chain data to fetch and from where and contains references to the below
3) Set of WASM binaries which are loaded into Postgres and used by
4) Set of PostgreSQL trigger functions which automatically act on data as it is inserted into the queued ready data tables`,
    Run: func(cmd *cobra.Command, args []string) {
        fmt.Println("watch called")
    },
}

func init() {
    rootCmd.AddCommand(watchCmd)
}
@@ -1,25 +1,14 @@
FROM golang:alpine
FROM golang:1.12-alpine as builder

RUN apk --update --no-cache add make git g++ linux-headers
# DEBUG
RUN apk add busybox-extras

# this is probably a noob move, but I want apk from alpine for the above but need to avoid Go 1.13 below as this error still occurs https://github.com/ipfs/go-ipfs/issues/6603
FROM golang:1.12.4 as builder

# Get and build vulcanizedb
ADD . /go/src/github.com/vulcanize/vulcanizedb
WORKDIR /go/src/github.com/vulcanize/vulcanizedb
RUN GO111MODULE=on GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o vulcanizedb .

# Get and build vulcanize's go-ipfs fork
RUN go get -u -d github.com/ipfs/go-ipfs
WORKDIR /go/src/github.com/ipfs/go-ipfs
RUN git remote add vulcanize https://github.com/vulcanize/go-ipfs.git
RUN git fetch vulcanize
RUN git checkout -b pg_ipfs vulcanize/postgres_update
RUN GO111MODULE=on GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipfs ./cmd/ipfs

# Build migration tool
RUN go get -u -d github.com/pressly/goose/cmd/goose
WORKDIR /go/src/github.com/pressly/goose/cmd/goose
@@ -52,7 +41,6 @@ COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/vulcanizedb vulcani
COPY --from=builder /go/src/github.com/pressly/goose/cmd/goose/goose goose
COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/db/migrations migrations/vulcanizedb
COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/environments environments
COPY --from=builder /go/src/github.com/ipfs/go-ipfs/ipfs ipfs

EXPOSE $EXPOSE_PORT_1
EXPOSE $EXPOSE_PORT_2
@@ -29,8 +29,6 @@ services:
        CONFIG_FILE: ./environments/superNodeBTC.toml
    environment:
      VDB_COMMAND: "superNode"
      IPFS_INIT: "true"
      IPFS_PATH: "/root/.btc/.ipfs"
      DATABASE_NAME: "vulcanize_public"
      DATABASE_HOSTNAME: "db"
      DATABASE_PORT: 5432
@@ -54,8 +52,6 @@ services:
        CONFIG_FILE: ./environments/superNodeETH.toml
    environment:
      VDB_COMMAND: "superNode"
      IPFS_INIT: "true"
      IPFS_PATH: "/root/.eth/.ipfs"
      DATABASE_NAME: "vulcanize_public"
      DATABASE_HOSTNAME: "db"
      DATABASE_PORT: 5432
@@ -31,26 +31,6 @@ if [ $rv != 0 ]; then
  exit 1
fi

# Export our database variables so that the IPFS Postgres plugin can use them
export IPFS_PGHOST=$DATABASE_HOSTNAME
export IPFS_PGUSER=$DATABASE_USER
export IPFS_PGDATABASE=$DATABASE_NAME
export IPFS_PGPORT=$DATABASE_PORT
export IPFS_PGPASSWORD=$DATABASE_PASSWORD


if [ ! -d "$HOME/.ipfs" ]; then
  # initialize PG-IPFS
  echo "Initializing Postgres-IPFS profile"
  ./ipfs init --profile=postgresds

  rv=$?
  if [ $rv != 0 ]; then
    echo "Could not initialize ipfs"
    exit 1
  fi
fi


echo "Beginning the vulcanizedb process"
VDB_CONFIG_FILE=${VDB_CONFIG_FILE:-config.toml}
@@ -58,12 +38,19 @@ DEFAULT_OPTIONS="--config=$VDB_CONFIG_FILE"
VDB_FULL_CL=${VDB_FULL_CL:-$VDB_COMMAND $DEFAULT_OPTIONS}
echo running: ./vulcanizedb $VDB_FULL_CL $@

case "$1" in
  "/bin/sh" )
    echo dropping to shell
    exec /bin/sh
esac

vdb_args="$@"
# default is to use the config passed by the build arg
if [[ -z "$vdb_args" ]];
if [[ -z "$vdb_args" ]]; then
  vdb_args="--config=config.toml"
fi

echo running: ./vulcanizedb $vdb_args
./vulcanizedb $vdb_args
rv=$?

go.mod
@@ -32,6 +32,7 @@ require (
    github.com/ipfs/go-ipfs-blockstore v0.0.1
    github.com/ipfs/go-ipfs-cmds v0.1.1 // indirect
    github.com/ipfs/go-ipfs-config v0.0.3 // indirect
    github.com/ipfs/go-ipfs-ds-help v0.0.1
    github.com/ipfs/go-ipfs-exchange-interface v0.0.1
    github.com/ipfs/go-ipld-cbor v0.0.3 // indirect
    github.com/ipfs/go-ipld-format v0.0.2
go.sum
@ -17,7 +17,6 @@ github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxB
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
|
||||
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
@ -31,7 +30,6 @@ github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
|
||||
github.com/aristanetworks/goarista v0.0.0-20190712234253-ed1100a1c015 h1:7ABPr1+uJdqESAdlVevnc/2FJGiC/K3uMg1JiELeF+0=
|
||||
@ -62,7 +60,6 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtE
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
|
||||
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
@ -129,16 +126,12 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI=
|
||||
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
|
||||
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
|
||||
github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
|
||||
github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
@ -148,7 +141,6 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
|
||||
@ -162,15 +154,12 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989 h1:giknQ4mEuDFmmHSrGcbargOuLHQGtywqo4mheITex54=
|
||||
github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
@ -199,7 +188,6 @@ github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324/go.mod h1:MZ2ZmwcBpvOo
|
||||
github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
|
||||
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
|
||||
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
|
||||
github.com/ipfs/bbloom v0.0.1 h1:s7KkiBPfxCeDVo47KySjK0ACPc5GJRUxFpdyWEuDjhw=
|
||||
@ -230,7 +218,6 @@ github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAK
|
||||
github.com/ipfs/go-datastore v0.0.3/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
|
||||
github.com/ipfs/go-datastore v0.0.5 h1:q3OfiOZV5rlsK1H5V8benjeUApRfMGs4Mrhmr6NriQo=
|
||||
github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
|
||||
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
|
||||
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
|
||||
github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8=
|
||||
github.com/ipfs/go-ds-badger v0.0.5 h1:dxKuqw5T1Jm8OuV+lchA76H9QZFyPKZeLuT6bN42hJQ=
|
||||
@ -246,11 +233,9 @@ github.com/ipfs/go-fs-lock v0.0.1 h1:XHX8uW4jQBYWHj59XXcjg7BHlHxV9ZOYs6Y43yb7/l0
|
||||
github.com/ipfs/go-fs-lock v0.0.1/go.mod h1:DNBekbboPKcxs1aukPSaOtFA3QfSdi5C855v0i9XJ8Y=
|
||||
github.com/ipfs/go-ipfs-blockstore v0.0.1 h1:O9n3PbmTYZoNhkgkEyrXTznbmktIXif62xLX+8dPHzc=
|
||||
github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
|
||||
github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ=
|
||||
github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk=
|
||||
github.com/ipfs/go-ipfs-chunker v0.0.1 h1:cHUUxKFQ99pozdahi+uSC/3Y6HeRpi9oTeUHbE27SEw=
|
||||
github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw=
|
||||
github.com/ipfs/go-ipfs-cmds v0.1.1 h1:H9/BLf5rcsULHMj/x8gC0e5o+raYhqk1OQsfzbGMNM4=
|
||||
github.com/ipfs/go-ipfs-cmds v0.1.1/go.mod h1:k1zMXcOLtljA9iAnZHddbH69yVm5+weRL0snmMD/rK0=
|
||||
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
|
||||
github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
|
||||
@ -262,7 +247,6 @@ github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFq
|
||||
github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew=
|
||||
github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0=
|
||||
github.com/ipfs/go-ipfs-files v0.0.2/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
|
||||
github.com/ipfs/go-ipfs-files v0.0.3 h1:ME+QnC3uOyla1ciRPezDW0ynQYK2ikOh9OCKAEg4uUA=
|
||||
github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
|
||||
github.com/ipfs/go-ipfs-files v0.0.4 h1:WzRCivcybUQch/Qh6v8LBRhKtRsjnwyiuOV09mK7mrE=
|
||||
github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
|
||||
@ -316,12 +300,10 @@ github.com/ipfs/interface-go-ipfs-core v0.1.0/go.mod h1:h1zJvvfh9dcNU0bK+Jag516L
|
||||
github.com/jackpal/gateway v1.0.4/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
|
||||
github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc=
|
||||
github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
|
||||
github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA=
|
||||
github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs=
|
||||
github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc=
|
||||
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
|
||||
github.com/jbenet/go-is-domain v1.0.2 h1:11r5MSptcNFZyBoqubBQnVMUKRWLuRjL1banaIk+iYo=
|
||||
github.com/jbenet/go-is-domain v1.0.2/go.mod h1:xbRLRb0S7FgzDBTJlguhDVwLYM/5yNtvktxj2Ttfy7Q=
|
||||
@ -338,7 +320,6 @@ github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
@ -348,16 +329,12 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b h1:wxtKgYHEncAU00muMD06dzLiahtGM1eouRNOzVV7tdQ=
|
||||
github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
@ -392,7 +369,6 @@ github.com/libp2p/go-libp2p-autonat-svc v0.1.0 h1:28IM7iWMDclZeVkpiFQaWVANwXwE7z
|
||||
github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A=
|
||||
github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc=
|
||||
github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro=
|
||||
github.com/libp2p/go-libp2p-blankhost v0.1.3 h1:0KycuXvPDhmehw0ASsg+s1o3IfXgCUDqfzAl94KEBOg=
|
||||
github.com/libp2p/go-libp2p-blankhost v0.1.3/go.mod h1:KML1//wiKR8vuuJO0y3LUd1uLv+tlkGTAr3jC0S5cLg=
|
||||
github.com/libp2p/go-libp2p-circuit v0.0.1/go.mod h1:Dqm0s/BiV63j8EEAs8hr1H5HudqvCAeXxDyic59lCwE=
|
||||
github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU=
|
||||
@ -439,7 +415,6 @@ github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCv
|
||||
github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c=
|
||||
github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c=
|
||||
github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q=
|
||||
github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ=
|
||||
github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU=
|
||||
github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo=
|
||||
github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es=
|
||||
@ -482,7 +457,6 @@ github.com/libp2p/go-libp2p-swarm v0.1.1/go.mod h1:4NVJaLwq/dr5kEq79Jo6pMin7ZFwL
|
||||
github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
|
||||
github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
|
||||
github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
|
||||
github.com/libp2p/go-libp2p-testing v0.0.4 h1:Qev57UR47GcLPXWjrunv5aLIQGO4n9mhI/8/EIrEEFc=
|
||||
github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
|
||||
github.com/libp2p/go-libp2p-tls v0.1.0 h1:o4bjjAdnUjNgJoPoDd0wUaZH7K+EenlNWJpgyXB3ulA=
|
||||
github.com/libp2p/go-libp2p-tls v0.1.0/go.mod h1:VZdoSWQDeNpIIAFJFv+6uqTqpnIIDHcqZQSTC/A1TT0=
|
||||
@ -559,7 +533,6 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
|
||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4=
|
||||
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
@ -610,7 +583,6 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
|
||||
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
|
||||
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88=
|
||||
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c h1:1RHs3tNxjXGHeul8z2t6H2N2TlAqpKe5yryJztRx4Jk=
|
||||
github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
@ -634,7 +606,6 @@ github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssy
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
|
||||
github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
|
||||
@ -660,7 +631,6 @@ github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSg
|
||||
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
|
||||
github.com/rjeczalik/notify v0.9.2 h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8=
|
||||
github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM=
|
||||
github.com/robertkrimen/otto v0.0.0-20170205013659-6a77b7cbc37d/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
@ -673,15 +643,11 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8=
|
||||
github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a h1:/eS3yfGjQKG+9kayBkj0ip1BGhq6zJ3eaVksphxAaek=
|
||||
github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0=
|
||||
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU=
|
||||
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
@ -713,9 +679,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
|
||||
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
|
||||
@ -726,8 +690,6 @@ github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:s
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/vulcanize/go-ethereum v1.5.10-0.20200116224441-2a980ec3dcb8 h1:BHt0OW0rTgndFjSju7brF3dPceXWQuEV0IdtY8BjjT8=
|
||||
github.com/vulcanize/go-ethereum v1.5.10-0.20200116224441-2a980ec3dcb8/go.mod h1:a9TqabFudpDu1nucId+k9S8R9whYaHnGBLKFouA5EAo=
|
||||
github.com/vulcanize/go-ethereum v1.5.10-0.20200311182536-d07dc803d290 h1:uMWt+x6JhVT7GyL983weZSxv1zDBxvGlI9HNkcTnUeg=
|
||||
github.com/vulcanize/go-ethereum v1.5.10-0.20200311182536-d07dc803d290/go.mod h1:7oC0Ni6dosMv5pxMigm6s0hN8g4haJMBnqmmo0D9YfQ=
|
||||
github.com/vulcanize/go-ipfs v0.4.22-alpha h1:W+6njT14KWllMhABRFtPndqHw8SHCt5SqD4YX528kxM=
|
||||
@ -735,7 +697,6 @@ github.com/vulcanize/go-ipfs v0.4.22-alpha/go.mod h1:uaekWWeoaA0A9Dv1LObOKCSh9kI
|
||||
github.com/vulcanize/go-ipfs-config v0.0.8-alpha h1:peaFvbEcPShF6ymOd8flqKkFz4YfcrNr/UOO7FmbWoQ=
|
||||
github.com/vulcanize/go-ipfs-config v0.0.8-alpha/go.mod h1:IGkVTacurWv9WFKc7IBPjHGM/7hi6+PEClqUb/l2BIM=
|
||||
github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
|
||||
github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0=
|
||||
github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
|
||||
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4=
|
||||
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM=
|
||||
@ -776,7 +737,6 @@ go.uber.org/dig v1.7.0 h1:E5/L92iQTNJTjfgJF2KgU+/JpMaiuvK2DHLBj0+kSZk=
|
||||
go.uber.org/dig v1.7.0/go.mod h1:z+dSd2TP9Usi48jL8M3v63iSBVkiwtVyMKxMZYYauPg=
|
||||
go.uber.org/fx v1.9.0 h1:7OAz8ucp35AU8eydejpYG7QrbE8rLKzGhHbZlJi5LYY=
|
||||
go.uber.org/fx v1.9.0/go.mod h1:mFdUyAUuJ3w4jAckiKSKbldsxy1ojpAMJ+dVZg5Y0Aw=
|
||||
go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo=
|
||||
go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
@ -802,7 +762,6 @@ golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@ -863,13 +822,11 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11 h1:Yq9t9jnGoR+dBuitxdo9l6Q7xh/zOyNnYUtDKaQ3x0E=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
@ -880,13 +837,10 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772 h1:hhsSf/5z74Ck/DJYc+R8zpq8KGm7uJvpdLRQED/IedA=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78=
|
||||
|
@@ -27,6 +27,7 @@ import (

    "github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
    "github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
    "github.com/vulcanize/vulcanizedb/libraries/shared/utilities"
)

const (
@@ -63,7 +64,7 @@ func (bf *backFiller) BackFill(startingBlock, endingBlock uint64, backFill chan
    logrus.Infof("going to fill in gap from %d to %d", startingBlock, endingBlock)

    // break the range up into bins of smaller ranges
    blockRangeBins, err := utils.GetBlockHeightBins(startingBlock, endingBlock, bf.batchSize)
    blockRangeBins, err := utilities.GetBlockHeightBins(startingBlock, endingBlock, bf.batchSize)
    if err != nil {
        return err
    }
@@ -1,72 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package utils

import (
    "errors"

    "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

func GetBlockHeightBins(startingBlock, endingBlock, batchSize uint64) ([][]uint64, error) {
    if endingBlock < startingBlock {
        return nil, errors.New("backfill: ending block number needs to be greater than starting block number")
    }
    if batchSize == 0 {
        return nil, errors.New("backfill: batchsize needs to be greater than zero")
    }
    length := endingBlock - startingBlock + 1
    numberOfBins := length / batchSize
    remainder := length % batchSize
    if remainder != 0 {
        numberOfBins++
    }
    blockRangeBins := make([][]uint64, numberOfBins)
    for i := range blockRangeBins {
        nextBinStart := startingBlock + batchSize
        blockRange := make([]uint64, 0, nextBinStart-startingBlock+1)
        for j := startingBlock; j < nextBinStart && j <= endingBlock; j++ {
            blockRange = append(blockRange, j)
        }
        startingBlock = nextBinStart
        blockRangeBins[i] = blockRange
    }
    return blockRangeBins, nil
}

func MissingHeightsToGaps(heights []uint64) []shared.Gap {
    validationGaps := make([]shared.Gap, 0)
    start := heights[0]
    lastHeight := start
    for i, height := range heights[1:] {
        if height != lastHeight+1 {
            validationGaps = append(validationGaps, shared.Gap{
                Start: start,
                Stop:  lastHeight,
            })
            start = height
        }
        if i+2 == len(heights) {
            validationGaps = append(validationGaps, shared.Gap{
                Start: start,
                Stop:  height,
            })
        }
        lastHeight = height
    }
    return validationGaps
}
libraries/shared/utilities/utilities_suite_test.go (new file)
@@ -0,0 +1,36 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package utilities_test

import (
    "io/ioutil"
    "testing"

    "github.com/sirupsen/logrus"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

func TestShared(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Shared Utilities Suite")
}

var _ = BeforeSuite(func() {
    logrus.SetOutput(ioutil.Discard)
})
@@ -16,9 +16,62 @@

package utilities

func NullToZero(str string) string {
    if str == "" {
        return "0"
import (
    "errors"

    "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

// GetBlockHeightBins splits a block range up into bins of block heights of the given batch size
func GetBlockHeightBins(startingBlock, endingBlock, batchSize uint64) ([][]uint64, error) {
    if endingBlock < startingBlock {
        return nil, errors.New("backfill: ending block number needs to be greater than starting block number")
    }
    return str
    if batchSize == 0 {
        return nil, errors.New("backfill: batchsize needs to be greater than zero")
    }
    length := endingBlock - startingBlock + 1
    numberOfBins := length / batchSize
    remainder := length % batchSize
    if remainder != 0 {
        numberOfBins++
    }
    blockRangeBins := make([][]uint64, numberOfBins)
    for i := range blockRangeBins {
        nextBinStart := startingBlock + batchSize
        blockRange := make([]uint64, 0, nextBinStart-startingBlock+1)
        for j := startingBlock; j < nextBinStart && j <= endingBlock; j++ {
            blockRange = append(blockRange, j)
        }
        startingBlock = nextBinStart
        blockRangeBins[i] = blockRange
    }
    return blockRangeBins, nil
}

// MissingHeightsToGaps returns a slice of gaps from a slice of missing block heights
func MissingHeightsToGaps(heights []uint64) []shared.Gap {
    if len(heights) == 0 {
        return nil
    }
    validationGaps := make([]shared.Gap, 0)
    start := heights[0]
    lastHeight := start
    for i, height := range heights[1:] {
        if height != lastHeight+1 {
            validationGaps = append(validationGaps, shared.Gap{
                Start: start,
                Stop:  lastHeight,
            })
            start = height
        }
        if i+2 == len(heights) {
            validationGaps = append(validationGaps, shared.Gap{
                Start: start,
                Stop:  height,
            })
        }
        lastHeight = height
    }
    return validationGaps
}
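The two helpers above drive the backfill binning and gap detection. The short program below is an editorial illustration, not part of the commit; the expected values in the comments are traced by hand from the function bodies shown above, and the utils alias mirrors the import alias used elsewhere in this diff.

package main

import (
    "fmt"

    utils "github.com/vulcanize/vulcanizedb/libraries/shared/utilities"
)

func main() {
    // Ten heights (0..9) with a batch size of 3 -> three full bins plus one partial bin.
    bins, err := utils.GetBlockHeightBins(0, 9, 3)
    if err != nil {
        panic(err)
    }
    fmt.Println(bins) // [[0 1 2] [3 4 5] [6 7 8] [9]]

    // Runs of consecutive missing heights collapse into inclusive Start/Stop gaps.
    for _, gap := range utils.MissingHeightsToGaps([]uint64{5, 6, 7, 9, 10, 15}) {
        fmt.Printf("gap %d-%d\n", gap.Start, gap.Stop) // 5-7, then 9-10, then 15-15
    }
}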
@@ -14,13 +14,13 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package utils_test
package utilities_test

import (
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

    "github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
    utils "github.com/vulcanize/vulcanizedb/libraries/shared/utilities"
)

var _ = Describe("GetBlockHeightBins", func() {
@@ -24,7 +24,7 @@ import (

    log "github.com/sirupsen/logrus"

    "github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
    utils "github.com/vulcanize/vulcanizedb/libraries/shared/utilities"
    "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

@@ -69,11 +69,11 @@ type BackFillService struct {

// NewBackFillService returns a new BackFillInterface
func NewBackFillService(settings *Config, screenAndServeChan chan shared.ConvertedData) (BackFillInterface, error) {
    publisher, err := NewIPLDPublisher(settings.Chain, settings.IPFSPath)
    publisher, err := NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.DB, settings.IPFSMode)
    if err != nil {
        return nil, err
    }
    indexer, err := NewCIDIndexer(settings.Chain, settings.DB)
    indexer, err := NewCIDIndexer(settings.Chain, settings.DB, settings.IPFSMode)
    if err != nil {
        return nil, err
    }
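NewIPLDPublisher and NewCIDIndexer now also receive the database handle and the IPFS mode. The chain-specific constructors that act on those extra arguments are outside this hunk, so the sketch below is only a hypothetical illustration of the dispatch they enable: shared.LocalInterface and NewIPLDFetcher's signature come from this diff, while the shared.IPFSMode type name and the idea that every other mode maps to the new Postgres-direct fetcher are assumptions.

package example

import (
    "github.com/vulcanize/vulcanizedb/pkg/postgres"
    "github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
    "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

// newBTCIPLDFetcher is a hypothetical helper, not part of this commit.
func newBTCIPLDFetcher(mode shared.IPFSMode, ipfsPath string, db *postgres.DB) (interface{}, error) {
    if mode == shared.LocalInterface {
        // go through the internalized IPFS node interface rooted at ipfsPath
        return btc.NewIPLDFetcher(ipfsPath)
    }
    // any other mode (assumed): read IPLD blocks directly from the PG-IPFS tables
    return btc.NewIPLDPGFetcher(db), nil
}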
@@ -17,17 +17,16 @@
package btc

import (
    "database/sql"
    "fmt"
    "math/big"

    "github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"

    "github.com/lib/pq"

    "github.com/ethereum/go-ethereum/common"
    "github.com/jmoiron/sqlx"
    "github.com/lib/pq"
    log "github.com/sirupsen/logrus"

    utils "github.com/vulcanize/vulcanizedb/libraries/shared/utilities"
    "github.com/vulcanize/vulcanizedb/pkg/postgres"
    "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
@@ -65,17 +64,26 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
        return nil, true, fmt.Errorf("btc retriever expected filter type %T got %T", &SubscriptionSettings{}, filter)
    }
    log.Debug("retrieving cids")

    // Begin new db tx
    tx, err := ecr.db.Beginx()
    if err != nil {
        return nil, true, err
    }
    defer func() {
        if p := recover(); p != nil {
            shared.Rollback(tx)
            panic(p)
        } else if err != nil {
            shared.Rollback(tx)
        } else {
            err = tx.Commit()
        }
    }()

    // Retrieve cached header CIDs
    headers, err := ecr.RetrieveHeaderCIDs(tx, blockNumber)
    if err != nil {
        if err := tx.Rollback(); err != nil {
            log.Error(err)
        }
        log.Error("header cid retrieval error")
        return nil, true, err
    }
@@ -92,9 +100,6 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
    if !streamFilter.TxFilter.Off {
        cw.Transactions, err = ecr.RetrieveTxCIDs(tx, streamFilter.TxFilter, header.ID)
        if err != nil {
            if err := tx.Rollback(); err != nil {
                log.Error(err)
            }
            log.Error("transaction cid retrieval error")
            return nil, true, err
        }
@@ -105,7 +110,7 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
        cws[i] = cw
    }

    return cws, empty, tx.Commit()
    return cws, empty, err
}

// RetrieveHeaderCIDs retrieves and returns all of the header cids at the provided blockheight
@@ -173,7 +178,7 @@ func (ecr *CIDRetriever) RetrieveGapsInData(validationLevel int) ([]shared.Gap,
        Start uint64 `db:"start"`
        Stop  uint64 `db:"stop"`
    }, 0)
    if err := ecr.db.Select(&results, pgStr); err != nil {
    if err := ecr.db.Select(&results, pgStr); err != nil && err != sql.ErrNoRows {
        return nil, err
    }
    emptyGaps := make([]shared.Gap, len(results))
@@ -190,53 +195,66 @@ func (ecr *CIDRetriever) RetrieveGapsInData(validationLevel int) ([]shared.Gap,
    WHERE times_validated < $1
    ORDER BY block_number`
    var heights []uint64
    if err := ecr.db.Select(&heights, pgStr, validationLevel); err != nil {
    if err := ecr.db.Select(&heights, pgStr, validationLevel); err != nil && err != sql.ErrNoRows {
        return nil, err
    }
    if len(heights) == 0 {
        return emptyGaps, nil
    }
    return append(emptyGaps, utils.MissingHeightsToGaps(heights)...), nil
}

// RetrieveBlockByHash returns all of the CIDs needed to compose an entire block, for a given block hash
func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (HeaderModel, []TxModel, error) {
    log.Debug("retrieving block cids for block hash ", blockHash.String())

    // Begin new db tx
    tx, err := ecr.db.Beginx()
    if err != nil {
        return HeaderModel{}, nil, err
    }
    defer func() {
        if p := recover(); p != nil {
            shared.Rollback(tx)
            panic(p)
        } else if err != nil {
            shared.Rollback(tx)
        } else {
            err = tx.Commit()
        }
    }()

    headerCID, err := ecr.RetrieveHeaderCIDByHash(tx, blockHash)
    if err != nil {
        if err := tx.Rollback(); err != nil {
            log.Error(err)
        }
        log.Error("header cid retrieval error")
        return HeaderModel{}, nil, err
    }
    txCIDs, err := ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.ID)
    if err != nil {
        if err := tx.Rollback(); err != nil {
            log.Error(err)
        }
        log.Error("tx cid retrieval error")
        return HeaderModel{}, nil, err
    }
    return headerCID, txCIDs, tx.Commit()
    return headerCID, txCIDs, err
}

// RetrieveBlockByNumber returns all of the CIDs needed to compose an entire block, for a given block number
func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (HeaderModel, []TxModel, error) {
    log.Debug("retrieving block cids for block number ", blockNumber)

    // Begin new db tx
    tx, err := ecr.db.Beginx()
    if err != nil {
        return HeaderModel{}, nil, err
    }
    defer func() {
        if p := recover(); p != nil {
            shared.Rollback(tx)
            panic(p)
        } else if err != nil {
            shared.Rollback(tx)
        } else {
            err = tx.Commit()
        }
    }()

    headerCID, err := ecr.RetrieveHeaderCIDs(tx, blockNumber)
    if err != nil {
        if err := tx.Rollback(); err != nil {
            log.Error(err)
        }
        log.Error("header cid retrieval error")
        return HeaderModel{}, nil, err
    }
@@ -245,13 +263,9 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (HeaderModel,
    }
    txCIDs, err := ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].ID)
    if err != nil {
        if err := tx.Rollback(); err != nil {
            log.Error(err)
        }
        log.Error("tx cid retrieval error")
        return HeaderModel{}, nil, err
    }
    return headerCID[0], txCIDs, tx.Commit()
    return headerCID[0], txCIDs, err
}

// RetrieveHeaderCIDByHash returns the header for the given block hash
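The same begin/defer/rollback-or-commit block now appears in Retrieve, RetrieveBlockByHash, RetrieveBlockByNumber, the indexer below, and the new PG-backed fetcher. Distilled into a standalone helper it looks like the sketch below (editorial, not in the commit): the named err return is what lets the deferred closure observe the function's final error and choose between rollback and commit; shared.Rollback is taken from the diff and is assumed to log any rollback failure itself.

package example

import (
    "github.com/jmoiron/sqlx"

    "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

// withTx wraps fn in a transaction using the commit's rollback/commit pattern.
func withTx(db *sqlx.DB, fn func(tx *sqlx.Tx) error) (err error) {
    tx, err := db.Beginx()
    if err != nil {
        return err
    }
    defer func() {
        if p := recover(); p != nil {
            shared.Rollback(tx) // roll back, then re-raise the panic
            panic(p)
        } else if err != nil {
            shared.Rollback(tx) // roll back when fn returned an error
        } else {
            err = tx.Commit() // success: the commit result becomes the return value
        }
    }()
    err = fn(tx)
    return err
}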
@@ -50,9 +50,7 @@ func (c *Cleaner) ResetValidation(rngs [][2]uint64) error {
    SET times_validated = 0
    WHERE block_number BETWEEN $1 AND $2`
        if _, err := tx.Exec(pgStr, rng[0], rng[1]); err != nil {
            if err := tx.Rollback(); err != nil {
                logrus.Error(err)
            }
            shared.Rollback(tx)
            return err
        }
    }
@@ -68,9 +66,7 @@ func (c *Cleaner) Clean(rngs [][2]uint64, t shared.DataType) error {
    for _, rng := range rngs {
        logrus.Infof("btc db cleaner cleaning up block range %d to %d", rng[0], rng[1])
        if err := c.clean(tx, rng, t); err != nil {
            if err := tx.Rollback(); err != nil {
                logrus.Error(err)
            }
            shared.Rollback(tx)
            return err
        }
    }
@@ -43,29 +43,42 @@ func (in *CIDIndexer) Index(cids shared.CIDsForIndexing) error {
    if !ok {
        return fmt.Errorf("btc indexer expected cids type %T got %T", &CIDPayload{}, cids)
    }

    // Begin new db tx
    tx, err := in.db.Beginx()
    if err != nil {
        return err
    }
    headerID, err := in.indexHeaderCID(tx, cidWrapper.HeaderCID, in.db.NodeID)
    defer func() {
        if p := recover(); p != nil {
            shared.Rollback(tx)
            panic(p)
        } else if err != nil {
            shared.Rollback(tx)
        } else {
            err = tx.Commit()
        }
    }()

    headerID, err := in.indexHeaderCID(tx, cidWrapper.HeaderCID)
    if err != nil {
        logrus.Error("btc indexer error when indexing header")
        return err
    }
    if err := in.indexTransactionCIDs(tx, cidWrapper.TransactionCIDs, headerID); err != nil {
    err = in.indexTransactionCIDs(tx, cidWrapper.TransactionCIDs, headerID)
    if err != nil {
        logrus.Error("btc indexer error when indexing transactions")
        return err
    }
    return tx.Commit()
    return err
}

func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel, nodeID int64) (int64, error) {
func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel) (int64, error) {
    var headerID int64
    err := tx.QueryRowx(`INSERT INTO btc.header_cids (block_number, block_hash, parent_hash, cid, timestamp, bits, node_id, times_validated)
            VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
            ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, timestamp, bits, node_id, times_validated) = ($3, $4, $5, $6, $7, btc.header_cids.times_validated + 1)
            RETURNING id`,
        header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.Timestamp, header.Bits, nodeID, 1).Scan(&headerID)
        header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.Timestamp, header.Bits, in.db.NodeID, 1).Scan(&headerID)
    return headerID, err
}

@@ -40,6 +40,7 @@ type IPLDFetcher struct {
}

// NewIPLDFetcher creates a pointer to a new IPLDFetcher
// It interfaces with PG-IPFS through an internalized IPFS node interface
func NewIPLDFetcher(ipfsPath string) (*IPLDFetcher, error) {
    blockService, err := ipfs.InitIPFSBlockService(ipfsPath)
    if err != nil {
107
pkg/super_node/btc/ipld_pg_fetcher.go
Normal file
107
pkg/super_node/btc/ipld_pg_fetcher.go
Normal file
@ -0,0 +1,107 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package btc

import (
"fmt"

"github.com/jmoiron/sqlx"
log "github.com/sirupsen/logrus"

"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

// IPLDPGFetcher satisfies the IPLDFetcher interface for bitcoin
// it interfaces directly with PG-IPFS instead of going through a node-interface or remote node
type IPLDPGFetcher struct {
db *postgres.DB
}

// NewIPLDPGFetcher creates a pointer to a new IPLDPGFetcher
func NewIPLDPGFetcher(db *postgres.DB) *IPLDPGFetcher {
return &IPLDPGFetcher{
db: db,
}
}

// Fetch is the exported method for fetching and returning all the IPLDs specified in the CIDWrapper
func (f *IPLDPGFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error) {
cidWrapper, ok := cids.(*CIDWrapper)
if !ok {
return nil, fmt.Errorf("btc fetcher: expected cids type %T got %T", &CIDWrapper{}, cids)
}
log.Debug("fetching iplds")
iplds := IPLDs{}
iplds.BlockNumber = cidWrapper.BlockNumber

tx, err := f.db.Beginx()
if err != nil {
return nil, err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()

iplds.Header, err = f.FetchHeader(tx, cidWrapper.Header)
if err != nil {
return nil, fmt.Errorf("btc pg fetcher: header fetching error: %s", err.Error())
}
iplds.Transactions, err = f.FetchTrxs(tx, cidWrapper.Transactions)
if err != nil {
return nil, fmt.Errorf("btc pg fetcher: transaction fetching error: %s", err.Error())
}
return iplds, err
}

// FetchHeader fetches the header IPLD
func (f *IPLDPGFetcher) FetchHeader(tx *sqlx.Tx, c HeaderModel) (ipfs.BlockModel, error) {
log.Debug("fetching header ipld")
headerBytes, err := shared.FetchIPLD(tx, c.CID)
if err != nil {
return ipfs.BlockModel{}, err
}
return ipfs.BlockModel{
Data: headerBytes,
CID: c.CID,
}, nil
}

// FetchTrxs fetches transactions
func (f *IPLDPGFetcher) FetchTrxs(tx *sqlx.Tx, cids []TxModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching transaction iplds")
trxIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids {
trxBytes, err := shared.FetchIPLD(tx, c.CID)
if err != nil {
return nil, err
}
trxIPLDs[i] = ipfs.BlockModel{
Data: trxBytes,
CID: c.CID,
}
}
return trxIPLDs, nil
}
124 pkg/super_node/btc/publishAndIndexer.go Normal file
@ -0,0 +1,124 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package btc

import (
"fmt"
"strconv"

"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

// IPLDPublisherAndIndexer satisfies the IPLDPublisher interface for bitcoin
// It interfaces directly with the public.blocks table of PG-IPFS rather than going through an ipfs intermediary
// It publishes and indexes IPLDs together in a single sqlx.Tx
type IPLDPublisherAndIndexer struct {
indexer *CIDIndexer
}

// NewIPLDPublisherAndIndexer creates a pointer to a new IPLDPublisherAndIndexer which satisfies the IPLDPublisher interface
func NewIPLDPublisherAndIndexer(db *postgres.DB) *IPLDPublisherAndIndexer {
return &IPLDPublisherAndIndexer{
indexer: NewCIDIndexer(db),
}
}

// Publish publishes and indexes the IPLDs for a ConvertedPayload directly in Postgres; it returns a nil CIDsForIndexing since indexing is already done
func (pub *IPLDPublisherAndIndexer) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) {
ipldPayload, ok := payload.(ConvertedPayload)
if !ok {
return nil, fmt.Errorf("btc publisher expected payload type %T got %T", ConvertedPayload{}, payload)
}
// Generate the iplds
headerNode, txNodes, txTrieNodes, err := ipld.FromHeaderAndTxs(ipldPayload.Header, ipldPayload.Txs)
if err != nil {
return nil, err
}

// Begin new db tx
tx, err := pub.indexer.db.Beginx()
if err != nil {
return nil, err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()

// Publish trie nodes
for _, node := range txTrieNodes {
if err := shared.PublishIPLD(tx, node); err != nil {
return nil, err
}
}

// Publish and index header
if err := shared.PublishIPLD(tx, headerNode); err != nil {
return nil, err
}
header := HeaderModel{
CID: headerNode.Cid().String(),
ParentHash: ipldPayload.Header.PrevBlock.String(),
BlockNumber: strconv.Itoa(int(ipldPayload.BlockPayload.BlockHeight)),
BlockHash: ipldPayload.Header.BlockHash().String(),
Timestamp: ipldPayload.Header.Timestamp.UnixNano(),
Bits: ipldPayload.Header.Bits,
}
headerID, err := pub.indexer.indexHeaderCID(tx, header)
if err != nil {
return nil, err
}

// Publish and index txs
for i, txNode := range txNodes {
if err := shared.PublishIPLD(tx, txNode); err != nil {
return nil, err
}
txModel := ipldPayload.TxMetaData[i]
txModel.CID = txNode.Cid().String()
txID, err := pub.indexer.indexTransactionCID(tx, txModel, headerID)
if err != nil {
return nil, err
}
for _, input := range txModel.TxInputs {
if err := pub.indexer.indexTxInput(tx, input, txID); err != nil {
return nil, err
}
}
for _, output := range txModel.TxOutputs {
if err := pub.indexer.indexTxOutput(tx, output, txID); err != nil {
return nil, err
}
}
}

// This IPLDPublisher does both publishing and indexing, we do not need to pass anything forward to the indexer
return nil, err
}

// Index satisfies the shared.CIDIndexer interface
func (pub *IPLDPublisherAndIndexer) Index(cids shared.CIDsForIndexing) error {
return nil
}
121 pkg/super_node/btc/publishAndIndexer_test.go Normal file
@ -0,0 +1,121 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package btc_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-ipfs-blockstore"
|
||||
"github.com/ipfs/go-ipfs-ds-help"
|
||||
"github.com/multiformats/go-multihash"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/postgres"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/super_node/btc"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/super_node/btc/mocks"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
|
||||
)
|
||||
|
||||
var _ = Describe("PublishAndIndexer", func() {
|
||||
var (
|
||||
db *postgres.DB
|
||||
err error
|
||||
repo *btc.IPLDPublisherAndIndexer
|
||||
ipfsPgGet = `SELECT data FROM public.blocks
|
||||
WHERE key = $1`
|
||||
)
|
||||
BeforeEach(func() {
|
||||
db, err = shared.SetupDB()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
repo = btc.NewIPLDPublisherAndIndexer(db)
|
||||
})
|
||||
AfterEach(func() {
|
||||
btc.TearDownDB(db)
|
||||
})
|
||||
|
||||
Describe("Publish", func() {
|
||||
It("Published and indexes header and transaction IPLDs in a single tx", func() {
|
||||
emptyReturn, err := repo.Publish(mocks.MockConvertedPayload)
|
||||
Expect(emptyReturn).To(BeNil())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
pgStr := `SELECT * FROM btc.header_cids
|
||||
WHERE block_number = $1`
|
||||
// check header was properly indexed
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 80))
|
||||
err = mocks.MockBlock.Header.Serialize(buf)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
headerBytes := buf.Bytes()
|
||||
c, _ := ipld.RawdataToCid(ipld.MBitcoinHeader, headerBytes, multihash.DBL_SHA2_256)
|
||||
header := new(btc.HeaderModel)
|
||||
err = db.Get(header, pgStr, mocks.MockHeaderMetaData.BlockNumber)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(header.CID).To(Equal(c.String()))
|
||||
Expect(header.BlockNumber).To(Equal(mocks.MockHeaderMetaData.BlockNumber))
|
||||
Expect(header.Bits).To(Equal(mocks.MockHeaderMetaData.Bits))
|
||||
Expect(header.Timestamp).To(Equal(mocks.MockHeaderMetaData.Timestamp))
|
||||
Expect(header.BlockHash).To(Equal(mocks.MockHeaderMetaData.BlockHash))
|
||||
Expect(header.ParentHash).To(Equal(mocks.MockHeaderMetaData.ParentHash))
|
||||
dc, err := cid.Decode(header.CID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
mhKey := dshelp.CidToDsKey(dc)
|
||||
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
|
||||
var data []byte
|
||||
err = db.Get(&data, ipfsPgGet, prefixedKey)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(data).To(Equal(headerBytes))
|
||||
|
||||
// check that txs were properly indexed
|
||||
trxs := make([]btc.TxModel, 0)
|
||||
pgStr = `SELECT transaction_cids.id, transaction_cids.header_id, transaction_cids.index,
|
||||
transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.segwit, transaction_cids.witness_hash
|
||||
FROM btc.transaction_cids INNER JOIN btc.header_cids ON (transaction_cids.header_id = header_cids.id)
|
||||
WHERE header_cids.block_number = $1`
|
||||
err = db.Select(&trxs, pgStr, mocks.MockHeaderMetaData.BlockNumber)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(len(trxs)).To(Equal(3))
|
||||
txData := make([][]byte, len(mocks.MockTransactions))
|
||||
txCIDs := make([]string, len(mocks.MockTransactions))
|
||||
for i, m := range mocks.MockTransactions {
|
||||
buf := bytes.NewBuffer(make([]byte, 0))
|
||||
err = m.MsgTx().Serialize(buf)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
tx := buf.Bytes()
|
||||
txData[i] = tx
|
||||
c, _ := ipld.RawdataToCid(ipld.MBitcoinTx, tx, multihash.DBL_SHA2_256)
|
||||
txCIDs[i] = c.String()
|
||||
}
|
||||
for _, tx := range trxs {
|
||||
Expect(tx.SegWit).To(Equal(false))
|
||||
Expect(tx.HeaderID).To(Equal(header.ID))
|
||||
Expect(tx.WitnessHash).To(Equal(""))
|
||||
Expect(tx.CID).To(Equal(txCIDs[tx.Index]))
|
||||
Expect(tx.TxHash).To(Equal(mocks.MockBlock.Transactions[tx.Index].TxHash().String()))
|
||||
dc, err := cid.Decode(tx.CID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
mhKey := dshelp.CidToDsKey(dc)
|
||||
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
|
||||
var data []byte
|
||||
err = db.Get(&data, ipfsPgGet, prefixedKey)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(data).To(Equal(txData[tx.Index]))
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
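The publishAndIndexer test above recomputes the expected CIDs from the raw serialized bytes rather than trusting the stored values. That derivation, isolated into a helper, looks like the sketch below; ipld.RawdataToCid and ipld.MBitcoinHeader are the repo functions used in the test, while the btcsuite wire import and the helper name are assumptions about the mock block's type.

package example

import (
	"bytes"

	"github.com/btcsuite/btcd/wire"
	"github.com/multiformats/go-multihash"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
)

// expectedHeaderCID recomputes the CID the publisher should have stored for a
// bitcoin header: serialize the 80-byte header and hash it with DBL_SHA2_256
// under the bitcoin-header codec, mirroring the test assertions.
func expectedHeaderCID(header *wire.BlockHeader) (string, error) {
	buf := bytes.NewBuffer(make([]byte, 0, 80))
	if err := header.Serialize(buf); err != nil {
		return "", err
	}
	c, err := ipld.RawdataToCid(ipld.MBitcoinHeader, buf.Bytes(), multihash.DBL_SHA2_256)
	if err != nil {
		return "", err
	}
	return c.String(), nil
}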
@ -52,6 +52,7 @@ type Config struct {
// Ubiquitous fields
Chain shared.ChainType
IPFSPath string
IPFSMode shared.IPFSMode
DB *postgres.DB
DBConfig config.Database
Quit chan bool
@ -98,10 +99,16 @@ func NewSuperNodeConfig() (*Config, error) {
return nil, err
}

c.IPFSMode, err = shared.GetIPFSMode()
if err != nil {
return nil, err
}
if c.IPFSMode == shared.LocalInterface || c.IPFSMode == shared.RemoteClient {
c.IPFSPath, err = shared.GetIPFSPath()
if err != nil {
return nil, err
}
}

c.Sync = viper.GetBool("superNode.sync")
if c.Sync {
@ -44,12 +44,26 @@ func NewResponseFilterer(chain shared.ChainType) (shared.ResponseFilterer, error
}

// NewCIDIndexer constructs a CIDIndexer for the provided chain type
func NewCIDIndexer(chain shared.ChainType, db *postgres.DB) (shared.CIDIndexer, error) {
func NewCIDIndexer(chain shared.ChainType, db *postgres.DB, ipfsMode shared.IPFSMode) (shared.CIDIndexer, error) {
switch chain {
case shared.Ethereum:
switch ipfsMode {
case shared.LocalInterface, shared.RemoteClient:
return eth.NewCIDIndexer(db), nil
case shared.DirectPostgres:
return eth.NewIPLDPublisherAndIndexer(db), nil
default:
return nil, fmt.Errorf("ethereum CIDIndexer unexpected ipfs mode %s", ipfsMode.String())
}
case shared.Bitcoin:
switch ipfsMode {
case shared.LocalInterface, shared.RemoteClient:
return btc.NewCIDIndexer(db), nil
case shared.DirectPostgres:
return btc.NewIPLDPublisherAndIndexer(db), nil
default:
return nil, fmt.Errorf("bitcoin CIDIndexer unexpected ipfs mode %s", ipfsMode.String())
}
default:
return nil, fmt.Errorf("invalid chain %s for indexer constructor", chain.String())
}
@ -122,24 +136,52 @@ func NewPayloadConverter(chain shared.ChainType) (shared.PayloadConverter, error
}

// NewIPLDFetcher constructs an IPLDFetcher for the provided chain type
func NewIPLDFetcher(chain shared.ChainType, ipfsPath string) (shared.IPLDFetcher, error) {
func NewIPLDFetcher(chain shared.ChainType, ipfsPath string, db *postgres.DB, ipfsMode shared.IPFSMode) (shared.IPLDFetcher, error) {
switch chain {
case shared.Ethereum:
switch ipfsMode {
case shared.LocalInterface, shared.RemoteClient:
return eth.NewIPLDFetcher(ipfsPath)
case shared.DirectPostgres:
return eth.NewIPLDPGFetcher(db), nil
default:
return nil, fmt.Errorf("ethereum IPLDFetcher unexpected ipfs mode %s", ipfsMode.String())
}
case shared.Bitcoin:
switch ipfsMode {
case shared.LocalInterface, shared.RemoteClient:
return btc.NewIPLDFetcher(ipfsPath)
case shared.DirectPostgres:
return btc.NewIPLDPGFetcher(db), nil
default:
return nil, fmt.Errorf("bitcoin IPLDFetcher unexpected ipfs mode %s", ipfsMode.String())
}
default:
return nil, fmt.Errorf("invalid chain %s for IPLD fetcher constructor", chain.String())
}
}

// NewIPLDPublisher constructs an IPLDPublisher for the provided chain type
func NewIPLDPublisher(chain shared.ChainType, ipfsPath string) (shared.IPLDPublisher, error) {
func NewIPLDPublisher(chain shared.ChainType, ipfsPath string, db *postgres.DB, ipfsMode shared.IPFSMode) (shared.IPLDPublisher, error) {
switch chain {
case shared.Ethereum:
switch ipfsMode {
case shared.LocalInterface, shared.RemoteClient:
return eth.NewIPLDPublisher(ipfsPath)
case shared.DirectPostgres:
return eth.NewIPLDPublisherAndIndexer(db), nil
default:
return nil, fmt.Errorf("ethereum IPLDPublisher unexpected ipfs mode %s", ipfsMode.String())
}
case shared.Bitcoin:
switch ipfsMode {
case shared.LocalInterface, shared.RemoteClient:
return btc.NewIPLDPublisher(ipfsPath)
case shared.DirectPostgres:
return btc.NewIPLDPublisherAndIndexer(db), nil
default:
return nil, fmt.Errorf("bitcoin IPLDPublisher unexpected ipfs mode %s", ipfsMode.String())
}
default:
return nil, fmt.Errorf("invalid chain %s for publisher constructor", chain.String())
}
@ -149,7 +191,7 @@ func NewIPLDPublisher(chain shared.ChainType, ipfsPath string) (shared.IPLDPubli
func NewPublicAPI(chain shared.ChainType, db *postgres.DB, ipfsPath string) (rpc.API, error) {
switch chain {
case shared.Ethereum:
backend, err := eth.NewEthBackend(db, ipfsPath)
backend, err := eth.NewEthBackend(db)
if err != nil {
return rpc.API{}, err
}
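With the new signatures, a caller selects the direct-Postgres path purely through the IPFSMode argument; the ipfsPath is only consulted on the LocalInterface/RemoteClient branches. A hedged wiring sketch follows, assuming these constructors live in the pkg/super_node package (the package declaration is not shown in this diff); buildDirectPostgresPipeline is an illustrative name.

package example

import (
	"github.com/vulcanize/vulcanizedb/pkg/postgres"
	"github.com/vulcanize/vulcanizedb/pkg/super_node"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

// buildDirectPostgresPipeline wires up publisher, indexer, and fetcher in
// DirectPostgres mode; the empty ipfsPath is ignored on this branch.
func buildDirectPostgresPipeline(db *postgres.DB) (shared.IPLDPublisher, shared.CIDIndexer, shared.IPLDFetcher, error) {
	publisher, err := super_node.NewIPLDPublisher(shared.Ethereum, "", db, shared.DirectPostgres)
	if err != nil {
		return nil, nil, nil, err
	}
	indexer, err := super_node.NewCIDIndexer(shared.Ethereum, db, shared.DirectPostgres)
	if err != nil {
		return nil, nil, nil, err
	}
	fetcher, err := super_node.NewIPLDFetcher(shared.Ethereum, "", db, shared.DirectPostgres)
	if err != nil {
		return nil, nil, nil, err
	}
	return publisher, indexer, fetcher, nil
}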
@ -20,6 +20,8 @@ import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
@ -34,19 +36,19 @@ const APIName = "eth"
|
||||
const APIVersion = "0.0.1"
|
||||
|
||||
type PublicEthAPI struct {
|
||||
b *Backend
|
||||
B *Backend
|
||||
}
|
||||
|
||||
// NewPublicEthAPI creates a new PublicEthAPI with the provided underlying Backend
|
||||
func NewPublicEthAPI(b *Backend) *PublicEthAPI {
|
||||
return &PublicEthAPI{
|
||||
b: b,
|
||||
B: b,
|
||||
}
|
||||
}
|
||||
|
||||
// BlockNumber returns the block number of the chain head.
|
||||
func (pea *PublicEthAPI) BlockNumber() hexutil.Uint64 {
|
||||
number, _ := pea.b.Retriever.RetrieveLastBlockNumber()
|
||||
number, _ := pea.B.Retriever.RetrieveLastBlockNumber()
|
||||
return hexutil.Uint64(number)
|
||||
}
|
||||
|
||||
@ -73,23 +75,36 @@ func (pea *PublicEthAPI) GetLogs(ctx context.Context, crit ethereum.FilterQuery)
|
||||
LogAddresses: addrStrs,
|
||||
Topics: topicStrSets,
|
||||
}
|
||||
tx, err := pea.b.DB.Beginx()
|
||||
|
||||
// Begin tx
|
||||
tx, err := pea.B.DB.Beginx()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
// If we have a blockhash to filter on, fire off single retrieval query
|
||||
if crit.BlockHash != nil {
|
||||
rctCIDs, err := pea.b.Retriever.RetrieveRctCIDs(tx, filter, 0, crit.BlockHash, nil)
|
||||
rctCIDs, err := pea.B.Retriever.RetrieveRctCIDs(tx, filter, 0, crit.BlockHash, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rctIPLDs, err := pea.B.Fetcher.FetchRcts(tx, rctCIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rctIPLDs, err := pea.b.Fetcher.FetchRcts(rctCIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return extractLogsOfInterest(rctIPLDs, filter.Topics)
|
||||
}
|
||||
// Otherwise, create block range from criteria
|
||||
@ -97,14 +112,14 @@ func (pea *PublicEthAPI) GetLogs(ctx context.Context, crit ethereum.FilterQuery)
|
||||
startingBlock := crit.FromBlock
|
||||
endingBlock := crit.ToBlock
|
||||
if startingBlock == nil {
|
||||
startingBlockInt, err := pea.b.Retriever.RetrieveFirstBlockNumber()
|
||||
startingBlockInt, err := pea.B.Retriever.RetrieveFirstBlockNumber()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
startingBlock = big.NewInt(startingBlockInt)
|
||||
}
|
||||
if endingBlock == nil {
|
||||
endingBlockInt, err := pea.b.Retriever.RetrieveLastBlockNumber()
|
||||
endingBlockInt, err := pea.B.Retriever.RetrieveLastBlockNumber()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -114,27 +129,28 @@ func (pea *PublicEthAPI) GetLogs(ctx context.Context, crit ethereum.FilterQuery)
|
||||
end := endingBlock.Int64()
|
||||
allRctCIDs := make([]ReceiptModel, 0)
|
||||
for i := start; i <= end; i++ {
|
||||
rctCIDs, err := pea.b.Retriever.RetrieveRctCIDs(tx, filter, i, nil, nil)
|
||||
rctCIDs, err := pea.B.Retriever.RetrieveRctCIDs(tx, filter, i, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allRctCIDs = append(allRctCIDs, rctCIDs...)
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rctIPLDs, err := pea.b.Fetcher.FetchRcts(allRctCIDs)
|
||||
rctIPLDs, err := pea.B.Fetcher.FetchRcts(tx, allRctCIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return extractLogsOfInterest(rctIPLDs, filter.Topics)
|
||||
if err := tx.Commit(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logs, err := extractLogsOfInterest(rctIPLDs, filter.Topics)
|
||||
return logs, err // need to return err variable so that we return the err = tx.Commit() assignment in the defer
|
||||
}
|
||||
|
||||
// GetHeaderByNumber returns the requested canonical block header.
|
||||
// * When blockNr is -1 the chain head is returned.
|
||||
// * We cannot support pending block calls since we do not have an active miner
|
||||
func (pea *PublicEthAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) {
|
||||
header, err := pea.b.HeaderByNumber(ctx, number)
|
||||
header, err := pea.B.HeaderByNumber(ctx, number)
|
||||
if header != nil && err == nil {
|
||||
return pea.rpcMarshalHeader(header)
|
||||
}
|
||||
@ -147,7 +163,7 @@ func (pea *PublicEthAPI) GetHeaderByNumber(ctx context.Context, number rpc.Block
|
||||
// * When fullTx is true all transactions in the block are returned, otherwise
|
||||
// only the transaction hash is returned.
|
||||
func (pea *PublicEthAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) {
|
||||
block, err := pea.b.BlockByNumber(ctx, number)
|
||||
block, err := pea.B.BlockByNumber(ctx, number)
|
||||
if block != nil && err == nil {
|
||||
return pea.rpcMarshalBlock(block, true, fullTx)
|
||||
}
|
||||
@ -157,7 +173,7 @@ func (pea *PublicEthAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockN
|
||||
// GetBlockByHash returns the requested block. When fullTx is true all transactions in the block are returned in full
|
||||
// detail, otherwise only the transaction hash is returned.
|
||||
func (pea *PublicEthAPI) GetBlockByHash(ctx context.Context, hash common.Hash, fullTx bool) (map[string]interface{}, error) {
|
||||
block, err := pea.b.BlockByHash(ctx, hash)
|
||||
block, err := pea.B.BlockByHash(ctx, hash)
|
||||
if block != nil {
|
||||
return pea.rpcMarshalBlock(block, true, fullTx)
|
||||
}
|
||||
@ -168,7 +184,7 @@ func (pea *PublicEthAPI) GetBlockByHash(ctx context.Context, hash common.Hash, f
|
||||
// SuperNode cannot currently handle pending/tx_pool txs
|
||||
func (pea *PublicEthAPI) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) {
|
||||
// Try to return an already finalized transaction
|
||||
tx, blockHash, blockNumber, index, err := pea.b.GetTransaction(ctx, hash)
|
||||
tx, blockHash, blockNumber, index, err := pea.B.GetTransaction(ctx, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -27,12 +27,9 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
|
||||
"github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
mocks3 "github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/postgres"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
|
||||
@ -87,8 +84,8 @@ var _ = Describe("API", func() {
|
||||
var (
|
||||
db *postgres.DB
|
||||
retriever *eth.CIDRetriever
|
||||
fetcher *eth.IPLDFetcher
|
||||
indexer *eth.CIDIndexer
|
||||
fetcher *eth.IPLDPGFetcher
|
||||
indexAndPublisher *eth.IPLDPublisherAndIndexer
|
||||
backend *eth.Backend
|
||||
api *eth.PublicEthAPI
|
||||
)
|
||||
@ -97,32 +94,15 @@ var _ = Describe("API", func() {
|
||||
db, err = shared.SetupDB()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
retriever = eth.NewCIDRetriever(db)
|
||||
blocksToReturn := map[cid.Cid]blocks.Block{
|
||||
mocks.HeaderCID: mocks.HeaderIPLD,
|
||||
mocks.Trx1CID: mocks.Trx1IPLD,
|
||||
mocks.Trx2CID: mocks.Trx2IPLD,
|
||||
mocks.Trx3CID: mocks.Trx3IPLD,
|
||||
mocks.Rct1CID: mocks.Rct1IPLD,
|
||||
mocks.Rct2CID: mocks.Rct2IPLD,
|
||||
mocks.Rct3CID: mocks.Rct3IPLD,
|
||||
mocks.State1CID: mocks.State1IPLD,
|
||||
mocks.State2CID: mocks.State2IPLD,
|
||||
mocks.StorageCID: mocks.StorageIPLD,
|
||||
}
|
||||
mockBlockService := &mocks3.MockIPFSBlockService{
|
||||
Blocks: blocksToReturn,
|
||||
}
|
||||
fetcher = ð.IPLDFetcher{
|
||||
BlockService: mockBlockService,
|
||||
}
|
||||
indexer = eth.NewCIDIndexer(db)
|
||||
fetcher = eth.NewIPLDPGFetcher(db)
|
||||
indexAndPublisher = eth.NewIPLDPublisherAndIndexer(db)
|
||||
backend = ð.Backend{
|
||||
Retriever: retriever,
|
||||
Fetcher: fetcher,
|
||||
DB: db,
|
||||
}
|
||||
api = eth.NewPublicEthAPI(backend)
|
||||
err = indexer.Index(mocks.MockCIDPayload)
|
||||
_, err = indexAndPublisher.Publish(mocks.MockConvertedPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
uncles := mocks.MockBlock.Uncles()
|
||||
uncleHashes := make([]common.Hash, len(uncles))
|
||||
@ -186,8 +166,20 @@ var _ = Describe("API", func() {
|
||||
number, err := strconv.ParseInt(mocks.MockCIDPayload.HeaderCID.BlockNumber, 10, 64)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
header, err := api.GetHeaderByNumber(context.Background(), rpc.BlockNumber(number))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(header).To(Equal(expectedHeader))
|
||||
})
|
||||
|
||||
It("Throws an error if a header cannot be found", func() {
|
||||
number, err := strconv.ParseInt(mocks.MockCIDPayload.HeaderCID.BlockNumber, 10, 64)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
header, err := api.GetHeaderByNumber(context.Background(), rpc.BlockNumber(number+1))
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("header at block %d is not available", number+1))
|
||||
Expect(header).To(BeNil())
|
||||
_, err = api.B.DB.Beginx()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
Describe("GetBlockByHash", func() {
|
||||
|
@ -22,13 +22,13 @@ import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/postgres"
|
||||
)
|
||||
@ -39,26 +39,21 @@ var (
|
||||
|
||||
type Backend struct {
|
||||
Retriever *CIDRetriever
|
||||
Fetcher *IPLDFetcher
|
||||
Fetcher *IPLDPGFetcher
|
||||
DB *postgres.DB
|
||||
}
|
||||
|
||||
func NewEthBackend(db *postgres.DB, ipfsPath string) (*Backend, error) {
|
||||
func NewEthBackend(db *postgres.DB) (*Backend, error) {
|
||||
r := NewCIDRetriever(db)
|
||||
f, err := NewIPLDFetcher(ipfsPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Backend{
|
||||
Retriever: r,
|
||||
Fetcher: f,
|
||||
DB: db,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *Backend) HeaderByNumber(ctx context.Context, blockNumber rpc.BlockNumber) (*types.Header, error) {
|
||||
number := blockNumber.Int64()
|
||||
var err error
|
||||
number := blockNumber.Int64()
|
||||
if blockNumber == rpc.LatestBlockNumber {
|
||||
number, err = b.Retriever.RetrieveLastBlockNumber()
|
||||
if err != nil {
|
||||
@ -68,19 +63,26 @@ func (b *Backend) HeaderByNumber(ctx context.Context, blockNumber rpc.BlockNumbe
|
||||
if blockNumber == rpc.PendingBlockNumber {
|
||||
return nil, errPendingBlockNumber
|
||||
}
|
||||
// Retrieve the CIDs for headers at this height
|
||||
|
||||
// Begin tx
|
||||
tx, err := b.DB.Beginx()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
// Retrieve the CIDs for headers at this height
|
||||
headerCids, err := b.Retriever.RetrieveHeaderCIDs(tx, number)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If there are none, throw an error
|
||||
@ -88,7 +90,7 @@ func (b *Backend) HeaderByNumber(ctx context.Context, blockNumber rpc.BlockNumbe
|
||||
return nil, fmt.Errorf("header at block %d is not available", number)
|
||||
}
|
||||
// Fetch the header IPLDs for those CIDs
|
||||
headerIPLD, err := b.Fetcher.FetchHeader(headerCids[0])
|
||||
headerIPLD, err := b.Fetcher.FetchHeader(tx, headerCids[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -96,10 +98,8 @@ func (b *Backend) HeaderByNumber(ctx context.Context, blockNumber rpc.BlockNumbe
|
||||
// We throw an error in FetchHeaders() if the number of headers does not match the number of CIDs and we already
|
||||
// confirmed the number of CIDs is greater than 0 so there is no need to bound check the slice before accessing
|
||||
var header types.Header
|
||||
if err := rlp.DecodeBytes(headerIPLD.Data, &header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &header, nil
|
||||
err = rlp.DecodeBytes(headerIPLD.Data, &header)
|
||||
return &header, err
|
||||
}
|
||||
|
||||
// GetTd retrieves and returns the total difficulty at the given block hash
|
||||
@ -120,24 +120,29 @@ func (b *Backend) GetTd(blockHash common.Hash) (*big.Int, error) {
|
||||
|
||||
// GetLogs returns all the logs for the given block hash
|
||||
func (b *Backend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
|
||||
// Begin tx
|
||||
tx, err := b.DB.Beginx()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
receiptCIDs, err := b.Retriever.RetrieveRctCIDs(tx, ReceiptFilter{}, 0, &hash, nil)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(receiptCIDs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
receiptIPLDs, err := b.Fetcher.FetchRcts(receiptCIDs)
|
||||
receiptIPLDs, err := b.Fetcher.FetchRcts(tx, receiptCIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -149,15 +154,15 @@ func (b *Backend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log
|
||||
}
|
||||
logs[i] = rct.Logs
|
||||
}
|
||||
return logs, nil
|
||||
return logs, err
|
||||
}
|
||||
|
||||
// BlockByNumber returns the requested canonical block.
|
||||
// Since the SuperNode can contain forked blocks, it is recommended to fetch BlockByHash as
|
||||
// fetching by number can return non-deterministic results (returns the first block found at that height)
|
||||
func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber) (*types.Block, error) {
|
||||
number := blockNumber.Int64()
|
||||
var err error
|
||||
number := blockNumber.Int64()
|
||||
if blockNumber == rpc.LatestBlockNumber {
|
||||
number, err = b.Retriever.RetrieveLastBlockNumber()
|
||||
if err != nil {
|
||||
@ -173,8 +178,24 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Begin tx
|
||||
tx, err := b.DB.Beginx()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
// Fetch and decode the header IPLD
|
||||
headerIPLD, err := b.Fetcher.FetchHeader(headerCID)
|
||||
headerIPLD, err := b.Fetcher.FetchHeader(tx, headerCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -183,7 +204,7 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
|
||||
return nil, err
|
||||
}
|
||||
// Fetch and decode the uncle IPLDs
|
||||
uncleIPLDs, err := b.Fetcher.FetchUncles(uncleCIDs)
|
||||
uncleIPLDs, err := b.Fetcher.FetchUncles(tx, uncleCIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -196,20 +217,20 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
|
||||
uncles = append(uncles, &uncle)
|
||||
}
|
||||
// Fetch and decode the transaction IPLDs
|
||||
txIPLDs, err := b.Fetcher.FetchTrxs(txCIDs)
|
||||
txIPLDs, err := b.Fetcher.FetchTrxs(tx, txCIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var transactions []*types.Transaction
|
||||
for _, txIPLD := range txIPLDs {
|
||||
var tx types.Transaction
|
||||
if err := rlp.DecodeBytes(txIPLD.Data, &tx); err != nil {
|
||||
var transaction types.Transaction
|
||||
if err := rlp.DecodeBytes(txIPLD.Data, &transaction); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transactions = append(transactions, &tx)
|
||||
transactions = append(transactions, &transaction)
|
||||
}
|
||||
// Fetch and decode the receipt IPLDs
|
||||
rctIPLDs, err := b.Fetcher.FetchRcts(rctCIDs)
|
||||
rctIPLDs, err := b.Fetcher.FetchRcts(tx, rctCIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -222,7 +243,7 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
|
||||
receipts = append(receipts, &receipt)
|
||||
}
|
||||
// Compose everything together into a complete block
|
||||
return types.NewBlock(&header, transactions, uncles, receipts), nil
|
||||
return types.NewBlock(&header, transactions, uncles, receipts), err
|
||||
}
|
||||
|
||||
// BlockByHash returns the requested block. When fullTx is true all transactions in the block are returned in full
|
||||
@ -233,8 +254,25 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Begin tx
|
||||
tx, err := b.DB.Beginx()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
// Fetch and decode the header IPLD
|
||||
headerIPLD, err := b.Fetcher.FetchHeader(headerCID)
|
||||
headerIPLD, err := b.Fetcher.FetchHeader(tx, headerCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -243,7 +281,7 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
|
||||
return nil, err
|
||||
}
|
||||
// Fetch and decode the uncle IPLDs
|
||||
uncleIPLDs, err := b.Fetcher.FetchUncles(uncleCIDs)
|
||||
uncleIPLDs, err := b.Fetcher.FetchUncles(tx, uncleCIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -256,20 +294,20 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
|
||||
uncles = append(uncles, &uncle)
|
||||
}
|
||||
// Fetch and decode the transaction IPLDs
|
||||
txIPLDs, err := b.Fetcher.FetchTrxs(txCIDs)
|
||||
txIPLDs, err := b.Fetcher.FetchTrxs(tx, txCIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var transactions []*types.Transaction
|
||||
for _, txIPLD := range txIPLDs {
|
||||
var tx types.Transaction
|
||||
if err := rlp.DecodeBytes(txIPLD.Data, &tx); err != nil {
|
||||
var transaction types.Transaction
|
||||
if err := rlp.DecodeBytes(txIPLD.Data, &transaction); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transactions = append(transactions, &tx)
|
||||
transactions = append(transactions, &transaction)
|
||||
}
|
||||
// Fetch and decode the receipt IPLDs
|
||||
rctIPLDs, err := b.Fetcher.FetchRcts(rctCIDs)
|
||||
rctIPLDs, err := b.Fetcher.FetchRcts(tx, rctCIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -282,7 +320,7 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
|
||||
receipts = append(receipts, &receipt)
|
||||
}
|
||||
// Compose everything together into a complete block
|
||||
return types.NewBlock(&header, transactions, uncles, receipts), nil
|
||||
return types.NewBlock(&header, transactions, uncles, receipts), err
|
||||
}
|
||||
|
||||
// GetTransaction retrieves a tx by hash
|
||||
@ -301,15 +339,35 @@ func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*type
|
||||
if err := b.DB.Get(&txCIDWithHeaderInfo, pgStr, txHash.String()); err != nil {
|
||||
return nil, common.Hash{}, 0, 0, err
|
||||
}
|
||||
txIPLD, err := b.Fetcher.FetchTrxs([]TxModel{{CID: txCIDWithHeaderInfo.CID}})
|
||||
|
||||
// Begin tx
|
||||
tx, err := b.DB.Beginx()
|
||||
if err != nil {
|
||||
return nil, common.Hash{}, 0, 0, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
txIPLD, err := b.Fetcher.FetchTrxs(tx, []TxModel{{CID: txCIDWithHeaderInfo.CID}})
|
||||
if err != nil {
|
||||
return nil, common.Hash{}, 0, 0, err
|
||||
}
|
||||
if err := tx.Commit(); err != nil {
|
||||
return nil, common.Hash{}, 0, 0, err
|
||||
}
|
||||
var transaction types.Transaction
|
||||
if err := rlp.DecodeBytes(txIPLD[0].Data, &transaction); err != nil {
|
||||
return nil, common.Hash{}, 0, 0, err
|
||||
}
|
||||
return &transaction, common.HexToHash(txCIDWithHeaderInfo.BlockHash), uint64(txCIDWithHeaderInfo.BlockNumber), uint64(txCIDWithHeaderInfo.Index), nil
|
||||
return &transaction, common.HexToHash(txCIDWithHeaderInfo.BlockHash), uint64(txCIDWithHeaderInfo.BlockNumber), uint64(txCIDWithHeaderInfo.Index), err
|
||||
}
|
||||
|
||||
// extractLogsOfInterest returns logs from the receipt IPLD
|
||||
@ -364,7 +422,7 @@ func sliceContainsHash(slice []string, hash common.Hash) int {
|
||||
// a `PublicEthAPI`.
|
||||
func (pea *PublicEthAPI) rpcMarshalHeader(header *types.Header) (map[string]interface{}, error) {
|
||||
fields := RPCMarshalHeader(header)
|
||||
td, err := pea.b.GetTd(header.Hash())
|
||||
td, err := pea.B.GetTd(header.Hash())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -403,7 +461,7 @@ func (pea *PublicEthAPI) rpcMarshalBlock(b *types.Block, inclTx bool, fullTx boo
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
td, err := pea.b.GetTd(b.Hash())
|
||||
td, err := pea.B.GetTd(b.Hash())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -17,17 +17,17 @@
|
||||
package eth
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/lib/pq"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
utils "github.com/vulcanize/vulcanizedb/libraries/shared/utilities"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/postgres"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
|
||||
)
|
||||
@ -65,17 +65,26 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
|
||||
return nil, true, fmt.Errorf("eth retriever expected filter type %T got %T", &SubscriptionSettings{}, filter)
|
||||
}
|
||||
log.Debug("retrieving cids")
|
||||
|
||||
// Begin new db tx
|
||||
tx, err := ecr.db.Beginx()
|
||||
if err != nil {
|
||||
return nil, true, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
// Retrieve cached header CIDs at this block height
|
||||
headers, err := ecr.RetrieveHeaderCIDs(tx, blockNumber)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("header cid retrieval error")
|
||||
return nil, true, err
|
||||
}
|
||||
@ -91,9 +100,6 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
|
||||
// Retrieve uncle cids for this header id
|
||||
uncleCIDs, err := ecr.RetrieveUncleCIDsByHeaderID(tx, header.ID)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("uncle cid retrieval error")
|
||||
return nil, true, err
|
||||
}
|
||||
@ -104,9 +110,6 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
|
||||
if !streamFilter.TxFilter.Off {
|
||||
cw.Transactions, err = ecr.RetrieveTxCIDs(tx, streamFilter.TxFilter, header.ID)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("transaction cid retrieval error")
|
||||
return nil, true, err
|
||||
}
|
||||
@ -122,9 +125,6 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
|
||||
if !streamFilter.ReceiptFilter.Off {
|
||||
cw.Receipts, err = ecr.RetrieveRctCIDsByHeaderID(tx, streamFilter.ReceiptFilter, header.ID, trxIds)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("receipt cid retrieval error")
|
||||
return nil, true, err
|
||||
}
|
||||
@ -136,9 +136,6 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
|
||||
if !streamFilter.StateFilter.Off {
|
||||
cw.StateNodes, err = ecr.RetrieveStateCIDs(tx, streamFilter.StateFilter, header.ID)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("state cid retrieval error")
|
||||
return nil, true, err
|
||||
}
|
||||
@ -150,9 +147,6 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
|
||||
if !streamFilter.StorageFilter.Off {
|
||||
cw.StorageNodes, err = ecr.RetrieveStorageCIDs(tx, streamFilter.StorageFilter, header.ID)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("storage cid retrieval error")
|
||||
return nil, true, err
|
||||
}
|
||||
@ -163,7 +157,7 @@ func (ecr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumbe
|
||||
cws[i] = cw
|
||||
}
|
||||
|
||||
return cws, empty, tx.Commit()
|
||||
return cws, empty, err
|
||||
}
|
||||
|
||||
// RetrieveHeaderCIDs retrieves and returns all of the header cids at the provided blockheight
|
||||
@ -458,7 +452,7 @@ func (ecr *CIDRetriever) RetrieveGapsInData(validationLevel int) ([]shared.Gap,
|
||||
Start uint64 `db:"start"`
|
||||
Stop uint64 `db:"stop"`
|
||||
}, 0)
|
||||
if err := ecr.db.Select(&results, pgStr); err != nil {
|
||||
if err := ecr.db.Select(&results, pgStr); err != nil && err != sql.ErrNoRows {
|
||||
return nil, err
|
||||
}
|
||||
emptyGaps := make([]shared.Gap, len(results))
|
||||
@ -475,43 +469,44 @@ func (ecr *CIDRetriever) RetrieveGapsInData(validationLevel int) ([]shared.Gap,
|
||||
WHERE times_validated < $1
|
||||
ORDER BY block_number`
|
||||
var heights []uint64
|
||||
if err := ecr.db.Select(&heights, pgStr, validationLevel); err != nil {
|
||||
if err := ecr.db.Select(&heights, pgStr, validationLevel); err != nil && err != sql.ErrNoRows {
|
||||
return nil, err
|
||||
}
|
||||
if len(heights) == 0 {
|
||||
return emptyGaps, nil
|
||||
}
|
||||
return append(emptyGaps, utils.MissingHeightsToGaps(heights)...), nil
|
||||
}
|
||||
|
||||
// RetrieveBlockByHash returns all of the CIDs needed to compose an entire block, for a given block hash
|
||||
func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (HeaderModel, []UncleModel, []TxModel, []ReceiptModel, error) {
|
||||
log.Debug("retrieving block cids for block hash ", blockHash.String())
|
||||
|
||||
// Begin new db tx
|
||||
tx, err := ecr.db.Beginx()
|
||||
if err != nil {
|
||||
return HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
headerCID, err := ecr.RetrieveHeaderCIDByHash(tx, blockHash)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("header cid retrieval error")
|
||||
return HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
uncleCIDs, err := ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID.ID)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("uncle cid retrieval error")
|
||||
return HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
txCIDs, err := ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.ID)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("tx cid retrieval error")
|
||||
return HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
@ -521,27 +516,33 @@ func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (HeaderModel
|
||||
}
|
||||
rctCIDs, err := ecr.RetrieveReceiptCIDsByTxIDs(tx, txIDs)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("rct cid retrieval error")
|
||||
return HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
return headerCID, uncleCIDs, txCIDs, rctCIDs, tx.Commit()
|
||||
return headerCID, uncleCIDs, txCIDs, rctCIDs, err
|
||||
}
|
||||
|
||||
// RetrieveBlockByNumber returns all of the CIDs needed to compose an entire block, for a given block number
|
||||
func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (HeaderModel, []UncleModel, []TxModel, []ReceiptModel, error) {
|
||||
log.Debug("retrieving block cids for block number ", blockNumber)
|
||||
|
||||
// Begin new db tx
|
||||
tx, err := ecr.db.Beginx()
|
||||
if err != nil {
|
||||
return HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
headerCID, err := ecr.RetrieveHeaderCIDs(tx, blockNumber)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("header cid retrieval error")
|
||||
return HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
@ -550,17 +551,11 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (HeaderModel,
|
||||
}
|
||||
uncleCIDs, err := ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID[0].ID)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("uncle cid retrieval error")
|
||||
return HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
txCIDs, err := ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].ID)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("tx cid retrieval error")
|
||||
return HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
@ -570,13 +565,9 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (HeaderModel,
|
||||
}
|
||||
rctCIDs, err := ecr.RetrieveReceiptCIDsByTxIDs(tx, txIDs)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("rct cid retrieval error")
|
||||
return HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
return headerCID[0], uncleCIDs, txCIDs, rctCIDs, tx.Commit()
|
||||
return headerCID[0], uncleCIDs, txCIDs, rctCIDs, err
|
||||
}
|
||||
|
||||
// RetrieveHeaderCIDByHash returns the header for the given block hash
|
@ -50,9 +50,7 @@ func (c *Cleaner) ResetValidation(rngs [][2]uint64) error {
SET times_validated = 0
WHERE block_number BETWEEN $1 AND $2`
if _, err := tx.Exec(pgStr, rng[0], rng[1]); err != nil {
if err := tx.Rollback(); err != nil {
logrus.Error(err)
}
shared.Rollback(tx)
return err
}
}
@ -68,9 +66,7 @@ func (c *Cleaner) Clean(rngs [][2]uint64, t shared.DataType) error {
for _, rng := range rngs {
logrus.Infof("eth db cleaner cleaning up block range %d to %d", rng[0], rng[1])
if err := c.clean(tx, rng, t); err != nil {
if err := tx.Rollback(); err != nil {
logrus.Error(err)
}
shared.Rollback(tx)
return err
}
}
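Across these hunks the inline rollback-and-log blocks are collapsed into a single shared.Rollback call. The helper's implementation is not part of this diff; the sketch below is only a plausible reconstruction of what it amounts to, given the pattern it replaces, and the real source may differ.

package shared

import (
	"github.com/jmoiron/sqlx"
	"github.com/sirupsen/logrus"
)

// Rollback rolls back the transaction and logs any error the rollback itself
// returns, so callers can replace the old three-line pattern with one call.
func Rollback(tx *sqlx.Tx) {
	if err := tx.Rollback(); err != nil {
		logrus.Error(err)
	}
}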
@ -50,51 +50,52 @@ func (in *CIDIndexer) Index(cids shared.CIDsForIndexing) error {
|
||||
if !ok {
|
||||
return fmt.Errorf("eth indexer expected cids type %T got %T", &CIDPayload{}, cids)
|
||||
}
|
||||
|
||||
// Begin new db tx
|
||||
tx, err := in.db.Beginx()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headerID, err := in.indexHeaderCID(tx, cidPayload.HeaderCID, in.db.NodeID)
|
||||
if err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
headerID, err := in.indexHeaderCID(tx, cidPayload.HeaderCID)
|
||||
if err != nil {
|
||||
log.Error("eth indexer error when indexing header")
|
||||
return err
|
||||
}
|
||||
for _, uncle := range cidPayload.UncleCIDs {
|
||||
if err := in.indexUncleCID(tx, uncle, headerID); err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("eth indexer error when indexing uncle")
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := in.indexTransactionAndReceiptCIDs(tx, cidPayload, headerID); err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
log.Error("eth indexer error when indexing transactions and receipts")
|
||||
return err
|
||||
}
|
||||
if err := in.indexStateAndStorageCIDs(tx, cidPayload, headerID); err != nil {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
err = in.indexStateAndStorageCIDs(tx, cidPayload, headerID)
|
||||
if err != nil {
|
||||
log.Error("eth indexer error when indexing state and storage nodes")
|
||||
return err
|
||||
}
|
||||
return tx.Commit()
|
||||
return err
|
||||
}
|
||||
|
||||
func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel, nodeID int64) (int64, error) {
|
||||
func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel) (int64, error) {
|
||||
var headerID int64
|
||||
err := tx.QueryRowx(`INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, times_validated)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
|
||||
ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, times_validated) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, eth.header_cids.times_validated + 1)
|
||||
RETURNING id`,
|
||||
header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, nodeID, header.Reward, header.StateRoot, header.TxRoot,
|
||||
header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, in.db.NodeID, header.Reward, header.StateRoot, header.TxRoot,
|
||||
header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, 1).Scan(&headerID)
|
||||
return headerID, err
|
||||
}
|
||||
@ -126,6 +127,15 @@ func (in *CIDIndexer) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *CIDPa
|
||||
return nil
|
||||
}
|
||||
|
||||
func (in *CIDIndexer) indexTransactionCID(tx *sqlx.Tx, transaction TxModel, headerID int64) (int64, error) {
|
||||
var txID int64
|
||||
err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index) VALUES ($1, $2, $3, $4, $5, $6)
|
||||
ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index) = ($3, $4, $5, $6)
|
||||
RETURNING id`,
|
||||
headerID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index).Scan(&txID)
|
||||
return txID, err
|
||||
}
|
||||
|
||||
func (in *CIDIndexer) indexReceiptCID(tx *sqlx.Tx, cidMeta ReceiptModel, txID int64) error {
|
||||
_, err := tx.Exec(`INSERT INTO eth.receipt_cids (tx_id, cid, contract, contract_hash, topic0s, topic1s, topic2s, topic3s, log_contracts) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
|
||||
ON CONFLICT (tx_id) DO UPDATE SET (cid, contract, contract_hash, topic0s, topic1s, topic2s, topic3s, log_contracts) = ($2, $3, $4, $5, $6, $7, $8, $9)`,
|
||||
@ -165,6 +175,19 @@ func (in *CIDIndexer) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *CIDPayload,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (in *CIDIndexer) indexStateCID(tx *sqlx.Tx, stateNode StateNodeModel, headerID int64) (int64, error) {
|
||||
var stateID int64
|
||||
var stateKey string
|
||||
if stateNode.StateKey != nullHash.String() {
|
||||
stateKey = stateNode.StateKey
|
||||
}
|
||||
err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type) VALUES ($1, $2, $3, $4, $5)
|
||||
ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type) = ($2, $3, $5)
|
||||
RETURNING id`,
|
||||
headerID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType).Scan(&stateID)
|
||||
return stateID, err
|
||||
}
|
||||
|
||||
func (in *CIDIndexer) indexStateAccount(tx *sqlx.Tx, stateAccount StateAccountModel, stateID int64) error {
|
||||
_, err := tx.Exec(`INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5)
|
||||
ON CONFLICT (state_id) DO UPDATE SET (balance, nonce, code_hash, storage_root) = ($2, $3, $4, $5)`,
|
||||
|
@ -167,7 +167,6 @@ func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([]ipfs.BlockModel, error) {

// FetchRcts fetches receipts
// It uses the f.fetchBatch method
// batch fetch preserves order?
func (f *IPLDFetcher) FetchRcts(cids []ReceiptModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching receipt iplds")
rctCids := make([]cid.Cid, len(cids))
@ -198,9 +197,9 @@ func (f *IPLDFetcher) FetchRcts(cids []ReceiptModel) ([]ipfs.BlockModel, error)
// needs to maintain the data's relation to state keys
func (f *IPLDFetcher) FetchState(cids []StateNodeModel) ([]StateNode, error) {
log.Debug("fetching state iplds")
stateNodes := make([]StateNode, len(cids))
for i, stateNode := range cids {
if stateNode.CID == "" || stateNode.StateKey == "" {
stateNodes := make([]StateNode, 0, len(cids))
for _, stateNode := range cids {
if stateNode.CID == "" {
continue
}
dc, err := cid.Decode(stateNode.CID)
@ -211,7 +210,7 @@ func (f *IPLDFetcher) FetchState(cids []StateNodeModel) ([]StateNode, error) {
if err != nil {
return nil, err
}
stateNodes[i] = StateNode{
stateNodes = append(stateNodes, StateNode{
IPLD: ipfs.BlockModel{
Data: state.RawData(),
CID: state.Cid().String(),
@ -219,7 +218,7 @@ func (f *IPLDFetcher) FetchState(cids []StateNodeModel) ([]StateNode, error) {
StateLeafKey: common.HexToHash(stateNode.StateKey),
Type: ResolveToNodeType(stateNode.NodeType),
Path: stateNode.Path,
}
})
}
return stateNodes, nil
}
@ -229,9 +228,9 @@ func (f *IPLDFetcher) FetchState(cids []StateNodeModel) ([]StateNode, error) {
// needs to maintain the data's relation to state and storage keys
func (f *IPLDFetcher) FetchStorage(cids []StorageNodeWithStateKeyModel) ([]StorageNode, error) {
log.Debug("fetching storage iplds")
storageNodes := make([]StorageNode, len(cids))
for i, storageNode := range cids {
if storageNode.CID == "" || storageNode.StorageKey == "" || storageNode.StateKey == "" {
storageNodes := make([]StorageNode, 0, len(cids))
for _, storageNode := range cids {
if storageNode.CID == "" || storageNode.StateKey == "" {
continue
}
dc, err := cid.Decode(storageNode.CID)
@ -242,7 +241,7 @@ func (f *IPLDFetcher) FetchStorage(cids []StorageNodeWithStateKeyModel) ([]Stora
if err != nil {
return nil, err
}
storageNodes[i] = StorageNode{
storageNodes = append(storageNodes, StorageNode{
IPLD: ipfs.BlockModel{
Data: storage.RawData(),
CID: storage.Cid().String(),
@ -251,7 +250,7 @@ func (f *IPLDFetcher) FetchStorage(cids []StorageNodeWithStateKeyModel) ([]Stora
StorageLeafKey: common.HexToHash(storageNode.StorageKey),
Type: ResolveToNodeType(storageNode.NodeType),
Path: storageNode.Path,
}
})
}
return storageNodes, nil
}

@ -89,8 +89,8 @@ var (
}
)

var _ = Describe("Fetcher", func() {
Describe("FetchCIDs", func() {
var _ = Describe("IPLDFetcher", func() {
Describe("Fetch", func() {
BeforeEach(func() {
mockBlockService = new(mocks.MockIPFSBlockService)
err := mockBlockService.AddBlocks(mockBlocks)
215
pkg/super_node/eth/ipld_pg_fetcher.go
Normal file
@ -0,0 +1,215 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
"errors"
"fmt"
"math/big"

"github.com/ethereum/go-ethereum/common"
"github.com/jmoiron/sqlx"
log "github.com/sirupsen/logrus"

"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

// IPLDPGFetcher satisfies the IPLDFetcher interface for ethereum
// It interfaces directly with PG-IPFS
type IPLDPGFetcher struct {
db *postgres.DB
}

// NewIPLDPGFetcher creates a pointer to a new IPLDPGFetcher
func NewIPLDPGFetcher(db *postgres.DB) *IPLDPGFetcher {
return &IPLDPGFetcher{
db: db,
}
}

// Fetch is the exported method for fetching and returning all the IPLDS specified in the CIDWrapper
func (f *IPLDPGFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error) {
cidWrapper, ok := cids.(*CIDWrapper)
if !ok {
return nil, fmt.Errorf("eth fetcher: expected cids type %T got %T", &CIDWrapper{}, cids)
}
log.Debug("fetching iplds")
iplds := IPLDs{}
iplds.TotalDifficulty, ok = new(big.Int).SetString(cidWrapper.Header.TotalDifficulty, 10)
if !ok {
return nil, errors.New("eth fetcher: unable to set total difficulty")
}
iplds.BlockNumber = cidWrapper.BlockNumber

tx, err := f.db.Beginx()
if err != nil {
return nil, err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()

iplds.Header, err = f.FetchHeader(tx, cidWrapper.Header)
if err != nil {
return nil, fmt.Errorf("eth pg fetcher: header fetching error: %s", err.Error())
}
iplds.Uncles, err = f.FetchUncles(tx, cidWrapper.Uncles)
if err != nil {
return nil, fmt.Errorf("eth pg fetcher: uncle fetching error: %s", err.Error())
}
iplds.Transactions, err = f.FetchTrxs(tx, cidWrapper.Transactions)
if err != nil {
return nil, fmt.Errorf("eth pg fetcher: transaction fetching error: %s", err.Error())
}
iplds.Receipts, err = f.FetchRcts(tx, cidWrapper.Receipts)
if err != nil {
return nil, fmt.Errorf("eth pg fetcher: receipt fetching error: %s", err.Error())
}
iplds.StateNodes, err = f.FetchState(tx, cidWrapper.StateNodes)
if err != nil {
return nil, fmt.Errorf("eth pg fetcher: state fetching error: %s", err.Error())
}
iplds.StorageNodes, err = f.FetchStorage(tx, cidWrapper.StorageNodes)
if err != nil {
return nil, fmt.Errorf("eth pg fetcher: storage fetching error: %s", err.Error())
}
return iplds, err
}

// FetchHeaders fetches headers
func (f *IPLDPGFetcher) FetchHeader(tx *sqlx.Tx, c HeaderModel) (ipfs.BlockModel, error) {
log.Debug("fetching header ipld")
headerBytes, err := shared.FetchIPLD(tx, c.CID)
if err != nil {
return ipfs.BlockModel{}, err
}
return ipfs.BlockModel{
Data: headerBytes,
CID: c.CID,
}, nil
}

// FetchUncles fetches uncles
func (f *IPLDPGFetcher) FetchUncles(tx *sqlx.Tx, cids []UncleModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching uncle iplds")
uncleIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids {
uncleBytes, err := shared.FetchIPLD(tx, c.CID)
if err != nil {
return nil, err
}
uncleIPLDs[i] = ipfs.BlockModel{
Data: uncleBytes,
CID: c.CID,
}
}
return uncleIPLDs, nil
}

// FetchTrxs fetches transactions
func (f *IPLDPGFetcher) FetchTrxs(tx *sqlx.Tx, cids []TxModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching transaction iplds")
trxIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids {
txBytes, err := shared.FetchIPLD(tx, c.CID)
if err != nil {
return nil, err
}
trxIPLDs[i] = ipfs.BlockModel{
Data: txBytes,
CID: c.CID,
}
}
return trxIPLDs, nil
}

// FetchRcts fetches receipts
func (f *IPLDPGFetcher) FetchRcts(tx *sqlx.Tx, cids []ReceiptModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching receipt iplds")
rctIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids {
rctBytes, err := shared.FetchIPLD(tx, c.CID)
if err != nil {
return nil, err
}
rctIPLDs[i] = ipfs.BlockModel{
Data: rctBytes,
CID: c.CID,
}
}
return rctIPLDs, nil
}

// FetchState fetches state nodes
func (f *IPLDPGFetcher) FetchState(tx *sqlx.Tx, cids []StateNodeModel) ([]StateNode, error) {
log.Debug("fetching state iplds")
stateNodes := make([]StateNode, 0, len(cids))
for _, stateNode := range cids {
if stateNode.CID == "" {
continue
}
stateBytes, err := shared.FetchIPLD(tx, stateNode.CID)
if err != nil {
return nil, err
}
stateNodes = append(stateNodes, StateNode{
IPLD: ipfs.BlockModel{
Data: stateBytes,
CID: stateNode.CID,
},
StateLeafKey: common.HexToHash(stateNode.StateKey),
Type: ResolveToNodeType(stateNode.NodeType),
Path: stateNode.Path,
})
}
return stateNodes, nil
}

// FetchStorage fetches storage nodes
func (f *IPLDPGFetcher) FetchStorage(tx *sqlx.Tx, cids []StorageNodeWithStateKeyModel) ([]StorageNode, error) {
log.Debug("fetching storage iplds")
storageNodes := make([]StorageNode, 0, len(cids))
for _, storageNode := range cids {
if storageNode.CID == "" || storageNode.StateKey == "" {
continue
}
storageBytes, err := shared.FetchIPLD(tx, storageNode.CID)
if err != nil {
return nil, err
}
storageNodes = append(storageNodes, StorageNode{
IPLD: ipfs.BlockModel{
Data: storageBytes,
CID: storageNode.CID,
},
StateLeafKey: common.HexToHash(storageNode.StateKey),
StorageLeafKey: common.HexToHash(storageNode.StorageKey),
Type: ResolveToNodeType(storageNode.NodeType),
Path: storageNode.Path,
})
}
return storageNodes, nil
}
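Fetch above (and Publish in the publisher later in this commit) share a transaction-scoping idiom: a named error plus a deferred closure that rolls back on panic or error and otherwise commits, so the commit error is surfaced through the same return value. A standalone sketch of that idiom, with a generic doWork callback standing in for the fetch/publish steps:

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

// withTx reproduces the defer pattern used in IPLDPGFetcher.Fetch: roll back on
// panic or error, otherwise assign tx.Commit()'s result to the shared err variable.
func withTx(db *sqlx.DB, doWork func(tx *sqlx.Tx) error) (err error) {
	tx, err := db.Beginx()
	if err != nil {
		return err
	}
	defer func() {
		if p := recover(); p != nil {
			tx.Rollback() // best effort; the panic is re-raised below
			panic(p)
		} else if err != nil {
			tx.Rollback()
		} else {
			err = tx.Commit() // commit failures reach the caller via the named return
		}
	}()
	err = doWork(tx)
	return err
}

func main() {
	db, err := sqlx.Connect("postgres", "postgres://localhost:5432/example?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	err = withTx(db, func(tx *sqlx.Tx) error {
		_, execErr := tx.Exec(`SELECT 1`)
		return execErr
	})
	log.Println("withTx returned:", err)
}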
65
pkg/super_node/eth/ipld_pg_fetcher_test.go
Normal file
@ -0,0 +1,65 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package eth_test

import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

"github.com/vulcanize/vulcanizedb/pkg/postgres"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

var (
db *postgres.DB
pubAndIndexer *eth.IPLDPublisherAndIndexer
fetcher *eth.IPLDPGFetcher
)

var _ = Describe("IPLDPGFetcher", func() {
Describe("Fetch", func() {
BeforeEach(func() {
var err error
db, err = shared.SetupDB()
Expect(err).ToNot(HaveOccurred())
pubAndIndexer = eth.NewIPLDPublisherAndIndexer(db)
_, err = pubAndIndexer.Publish(mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
fetcher = eth.NewIPLDPGFetcher(db)
})
AfterEach(func() {
eth.TearDownDB(db)
})

It("Fetches and returns IPLDs for the CIDs provided in the CIDWrapper", func() {
i, err := fetcher.Fetch(mocks.MockCIDWrapper)
Expect(err).ToNot(HaveOccurred())
iplds, ok := i.(eth.IPLDs)
Expect(ok).To(BeTrue())
Expect(iplds.TotalDifficulty).To(Equal(mocks.MockConvertedPayload.TotalDifficulty))
Expect(iplds.BlockNumber).To(Equal(mocks.MockConvertedPayload.Block.Number()))
Expect(iplds.Header).To(Equal(mocks.MockIPLDs.Header))
Expect(len(iplds.Uncles)).To(Equal(0))
Expect(iplds.Transactions).To(Equal(mocks.MockIPLDs.Transactions))
Expect(iplds.Receipts).To(Equal(mocks.MockIPLDs.Receipts))
Expect(iplds.StateNodes).To(Equal(mocks.MockIPLDs.StateNodes))
Expect(iplds.StorageNodes).To(Equal(mocks.MockIPLDs.StorageNodes))
})
})
})
@ -216,15 +216,15 @@ var (
})

nonce1 = uint64(1)
contractRoot = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0"
contractCodeHash = common.HexToHash("0x753f98a8d4328b15636e46f66f2cb4bc860100aa17967cc145fcd17d1d4710ea")
ContractRoot = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0"
ContractCodeHash = common.HexToHash("0x753f98a8d4328b15636e46f66f2cb4bc860100aa17967cc145fcd17d1d4710ea")
contractPathHash = crypto.Keccak256Hash([]byte{'\x06'})
ContractLeafKey = testhelpers.AddressToLeafKey(ContractAddress)
ContractAccount, _ = rlp.EncodeToBytes(state.Account{
Nonce: nonce1,
Balance: big.NewInt(0),
CodeHash: contractCodeHash.Bytes(),
Root: common.HexToHash(contractRoot),
CodeHash: ContractCodeHash.Bytes(),
Root: common.HexToHash(ContractRoot),
})
ContractPartialPath = common.Hex2Bytes("3114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45")
ContractLeafNode, _ = rlp.EncodeToBytes([]interface{}{
@ -233,16 +233,16 @@ var (
})

nonce0 = uint64(0)
accountRoot = "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
accountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
AccountRoot = "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
AccountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
accountPathHash = crypto.Keccak256Hash([]byte{'\x0c'})
AccountAddresss = common.HexToAddress("0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e")
AccountLeafKey = testhelpers.Account2LeafKey
Account, _ = rlp.EncodeToBytes(state.Account{
Nonce: nonce0,
Balance: big.NewInt(1000),
CodeHash: accountCodeHash.Bytes(),
Root: common.HexToHash(accountRoot),
CodeHash: AccountCodeHash.Bytes(),
Root: common.HexToHash(AccountRoot),
})
AccountPartialPath = common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45")
AccountLeafNode, _ = rlp.EncodeToBytes([]interface{}{
@ -374,14 +374,14 @@ var (
contractPathHash: {
Balance: big.NewInt(0).String(),
Nonce: nonce1,
CodeHash: contractCodeHash.Bytes(),
StorageRoot: common.HexToHash(contractRoot).String(),
CodeHash: ContractCodeHash.Bytes(),
StorageRoot: common.HexToHash(ContractRoot).String(),
},
accountPathHash: {
Balance: big.NewInt(1000).String(),
Nonce: nonce0,
CodeHash: accountCodeHash.Bytes(),
StorageRoot: common.HexToHash(accountRoot).String(),
CodeHash: AccountCodeHash.Bytes(),
StorageRoot: common.HexToHash(AccountRoot).String(),
},
},
}
227
pkg/super_node/eth/publishAndIndexer.go
Normal file
@ -0,0 +1,227 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
"fmt"

"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff"
"github.com/jmoiron/sqlx"

common2 "github.com/vulcanize/vulcanizedb/pkg/eth/converters/common"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/ipld"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

// IPLDPublisherAndIndexer satisfies the IPLDPublisher interface for ethereum
// It interfaces directly with the public.blocks table of PG-IPFS rather than going through an ipfs intermediary
// It publishes and indexes IPLDs together in a single sqlx.Tx
type IPLDPublisherAndIndexer struct {
indexer *CIDIndexer
}

// NewIPLDPublisherAndIndexer creates a pointer to a new IPLDPublisherAndIndexer which satisfies the IPLDPublisher interface
func NewIPLDPublisherAndIndexer(db *postgres.DB) *IPLDPublisherAndIndexer {
return &IPLDPublisherAndIndexer{
indexer: NewCIDIndexer(db),
}
}

// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
func (pub *IPLDPublisherAndIndexer) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) {
ipldPayload, ok := payload.(ConvertedPayload)
if !ok {
return nil, fmt.Errorf("eth publisher expected payload type %T got %T", ConvertedPayload{}, payload)
}
// Generate the iplds
headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, err := ipld.FromBlockAndReceipts(ipldPayload.Block, ipldPayload.Receipts)
if err != nil {
return nil, err
}

// Begin new db tx
tx, err := pub.indexer.db.Beginx()
if err != nil {
return nil, err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()

// Publish trie nodes
for _, node := range txTrieNodes {
if err := shared.PublishIPLD(tx, node); err != nil {
return nil, err
}
}
for _, node := range rctTrieNodes {
if err := shared.PublishIPLD(tx, node); err != nil {
return nil, err
}
}

// Publish and index header
if err := shared.PublishIPLD(tx, headerNode); err != nil {
return nil, err
}
reward := common2.CalcEthBlockReward(ipldPayload.Block.Header(), ipldPayload.Block.Uncles(), ipldPayload.Block.Transactions(), ipldPayload.Receipts)
header := HeaderModel{
CID: headerNode.Cid().String(),
ParentHash: ipldPayload.Block.ParentHash().String(),
BlockNumber: ipldPayload.Block.Number().String(),
BlockHash: ipldPayload.Block.Hash().String(),
TotalDifficulty: ipldPayload.TotalDifficulty.String(),
Reward: reward.String(),
Bloom: ipldPayload.Block.Bloom().Bytes(),
StateRoot: ipldPayload.Block.Root().String(),
RctRoot: ipldPayload.Block.ReceiptHash().String(),
TxRoot: ipldPayload.Block.TxHash().String(),
UncleRoot: ipldPayload.Block.UncleHash().String(),
Timestamp: ipldPayload.Block.Time(),
}
headerID, err := pub.indexer.indexHeaderCID(tx, header)
if err != nil {
return nil, err
}

// Publish and index uncles
for _, uncleNode := range uncleNodes {
if err := shared.PublishIPLD(tx, uncleNode); err != nil {
return nil, err
}
uncleReward := common2.CalcUncleMinerReward(ipldPayload.Block.Number().Int64(), uncleNode.Number.Int64())
uncle := UncleModel{
CID: uncleNode.Cid().String(),
ParentHash: uncleNode.ParentHash.String(),
BlockHash: uncleNode.Hash().String(),
Reward: uncleReward.String(),
}
if err := pub.indexer.indexUncleCID(tx, uncle, headerID); err != nil {
return nil, err
}
}

// Publish and index txs and receipts
for i, txNode := range txNodes {
if err := shared.PublishIPLD(tx, txNode); err != nil {
return nil, err
}
rctNode := rctNodes[i]
if err := shared.PublishIPLD(tx, rctNode); err != nil {
return nil, err
}
txModel := ipldPayload.TxMetaData[i]
txModel.CID = txNode.Cid().String()
txID, err := pub.indexer.indexTransactionCID(tx, txModel, headerID)
if err != nil {
return nil, err
}
rctModel := ipldPayload.ReceiptMetaData[i]
rctModel.CID = rctNode.Cid().String()
if err := pub.indexer.indexReceiptCID(tx, rctModel, txID); err != nil {
return nil, err
}
}

// Publish and index state and storage
err = pub.publishAndIndexStateAndStorage(tx, ipldPayload, headerID)

// This IPLDPublisher does both publishing and indexing, we do not need to pass anything forward to the indexer
return nil, err // return err variable explicitly so that we return the err = tx.Commit() assignment in the defer
}

func (pub *IPLDPublisherAndIndexer) publishAndIndexStateAndStorage(tx *sqlx.Tx, ipldPayload ConvertedPayload, headerID int64) error {
// Publish and index state and storage
for _, stateNode := range ipldPayload.StateNodes {
stateIPLD, err := ipld.FromStateTrieRLP(stateNode.Value)
if err != nil {
return err
}
if err := shared.PublishIPLD(tx, stateIPLD); err != nil {
return err
}
stateModel := StateNodeModel{
Path: stateNode.Path,
StateKey: stateNode.LeafKey.String(),
CID: stateIPLD.Cid().String(),
NodeType: ResolveFromNodeType(stateNode.Type),
}
stateID, err := pub.indexer.indexStateCID(tx, stateModel, headerID)
if err != nil {
return err
}
// If we have a leaf, decode and index the account data and publish and index any associated storage diffs
if stateNode.Type == statediff.Leaf {
var i []interface{}
if err := rlp.DecodeBytes(stateNode.Value, &i); err != nil {
return err
}
if len(i) != 2 {
return fmt.Errorf("IPLDPublisherAndIndexer expected state leaf node rlp to decode into two elements")
}
var account state.Account
if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
return err
}
accountModel := StateAccountModel{
Balance: account.Balance.String(),
Nonce: account.Nonce,
CodeHash: account.CodeHash,
StorageRoot: account.Root.String(),
}
if err := pub.indexer.indexStateAccount(tx, accountModel, stateID); err != nil {
return err
}
statePathHash := crypto.Keccak256Hash(stateNode.Path)
for _, storageNode := range ipldPayload.StorageNodes[statePathHash] {
storageIPLD, err := ipld.FromStorageTrieRLP(storageNode.Value)
if err != nil {
return err
}
if err := shared.PublishIPLD(tx, storageIPLD); err != nil {
return err
}
storageModel := StorageNodeModel{
Path: storageNode.Path,
StorageKey: storageNode.LeafKey.Hex(),
CID: storageIPLD.Cid().String(),
NodeType: ResolveFromNodeType(storageNode.Type),
}
if err := pub.indexer.indexStorageCID(tx, storageModel, stateID); err != nil {
return err
}
}
}
}
return nil
}

// Index satisfies the shared.CIDIndexer interface
func (pub *IPLDPublisherAndIndexer) Index(cids shared.CIDsForIndexing) error {
return nil
}
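Since this type publishes and indexes inside one transaction, Publish returns a nil CID payload and Index is an intentional no-op. The processPayload helper below is an illustrative sketch (not part of the repository) of how a caller can treat it like any other publisher/indexer pair; it only relies on the Publish and Index signatures shown in this file.

package main

import (
	log "github.com/sirupsen/logrus"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

// processPayload publishes (and implicitly indexes) a converted payload, then calls
// Index with the nil return value, which is a no-op for IPLDPublisherAndIndexer.
func processPayload(pub *eth.IPLDPublisherAndIndexer, payload shared.ConvertedData) error {
	cids, err := pub.Publish(payload)
	if err != nil {
		return err
	}
	return pub.Index(cids)
}

func main() {
	log.Info("see publishAndIndexer_test.go below for an end-to-end example against Postgres")
}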
237
pkg/super_node/eth/publishAndIndexer_test.go
Normal file
@ -0,0 +1,237 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package eth_test

import (
"github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-ipfs-blockstore"
"github.com/ipfs/go-ipfs-ds-help"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/postgres"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

var _ = Describe("PublishAndIndexer", func() {
var (
db *postgres.DB
err error
repo *eth.IPLDPublisherAndIndexer
ipfsPgGet = `SELECT data FROM public.blocks
WHERE key = $1`
)
BeforeEach(func() {
db, err = shared.SetupDB()
Expect(err).ToNot(HaveOccurred())
repo = eth.NewIPLDPublisherAndIndexer(db)
})
AfterEach(func() {
eth.TearDownDB(db)
})

Describe("Publish", func() {
It("Published and indexes header IPLDs in a single tx", func() {
emptyReturn, err := repo.Publish(mocks.MockConvertedPayload)
Expect(emptyReturn).To(BeNil())
Expect(err).ToNot(HaveOccurred())
pgStr := `SELECT cid, td, reward, id
FROM eth.header_cids
WHERE block_number = $1`
// check header was properly indexed
type res struct {
CID string
TD string
Reward string
ID int
}
header := new(res)
err = db.QueryRowx(pgStr, 1).StructScan(header)
Expect(err).ToNot(HaveOccurred())
Expect(header.CID).To(Equal(mocks.HeaderCID.String()))
Expect(header.TD).To(Equal(mocks.MockBlock.Difficulty().String()))
Expect(header.Reward).To(Equal("5000000000000000000"))
dc, err := cid.Decode(header.CID)
Expect(err).ToNot(HaveOccurred())
mhKey := dshelp.CidToDsKey(dc)
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
err = db.Get(&data, ipfsPgGet, prefixedKey)
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal(mocks.MockHeaderRlp))
})

It("Publishes and indexes transaction IPLDs in a single tx", func() {
emptyReturn, err := repo.Publish(mocks.MockConvertedPayload)
Expect(emptyReturn).To(BeNil())
Expect(err).ToNot(HaveOccurred())
// check that txs were properly indexed
trxs := make([]string, 0)
pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
err = db.Select(&trxs, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(trxs)).To(Equal(3))
Expect(shared.ListContainsString(trxs, mocks.Trx1CID.String())).To(BeTrue())
Expect(shared.ListContainsString(trxs, mocks.Trx2CID.String())).To(BeTrue())
Expect(shared.ListContainsString(trxs, mocks.Trx3CID.String())).To(BeTrue())
// and published
for _, c := range trxs {
dc, err := cid.Decode(c)
Expect(err).ToNot(HaveOccurred())
mhKey := dshelp.CidToDsKey(dc)
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
err = db.Get(&data, ipfsPgGet, prefixedKey)
Expect(err).ToNot(HaveOccurred())
switch c {
case mocks.Trx1CID.String():
Expect(data).To(Equal(mocks.MockTransactions.GetRlp(0)))
case mocks.Trx2CID.String():
Expect(data).To(Equal(mocks.MockTransactions.GetRlp(1)))
case mocks.Trx3CID.String():
Expect(data).To(Equal(mocks.MockTransactions.GetRlp(2)))
}
}
})

It("Publishes and indexes receipt IPLDs in a single tx", func() {
emptyReturn, err := repo.Publish(mocks.MockConvertedPayload)
Expect(emptyReturn).To(BeNil())
Expect(err).ToNot(HaveOccurred())
// check receipts were properly indexed
rcts := make([]string, 0)
pgStr := `SELECT receipt_cids.cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.id
AND transaction_cids.header_id = header_cids.id
AND header_cids.block_number = $1`
err = db.Select(&rcts, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(rcts)).To(Equal(3))
Expect(shared.ListContainsString(rcts, mocks.Rct1CID.String())).To(BeTrue())
Expect(shared.ListContainsString(rcts, mocks.Rct2CID.String())).To(BeTrue())
Expect(shared.ListContainsString(rcts, mocks.Rct3CID.String())).To(BeTrue())
// and published
for _, c := range rcts {
dc, err := cid.Decode(c)
Expect(err).ToNot(HaveOccurred())
mhKey := dshelp.CidToDsKey(dc)
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
err = db.Get(&data, ipfsPgGet, prefixedKey)
Expect(err).ToNot(HaveOccurred())
switch c {
case mocks.Rct1CID.String():
Expect(data).To(Equal(mocks.MockReceipts.GetRlp(0)))
case mocks.Rct2CID.String():
Expect(data).To(Equal(mocks.MockReceipts.GetRlp(1)))
case mocks.Rct3CID.String():
Expect(data).To(Equal(mocks.MockReceipts.GetRlp(2)))
}
}
})

It("Publishes and indexes state IPLDs in a single tx", func() {
emptyReturn, err := repo.Publish(mocks.MockConvertedPayload)
Expect(emptyReturn).To(BeNil())
Expect(err).ToNot(HaveOccurred())
// check that state nodes were properly indexed and published
stateNodes := make([]eth.StateNodeModel, 0)
pgStr := `SELECT state_cids.id, state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
err = db.Select(&stateNodes, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(stateNodes)).To(Equal(2))
for _, stateNode := range stateNodes {
var data []byte
dc, err := cid.Decode(stateNode.CID)
Expect(err).ToNot(HaveOccurred())
mhKey := dshelp.CidToDsKey(dc)
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
err = db.Get(&data, ipfsPgGet, prefixedKey)
Expect(err).ToNot(HaveOccurred())
pgStr = `SELECT * from eth.state_accounts WHERE state_id = $1`
var account eth.StateAccountModel
err = db.Get(&account, pgStr, stateNode.ID)
Expect(err).ToNot(HaveOccurred())
if stateNode.CID == mocks.State1CID.String() {
Expect(stateNode.NodeType).To(Equal(2))
Expect(stateNode.StateKey).To(Equal(common.BytesToHash(mocks.ContractLeafKey).Hex()))
Expect(stateNode.Path).To(Equal([]byte{'\x06'}))
Expect(data).To(Equal(mocks.ContractLeafNode))
Expect(account).To(Equal(eth.StateAccountModel{
ID: account.ID,
StateID: stateNode.ID,
Balance: "0",
CodeHash: mocks.ContractCodeHash.Bytes(),
StorageRoot: mocks.ContractRoot,
Nonce: 1,
}))
}
if stateNode.CID == mocks.State2CID.String() {
Expect(stateNode.NodeType).To(Equal(2))
Expect(stateNode.StateKey).To(Equal(common.BytesToHash(mocks.AccountLeafKey).Hex()))
Expect(stateNode.Path).To(Equal([]byte{'\x0c'}))
Expect(data).To(Equal(mocks.AccountLeafNode))
Expect(account).To(Equal(eth.StateAccountModel{
ID: account.ID,
StateID: stateNode.ID,
Balance: "1000",
CodeHash: mocks.AccountCodeHash.Bytes(),
StorageRoot: mocks.AccountRoot,
Nonce: 0,
}))
}
}
pgStr = `SELECT * from eth.state_accounts WHERE state_id = $1`
})

It("Publishes and indexes storage IPLDs in a single tx", func() {
emptyReturn, err := repo.Publish(mocks.MockConvertedPayload)
Expect(emptyReturn).To(BeNil())
Expect(err).ToNot(HaveOccurred())
// check that storage nodes were properly indexed
storageNodes := make([]eth.StorageNodeWithStateKeyModel, 0)
pgStr := `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE storage_cids.state_id = state_cids.id
AND state_cids.header_id = header_cids.id
AND header_cids.block_number = $1`
err = db.Select(&storageNodes, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(storageNodes)).To(Equal(1))
Expect(storageNodes[0]).To(Equal(eth.StorageNodeWithStateKeyModel{
CID: mocks.StorageCID.String(),
NodeType: 2,
StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(),
StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
Path: []byte{},
}))
var data []byte
dc, err := cid.Decode(storageNodes[0].CID)
Expect(err).ToNot(HaveOccurred())
mhKey := dshelp.CidToDsKey(dc)
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
err = db.Get(&data, ipfsPgGet, prefixedKey)
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal(mocks.StorageLeafNode))
})
})
})
@ -45,7 +45,7 @@ type IPLDPublisher struct {
StoragePutter ipfs.DagPutter
}

// NewIPLDPublisher creates a pointer to a new Publisher which satisfies the IPLDPublisher interface
// NewIPLDPublisher creates a pointer to a new IPLDPublisher which satisfies the IPLDPublisher interface
func NewIPLDPublisher(ipfsPath string) (*IPLDPublisher, error) {
node, err := ipfs.InitIPFSNode(ipfsPath)
if err != nil {
@ -52,6 +52,7 @@ type Config struct {
DB *postgres.DB
DBConfig config.Database
IPFSPath string
IPFSMode shared.IPFSMode

HTTPClient interface{} // Note this client is expected to support the retrieval of the specified data type(s)
NodeInfo core.Node // Info for the associated node
@ -81,8 +82,8 @@ func NewReSyncConfig() (*Config, error) {
viper.BindEnv("resync.timeout", shared.HTTP_TIMEOUT)

timeout := viper.GetInt("resync.timeout")
if timeout < 15 {
timeout = 15
if timeout < 5 {
timeout = 5
}
c.Timeout = time.Second * time.Duration(timeout)

@ -92,12 +93,18 @@ func NewReSyncConfig() (*Config, error) {
c.ClearOldCache = viper.GetBool("resync.clearOldCache")
c.ResetValidation = viper.GetBool("resync.resetValidation")

c.IPFSMode, err = shared.GetIPFSMode()
if err != nil {
return nil, err
}
if c.IPFSMode == shared.LocalInterface || c.IPFSMode == shared.RemoteClient {
c.IPFSPath, err = shared.GetIPFSPath()
if err != nil {
return nil, err
}
}
resyncType := viper.GetString("resync.type")
c.ResyncType, err = shared.GenerateResyncTypeFromString(resyncType)
c.ResyncType, err = shared.GenerateDataTypeFromString(resyncType)
if err != nil {
return nil, err
}
@ -106,7 +113,7 @@ func NewReSyncConfig() (*Config, error) {
if err != nil {
return nil, err
}
if ok, err := shared.SupportedResyncType(c.ResyncType, c.Chain); !ok {
if ok, err := shared.SupportedDataType(c.ResyncType, c.Chain); !ok {
if err != nil {
return nil, err
}
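The timeout handling above binds HTTP_TIMEOUT through viper and then clamps to the new five second floor. A standalone sketch of just that pattern (key and env names copied from the hunk, everything else assumed):

package main

import (
	"fmt"
	"time"

	"github.com/spf13/viper"
)

// resolveTimeout mirrors the resync.timeout handling: env-bound, read as an int,
// and clamped to a 5 second minimum before conversion to a time.Duration.
func resolveTimeout() time.Duration {
	viper.BindEnv("resync.timeout", "HTTP_TIMEOUT")
	timeout := viper.GetInt("resync.timeout")
	if timeout < 5 {
		timeout = 5
	}
	return time.Second * time.Duration(timeout)
}

func main() {
	fmt.Println(resolveTimeout()) // 5s when neither config nor HTTP_TIMEOUT is set
}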
@ -22,7 +22,7 @@ import (

"github.com/sirupsen/logrus"

"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
utils "github.com/vulcanize/vulcanizedb/libraries/shared/utilities"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
@ -64,11 +64,11 @@ type Service struct {

// NewResyncService creates and returns a resync service from the provided settings
func NewResyncService(settings *Config) (Resync, error) {
publisher, err := super_node.NewIPLDPublisher(settings.Chain, settings.IPFSPath)
publisher, err := super_node.NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.DB, settings.IPFSMode)
if err != nil {
return nil, err
}
indexer, err := super_node.NewCIDIndexer(settings.Chain, settings.DB)
indexer, err := super_node.NewCIDIndexer(settings.Chain, settings.DB, settings.IPFSMode)
if err != nil {
return nil, err
}
@ -34,7 +34,7 @@ import (
)

const (
PayloadChanBufferSize = 20000
PayloadChanBufferSize = 2000
)

// SuperNode is the top level interface for streaming, converting to IPLDs, publishing,
@ -109,11 +109,11 @@ func NewSuperNode(settings *Config) (SuperNode, error) {
if err != nil {
return nil, err
}
sn.Publisher, err = NewIPLDPublisher(settings.Chain, settings.IPFSPath)
sn.Publisher, err = NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.DB, settings.IPFSMode)
if err != nil {
return nil, err
}
sn.Indexer, err = NewCIDIndexer(settings.Chain, settings.DB)
sn.Indexer, err = NewCIDIndexer(settings.Chain, settings.DB, settings.IPFSMode)
if err != nil {
return nil, err
}
@ -128,7 +128,7 @@ func NewSuperNode(settings *Config) (SuperNode, error) {
if err != nil {
return nil, err
}
sn.IPLDFetcher, err = NewIPLDFetcher(settings.Chain, settings.IPFSPath)
sn.IPLDFetcher, err = NewIPLDFetcher(settings.Chain, settings.IPFSPath, settings.DB, settings.IPFSMode)
if err != nil {
return nil, err
}
@ -220,7 +220,13 @@ func (sap *Service) Sync(wg *sync.WaitGroup, screenAndServePayload chan<- shared
default:
}
// Forward the payload to the publishAndIndex workers
// this channel acts as a ring buffer
select {
case publishAndIndexPayload <- ipldPayload:
default:
<-publishAndIndexPayload
publishAndIndexPayload <- ipldPayload
}
case err := <-sub.Err():
log.Errorf("super node subscription error for chain %s: %v", sap.chain.String(), err)
case <-sap.QuitChan:
@ -244,12 +250,12 @@ func (sap *Service) publishAndIndex(id int, publishAndIndexPayload <-chan shared
log.Debugf("publishing %s data streamed at head height %d", sap.chain.String(), payload.Height())
cidPayload, err := sap.Publisher.Publish(payload)
if err != nil {
log.Errorf("super node publishAndIndex worker %d error for chain %s: %v", id, sap.chain.String(), err)
log.Errorf("super node publishAndIndex worker %d publishing error for chain %s: %v", id, sap.chain.String(), err)
continue
}
log.Debugf("indexing %s data streamed at head height %d", sap.chain.String(), payload.Height())
if err := sap.Indexer.Index(cidPayload); err != nil {
log.Errorf("super node publishAndIndex worker %d error for chain %s: %v", id, sap.chain.String(), err)
log.Errorf("super node publishAndIndex worker %d indexing error for chain %s: %v", id, sap.chain.String(), err)
}
}
}
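The Sync hunk above forwards payloads with a non-blocking select that the comments call a ring buffer: when the worker channel is full, the oldest queued payload is dropped so the subscription never stalls. A minimal standalone sketch of that pattern (single producer assumed; with concurrent senders the evict-then-send step could still block):

package main

import "fmt"

// ringSend mirrors the Sync select: try a non-blocking send, and if the buffer is
// full evict the oldest element before enqueueing the new one.
func ringSend(ch chan int, v int) {
	select {
	case ch <- v:
	default:
		<-ch // drop the oldest queued value
		ch <- v
	}
}

func main() {
	ch := make(chan int, 2)
	for i := 1; i <= 4; i++ {
		ringSend(ch, i)
	}
	close(ch)
	for v := range ch {
		fmt.Println(v) // prints 3 then 4; 1 and 2 were evicted
	}
}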
@ -57,8 +57,8 @@ func (r DataType) String() string {
}
}

// GenerateResyncTypeFromString
func GenerateResyncTypeFromString(str string) (DataType, error) {
// GenerateDataTypeFromString
func GenerateDataTypeFromString(str string) (DataType, error) {
switch strings.ToLower(str) {
case "full", "f":
return Full, nil
@ -79,7 +79,7 @@ func GenerateResyncTypeFromString(str string) (DataType, error) {
}
}

func SupportedResyncType(d DataType, c ChainType) (bool, error) {
func SupportedDataType(d DataType, c ChainType) (bool, error) {
switch c {
case Ethereum:
switch d {
@ -30,6 +30,7 @@ import (
// Env variables
const (
IPFS_PATH = "IPFS_PATH"
IPFS_MODE = "IPFS_MODE"
HTTP_TIMEOUT = "HTTP_TIMEOUT"

ETH_WS_PATH = "ETH_WS_PATH"
@ -82,6 +83,16 @@ func GetIPFSPath() (string, error) {
return ipfsPath, nil
}

// GetIPFSMode returns the ipfs mode of operation from the config or env variable
func GetIPFSMode() (IPFSMode, error) {
viper.BindEnv("ipfs.mode", IPFS_MODE)
ipfsMode := viper.GetString("ipfs.mode")
if ipfsMode == "" {
return DirectPostgres, nil
}
return NewIPFSMode(ipfsMode)
}

// GetBtcNodeAndClient returns btc node info from path url
func GetBtcNodeAndClient(path string) (core.Node, *rpcclient.ConnConfig) {
viper.BindEnv("bitcoin.nodeID", BTC_NODE_ID)
@ -19,7 +19,14 @@ package shared
import (
"bytes"

"github.com/ipfs/go-cid"

"github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-ipfs-blockstore"
"github.com/ipfs/go-ipfs-ds-help"
node "github.com/ipfs/go-ipld-format"
"github.com/jmoiron/sqlx"
"github.com/sirupsen/logrus"

"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)
@ -69,3 +76,40 @@ func HandleNullAddr(to common.Address) string {
}
return to.Hex()
}

// Rollback sql transaction and log any error
func Rollback(tx *sqlx.Tx) {
if err := tx.Rollback(); err != nil {
logrus.Error(err)
}
}

// PublishIPLD is used to insert an ipld into Postgres blockstore with the provided tx
func PublishIPLD(tx *sqlx.Tx, i node.Node) error {
dbKey := dshelp.CidToDsKey(i.Cid())
prefixedKey := blockstore.BlockPrefix.String() + dbKey.String()
raw := i.RawData()
_, err := tx.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`, prefixedKey, raw)
return err
}

// FetchIPLD is used to retrieve an ipld from Postgres blockstore with the provided tx
func FetchIPLD(tx *sqlx.Tx, cid string) ([]byte, error) {
mhKey, err := MultihashKeyFromCIDString(cid)
if err != nil {
return nil, err
}
pgStr := `SELECT data FROM public.blocks WHERE key = $1`
var block []byte
return block, tx.Get(&block, pgStr, mhKey)
}

// MultihashKeyFromCIDString converts a cid string into a blockstore-prefixed multihash db key string
func MultihashKeyFromCIDString(c string) (string, error) {
dc, err := cid.Decode(c)
if err != nil {
return "", err
}
dbKey := dshelp.CidToDsKey(dc)
return blockstore.BlockPrefix.String() + dbKey.String(), nil
}
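With these helpers an IPLD round-trips through public.blocks under a blockstore-prefixed multihash key. The sketch below exercises that round trip inside one transaction; PublishIPLD and FetchIPLD are the real helpers from this diff, while the raw node, connection string and database name are assumptions for illustration.

package main

import (
	"fmt"
	"log"

	format "github.com/ipfs/go-ipld-format"
	"github.com/ipfs/go-merkledag"
	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

// roundTrip publishes a node and reads the same bytes back by CID, using the
// key scheme from MultihashKeyFromCIDString on both the write and read side.
func roundTrip(tx *sqlx.Tx, n format.Node) ([]byte, error) {
	if err := shared.PublishIPLD(tx, n); err != nil {
		return nil, err
	}
	return shared.FetchIPLD(tx, n.Cid().String())
}

func main() {
	db, err := sqlx.Connect("postgres", "postgres://localhost:5432/vulcanize_public?sslmode=disable") // assumed DSN
	if err != nil {
		log.Fatal(err)
	}
	tx := db.MustBegin()
	data, err := roundTrip(tx, merkledag.NewRawNode([]byte("hello ipld")))
	if err != nil {
		log.Fatal(err)
	}
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fetched %d bytes back from public.blocks\n", len(data))
}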
58
pkg/super_node/shared/ipfs_mode.go
Normal file
@ -0,0 +1,58 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package shared

import (
"errors"
"strings"
)

// IPFSMode enum for specifying how we want to interface and publish objects to IPFS
type IPFSMode int

const (
Unknown IPFSMode = iota
LocalInterface
RemoteClient
DirectPostgres
)

func (c IPFSMode) String() string {
switch c {
case LocalInterface:
return "Local"
case RemoteClient:
return "Remote"
case DirectPostgres:
return "Postgres"
default:
return ""
}
}

func NewIPFSMode(name string) (IPFSMode, error) {
switch strings.ToLower(name) {
case "local", "interface", "minimal":
return LocalInterface, nil
case "remote", "client":
return RemoteClient, errors.New("remote IPFS client mode is not currently supported")
case "postgres", "direct":
return DirectPostgres, nil
default:
return Unknown, errors.New("unrecognized name for ipfs mode")
}
}
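A hedged usage sketch for the new mode type: GetIPFSMode (added to shared in this diff) falls back to DirectPostgres when neither ipfs.mode nor IPFS_MODE is set, and NewIPFSMode parses the explicit names. The viper setup below is illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/spf13/viper"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)

func main() {
	// With no config value and no IPFS_MODE env var, the default is DirectPostgres.
	mode, err := shared.GetIPFSMode()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(mode.String()) // "Postgres"

	// An explicit value, as it would arrive from the TOML config or IPFS_MODE.
	viper.Set("ipfs.mode", "local")
	mode, err = shared.GetIPFSMode()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(mode.String()) // "Local"
}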
@ -19,22 +19,22 @@ package version
import "fmt"

const (
VersionMajor = 0 // Major version component of the current release
VersionMinor = 1 // Minor version component of the current release
VersionPatch = 1 // Patch version component of the current release
VersionMeta = "alpha" // Version metadata to append to the version string
Major = 0 // Major version component of the current release
Minor = 1 // Minor version component of the current release
Patch = 2 // Patch version component of the current release
Meta = "alpha" // Version metadata to append to the version string
)

// Version holds the textual version string.
var Version = func() string {
return fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch)
return fmt.Sprintf("%d.%d.%d", Major, Minor, Patch)
}()

// VersionWithMeta holds the textual version string including the metadata.
var VersionWithMeta = func() string {
v := Version
if VersionMeta != "" {
v += "-" + VersionMeta
if Meta != "" {
v += "-" + Meta
}
return v
}()