Merge pull request #178 from vulcanize/geth_api

Geth api
Ian Norden 2020-02-20 16:08:28 -06:00 committed by GitHub
commit 1412f5a7f5
90 changed files with 5398 additions and 3374 deletions


@ -40,7 +40,6 @@ var (
cfgFile string
databaseConfig config.Database
genConfig config.Plugin
subscriptionConfig config.Subscription
ipc string
levelDbPath string
queueRecheckInterval time.Duration


@ -1,126 +0,0 @@
// Copyright © 2019 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"os"
"path/filepath"
syn "sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rpc"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
"github.com/vulcanize/vulcanizedb/utils"
)
// screenAndServeCmd represents the screenAndServe command
var screenAndServeCmd = &cobra.Command{
Use: "screenAndServe",
Short: "Serve super-node data requests to requesting clients",
Long: `Opens WS and IPC servers on top of the super-node ETH-IPLD index, which
relay relevant data to requesting clients. In this mode, the super node can only relay data it has
already indexed; it does not stream out live data.`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)
screenAndServe()
},
}
func init() {
rootCmd.AddCommand(screenAndServeCmd)
}
func screenAndServe() {
superNode, newNodeErr := newSuperNodeWithoutPairedGethNode()
if newNodeErr != nil {
logWithCommand.Fatal(newNodeErr)
}
wg := &syn.WaitGroup{}
quitChan := make(chan bool, 1)
emptyPayloadChan := make(chan ipfs.IPLDPayload)
superNode.ScreenAndServe(wg, emptyPayloadChan, quitChan)
serverErr := startServers(superNode)
if serverErr != nil {
logWithCommand.Fatal(serverErr)
}
wg.Wait()
}
func startServers(superNode super_node.NodeInterface) error {
var ipcPath string
ipcPath = viper.GetString("server.ipcPath")
if ipcPath == "" {
home, homeDirErr := os.UserHomeDir()
if homeDirErr != nil {
return homeDirErr
}
ipcPath = filepath.Join(home, ".vulcanize/vulcanize.ipc")
}
_, _, ipcErr := rpc.StartIPCEndpoint(ipcPath, superNode.APIs())
if ipcErr != nil {
return ipcErr
}
var wsEndpoint string
wsEndpoint = viper.GetString("server.wsEndpoint")
if wsEndpoint == "" {
wsEndpoint = "127.0.0.1:8080"
}
var exposeAll = true
var wsOrigins []string
_, _, wsErr := rpc.StartWSEndpoint(wsEndpoint, superNode.APIs(), []string{"vdb"}, wsOrigins, exposeAll)
if wsErr != nil {
return wsErr
}
return nil
}
func newSuperNodeWithoutPairedGethNode() (super_node.NodeInterface, error) {
ipfsPath = viper.GetString("client.ipfsPath")
if ipfsPath == "" {
home, homeDirErr := os.UserHomeDir()
if homeDirErr != nil {
return nil, homeDirErr
}
ipfsPath = filepath.Join(home, ".ipfs")
}
ipfsInitErr := ipfs.InitIPFSPlugins()
if ipfsInitErr != nil {
return nil, ipfsInitErr
}
ipldFetcher, newFetcherErr := ipfs.NewIPLDFetcher(ipfsPath)
if newFetcherErr != nil {
return nil, newFetcherErr
}
db := utils.LoadPostgres(databaseConfig, core.Node{})
return &super_node.Service{
IPLDFetcher: ipldFetcher,
Retriever: super_node.NewCIDRetriever(&db),
Resolver: ipfs.NewIPLDResolver(),
Subscriptions: make(map[common.Hash]map[rpc.ID]super_node.Subscription),
SubscriptionTypes: make(map[common.Hash]config.Subscription),
GethNode: core.Node{},
}, nil
}


@ -18,7 +18,6 @@ package cmd
import (
"bytes"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
@ -30,41 +29,46 @@ import (
"github.com/spf13/viper"
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
"github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/client"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)
// streamSubscribeCmd represents the streamSubscribe command
var streamSubscribeCmd = &cobra.Command{
Use: "streamSubscribe",
Short: "This command is used to subscribe to the super node stream with the provided filters",
// streamEthSubscriptionCmd represents the streamEthSubscription command
var streamEthSubscriptionCmd = &cobra.Command{
Use: "streamEthSubscription",
Short: "This command is used to subscribe to the super node eth stream with the provided filters",
Long: `This command is for demo and testing purposes and is used to subscribe to the super node with the provided subscription configuration parameters.
It does not do anything with the data streamed from the super node other than unpack it and print it out for demonstration purposes.`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)
streamSubscribe()
streamEthSubscription()
},
}
func init() {
rootCmd.AddCommand(streamSubscribeCmd)
rootCmd.AddCommand(streamEthSubscriptionCmd)
}
func streamSubscribe() {
func streamEthSubscription() {
// Prep the subscription config/filters to be sent to the server
configureSubscription()
ethSubConfig, err := config.NewEthSubscriptionConfig()
if err != nil {
log.Fatal(err)
}
// Create a new rpc client and a subscription streamer with that client
rpcClient := getRPCClient()
str := streamer.NewSuperNodeStreamer(rpcClient)
// Buffered channel for reading subscription payloads
payloadChan := make(chan streamer.SuperNodePayload, 20000)
payloadChan := make(chan super_node.Payload, 20000)
// Subscribe to the super node service with the given config/filter parameters
sub, err := str.Stream(payloadChan, subscriptionConfig)
sub, err := str.Stream(payloadChan, ethSubConfig)
if err != nil {
logWithCommand.Fatal(err)
}
@ -73,11 +77,16 @@ func streamSubscribe() {
for {
select {
case payload := <-payloadChan:
if payload.ErrMsg != "" {
logWithCommand.Error(payload.ErrMsg)
if payload.Err != "" {
logWithCommand.Error(payload.Err)
continue
}
for _, headerRlp := range payload.HeadersRlp {
data, ok := payload.Data.(eth.StreamPayload)
if !ok {
logWithCommand.Warnf("payload data expected type %T got %T", eth.StreamPayload{}, payload.Data)
continue
}
for _, headerRlp := range data.HeadersRlp {
var header types.Header
err = rlp.Decode(bytes.NewBuffer(headerRlp), &header)
if err != nil {
@ -87,7 +96,7 @@ func streamSubscribe() {
fmt.Printf("Header number %d, hash %s\n", header.Number.Int64(), header.Hash().Hex())
fmt.Printf("header: %v\n", header)
}
for _, trxRlp := range payload.TransactionsRlp {
for _, trxRlp := range data.TransactionsRlp {
var trx types.Transaction
buff := bytes.NewBuffer(trxRlp)
stream := rlp.NewStream(buff, 0)
@ -99,7 +108,7 @@ func streamSubscribe() {
fmt.Printf("Transaction with hash %s\n", trx.Hash().Hex())
fmt.Printf("trx: %v\n", trx)
}
for _, rctRlp := range payload.ReceiptsRlp {
for _, rctRlp := range data.ReceiptsRlp {
var rct types.ReceiptForStorage
buff := bytes.NewBuffer(rctRlp)
stream := rlp.NewStream(buff, 0)
@ -121,7 +130,7 @@ func streamSubscribe() {
}
}
// This assumes leafs only
for key, stateRlp := range payload.StateNodesRlp {
for key, stateRlp := range data.StateNodesRlp {
var acct state.Account
err = rlp.Decode(bytes.NewBuffer(stateRlp), &acct)
if err != nil {
@ -132,7 +141,7 @@ func streamSubscribe() {
key.Hex(), acct.Root.Hex(), acct.Balance.Int64())
fmt.Printf("state account: %v\n", acct)
}
for stateKey, mappedRlp := range payload.StorageNodesRlp {
for stateKey, mappedRlp := range data.StorageNodesRlp {
fmt.Printf("Storage for state key %s ", stateKey.Hex())
for storageKey, storageRlp := range mappedRlp {
fmt.Printf("with storage key %s\n", storageKey.Hex())
@ -163,61 +172,8 @@ func streamSubscribe() {
}
}
func configureSubscription() {
logWithCommand.Info("loading subscription config")
subscriptionConfig = config.Subscription{
// Below default to false, which means we do not backfill by default
BackFill: viper.GetBool("subscription.backfill"),
BackFillOnly: viper.GetBool("subscription.backfillOnly"),
// Below default to 0
// 0 start means we start at the beginning and 0 end means we continue indefinitely
StartingBlock: big.NewInt(viper.GetInt64("subscription.startingBlock")),
EndingBlock: big.NewInt(viper.GetInt64("subscription.endingBlock")),
// Below default to false, which means we get all headers by default
HeaderFilter: config.HeaderFilter{
Off: viper.GetBool("subscription.headerFilter.off"),
Uncles: viper.GetBool("subscription.headerFilter.uncles"),
},
// Below defaults to false and two slices of length 0
// Which means we get all transactions by default
TrxFilter: config.TrxFilter{
Off: viper.GetBool("subscription.trxFilter.off"),
Src: viper.GetStringSlice("subscription.trxFilter.src"),
Dst: viper.GetStringSlice("subscription.trxFilter.dst"),
},
// Below defaults to false and one slice of length 0
// Which means we get all receipts by default
ReceiptFilter: config.ReceiptFilter{
Off: viper.GetBool("subscription.receiptFilter.off"),
Contracts: viper.GetStringSlice("subscription.receiptFilter.contracts"),
Topic0s: viper.GetStringSlice("subscription.receiptFilter.topic0s"),
},
// Below defaults to two false, and a slice of length 0
// Which means we get all state leafs by default, but no intermediate nodes
StateFilter: config.StateFilter{
Off: viper.GetBool("subscription.stateFilter.off"),
IntermediateNodes: viper.GetBool("subscription.stateFilter.intermediateNodes"),
Addresses: viper.GetStringSlice("subscription.stateFilter.addresses"),
},
// Below defaults to two false, and two slices of length 0
// Which means we get all storage leafs by default, but no intermediate nodes
StorageFilter: config.StorageFilter{
Off: viper.GetBool("subscription.storageFilter.off"),
IntermediateNodes: viper.GetBool("subscription.storageFilter.intermediateNodes"),
Addresses: viper.GetStringSlice("subscription.storageFilter.addresses"),
StorageKeys: viper.GetStringSlice("subscription.storageFilter.storageKeys"),
},
}
}
func getRPCClient() core.RPCClient {
vulcPath := viper.GetString("subscription.path")
vulcPath := viper.GetString("superNode.ethSubscription.path")
if vulcPath == "" {
vulcPath = "ws://127.0.0.1:8080" // default to and try the default ws url if no path is provided
}

cmd/superNode.go Normal file

@ -0,0 +1,109 @@
// Copyright © 2020 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"sync"
"github.com/ethereum/go-ethereum/rpc"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
)
// superNodeCmd represents the superNode command
var superNodeCmd = &cobra.Command{
Use: "superNode",
Short: "VulcanizeDB SuperNode",
Long: `This command configures a VulcanizeDB SuperNode.
The Sync process streams all chain data from the appropriate chain, processes this data into IPLD objects
and publishes them to IPFS. It then indexes the CIDs against useful data fields/metadata in Postgres.
The Serve process creates and exposes an rpc subscription server over ws and ipc. Transformers can subscribe to
these endpoints to stream this data.
The BackFill process spins up a background process which periodically probes the Postgres database to identify
and fill in gaps in the data.
`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)
superNode()
},
}
func init() {
rootCmd.AddCommand(superNodeCmd)
}
func superNode() {
superNode, superNodeConfig, err := newSuperNode()
if err != nil {
logWithCommand.Fatal(err)
}
wg := &sync.WaitGroup{}
var forwardQuitChan chan bool
var forwardPayloadChan chan interface{}
if superNodeConfig.Serve {
forwardQuitChan = make(chan bool)
forwardPayloadChan = make(chan interface{}, super_node.PayloadChanBufferSize)
superNode.ScreenAndServe(wg, forwardPayloadChan, forwardQuitChan)
if err := startServers(superNode, superNodeConfig); err != nil {
logWithCommand.Fatal(err)
}
}
if superNodeConfig.Sync {
if err := superNode.SyncAndPublish(wg, forwardPayloadChan, forwardQuitChan); err != nil {
logWithCommand.Fatal(err)
}
}
if superNodeConfig.BackFill {
backFiller, err := super_node.NewBackFillService(superNodeConfig)
if err != nil {
logWithCommand.Fatal(err)
}
backFiller.FillGaps(wg, nil)
}
wg.Wait()
}
func newSuperNode() (super_node.SuperNode, *config.SuperNode, error) {
superNodeConfig, err := config.NewSuperNodeConfig()
if err != nil {
return nil, nil, err
}
sn, err := super_node.NewSuperNode(superNodeConfig)
if err != nil {
return nil, nil, err
}
return sn, superNodeConfig, nil
}
func startServers(superNode super_node.SuperNode, settings *config.SuperNode) error {
_, _, err := rpc.StartIPCEndpoint(settings.IPCEndpoint, superNode.APIs())
if err != nil {
return err
}
_, _, err = rpc.StartWSEndpoint(settings.WSEndpoint, superNode.APIs(), []string{"vdb"}, nil, true)
if err != nil {
return err
}
_, _, err = rpc.StartHTTPEndpoint(settings.HTTPEndpoint, superNode.APIs(), []string{"eth"}, nil, nil, rpc.HTTPTimeouts{})
return err
}
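
For reference, a client can reach the servers that `startServers` brings up with go-ethereum's `rpc` package. The sketch below is illustrative only: the `"vdb"` module name comes from the `StartWSEndpoint` call above, but the subscription method name and its parameters are hypothetical placeholders, not API confirmed by this PR.

```go
package main

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Dial the WS endpoint exposed by startServers (settings.WSEndpoint).
	client, err := rpc.Dial("ws://127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Buffered so a slow consumer does not stall the server side.
	payloads := make(chan interface{}, 800)

	// "vdb" is the module whitelisted above; "stream" is a hypothetical
	// placeholder for the real subscription method name.
	sub, err := client.Subscribe(context.Background(), "vdb", payloads, "stream")
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()

	for payload := range payloads {
		log.Printf("received payload: %v", payload)
	}
}
```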


@ -1,124 +0,0 @@
// Copyright © 2019 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"os"
"path/filepath"
syn "sync"
"time"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/eth"
"github.com/vulcanize/vulcanizedb/pkg/eth/client"
vRpc "github.com/vulcanize/vulcanizedb/pkg/eth/converters/rpc"
"github.com/vulcanize/vulcanizedb/pkg/eth/node"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
"github.com/vulcanize/vulcanizedb/utils"
)
// syncAndPublishCmd represents the syncAndPublish command
var syncAndPublishCmd = &cobra.Command{
Use: "syncAndPublish",
Short: "Syncs all Ethereum data into IPFS, indexing the CIDs",
Long: `This command works alongside a modified geth node which streams
all block and state (diff) data over a websocket subscription. This process
then converts the eth data to IPLD objects and publishes them to IPFS. Additionally,
it maintains a local index of the IPLD objects' CIDs in Postgres.`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)
syncAndPublish()
},
}
var ipfsPath string
func init() {
rootCmd.AddCommand(syncAndPublishCmd)
}
func syncAndPublish() {
superNode, newNodeErr := newSuperNode()
if newNodeErr != nil {
logWithCommand.Fatal(newNodeErr)
}
wg := &syn.WaitGroup{}
syncAndPubErr := superNode.SyncAndPublish(wg, nil, nil)
if syncAndPubErr != nil {
logWithCommand.Fatal(syncAndPubErr)
}
if viper.GetBool("superNodeBackFill.on") && viper.GetString("superNodeBackFill.rpcPath") != "" {
backfiller, newBackFillerErr := newBackFiller()
if newBackFillerErr != nil {
logWithCommand.Fatal(newBackFillerErr)
}
backfiller.FillGaps(wg, nil)
}
wg.Wait() // If an error was thrown, wg.Add was never called and this will fall through
}
func getBlockChainAndClient(path string) (*eth.BlockChain, core.RPCClient) {
rawRPCClient, dialErr := rpc.Dial(path)
if dialErr != nil {
logWithCommand.Fatal(dialErr)
}
rpcClient := client.NewRPCClient(rawRPCClient, ipc)
ethClient := ethclient.NewClient(rawRPCClient)
vdbEthClient := client.NewEthClient(ethClient)
vdbNode := node.MakeNode(rpcClient)
transactionConverter := vRpc.NewRPCTransactionConverter(ethClient)
blockChain := eth.NewBlockChain(vdbEthClient, rpcClient, vdbNode, transactionConverter)
return blockChain, rpcClient
}
func newSuperNode() (super_node.NodeInterface, error) {
blockChain, rpcClient := getBlockChainAndClient(ipc)
db := utils.LoadPostgres(databaseConfig, blockChain.Node())
quitChan := make(chan bool)
ipfsPath = viper.GetString("client.ipfsPath")
if ipfsPath == "" {
home, homeDirErr := os.UserHomeDir()
if homeDirErr != nil {
logWithCommand.Fatal(homeDirErr)
}
ipfsPath = filepath.Join(home, ".ipfs")
}
workers := viper.GetInt("client.workers")
if workers < 1 {
workers = 1
}
return super_node.NewSuperNode(ipfsPath, &db, rpcClient, quitChan, workers, blockChain.Node())
}
func newBackFiller() (super_node.BackFillInterface, error) {
blockChain, archivalRPCClient := getBlockChainAndClient(viper.GetString("superNodeBackFill.rpcPath"))
db := utils.LoadPostgres(databaseConfig, blockChain.Node())
freq := viper.GetInt("superNodeBackFill.frequency")
var frequency time.Duration
if freq <= 0 {
frequency = 5 // a count of minutes; multiplied by time.Minute below
} else {
frequency = time.Duration(freq)
}
return super_node.NewBackFillService(ipfsPath, &db, archivalRPCClient, time.Minute*frequency, super_node.DefaultMaxBatchSize)
}


@ -1,75 +0,0 @@
// Copyright © 2019 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
syn "sync"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)
// syncPublishScreenAndServeCmd represents the syncPublishScreenAndServe command
var syncPublishScreenAndServeCmd = &cobra.Command{
Use: "syncPublishScreenAndServe",
Short: "Syncs all Ethereum data into IPFS, indexing the CIDs, and uses this to serve data requests to requesting clients",
Long: `This command works alongside a modified geth node which streams
all block and state (diff) data over a websocket subscription. This process
then converts the eth data to IPLD objects and publishes them to IPFS. Additionally,
it maintains a local index of the IPLD objects' CIDs in Postgres. It then opens up a server which
relays relevant data to requesting clients.`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)
syncPublishScreenAndServe()
},
}
func init() {
rootCmd.AddCommand(syncPublishScreenAndServeCmd)
}
func syncPublishScreenAndServe() {
superNode, newNodeErr := newSuperNode()
if newNodeErr != nil {
logWithCommand.Fatal(newNodeErr)
}
wg := &syn.WaitGroup{}
forwardPayloadChan := make(chan ipfs.IPLDPayload, 20000)
forwardQuitChan := make(chan bool, 1)
syncAndPubErr := superNode.SyncAndPublish(wg, forwardPayloadChan, forwardQuitChan)
if syncAndPubErr != nil {
logWithCommand.Fatal(syncAndPubErr)
}
superNode.ScreenAndServe(wg, forwardPayloadChan, forwardQuitChan)
if viper.GetBool("superNodeBackFill.on") && viper.GetString("superNodeBackFill.rpcPath") != "" {
backfiller, newBackFillerErr := newBackFiller()
if newBackFillerErr != nil {
logWithCommand.Fatal(newBackFillerErr)
}
backfiller.FillGaps(wg, nil)
}
serverErr := startServers(superNode)
if serverErr != nil {
logWithCommand.Fatal(serverErr)
}
wg.Wait()
}


@ -3,8 +3,9 @@ CREATE TABLE public.header_cids (
id SERIAL PRIMARY KEY,
block_number BIGINT NOT NULL,
block_hash VARCHAR(66) NOT NULL,
parent_hash VARCHAR(66) NOT NULL,
cid TEXT NOT NULL,
uncle BOOLEAN NOT NULL,
td BIGINT,
UNIQUE (block_number, block_hash)
);


@ -0,0 +1,12 @@
-- +goose Up
CREATE TABLE public.uncle_cids (
id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
block_hash VARCHAR(66) NOT NULL,
parent_hash VARCHAR(66) NOT NULL,
cid TEXT NOT NULL,
UNIQUE (header_id, block_hash)
);
-- +goose Down
DROP TABLE public.uncle_cids;
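
With uncles moved into their own table, uncle CIDs for a block are recovered by joining back to `header_cids`. A hedged sqlx sketch (sqlx is already a dependency of this repo, but the helper itself is hypothetical, not code from this PR):

```go
package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // postgres driver; assumed available
)

// uncleCIDsForBlock returns the CIDs of all uncles indexed for a block height,
// per the uncle_cids/header_cids schemas in the migrations above.
func uncleCIDsForBlock(db *sqlx.DB, blockNumber int64) ([]string, error) {
	var cids []string
	pgStr := `SELECT uncle_cids.cid FROM public.uncle_cids
		INNER JOIN public.header_cids ON (uncle_cids.header_id = header_cids.id)
		WHERE header_cids.block_number = $1`
	err := db.Select(&cids, pgStr, blockNumber)
	return cids, err
}

func main() {
	db, err := sqlx.Connect("postgres", "postgres://localhost:5432/vulcanize_public?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	cids, err := uncleCIDsForBlock(db, 6000000)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(cids)
}
```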


@ -3,6 +3,7 @@ CREATE TABLE public.transaction_cids (
id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
tx_hash VARCHAR(66) NOT NULL,
index INTEGER NOT NULL,
cid TEXT NOT NULL,
dst VARCHAR(66) NOT NULL,
src VARCHAR(66) NOT NULL,


@ -4,7 +4,10 @@ CREATE TABLE public.receipt_cids (
tx_id INTEGER NOT NULL REFERENCES transaction_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
cid TEXT NOT NULL,
contract VARCHAR(66),
topic0s VARCHAR(66)[]
topic0s VARCHAR(66)[],
topic1s VARCHAR(66)[],
topic2s VARCHAR(66)[],
topic3s VARCHAR(66)[]
);
-- +goose Down
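
The new topic columns make it possible to filter receipts by any topic position directly in Postgres. A hedged example (assuming the `lib/pq` driver for array binding; the helper is illustrative, not part of this PR):

```go
package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	"github.com/lib/pq" // postgres driver; also provides pq.Array
)

// receiptCIDsByTopic0 selects receipt CIDs whose logs carry any of the given
// topic0 values; && is the Postgres array-overlap operator.
func receiptCIDsByTopic0(db *sqlx.DB, topic0s []string) ([]string, error) {
	var cids []string
	pgStr := `SELECT receipt_cids.cid FROM public.receipt_cids
		WHERE receipt_cids.topic0s && $1::VARCHAR(66)[]`
	err := db.Select(&cids, pgStr, pq.Array(topic0s))
	return cids, err
}

func main() {
	db, err := sqlx.Connect("postgres", "postgres://localhost:5432/vulcanize_public?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	cids, err := receiptCIDsByTopic0(db, []string{
		"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(cids)
}
```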


@ -3,7 +3,7 @@
--
-- Dumped from database version 10.10
-- Dumped by pg_dump version 11.5
-- Dumped by pg_dump version 12.1
SET statement_timeout = 0;
SET lock_timeout = 0;
@ -18,8 +18,6 @@ SET row_security = off;
SET default_tablespace = '';
SET default_with_oids = false;
--
-- Name: addresses; Type: TABLE; Schema: public; Owner: -
--
@ -315,8 +313,9 @@ CREATE TABLE public.header_cids (
id integer NOT NULL,
block_number bigint NOT NULL,
block_hash character varying(66) NOT NULL,
parent_hash character varying(66) NOT NULL,
cid text NOT NULL,
uncle boolean NOT NULL
td bigint
);
@ -593,7 +592,10 @@ CREATE TABLE public.receipt_cids (
tx_id integer NOT NULL,
cid text NOT NULL,
contract character varying(66),
topic0s character varying(66)[]
topic0s character varying(66)[],
topic1s character varying(66)[],
topic2s character varying(66)[],
topic3s character varying(66)[]
);
@ -725,6 +727,7 @@ CREATE TABLE public.transaction_cids (
id integer NOT NULL,
header_id integer NOT NULL,
tx_hash character varying(66) NOT NULL,
index integer NOT NULL,
cid text NOT NULL,
dst character varying(66) NOT NULL,
src character varying(66) NOT NULL
@ -751,6 +754,39 @@ CREATE SEQUENCE public.transaction_cids_id_seq
ALTER SEQUENCE public.transaction_cids_id_seq OWNED BY public.transaction_cids.id;
--
-- Name: uncle_cids; Type: TABLE; Schema: public; Owner: -
--
CREATE TABLE public.uncle_cids (
id integer NOT NULL,
header_id integer NOT NULL,
block_hash character varying(66) NOT NULL,
parent_hash character varying(66) NOT NULL,
cid text NOT NULL
);
--
-- Name: uncle_cids_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
CREATE SEQUENCE public.uncle_cids_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
--
-- Name: uncle_cids_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
ALTER SEQUENCE public.uncle_cids_id_seq OWNED BY public.uncle_cids.id;
--
-- Name: uncles; Type: TABLE; Schema: public; Owner: -
--
@ -1013,6 +1049,13 @@ ALTER TABLE ONLY public.storage_diff ALTER COLUMN id SET DEFAULT nextval('public
ALTER TABLE ONLY public.transaction_cids ALTER COLUMN id SET DEFAULT nextval('public.transaction_cids_id_seq'::regclass);
--
-- Name: uncle_cids id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.uncle_cids ALTER COLUMN id SET DEFAULT nextval('public.uncle_cids_id_seq'::regclass);
--
-- Name: uncles id; Type: DEFAULT; Schema: public; Owner: -
--
@ -1314,6 +1357,22 @@ ALTER TABLE ONLY public.transaction_cids
ADD CONSTRAINT transaction_cids_pkey PRIMARY KEY (id);
--
-- Name: uncle_cids uncle_cids_header_id_block_hash_key; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.uncle_cids
ADD CONSTRAINT uncle_cids_header_id_block_hash_key UNIQUE (header_id, block_hash);
--
-- Name: uncle_cids uncle_cids_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.uncle_cids
ADD CONSTRAINT uncle_cids_pkey PRIMARY KEY (id);
--
-- Name: uncles uncles_block_id_hash_key; Type: CONSTRAINT; Schema: public; Owner: -
--
@ -1575,6 +1634,14 @@ ALTER TABLE ONLY public.transaction_cids
ADD CONSTRAINT transaction_cids_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.header_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
--
-- Name: uncle_cids uncle_cids_header_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--
ALTER TABLE ONLY public.uncle_cids
ADD CONSTRAINT uncle_cids_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.header_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
--
-- Name: uncles uncles_block_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
--


@ -7,10 +7,11 @@ RUN apk add busybox-extras
# this is probably a noob move, but I want apk from alpine for the above but need to avoid Go 1.13 below as this error still occurs https://github.com/ipfs/go-ipfs/issues/6603
FROM golang:1.12.4 as builder
# Get and build vulcanizedb ipfs_concurreny fork
RUN go get -u -d github.com/vulcanize/vulcanizedb
RUN yum install -y libusb1-devel systemd-devel
# Get and build vulcanizedb
ADD . /go/src/github.com/vulcanize/vulcanizedb
WORKDIR /go/src/github.com/vulcanize/vulcanizedb
RUN git checkout ipfs_concurrency
RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o vulcanizedb .
# Get and build vulcanize's go-ipfs fork
@ -18,17 +19,9 @@ RUN go get -u -d github.com/ipfs/go-ipfs
WORKDIR /go/src/github.com/ipfs/go-ipfs
RUN git remote add vulcanize https://github.com/vulcanize/go-ipfs.git
RUN git fetch vulcanize
RUN git checkout -b pg_ipfs vulcanize/postgres_update
RUN git checkout -b pg_ipfs v0.4.22-alpha
RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipfs ./cmd/ipfs
# Get and build vulcanize's geth fork
RUN go get -u -d github.com/ethereum/go-ethereum
WORKDIR /go/src/github.com/ethereum/go-ethereum
RUN git remote add vulcanize https://github.com/vulcanize/go-ethereum.git
RUN git fetch vulcanize
RUN git checkout -b statediff_geth vulcanize/statediffing
RUN GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o geth ./cmd/geth
# Build migration tool
RUN go get -u -d github.com/pressly/goose/cmd/goose
WORKDIR /go/src/github.com/pressly/goose/cmd/goose
@ -41,19 +34,7 @@ FROM alpine
WORKDIR /app
ARG USER
ARG config_file=environments/syncPublishScreenAndServe.toml
ARG vdb_dbname="vulcanize_public"
ARG vdb_hostname="localhost"
ARG vdb_port="5432"
ARG vdb_user="postgres"
ARG vdb_password
# setup environment
ENV VDB_PG_NAME="$vdb_dbname"
ENV VDB_PG_HOSTNAME="$vdb_hostname"
ENV VDB_PG_PORT="$vdb_port"
ENV VDB_PG_USER="$vdb_user"
ENV VDB_PG_PASSWORD="$vdb_password"
ARG config_file=environments/superNode.toml
RUN adduser -D 5000 $USER
USER $USER
@ -68,7 +49,6 @@ COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/vulcanizedb vulcani
COPY --from=builder /go/src/github.com/pressly/goose/cmd/goose/goose goose
COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/db/migrations migrations/vulcanizedb
COPY --from=builder /go/src/github.com/ipfs/go-ipfs/ipfs ipfs
COPY --from=builder /go/src/github.com/ethereum/go-ethereum/geth geth
EXPOSE 8080


@ -9,6 +9,7 @@ test $VDB_PG_NAME
test $VDB_PG_HOSTNAME
test $VDB_PG_PORT
test $VDB_PG_USER
test $IPFS_INIT
set +e
# Export our database variables so that the IPFS Postgres plugin can use them
@ -26,35 +27,30 @@ echo "Connecting with: $VDB_PG_CONNECT"
echo "Running database migrations"
./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up
# If the db migrations ran without err
if [ $? -eq 0 ]; then
# Initialize PG-IPFS
echo "Initializing Postgres-IPFS profile"
./ipfs init --profile=postgresds
if [[ $? -eq 0 ]]; then
# and IPFS_INIT is true
if [[ "$IPFS_INIT" = true ]] ; then
# initialize PG-IPFS
echo "Initializing Postgres-IPFS profile"
./ipfs init --profile=postgresds
else
echo "IPFS profile already initialized, skipping initialization"
fi
else
echo "Could not run migrations. Are the database details correct?"
exit
fi
# If IPFS initialization was successful
if [ $? -eq 0 ]; then
# Begin the state-diffing Geth process
echo "Beginning the state-diffing Geth process"
./geth --statediff --statediff.streamblock --ws --syncmode=full 2>&1 | tee -a log.txt &
sleep 1
else
echo "Could not initialize Postgres backed IPFS profile. Are the database details correct?"
exit
fi
# If Geth startup was successful
if [ $? -eq 0 ]; then
if [[ $? -eq 0 ]]; then
# Wait until block synchronisation has begun
echo "Waiting for block synchronization to begin"
( tail -f -n0 log.txt & ) | grep -q "Block synchronisation started" # this blocks til we see "Block synchronisation started"
# And then spin up the superNode Vulcanizedb service
echo "Beginning the superNode vulcanizedb process"
./vulcanizedb syncPublishScreenAndServe --config=config.toml 2>&1 | tee -a log.txt &
./vulcanizedb superNode --config=config.toml 2>&1 | tee -a log.txt &
else
echo "Could not initialize state-diffing Geth."
exit


@ -1,345 +0,0 @@
# Super Node
Vulcanizedb can act as an index for Ethereum data stored on IPFS through the use of the `syncAndPublish` and
`syncPublishScreenAndServe` commands.
## Manual Setup
These commands work in conjunction with a [state-diffing full Geth node](https://github.com/vulcanize/go-ethereum/tree/statediffing)
and IPFS.
### IPFS
To start, download and install [IPFS](https://github.com/vulcanize/go-ipfs)
`go get github.com/ipfs/go-ipfs`
`cd $GOPATH/src/github.com/ipfs/go-ipfs`
`make install`
If we want to use Postgres as our backing datastore, we need to use the vulcanize fork of go-ipfs.
Start by adding the fork and switching over to it:
`git remote add vulcanize https://github.com/vulcanize/go-ipfs.git`
`git fetch vulcanize`
`git checkout -b postgres_update vulcanize/postgres_update`
Now install this fork of ipfs; first, be sure to remove any previous installation.
`make install`
Check that it is installed properly by running
`ipfs`
You should see the CLI info/help output.
And now we initialize with the `postgresds` profile.
If ipfs was previously initialized we will need to remove the old profile first.
We also need to provide env variables for the postgres connection:
We can either set these manually, e.g.
```bash
export IPFS_PGHOST=
export IPFS_PGUSER=
export IPFS_PGDATABASE=
export IPFS_PGPORT=
export IPFS_PGPASSWORD=
```
And then run the ipfs command
`ipfs init --profile=postgresds`
Or we can use the pre-made script at `GOPATH/src/github.com/ipfs/go-ipfs/misc/utility/ipfs_postgres.sh`
which has usage:
`./ipfs_postgres.sh <IPFS_PGHOST> <IPFS_PGPORT> <IPFS_PGUSER> <IPFS_PGDATABASE>"`
and will ask us to enter the password, avoiding storing it to an ENV variable.
Once we have initialized ipfs, that is all we need to do with it- we do not need to run a daemon during the subsequent processes (in fact, we can't).
### Geth
For Geth, we currently *require* a special fork, and we can set this up as follows:
Begin by downloading geth and switching to the vulcanize/statediffing branch
`go get github.com/ethereum/go-ethereum`
`cd $GOPATH/src/github.com/ethereum/go-ethereum`
`git remote add vulcanize https://github.com/vulcanize/go-ethereum.git`
`git fetch vulcanize`
`git checkout -b statediffing vulcanize/statediffing`
Now, install this fork of geth (make sure any old versions have been uninstalled/binaries removed first)
`make geth`
And run the output binary with statediffing turned on:
`cd $GOPATH/src/github.com/ethereum/go-ethereum/build/bin`
`./geth --statediff --statediff.streamblock --ws --syncmode=full`
Note: other CLI options, statediff-specific ones included, can be explored with `./geth help`
The output from geth should mention that it is `Starting statediff service` and block synchronization should begin shortly thereafter.
Note that until it receives a subscriber, the statediffing process does essentially nothing. Once a subscription is received, this
will be indicated in the output.
Also in the output will be the websocket url and ipc paths that we will use to subscribe to the statediffing process.
The default ws url is "ws://127.0.0.1:8546" and the default ipcPath (on Darwin systems only) is "/Users/user/Library/Ethereum/geth.ipc"
### Vulcanizedb
There are two commands to choose from:
#### syncAndPublish
`syncAndPublish` performs the functions of the super node: syncing data from Geth, converting it to IPLDs,
publishing those IPLDs to IPFS, and creating a local Postgres index to relate their CIDs to useful metadata.
Usage:
`./vulcanizedb syncAndPublish --config=<config_file.toml>`
The config file for the `syncAndPublish` command looks very similar to the basic config file
```toml
[database]
name = "vulcanize_demo"
hostname = "localhost"
port = 5432
[client]
ipcPath = "ws://127.0.0.1:8546"
ipfsPath = "/Users/user/.ipfs"
```
With an additional field, `client.ipcPath`, that is either the ws url or the ipc path that Geth has exposed (the url and path output
when the geth sync was started), and `client.ipfsPath`, which is the path to the ipfs datastore directory.
#### syncPublishScreenAndServe
`syncPublishScreenAndServe` does everything that `syncAndPublish` does, plus it opens up an RPC server which exposes
an endpoint that allows transformers to subscribe to the subsets of the sync-and-published data that are relevant to their transformations.
Usage:
`./vulcanizedb syncPublishScreenAndServe --config=<config_file.toml>`
The config file for the `syncPublishScreenAndServe` command has additional fields and looks like:
```toml
[database]
name = "vulcanize_demo"
hostname = "localhost"
port = 5432
[client]
ipcPath = "ws://127.0.0.1:8546"
ipfsPath = "/Users/user/.ipfs"
[server]
ipcPath = "/Users/user/.vulcanize/vulcanize.ipc"
wsEndpoint = "127.0.0.1:80"
[superNodeBackFill]
on = false
ipcPath = ""
frequency = 5
```
The additional `server.ipcPath` and `server.wsEndpoint` fields set the ipc endpoint and ws url over which
the `syncPublishScreenAndServe` rpc server exposes itself to subscribing transformers.
Any valid and available path and endpoint are acceptable, but keep in mind that this path and endpoint need to
be known by transformers for them to subscribe to the super node.
Because the super node syncs data from a geth full node as it progresses through its block synchronization, there is potential
for the super node to miss data both at the beginning of the sync due to lag between initialization of the two processes and
anywhere throughout the sync if the processes are interrupted. The `superNodeBackFill` config mapping is used to optionally configure
the super node with an archival geth client that exposes a `statediff.StateDiffAt` rpc endpoint, to enable it to fill in these data gaps.
`superNodeBackFill.on` turns the backfill process on, `superNodeBackFill.rpcPath` is the rpc path for the archival geth node, and `superNodeBackFill.frequency`
sets how often (in minutes) the backfill process checks for and fills in gaps.
## Dockerfile Setup
The below provides step-by-step directions for how to set up the super node using the provided Dockerfile on an AWS Linux AMI instance.
Note that the instance will need sufficient memory and storage for this to work.
1. Install basic dependencies
```
sudo yum update
sudo yum install -y curl gpg gcc gcc-c++ make git
```
2. Install Go 1.12
```
wget https://dl.google.com/go/go1.12.6.linux-amd64.tar.gz
tar -xzf go1.12.6.linux-amd64.tar.gz
sudo mv go /usr/local
```
3. Edit .bash_profile to export GOPATH
```
export GOROOT=/usr/local/go
export GOPATH=$HOME/go
export PATH=$GOPATH/bin:$GOROOT/bin:$PATH
```
4. Install and setup Postgres
```
sudo yum install postgresql postgresql96-server
sudo service postgresql96 initdb
sudo service postgresql96 start
sudo -u postgres createuser -s ec2-user
sudo -u postgres createdb ec2-user
sudo su postgres
psql
ALTER USER "ec2-user" WITH SUPERUSER;
\q
exit
```
4b. Edit hba_file to trust connections
```
psql
SHOW hba_file;
\q
sudo vim {PATH_TO_FILE}
```
4c. Stop and restart Postgres server to effect changes
```
sudo service postgresql96 stop
sudo service postgresql96 start
```
5. Install and start Docker (exit and re-enter ec2 instance afterwards to effect changes)
```
sudo yum install -y docker
sudo service docker start
sudo usermod -aG docker ec2-user
```
6. Fetch the repository and switch to this working branch
```
go get github.com/vulcanize/vulcanizedb
cd $GOPATH/src/github.com/vulcanize/vulcanizedb
git checkout ipfs_concurrency
```
7. Create the db
```
createdb vulcanize_public
```
8. Build and run the Docker image
```
cd $GOPATH/src/github.com/vulcanize/vulcanizedb/dockerfiles/super_node
docker build .
docker run --network host -e VDB_PG_CONNECT=postgres://localhost:5432/vulcanize_public?sslmode=disable {IMAGE_ID}
```
## Subscribing
A transformer can subscribe to the `syncPublishScreenAndServe` service over its ipc or ws endpoints. When subscribing, the transformer
specifies which subsets of the synced data it is interested in and the server will forward only those data.
The `streamSubscribe` command serves as a simple demonstration/example of subscribing to the super-node feed: it subscribes with a set of parameters
defined in the loaded config file and prints the streamed data to stdout. To build transformers that subscribe to and use super-node data,
the shared/libraries/streamer can be used.
Usage:
`./vulcanizedb streamSubscribe --config=<config_file.toml>`
The config for `streamSubscribe` has the `subscription` set of parameters, for example:
```toml
[subscription]
path = "ws://127.0.0.1:8080"
backfill = true
backfillOnly = false
startingBlock = 0
endingBlock = 0
[subscription.headerFilter]
off = false
uncles = false
[subscription.trxFilter]
off = false
src = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
]
dst = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
]
[subscription.receiptFilter]
off = false
topic0s = [
"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
"0x930a61a57a70a73c2a503615b87e2e54fe5b9cdeacda518270b852296ab1a377"
]
[subscription.stateFilter]
off = false
addresses = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"
]
intermediateNodes = false
[subscription.storageFilter]
off = true
addresses = [
"",
""
]
storageKeys = [
"",
""
]
intermediateNodes = false
```
`subscription.path` is used to define the ws url OR ipc endpoint we will subscribe to the super-node over
(the `server.ipcPath` or `server.wsEndpoint` that the super-node has defined in its config file).
`subscription.backfill` specifies whether or not the super-node should look up historical data in its cache and
send that to the subscriber; if this is set to `false` then the super-node only forwards newly synced/incoming data.
`subscription.backfillOnly` will tell the super-node to only send historical data and not stream incoming data going forward.
`subscription.startingBlock` is the starting block number for the range we want to receive data in.
`subscription.endingBlock` is the ending block number for the range we want to receive data in;
setting it to 0 means there is no end/we will continue indefinitely.
`subscription.headerFilter` has two sub-options: `off` and `uncles`. Setting `off` to true tells the super-node to
not send any headers to the subscriber; setting `uncles` to true tells the super-node to send uncles in addition to normal headers.
`subscription.trxFilter` has three sub-options: `off`, `src`, and `dst`. Setting `off` to true tells the super-node to
not send any transactions to the subscriber; `src` and `dst` are string arrays which can be filled with ETH addresses we want to filter transactions for.
If they contain any addresses, the super-node will only send transactions that were sent or received by the addresses contained
in `src` and `dst`, respectively.
`subscription.receiptFilter` has two sub-options: `off` and `topic0s`. Setting `off` to true tells the super-node to
not send any receipts to the subscriber; `topic0s` is a string array which can be filled with event topics we want to filter for.
If it contains any topics, the super-node will only send receipts that contain logs with one of those topic0s.
`subscription.stateFilter` has three sub-options: `off`, `addresses`, and `intermediateNodes`. Setting `off` to true tells the super-node to
not send any state data to the subscriber; `addresses` is a string array which can be filled with ETH addresses we want to filter state for.
If it contains any addresses, the super-node will only send state leafs (accounts) corresponding to those account addresses. By default the super-node
only sends along state leafs; if we want to receive branch and extension nodes as well, `intermediateNodes` can be set to `true`.
`subscription.storageFilter` has four sub-options: `off`, `addresses`, `storageKeys`, and `intermediateNodes`. Setting `off` to true tells the super-node to
not send any storage data to the subscriber; `addresses` is a string array which can be filled with ETH addresses we want to filter storage for.
If it contains any addresses, the super-node will only send storage nodes from the storage tries at those state addresses. `storageKeys` is another string
array that can be filled with storage keys we want to filter storage data for. It is important to note that the storageKeys are the actual keccak256 hashes, whereas
the addresses in the `addresses` fields are the ETH addresses and not their keccak256 hashes that serve as the actual state keys. By default the super-node
only sends along storage leafs; if we want to receive branch and extension nodes as well, `intermediateNodes` can be set to `true`.


@ -0,0 +1,215 @@
## Super Node Setup
Vulcanizedb can act as an index for chain data stored on IPFS through the use of the `superNode` command.
### Manual Setup
These commands work in conjunction with a [state-diffing full Geth node](https://github.com/vulcanize/go-ethereum/tree/statediffing)
and IPFS.
#### IPFS
To start, download and install [IPFS](https://github.com/vulcanize/go-ipfs)
`go get github.com/ipfs/go-ipfs`
`cd $GOPATH/src/github.com/ipfs/go-ipfs`
`make install`
If we want to use Postgres as our backing datastore, we need to use the vulcanize fork of go-ipfs.
Start by adding the fork and switching over to it:
`git remote add vulcanize https://github.com/vulcanize/go-ipfs.git`
`git fetch vulcanize`
`git checkout -b postgres_update vulcanize/postgres_update`
Now install this fork of ipfs; first, be sure to remove any previous installation.
`make install`
Check that it is installed properly by running
`ipfs`
You should see the CLI info/help output.
And now we initialize with the `postgresds` profile.
If ipfs was previously initialized we will need to remove the old profile first.
We also need to provide env variables for the postgres connection:
We can either set these manually, e.g.
```bash
export IPFS_PGHOST=
export IPFS_PGUSER=
export IPFS_PGDATABASE=
export IPFS_PGPORT=
export IPFS_PGPASSWORD=
```
And then run the ipfs command
`ipfs init --profile=postgresds`
Or we can use the pre-made script at `GOPATH/src/github.com/ipfs/go-ipfs/misc/utility/ipfs_postgres.sh`
which has usage:
`./ipfs_postgres.sh <IPFS_PGHOST> <IPFS_PGPORT> <IPFS_PGUSER> <IPFS_PGDATABASE>"`
and will ask us to enter the password, avoiding storing it to an ENV variable.
Once we have initialized ipfs, that is all we need to do with it- we do not need to run a daemon during the subsequent processes (in fact, we can't).
#### Geth
For Geth, we currently *require* a special fork, and we can set this up as follows:
Begin by downloading geth and switching to the vulcanize/statediff_at_anyblock-1.9.9 branch
`go get github.com/ethereum/go-ethereum`
`cd $GOPATH/src/github.com/ethereum/go-ethereum`
`git remote add vulcanize https://github.com/vulcanize/go-ethereum.git`
`git fetch vulcanize`
`git checkout -b statediffing vulcanize/statediff_at_anyblock-1.9.9`
Now, install this fork of geth (make sure any old versions have been uninstalled/binaries removed first)
`make geth`
And run the output binary with statediffing turned on:
`cd $GOPATH/src/github.com/ethereum/go-ethereum/build/bin`
`./geth --statediff --statediff.streamblock --ws --syncmode=full`
Note: other CLI options, statediff-specific ones included, can be explored with `./geth help`
The output from geth should mention that it is `Starting statediff service` and block synchronization should begin shortly thereafter.
Note that until it receives a subscriber, the statediffing process does essentially nothing. Once a subscription is received, this
will be indicated in the output.
Also in the output will be the websocket url and ipc paths that we will use to subscribe to the statediffing process.
The default ws url is "ws://127.0.0.1:8546" and the default ipcPath (on Darwin systems only) is "/Users/user/Library/Ethereum/geth.ipc"
#### Vulcanizedb
The `superNode` command is used to initialize and run an instance of the VulcanizeDB SuperNode.
Usage:
`./vulcanizedb superNode --config=<config_file.toml>`
The config file contains the parameters needed to initialize a SuperNode with the appropriate chain, settings, and services:
```toml
[superNode]
chain = "ethereum"
ipfsPath = "/root/.ipfs"
[superNode.database]
name = "vulcanize_public"
hostname = "localhost"
port = 5432
user = "ec2-user"
[superNode.sync]
on = true
wsPath = "ws://127.0.0.1:8546"
workers = 1
[superNode.server]
on = true
ipcPath = "/root/.vulcanize/vulcanize.ipc"
wsPath = "127.0.0.1:8080"
[superNode.backFill]
on = false
httpPath = ""
frequency = 5
batchSize = 50
```
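
Viper flattens these TOML tables into dotted keys, so `NewSuperNodeConfig` can be pictured as reading them like the sketch below. This is a sketch of the pattern only; the struct and field names are assumptions, and the real config code lives in `pkg/super_node/config`.

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

// superNodeSettings mirrors the [superNode] sections above; the struct and
// field names here are assumptions, not the PR's exact config code.
type superNodeSettings struct {
	Chain    string
	IPFSPath string
	Sync     bool
	Workers  int
	Serve    bool
	BackFill bool
}

func main() {
	viper.SetConfigFile("environments/superNode.toml")
	if err := viper.ReadInConfig(); err != nil {
		panic(err)
	}
	// Viper exposes TOML tables as dotted keys.
	settings := superNodeSettings{
		Chain:    viper.GetString("superNode.chain"),
		IPFSPath: viper.GetString("superNode.ipfsPath"),
		Sync:     viper.GetBool("superNode.sync.on"),
		Workers:  viper.GetInt("superNode.sync.workers"),
		Serve:    viper.GetBool("superNode.server.on"),
		BackFill: viper.GetBool("superNode.backFill.on"),
	}
	fmt.Printf("%+v\n", settings)
}
```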
### Dockerfile Setup
The below provides step-by-step directions for how to set up the super node using the provided Dockerfile on an AWS Linux AMI instance.
Note that the instance will need sufficient memory and storage for this to work.
1. Install basic dependencies
```
sudo yum update
sudo yum install -y curl gpg gcc gcc-c++ make git
```
2. Install Go 1.12
```
wget https://dl.google.com/go/go1.12.6.linux-amd64.tar.gz
tar -xzf go1.12.6.linux-amd64.tar.gz
sudo mv go /usr/local
```
3. Edit .bash_profile to export GOPATH
```
export GOROOT=/usr/local/go
export GOPATH=$HOME/go
export PATH=$GOPATH/bin:$GOROOT/bin:$PATH
```
4. Install and setup Postgres
```
sudo yum install postgresql postgresql96-server
sudo service postgresql96 initdb
sudo service postgresql96 start
sudo -u postgres createuser -s ec2-user
sudo -u postgres createdb ec2-user
sudo su postgres
psql
ALTER USER "ec2-user" WITH SUPERUSER;
\q
exit
```
4b. Edit hba_file to trust local connections
```
psql
SHOW hba_file;
\q
sudo vim {PATH_TO_FILE}
```
4c. Stop and restart Postgres server to effect changes
```
sudo service postgresql96 stop
sudo service postgresql96 start
```
5. Install and start Docker (exit and re-enter ec2 instance afterwards to effect changes)
```
sudo yum install -y docker
sudo service docker start
sudo usermod -aG docker ec2-user
```
6. Fetch the repository
```
go get github.com/vulcanize/vulcanizedb
cd $GOPATH/src/github.com/vulcanize/vulcanizedb
```
7. Create the db
```
createdb vulcanize_public
```
8. Build and run the Docker image
```
cd $GOPATH/src/github.com/vulcanize/vulcanizedb/dockerfiles/super_node
docker build .
docker run --network host -e IPFS_INIT=true -e VDB_PG_NAME=vulcanize_public -e VDB_PG_HOSTNAME=localhost -e VDB_PG_PORT=5432 -e VDB_PG_USER=postgres -e VDB_PG_PASSWORD=password {IMAGE_ID}
```


@ -0,0 +1,96 @@
## SuperNode Subscription
A transformer can subscribe to the SuperNode service over its ipc or ws endpoints. When subscribing, the transformer
specifies the chain and a set of parameters which define which subsets of that chain's data the server should feed to it.
### Ethereum data
The `streamEthSubscription` command serves as a simple demonstration/example of subscribing to the super-node Ethereum feed: it subscribes with a set of parameters
defined in the loaded config file and prints the streamed data to stdout. To build transformers that subscribe to and use super-node Ethereum data,
the shared/libraries/streamer can be used.
Usage:
`./vulcanizedb streamEthSubscription --config=<config_file.toml>`
The config for `streamEthSubscription` has a set of parameters to fill the [EthSubscription config structure](../../pkg/super_node/config/eth_subscription.go)
```toml
[superNode]
[superNode.ethSubscription]
historicalData = true
historicalDataOnly = false
startingBlock = 0
endingBlock = 0
wsPath = "ws://127.0.0.1:8080"
[superNode.ethSubscription.headerFilter]
off = false
uncles = false
[superNode.ethSubscription.txFilter]
off = false
src = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
]
dst = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
]
[superNode.ethSubscription.receiptFilter]
off = false
contracts = []
topics = [
[
"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
"0x930a61a57a70a73c2a503615b87e2e54fe5b9cdeacda518270b852296ab1a377"
]
]
[superNode.ethSubscription.stateFilter]
off = false
addresses = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"
]
intermediateNodes = false
[superNode.ethSubscription.storageFilter]
off = true
addresses = []
storageKeys = []
intermediateNodes = false
```
`ethSubscription.path` is used to define the SuperNode ws url OR ipc endpoint we subscribe to.
`ethSubscription.historicalData` specifies whether or not the super-node should look up historical data in its cache and
send that to the subscriber; if this is set to `false` then the super-node only streams newly synced/incoming data.
`ethSubscription.historicalDataOnly` will tell the super-node to only send historical data within the specified range and
not stream forward syncing data.
`ethSubscription.startingBlock` is the starting block number for the range we want to receive data in.
`ethSubscription.endingBlock` is the ending block number for the range we want to receive data in;
setting it to 0 means there is no end/we will continue streaming indefinitely.
`ethSubscription.headerFilter` has two sub-options: `off` and `uncles`. Setting `off` to true tells the super-node to
not send any headers to the subscriber; setting `uncles` to true tells the super-node to send uncles in addition to normal headers.
`ethSubscription.txFilter` has three sub-options: `off`, `src`, and `dst`. Setting `off` to true tells the super-node to
not send any transactions to the subscriber; `src` and `dst` are string arrays which can be filled with ETH addresses we want to filter transactions for.
If they contain any addresses, the super-node will only send transactions that were sent or received by the addresses contained
in `src` and `dst`, respectively.
`ethSubscription.receiptFilter` has four sub-options: `off`, `topics`, `contracts`, and `matchTxs`. Setting `off` to true tells the super-node to
not send any receipts to the subscriber; `topics` is an array of string arrays which can be filled with event topics we want to filter for
(the first array filters on topic0, the next on topic1, and so on, matching the topic columns added in Postgres).
If it contains any topics, the super-node will only send receipts that contain logs with those topics. Similarly, `contracts` is
a string array which can be filled with contract addresses we want to filter for; if it contains any contract addresses, the super-node will
only send receipts that correspond to one of those contracts. `matchTxs` is a bool; when set to true, any receipts that correspond to filtered-for
transactions will be sent by the super-node, regardless of whether or not the receipt satisfies the `topics` or `contracts` filters.
`ethSubscription.stateFilter` has three sub-options: `off`, `addresses`, and `intermediateNodes`. Setting `off` to true tells the super-node to
not send any state data to the subscriber; `addresses` is a string array which can be filled with ETH addresses we want to filter state for.
If it contains any addresses, the super-node will only send state leafs (accounts) corresponding to those account addresses. By default the super-node
only sends along state leafs; if we want to receive branch and extension nodes as well, `intermediateNodes` can be set to `true`.
`ethSubscription.storageFilter` has four sub-options: `off`, `addresses`, `storageKeys`, and `intermediateNodes`. Setting `off` to true tells the super-node to
not send any storage data to the subscriber; `addresses` is a string array which can be filled with ETH addresses we want to filter storage for.
If it contains any addresses, the super-node will only send storage nodes from the storage tries at those state addresses. `storageKeys` is another string
array that can be filled with storage keys we want to filter storage data for. It is important to note that the storageKeys are the actual keccak256 hashes, whereas
the addresses in the `addresses` fields are the ETH addresses and not their keccak256 hashes that serve as the actual state keys. By default the super-node
only sends along storage leafs; if we want to receive branch and extension nodes as well, `intermediateNodes` can be set to `true`.
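
Putting the above together, a minimal Ethereum subscriber looks like the following sketch. It reuses the calls the `streamEthSubscription` command makes in this diff (`config.NewEthSubscriptionConfig`, `streamer.NewSuperNodeStreamer`, `Stream`); only header decoding is shown, error handling is abbreviated, and the config file name and ws url are assumptions that must match your `[superNode.server]` settings.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/spf13/viper"

	"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
	"github.com/vulcanize/vulcanizedb/pkg/eth/client"
	"github.com/vulcanize/vulcanizedb/pkg/super_node"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)

func main() {
	// Point viper at a toml file like the one above (path is hypothetical).
	viper.SetConfigFile("environments/superNodeSubscription.toml")
	if err := viper.ReadInConfig(); err != nil {
		log.Fatal(err)
	}
	// Load the [superNode.ethSubscription] filters.
	ethSubConfig, err := config.NewEthSubscriptionConfig()
	if err != nil {
		log.Fatal(err)
	}

	// Dial the super node's ws server and wrap the raw client.
	wsURL := "ws://127.0.0.1:8080"
	rawClient, err := rpc.Dial(wsURL)
	if err != nil {
		log.Fatal(err)
	}
	str := streamer.NewSuperNodeStreamer(client.NewRPCClient(rawClient, wsURL))

	// Buffered channel for reading subscription payloads.
	payloadChan := make(chan super_node.Payload, 20000)
	sub, err := str.Stream(payloadChan, ethSubConfig)
	if err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case payload := <-payloadChan:
			if payload.Err != "" {
				log.Println(payload.Err)
				continue
			}
			data, ok := payload.Data.(eth.StreamPayload)
			if !ok {
				continue
			}
			for _, headerRlp := range data.HeadersRlp {
				var header types.Header
				if err := rlp.Decode(bytes.NewBuffer(headerRlp), &header); err != nil {
					continue
				}
				fmt.Printf("header %d: %s\n", header.Number.Int64(), header.Hash().Hex())
			}
		// sub is assumed to expose Err() the way go-ethereum client subscriptions do.
		case err := <-sub.Err():
			log.Fatal(err)
		}
	}
}
```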


@ -0,0 +1,25 @@
[superNode]
chain = "ethereum"
ipfsPath = "/root/.ipfs"
[superNode.database]
name = "vulcanize_public"
hostname = "localhost"
port = 5432
user = "ec2-user"
[superNode.sync]
on = true
wsPath = "ws://127.0.0.1:8546"
workers = 1
[superNode.server]
on = true
ipcPath = "/root/.vulcanize/vulcanize.ipc"
wsPath = "127.0.0.1:8080"
[superNode.backFill]
on = true
httpPath = "http://127.0.0.1:8545"
frequency = 5
batchSize = 50


@ -1,35 +1,38 @@
[subscription]
path = "ws://127.0.0.1:8080"
backfill = true
backfillOnly = false
startingBlock = 0
endingBlock = 0
[subscription.headerFilter]
off = false
uncles = false
[subscription.trxFilter]
off = false
src = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
]
dst = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
]
[subscription.receiptFilter]
off = false
contracts = []
topic0s = [
"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
"0x930a61a57a70a73c2a503615b87e2e54fe5b9cdeacda518270b852296ab1a377"
]
[subscription.stateFilter]
off = false
addresses = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"
]
intermediateNodes = false
[subscription.storageFilter]
off = true
addresses = []
storageKeys = []
intermediateNodes = false
[superNode]
[superNode.ethSubscription]
historicalData = true
historicalDataOnly = false
startingBlock = 0
endingBlock = 0
wsPath = "ws://127.0.0.1:8080"
[superNode.ethSubscription.headerFilter]
off = false
uncles = false
[superNode.ethSubscription.txFilter]
off = false
src = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
]
dst = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
]
[superNode.ethSubscription.receiptFilter]
off = false
contracts = []
topics = [
[
"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
"0x930a61a57a70a73c2a503615b87e2e54fe5b9cdeacda518270b852296ab1a377"
]
]
[superNode.ethSubscription.stateFilter]
off = false
addresses = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"
]
intermediateNodes = false
[superNode.ethSubscription.storageFilter]
off = true
addresses = []
storageKeys = []
intermediateNodes = false


@ -1,18 +0,0 @@
[database]
name = "vulcanize_public"
hostname = "localhost"
port = 5432
user = "ec2-user"
[client]
ipcPath = "ws://127.0.0.1:8546"
ipfsPath = "/root/.ipfs"
[server]
ipcPath = "/root/.vulcanize/vulcanize.ipc"
wsEndpoint = "127.0.0.1:8080"
[superNodeBackFill]
on = false
rpcPath = ""
frequency = 5

22
go.mod
View File

@ -6,7 +6,7 @@ require (
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9
github.com/Stebalien/go-bitfield v0.0.1
github.com/allegro/bigcache v0.0.0-20190618191010-69ea0af04088
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156
github.com/aristanetworks/goarista v0.0.0-20190712234253-ed1100a1c015
github.com/bren2010/proquint v0.0.0-20160323162903-38337c27106d
github.com/btcsuite/btcd v0.0.0-20190629003639-c26ffa870fd8
@ -14,6 +14,7 @@ require (
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
github.com/cenkalti/backoff/v3 v3.0.0
github.com/cheekybits/genny v1.0.0
github.com/coocood/freecache v1.1.0 // indirect
github.com/coreos/go-semver v0.3.0
github.com/cskr/pubsub v1.0.2
github.com/dave/jennifer v1.3.0
@ -35,7 +36,7 @@ require (
github.com/golang/protobuf v1.3.2
github.com/golang/snappy v0.0.1
github.com/google/uuid v1.1.1
github.com/gorilla/websocket v1.4.0
github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989
github.com/hashicorp/errwrap v1.0.0
github.com/hashicorp/go-multierror v1.0.0
github.com/hashicorp/golang-lru v0.5.3
@ -83,13 +84,13 @@ require (
github.com/ipfs/go-verifcid v0.0.1
github.com/ipfs/interface-go-ipfs-core v0.1.0
github.com/jackpal/gateway v1.0.5
github.com/jackpal/go-nat-pmp v1.0.1
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458
github.com/jbenet/go-is-domain v1.0.2
github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2
github.com/jbenet/goprocess v0.1.3
github.com/jessevdk/go-flags v1.4.0 // indirect
github.com/jmoiron/sqlx v0.0.0-20190426154859-38398a30ed85
github.com/karalabe/usb v0.0.0-20190703133951-9be757f914c0
github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356
github.com/kisielk/errcheck v1.2.0 // indirect
github.com/kkdai/bstream v0.0.0-20181106074824-b3251f7901ec // indirect
github.com/konsorten/go-windows-terminal-sequences v1.0.2
@ -158,7 +159,7 @@ require (
github.com/multiformats/go-multibase v0.0.1
github.com/multiformats/go-multihash v0.0.6
github.com/multiformats/go-multistream v0.1.0
github.com/olekukonko/tablewriter v0.0.1
github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c
github.com/onsi/ginkgo v1.8.0
github.com/onsi/gomega v1.5.0
github.com/opentracing/opentracing-go v1.1.0
@ -181,10 +182,10 @@ require (
github.com/spf13/pflag v1.0.3
github.com/spf13/viper v1.4.0
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48
github.com/steakknife/bloomfilter v0.0.0-20180906043351-99ee86d9200f
github.com/steakknife/hamming v0.0.0-20180906055317-003c143a81c2
github.com/syndtr/goleveldb v1.0.0
github.com/tyler-smith/go-bip39 v1.0.0
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef
github.com/vulcanize/eth-block-extractor v0.0.0-20190801172153-2835f21156aa
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f
@ -213,7 +214,6 @@ require (
gopkg.in/fsnotify.v1 v1.4.7
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
gopkg.in/yaml.v2 v2.2.2
)
@ -225,4 +225,4 @@ replace github.com/ipfs/go-ipfs v0.4.22 => github.com/vulcanize/go-ipfs v0.4.22-
replace github.com/ipfs/go-ipfs-config v0.0.3 => github.com/vulcanize/go-ipfs-config v0.0.8-alpha
replace github.com/ethereum/go-ethereum v1.9.1 => github.com/vulcanize/go-ethereum v0.0.0-20190731183759-8e20673bd101
replace github.com/ethereum/go-ethereum v1.9.1 => github.com/vulcanize/go-ethereum v1.5.10-0.20200116224441-2a980ec3dcb8

85
go.sum
View File

@ -4,17 +4,36 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/Stebalien/go-bitfield v0.0.0-20180330043415-076a62f9ce6e/go.mod h1:3oM7gXIttpYDAJXpVNnSCiUMYBLIZ6cb1t+Ip982MRo=
github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo=
github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s=
github.com/VictoriaMetrics/fastcache v1.5.3 h1:2odJnXLbFZcoV9KYtQ+7TH1UOq3dn3AssMgieaezkR4=
github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v0.0.0-20190618191010-69ea0af04088 h1:98xHUPwc06h3/UklWP/wZjARk6fxAFEGkEZ0E1UJReo=
github.com/allegro/bigcache v0.0.0-20190618191010-69ea0af04088/go.mod h1:qw9PmPMRP4u9TMCeXEA+M4m2lvVM+B/URHNUtxFcERc=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
github.com/aristanetworks/goarista v0.0.0-20190712234253-ed1100a1c015 h1:7ABPr1+uJdqESAdlVevnc/2FJGiC/K3uMg1JiELeF+0=
github.com/aristanetworks/goarista v0.0.0-20190712234253-ed1100a1c015/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
@ -22,6 +41,7 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bren2010/proquint v0.0.0-20160323162903-38337c27106d h1:QgeLLoPD3kRVmeu/1al9iIpIANMi9O1zXFm8BnYGCJg=
github.com/bren2010/proquint v0.0.0-20160323162903-38337c27106d/go.mod h1:Jbj8eKecMNwf0KFI75skSUZqMB4UCRcndUScVBTWyUI=
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
@ -41,10 +61,16 @@ github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U=
github.com/coocood/freecache v1.1.0/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsipOHwKlNbzI=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@ -56,6 +82,7 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0=
github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis=
github.com/dave/jennifer v1.3.0 h1:p3tl41zjjCZTNBytMwrUuiAnherNUZktlhPTKoF/sEk=
@ -67,6 +94,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
github.com/davidlazar/go-crypto v0.0.0-20190522120613-62389b5e4ae0 h1:t2BzsfK9SPTlddm0l5PgRQp5fBzByku985NYG1otL/U=
github.com/davidlazar/go-crypto v0.0.0-20190522120613-62389b5e4ae0/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ=
github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ=
@ -80,15 +108,20 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczC
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dlespiau/covertool v0.0.0-20180314162135-b0c4c6d0583a/go.mod h1:/eQMcW3eA1bzKx23ZYI2H3tXPdJB5JWYTHzoUPBvQY4=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs=
github.com/elastic/gosigar v0.10.4 h1:6jfw75dsoflhBMRdO6QPzQUgLqUYTsQQQRkkcsHsuPo=
github.com/elastic/gosigar v0.10.4/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs=
github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A=
github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg=
github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E=
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
@ -100,6 +133,7 @@ github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclK
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@ -113,6 +147,7 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@ -121,6 +156,7 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@ -128,6 +164,9 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989 h1:giknQ4mEuDFmmHSrGcbargOuLHQGtywqo4mheITex54=
github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@ -138,6 +177,7 @@ github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/U
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
@ -146,11 +186,13 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag=
github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag=
github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
github.com/ipfs/bbloom v0.0.1 h1:s7KkiBPfxCeDVo47KySjK0ACPc5GJRUxFpdyWEuDjhw=
github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI=
github.com/ipfs/go-bitswap v0.0.3/go.mod h1:jadAZYsP/tcRMl47ZhFxhaNuDQoXawT8iHMg+iFoQbg=
@ -261,6 +303,8 @@ github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc
github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA=
github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs=
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
github.com/jbenet/go-is-domain v1.0.2 h1:11r5MSptcNFZyBoqubBQnVMUKRWLuRjL1banaIk+iYo=
@ -279,9 +323,12 @@ github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlT
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/karalabe/usb v0.0.0-20190703133951-9be757f914c0 h1:S8kWZLXHpcOq3nGAvIs0oDgd4CXxkxE3hkDVRjTu7ro=
github.com/karalabe/usb v0.0.0-20190703133951-9be757f914c0/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356 h1:I/yrLt2WilKxlQKCM52clh5rGzTKpVctGT1lH4Dc8Jw=
github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
@ -295,6 +342,7 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
@ -478,12 +526,17 @@ github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk=
github.com/marten-seemann/qtls v0.2.4 h1:mCJ6i1jAqcsm9XODrSGvXECodoAb1STta+TkxJCwCnE=
github.com/marten-seemann/qtls v0.2.4/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
@ -534,9 +587,13 @@ github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wS
github.com/multiformats/go-multistream v0.1.0 h1:UpO6jrsjqs46mqAK3n6wKRYFhugss9ArzbyUzU+4wkQ=
github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88=
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c h1:1RHs3tNxjXGHeul8z2t6H2N2TlAqpKe5yryJztRx4Jk=
github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
@ -548,11 +605,13 @@ github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -574,15 +633,22 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic=
github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rjeczalik/notify v0.9.2 h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8=
github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM=
github.com/robertkrimen/otto v0.0.0-20170205013659-6a77b7cbc37d/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI=
github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@ -594,6 +660,7 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k
github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0=
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
@ -612,28 +679,41 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw=
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/steakknife/bloomfilter v0.0.0-20180906043351-99ee86d9200f h1:T7YHzO3/eqD/kv5m9+TLM4XuEAkN7NPj5pnZHqaOo/Q=
github.com/steakknife/bloomfilter v0.0.0-20180906043351-99ee86d9200f/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
github.com/steakknife/hamming v0.0.0-20180906055317-003c143a81c2 h1:o6NMd68tuqfQ0ZFnz2d16xzFNLWxrCvqF40InOJJHSM=
github.com/steakknife/hamming v0.0.0-20180906055317-003c143a81c2/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs=
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tyler-smith/go-bip39 v1.0.0 h1:FOHg9gaQLeBBRbHE/QrTLfEiBHy5pQ/yXzf9JG5pYFM=
github.com/tyler-smith/go-bip39 v1.0.0/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vulcanize/eth-block-extractor v0.0.0-20190801172153-2835f21156aa h1:hCBMOksqjQ38BAl+jH47wWtcQJsyALeTTlQnqWi9Cog=
github.com/vulcanize/eth-block-extractor v0.0.0-20190801172153-2835f21156aa/go.mod h1:+c+U08Q9eVV/X45zrCEu1RU1lYFI4qIhPGn/WpCmrV4=
github.com/vulcanize/go-ethereum v0.0.0-20190731183759-8e20673bd101 h1:fsHhBzscAwi4u7/F033SFJwTIz+46D8uDWMu2/ZdvzA=
github.com/vulcanize/go-ethereum v0.0.0-20190731183759-8e20673bd101/go.mod h1:9i0pGnKDUFFr8yC/n8xyrNBVfhYlpwE8J3Ge6ThKvug=
github.com/vulcanize/go-ethereum v1.5.10-0.20200116224441-2a980ec3dcb8 h1:BHt0OW0rTgndFjSju7brF3dPceXWQuEV0IdtY8BjjT8=
github.com/vulcanize/go-ethereum v1.5.10-0.20200116224441-2a980ec3dcb8/go.mod h1:a9TqabFudpDu1nucId+k9S8R9whYaHnGBLKFouA5EAo=
github.com/vulcanize/go-ipfs v0.4.22-alpha h1:W+6njT14KWllMhABRFtPndqHw8SHCt5SqD4YX528kxM=
github.com/vulcanize/go-ipfs v0.4.22-alpha/go.mod h1:uaekWWeoaA0A9Dv1LObOKCSh9kIzTpZ5RbKW4g5CQHE=
github.com/vulcanize/go-ipfs-config v0.0.8-alpha h1:peaFvbEcPShF6ymOd8flqKkFz4YfcrNr/UOO7FmbWoQ=
@ -721,6 +801,7 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -751,6 +832,7 @@ golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -785,7 +867,9 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
@ -794,4 +878,5 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -18,6 +18,7 @@ import (
"github.com/ethereum/go-ethereum/statediff"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
"github.com/vulcanize/vulcanizedb/pkg/fakes"
)

View File

@ -18,19 +18,15 @@
package streamer
import (
"encoding/json"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rpc"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
"github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/core"
)
// ISuperNodeStreamer is the interface for streaming SuperNodePayloads from a vulcanizeDB super node
type ISuperNodeStreamer interface {
Stream(payloadChan chan SuperNodePayload, streamFilters config.Subscription) (*rpc.ClientSubscription, error)
Stream(payloadChan chan super_node.Payload, params super_node.SubscriptionSettings) (*rpc.ClientSubscription, error)
}
// SuperNodeStreamer is the underlying struct for the ISuperNodeStreamer interface
@ -46,39 +42,6 @@ func NewSuperNodeStreamer(client core.RPCClient) *SuperNodeStreamer {
}
// Stream is the main loop for subscribing to data from a vulcanizedb super node
func (sds *SuperNodeStreamer) Stream(payloadChan chan SuperNodePayload, streamFilters config.Subscription) (*rpc.ClientSubscription, error) {
return sds.Client.Subscribe("vdb", payloadChan, "stream", streamFilters)
}
// Payload holds the data returned from the super node to the requesting client
type SuperNodePayload struct {
BlockNumber *big.Int `json:"blockNumber"`
HeadersRlp [][]byte `json:"headersRlp"`
UnclesRlp [][]byte `json:"unclesRlp"`
TransactionsRlp [][]byte `json:"transactionsRlp"`
ReceiptsRlp [][]byte `json:"receiptsRlp"`
StateNodesRlp map[common.Hash][]byte `json:"stateNodesRlp"`
StorageNodesRlp map[common.Hash]map[common.Hash][]byte `json:"storageNodesRlp"`
ErrMsg string `json:"errMsg"`
encoded []byte
err error
}
func (sd *SuperNodePayload) ensureEncoded() {
if sd.encoded == nil && sd.err == nil {
sd.encoded, sd.err = json.Marshal(sd)
}
}
// Length to implement Encoder interface for StateDiff
func (sd *SuperNodePayload) Length() int {
sd.ensureEncoded()
return len(sd.encoded)
}
// Encode to implement Encoder interface for StateDiff
func (sd *SuperNodePayload) Encode() ([]byte, error) {
sd.ensureEncoded()
return sd.encoded, sd.err
func (sds *SuperNodeStreamer) Stream(payloadChan chan super_node.Payload, params super_node.SubscriptionSettings) (*rpc.ClientSubscription, error) {
return sds.Client.Subscribe("vdb", payloadChan, "stream", params)
}
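For reference, a minimal subscription sketch against the new Stream signature; the str and params values are assumptions, constructed elsewhere from an RPC client and a TOML config like the ones above:

package example

import (
	log "github.com/sirupsen/logrus"

	"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
	"github.com/vulcanize/vulcanizedb/pkg/super_node"
)

// subscribe drains payloads from a super node until the subscription errors out.
func subscribe(str *streamer.SuperNodeStreamer, params super_node.SubscriptionSettings) {
	payloadChan := make(chan super_node.Payload, 800)
	sub, err := str.Stream(payloadChan, params)
	if err != nil {
		log.Fatal(err)
	}
	for {
		select {
		case payload := <-payloadChan:
			log.Debugf("received payload: %+v", payload) // handle/decode the payload here
		case subErr := <-sub.Err():
			log.Error(subErr)
			return
		}
	}
}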

View File

@ -17,15 +17,15 @@
package transformer
import (
"github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
)
type SuperNodeTransformer interface {
Init() error
Execute() error
GetConfig() config.Subscription
GetConfig() super_node.SubscriptionSettings
}
type SuperNodeTransformerInitializer func(db *postgres.DB, subCon config.Subscription, client core.RPCClient) SuperNodeTransformer
type SuperNodeTransformerInitializer func(db *postgres.DB, subCon super_node.SubscriptionSettings, client core.RPCClient) SuperNodeTransformer

View File

@ -22,6 +22,7 @@ package watcher
import (
"fmt"
"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"

View File

@ -1,62 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package config
import "math/big"
// Subscription config is used by a subscribing transformer to specify which data to receive from the super node
type Subscription struct {
BackFill bool
BackFillOnly bool
StartingBlock *big.Int
EndingBlock *big.Int // set to 0 or a negative value to have no ending block
HeaderFilter HeaderFilter
TrxFilter TrxFilter
ReceiptFilter ReceiptFilter
StateFilter StateFilter
StorageFilter StorageFilter
}
type HeaderFilter struct {
Off bool
Uncles bool
}
type TrxFilter struct {
Off bool
Src []string
Dst []string
}
type ReceiptFilter struct {
Off bool
Contracts []string
Topic0s []string
}
type StateFilter struct {
Off bool
Addresses []string // is converted to state key by taking its keccak256 hash
IntermediateNodes bool
}
type StorageFilter struct {
Off bool
Addresses []string
StorageKeys []string
IntermediateNodes bool
}

View File

@ -17,6 +17,8 @@
package repositories_test
import (
"math/rand"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/core"
@ -25,7 +27,6 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/fakes"
"github.com/vulcanize/vulcanizedb/test_config"
"math/rand"
)
var _ = Describe("Checked Headers repository", func() {

View File

@ -18,6 +18,7 @@ package repositories
import (
"database/sql"
"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"

View File

@ -20,12 +20,14 @@ import (
"bytes"
"encoding/json"
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/vulcanize/vulcanizedb/pkg/core"
"math/rand"
"strconv"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/vulcanize/vulcanizedb/pkg/core"
)
var (

View File

@ -18,8 +18,6 @@ package fakes
import (
"context"
"errors"
"github.com/ethereum/go-ethereum/statediff"
"math/big"
"github.com/ethereum/go-ethereum/core/types"
@ -40,7 +38,7 @@ type MockRPCClient struct {
passedResult interface{}
passedBatch []client.BatchElem
passedNamespace string
passedPayloadChan chan statediff.Payload
passedPayloadChan interface{}
passedSubscribeArgs []interface{}
lengthOfBatch int
returnPOAHeader core.POAHeader
@ -51,12 +49,7 @@ type MockRPCClient struct {
func (client *MockRPCClient) Subscribe(namespace string, payloadChan interface{}, args ...interface{}) (*rpc.ClientSubscription, error) {
client.passedNamespace = namespace
passedPayloadChan, ok := payloadChan.(chan statediff.Payload)
if !ok {
return nil, errors.New("passed in channel is not of the correct type")
}
client.passedPayloadChan = passedPayloadChan
client.passedPayloadChan = payloadChan
for _, arg := range args {
client.passedSubscribeArgs = append(client.passedSubscribeArgs, arg)
@ -66,7 +59,7 @@ func (client *MockRPCClient) Subscribe(namespace string, payloadChan interface{}
return &subscription, nil
}
func (client *MockRPCClient) AssertSubscribeCalledWith(namespace string, payloadChan chan statediff.Payload, args []interface{}) {
func (client *MockRPCClient) AssertSubscribeCalledWith(namespace string, payloadChan interface{}, args []interface{}) {
Expect(client.passedNamespace).To(Equal(namespace))
Expect(client.passedPayloadChan).To(Equal(payloadChan))
Expect(client.passedSubscribeArgs).To(Equal(args))

View File

@ -19,8 +19,6 @@ package ipfs
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-ipfs/core"
"github.com/ipfs/go-ipfs/plugin/loader"
@ -58,22 +56,3 @@ func InitIPFSBlockService(ipfsPath string) (blockservice.BlockService, error) {
}
return ipfsNode.Blocks, nil
}
// AddressToKey hashes an address
func AddressToKey(address common.Address) common.Hash {
return crypto.Keccak256Hash(address[:])
}
// HexToKey hashes a hex (0x leading or not) string
func HexToKey(hex string) common.Hash {
addr := common.FromHex(hex)
return crypto.Keccak256Hash(addr[:])
}
// EmptyCIDWrapper returns whether the provided CIDWrapper is empty, i.e. has no CIDs we need to process
func EmptyCIDWrapper(cids CIDWrapper) bool {
if len(cids.Transactions) > 0 || len(cids.Headers) > 0 || len(cids.Uncles) > 0 || len(cids.Receipts) > 0 || len(cids.StateNodes) > 0 || len(cids.StorageNodes) > 0 {
return false
}
return true
}

View File

@ -1,230 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-block-format"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
log "github.com/sirupsen/logrus"
)
// IPLDFetcher is an interface for fetching IPLDs
type IPLDFetcher interface {
FetchIPLDs(cids CIDWrapper) (*IPLDWrapper, error)
}
// EthIPLDFetcher is used to fetch ETH IPLD objects from IPFS
type EthIPLDFetcher struct {
BlockService blockservice.BlockService
}
// NewIPLDFetcher creates a pointer to a new IPLDFetcher
func NewIPLDFetcher(ipfsPath string) (*EthIPLDFetcher, error) {
blockService, err := InitIPFSBlockService(ipfsPath)
if err != nil {
return nil, err
}
return &EthIPLDFetcher{
BlockService: blockService,
}, nil
}
// FetchIPLDs is the exported method for fetching and returning all the IPLDs specified in the CIDWrapper
func (f *EthIPLDFetcher) FetchIPLDs(cids CIDWrapper) (*IPLDWrapper, error) {
log.Debug("fetching iplds")
blocks := &IPLDWrapper{
BlockNumber: cids.BlockNumber,
Headers: make([]blocks.Block, 0),
Uncles: make([]blocks.Block, 0),
Transactions: make([]blocks.Block, 0),
Receipts: make([]blocks.Block, 0),
StateNodes: make(map[common.Hash]blocks.Block),
StorageNodes: make(map[common.Hash]map[common.Hash]blocks.Block),
}
headersErr := f.fetchHeaders(cids, blocks)
if headersErr != nil {
return nil, headersErr
}
unclesErr := f.fetchUncles(cids, blocks)
if unclesErr != nil {
return nil, unclesErr
}
trxsErr := f.fetchTrxs(cids, blocks)
if trxsErr != nil {
return nil, trxsErr
}
rctsErr := f.fetchRcts(cids, blocks)
if rctsErr != nil {
return nil, rctsErr
}
storageErr := f.fetchStorage(cids, blocks)
if storageErr != nil {
return nil, storageErr
}
stateErr := f.fetchState(cids, blocks)
if stateErr != nil {
return nil, stateErr
}
return blocks, nil
}
// fetchHeaders fetches headers
// It uses the f.fetchBatch method
func (f *EthIPLDFetcher) fetchHeaders(cids CIDWrapper, blocks *IPLDWrapper) error {
log.Debug("fetching header iplds")
headerCids := make([]cid.Cid, 0, len(cids.Headers))
for _, c := range cids.Headers {
dc, err := cid.Decode(c)
if err != nil {
return err
}
headerCids = append(headerCids, dc)
}
blocks.Headers = f.fetchBatch(headerCids)
if len(blocks.Headers) != len(headerCids) {
log.Errorf("ipfs fetcher: number of header blocks returned (%d) does not match number expected (%d)", len(blocks.Headers), len(headerCids))
}
return nil
}
// fetchUncles fetches uncles
// It uses the f.fetchBatch method
func (f *EthIPLDFetcher) fetchUncles(cids CIDWrapper, blocks *IPLDWrapper) error {
log.Debug("fetching uncle iplds")
uncleCids := make([]cid.Cid, 0, len(cids.Uncles))
for _, c := range cids.Uncles {
dc, err := cid.Decode(c)
if err != nil {
return err
}
uncleCids = append(uncleCids, dc)
}
blocks.Uncles = f.fetchBatch(uncleCids)
if len(blocks.Uncles) != len(uncleCids) {
log.Errorf("ipfs fetcher: number of uncle blocks returned (%d) does not match number expected (%d)", len(blocks.Uncles), len(uncleCids))
}
return nil
}
// fetchTrxs fetches transactions
// It uses the f.fetchBatch method
func (f *EthIPLDFetcher) fetchTrxs(cids CIDWrapper, blocks *IPLDWrapper) error {
log.Debug("fetching transaction iplds")
trxCids := make([]cid.Cid, 0, len(cids.Transactions))
for _, c := range cids.Transactions {
dc, err := cid.Decode(c)
if err != nil {
return err
}
trxCids = append(trxCids, dc)
}
blocks.Transactions = f.fetchBatch(trxCids)
if len(blocks.Transactions) != len(trxCids) {
log.Errorf("ipfs fetcher: number of transaction blocks returned (%d) does not match number expected (%d)", len(blocks.Transactions), len(trxCids))
}
return nil
}
// fetchRcts fetches receipts
// It uses the f.fetchBatch method
func (f *EthIPLDFetcher) fetchRcts(cids CIDWrapper, blocks *IPLDWrapper) error {
log.Debug("fetching receipt iplds")
rctCids := make([]cid.Cid, 0, len(cids.Receipts))
for _, c := range cids.Receipts {
dc, err := cid.Decode(c)
if err != nil {
return err
}
rctCids = append(rctCids, dc)
}
blocks.Receipts = f.fetchBatch(rctCids)
if len(blocks.Receipts) != len(rctCids) {
log.Errorf("ipfs fetcher: number of receipt blocks returned (%d) does not match number expected (%d)", len(blocks.Receipts), len(rctCids))
}
return nil
}
// fetchState fetches state nodes
// It uses the single f.fetch method instead of the batch fetch, because it
// needs to maintain the data's relation to state keys
func (f *EthIPLDFetcher) fetchState(cids CIDWrapper, blocks *IPLDWrapper) error {
log.Debug("fetching state iplds")
for _, stateNode := range cids.StateNodes {
if stateNode.CID == "" || stateNode.Key == "" {
continue
}
dc, decodeErr := cid.Decode(stateNode.CID)
if decodeErr != nil {
return decodeErr
}
block, fetchErr := f.fetch(dc)
if fetchErr != nil {
return fetchErr
}
blocks.StateNodes[common.HexToHash(stateNode.Key)] = block
}
return nil
}
// fetchStorage fetches storage nodes
// It uses the single f.fetch method instead of the batch fetch, because it
// needs to maintain the data's relation to state and storage keys
func (f *EthIPLDFetcher) fetchStorage(cids CIDWrapper, blks *IPLDWrapper) error {
log.Debug("fetching storage iplds")
for _, storageNode := range cids.StorageNodes {
if storageNode.CID == "" || storageNode.Key == "" || storageNode.StateKey == "" {
continue
}
dc, decodeErr := cid.Decode(storageNode.CID)
if decodeErr != nil {
return decodeErr
}
blk, fetchErr := f.fetch(dc)
if fetchErr != nil {
return fetchErr
}
if blks.StorageNodes[common.HexToHash(storageNode.StateKey)] == nil {
blks.StorageNodes[common.HexToHash(storageNode.StateKey)] = make(map[common.Hash]blocks.Block)
}
blks.StorageNodes[common.HexToHash(storageNode.StateKey)][common.HexToHash(storageNode.Key)] = blk
}
return nil
}
// fetch is used to fetch a single cid
func (f *EthIPLDFetcher) fetch(cid cid.Cid) (blocks.Block, error) {
return f.BlockService.GetBlock(context.Background(), cid)
}
// fetchBatch is used to fetch a batch of IPFS data blocks by cid
// There is no guarantee all are fetched, and no error in such a case, so
// downstream we will need to confirm which CIDs were fetched in the result set
func (f *EthIPLDFetcher) fetchBatch(cids []cid.Cid) []blocks.Block {
fetchedBlocks := make([]blocks.Block, 0, len(cids))
blockChan := f.BlockService.GetBlocks(context.Background(), cids)
for block := range blockChan {
fetchedBlocks = append(fetchedBlocks, block)
}
return fetchedBlocks
}

22
pkg/ipfs/models.go Normal file
View File

@ -0,0 +1,22 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs
type BlockModel struct {
CID string `db:"key"`
Data []byte `db:"data"`
}
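Since BlockModel's db tags map directly onto key/data columns in the Postgres-backed IPFS blockstore, rows can be scanned straight into it with the sqlx dependency already in go.mod. A minimal sketch follows; the public.blocks(key, data) table layout is an assumption here, not something this diff establishes:

package example

import (
	"github.com/jmoiron/sqlx"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// getBlock loads a single raw IPFS block, keyed by CID, into the new BlockModel.
func getBlock(db *sqlx.DB, cid string) (ipfs.BlockModel, error) {
	var block ipfs.BlockModel
	err := db.Get(&block, `SELECT key, data FROM public.blocks WHERE key = $1`, cid)
	return block, err
}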

View File

@ -1,97 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-block-format"
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
)
// IPLDResolver is the interface to resolving IPLDs
type IPLDResolver interface {
ResolveIPLDs(ipfsBlocks IPLDWrapper) streamer.SuperNodePayload
}
// EthIPLDResolver is the underlying struct to support the IPLDResolver interface
type EthIPLDResolver struct{}
// NewIPLDResolver returns a pointer to an EthIPLDResolver which satisfies the IPLDResolver interface
func NewIPLDResolver() *EthIPLDResolver {
return &EthIPLDResolver{}
}
// ResolveIPLDs is the exported method for resolving all of the ETH IPLDs packaged in an IPLDWrapper
func (eir *EthIPLDResolver) ResolveIPLDs(ipfsBlocks IPLDWrapper) streamer.SuperNodePayload {
response := &streamer.SuperNodePayload{
BlockNumber: ipfsBlocks.BlockNumber,
StateNodesRlp: make(map[common.Hash][]byte),
StorageNodesRlp: make(map[common.Hash]map[common.Hash][]byte),
}
eir.resolveHeaders(ipfsBlocks.Headers, response)
eir.resolveUncles(ipfsBlocks.Uncles, response)
eir.resolveTransactions(ipfsBlocks.Transactions, response)
eir.resolveReceipts(ipfsBlocks.Receipts, response)
eir.resolveState(ipfsBlocks.StateNodes, response)
eir.resolveStorage(ipfsBlocks.StorageNodes, response)
return *response
}
func (eir *EthIPLDResolver) resolveHeaders(blocks []blocks.Block, response *streamer.SuperNodePayload) {
for _, block := range blocks {
raw := block.RawData()
response.HeadersRlp = append(response.HeadersRlp, raw)
}
}
func (eir *EthIPLDResolver) resolveUncles(blocks []blocks.Block, response *streamer.SuperNodePayload) {
for _, block := range blocks {
raw := block.RawData()
response.UnclesRlp = append(response.UnclesRlp, raw)
}
}
func (eir *EthIPLDResolver) resolveTransactions(blocks []blocks.Block, response *streamer.SuperNodePayload) {
for _, block := range blocks {
raw := block.RawData()
response.TransactionsRlp = append(response.TransactionsRlp, raw)
}
}
func (eir *EthIPLDResolver) resolveReceipts(blocks []blocks.Block, response *streamer.SuperNodePayload) {
for _, block := range blocks {
raw := block.RawData()
response.ReceiptsRlp = append(response.ReceiptsRlp, raw)
}
}
func (eir *EthIPLDResolver) resolveState(blocks map[common.Hash]blocks.Block, response *streamer.SuperNodePayload) {
for key, block := range blocks {
raw := block.RawData()
response.StateNodesRlp[key] = raw
}
}
func (eir *EthIPLDResolver) resolveStorage(blocks map[common.Hash]map[common.Hash]blocks.Block, response *streamer.SuperNodePayload) {
for stateKey, storageBlocks := range blocks {
response.StorageNodesRlp[stateKey] = make(map[common.Hash][]byte)
for storageKey, storageVal := range storageBlocks {
raw := storageVal.RawData()
response.StorageNodesRlp[stateKey][storageKey] = raw
}
}
}

View File

@ -1,114 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ipfs/go-block-format"
)
// CIDWrapper is used to package CIDs retrieved from the local Postgres cache for direct fetching of IPLDs
type CIDWrapper struct {
BlockNumber *big.Int
Headers []string
Uncles []string
Transactions []string
Receipts []string
StateNodes []StateNodeCID
StorageNodes []StorageNodeCID
}
// IPLDWrapper is used to package raw IPLD block data fetched from IPFS
type IPLDWrapper struct {
BlockNumber *big.Int
Headers []blocks.Block
Uncles []blocks.Block
Transactions []blocks.Block
Receipts []blocks.Block
StateNodes map[common.Hash]blocks.Block
StorageNodes map[common.Hash]map[common.Hash]blocks.Block
}
// IPLDPayload is a custom type which packages raw ETH data for the IPFS publisher
type IPLDPayload struct {
HeaderRLP []byte
BlockNumber *big.Int
BlockHash common.Hash
BlockBody *types.Body
TrxMetaData []*TrxMetaData
Receipts types.Receipts
ReceiptMetaData []*ReceiptMetaData
StateNodes map[common.Hash]StateNode
StorageNodes map[common.Hash][]StorageNode
}
// StateNode struct used to flag node as leaf or not
type StateNode struct {
Value []byte
Leaf bool
}
// StorageNode struct used to flag node as leaf or not
type StorageNode struct {
Key common.Hash
Value []byte
Leaf bool
}
// CIDPayload is a struct to hold all the CIDs and their meta data
type CIDPayload struct {
BlockNumber string
BlockHash common.Hash
HeaderCID string
UncleCIDs map[common.Hash]string
TransactionCIDs map[common.Hash]*TrxMetaData
ReceiptCIDs map[common.Hash]*ReceiptMetaData
StateNodeCIDs map[common.Hash]StateNodeCID
StorageNodeCIDs map[common.Hash][]StorageNodeCID
}
// StateNodeCID is used to associate a leaf flag with a state node cid
type StateNodeCID struct {
CID string
Leaf bool
Key string `db:"state_key"`
}
// StorageNodeCID is used to associate a leaf flag with a storage node cid
type StorageNodeCID struct {
Key string `db:"storage_key"`
CID string
Leaf bool
StateKey string `db:"state_key"`
}
// ReceiptMetaData wraps some additional data around our receipt CIDs for indexing
type ReceiptMetaData struct {
CID string
Topic0s []string
ContractAddress string
}
// TrxMetaData wraps some additional data around our transaction CID for indexing
type TrxMetaData struct {
CID string
Src string
Dst string
}

View File

@ -22,8 +22,6 @@ import (
"github.com/ethereum/go-ethereum/rpc"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
"github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/core"
)
@ -35,47 +33,47 @@ const APIVersion = "0.0.1"
// PublicSuperNodeAPI is the public api for the super node
type PublicSuperNodeAPI struct {
sni NodeInterface
sn SuperNode
}
// NewPublicSuperNodeAPI creates a new PublicSuperNodeAPI with the provided underlying SyncPublishScreenAndServe process
func NewPublicSuperNodeAPI(superNodeInterface NodeInterface) *PublicSuperNodeAPI {
func NewPublicSuperNodeAPI(superNodeInterface SuperNode) *PublicSuperNodeAPI {
return &PublicSuperNodeAPI{
sni: superNodeInterface,
sn: superNodeInterface,
}
}
// Stream is the public method to setup a subscription that fires off SyncPublishScreenAndServe payloads as they are created
func (api *PublicSuperNodeAPI) Stream(ctx context.Context, streamFilters config.Subscription) (*rpc.Subscription, error) {
// Stream is the public method to setup a subscription that fires off super node payloads as they are processed
func (api *PublicSuperNodeAPI) Stream(ctx context.Context, params SubscriptionSettings) (*rpc.Subscription, error) {
// ensure that the RPC connection supports subscriptions
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return nil, rpc.ErrNotificationsUnsupported
}
// create subscription and start waiting for statediff events
// create subscription and start waiting for stream events
rpcSub := notifier.CreateSubscription()
go func() {
// subscribe to events from the SyncPublishScreenAndServe service
payloadChannel := make(chan streamer.SuperNodePayload, payloadChanBufferSize)
payloadChannel := make(chan Payload, PayloadChanBufferSize)
quitChan := make(chan bool, 1)
go api.sni.Subscribe(rpcSub.ID, payloadChannel, quitChan, streamFilters)
go api.sn.Subscribe(rpcSub.ID, payloadChannel, quitChan, params)
// loop and await state diff payloads and relay them to the subscriber with then notifier
// loop and await payloads and relay them to the subscriber using notifier
for {
select {
case packet := <-payloadChannel:
if notifyErr := notifier.Notify(rpcSub.ID, packet); notifyErr != nil {
log.Error("Failed to send state diff packet", "err", notifyErr)
api.sni.Unsubscribe(rpcSub.ID)
if err := notifier.Notify(rpcSub.ID, packet); err != nil {
log.Error("Failed to send super node packet", "err", err)
api.sn.Unsubscribe(rpcSub.ID)
return
}
case <-rpcSub.Err():
api.sni.Unsubscribe(rpcSub.ID)
api.sn.Unsubscribe(rpcSub.ID)
return
case <-quitChan:
// don't need to unsubscribe, SyncPublishScreenAndServe service does so before sending the quit signal
// no need to unsubscribe from the super node; the service unsubscribes before sending the quit signal
return
}
}
@ -84,7 +82,7 @@ func (api *PublicSuperNodeAPI) Stream(ctx context.Context, streamFilters config.
return rpcSub, nil
}
// Node is a public rpc method to allow transformers to fetch the Geth node info for the super node
// Node is a public rpc method to allow transformers to fetch the node info for the super node
func (api *PublicSuperNodeAPI) Node() core.Node {
return api.sni.Node()
return api.sn.Node()
}
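
The Stream endpoint above is consumed as a standard go-ethereum RPC subscription. Below is a minimal client sketch; the "vdb" namespace, the WS endpoint, and the zero-value filter settings are assumptions for illustration, not confirmed by this diff:

package main

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/rpc"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
)

func main() {
	// Dial the super node's WS server; the endpoint is illustrative
	client, err := rpc.Dial("ws://127.0.0.1:8546")
	if err != nil {
		log.Fatal(err)
	}
	payloads := make(chan interface{}, 800)
	// Zero-value filters stream everything; a real subscriber should populate them
	params := &config.EthSubscription{}
	// "vdb" is an assumed namespace; use whatever namespace the server registers
	sub, err := client.Subscribe(context.Background(), "vdb", payloads, "stream", params)
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()
	for {
		select {
		case payload := <-payloads:
			log.Printf("received payload: %v", payload)
		case err := <-sub.Err():
			log.Fatal(err)
		}
	}
}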

View File

@ -22,14 +22,14 @@ import (
"sync/atomic"
"time"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
"github.com/ethereum/go-ethereum/params"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)
const (
@ -45,35 +45,55 @@ type BackFillInterface interface {
// BackFillService for filling in gaps in the super node
type BackFillService struct {
// Interface for converting statediff payloads into ETH-IPLD object payloads
Converter ipfs.PayloadConverter
// Interface for publishing the ETH-IPLD payloads to IPFS
Publisher ipfs.IPLDPublisher
// Interface for indexing the CIDs of the published ETH-IPLDs in Postgres
Repository CIDRepository
// Interface for converting payloads into IPLD object payloads
Converter shared.PayloadConverter
// Interface for publishing the IPLD payloads to IPFS
Publisher shared.IPLDPublisher
// Interface for indexing the CIDs of the published IPLDs in Postgres
Indexer shared.CIDIndexer
// Interface for searching and retrieving CIDs from Postgres index
Retriever CIDRetriever
// State-diff fetcher; needs to be configured with an archival core.RpcClient
Fetcher fetcher.StateDiffFetcher
Retriever shared.CIDRetriever
// Interface for fetching payloads at historical blocks; over http
Fetcher shared.PayloadFetcher
// Check frequency
GapCheckFrequency time.Duration
// size of batch fetches
// Size of batch fetches
BatchSize uint64
}
// NewBackFillService returns a new BackFillInterface
func NewBackFillService(ipfsPath string, db *postgres.DB, archivalNodeRPCClient core.RPCClient, freq time.Duration, batchSize uint64) (BackFillInterface, error) {
publisher, err := ipfs.NewIPLDPublisher(ipfsPath)
func NewBackFillService(settings *config.SuperNode) (BackFillInterface, error) {
publisher, err := NewIPLDPublisher(settings.Chain, settings.IPFSPath)
if err != nil {
return nil, err
}
indexer, err := NewCIDIndexer(settings.Chain, settings.DB)
if err != nil {
return nil, err
}
converter, err := NewPayloadConverter(settings.Chain, params.MainnetChainConfig)
if err != nil {
return nil, err
}
retriever, err := NewCIDRetriever(settings.Chain, settings.DB)
if err != nil {
return nil, err
}
fetcher, err := NewPayloadFetcher(settings.Chain, settings.HTTPClient)
if err != nil {
return nil, err
}
batchSize := settings.BatchSize
if batchSize == 0 {
batchSize = DefaultMaxBatchSize
}
return &BackFillService{
Repository: NewCIDRepository(db),
Converter: ipfs.NewPayloadConverter(params.MainnetChainConfig),
Indexer: indexer,
Converter: converter,
Publisher: publisher,
Retriever: NewCIDRetriever(db),
Fetcher: fetcher.NewStateDiffFetcher(archivalNodeRPCClient),
GapCheckFrequency: freq,
Retriever: retriever,
Fetcher: fetcher,
GapCheckFrequency: settings.Frequency,
BatchSize: batchSize,
}, nil
}
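
A sketch of driving the backfiller from a loaded config; it assumes the superNode.backFill keys read by the SuperNode config shown later in this diff have been set, and that Postgres and the archival node are reachable:

package main

import (
	"log"
	"sync"

	"github.com/vulcanize/vulcanizedb/pkg/super_node"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
)

func main() {
	// Load settings (superNode.* keys) via viper; requires a reachable Postgres
	settings, err := config.NewSuperNodeConfig()
	if err != nil {
		log.Fatal(err)
	}
	backfiller, err := super_node.NewBackFillService(settings)
	if err != nil {
		log.Fatal(err)
	}
	wg := &sync.WaitGroup{}
	quit := make(chan bool)
	backfiller.FillGaps(wg, quit) // spins up the periodic gap-check goroutine
	wg.Wait()
}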
@ -93,23 +113,24 @@ func (bfs *BackFillService) FillGaps(wg *sync.WaitGroup, quitChan <-chan bool) {
return
case <-ticker.C:
log.Info("searching for gaps in the super node database")
startingBlock, firstBlockErr := bfs.Retriever.RetrieveFirstBlockNumber()
if firstBlockErr != nil {
log.Error(firstBlockErr)
startingBlock, err := bfs.Retriever.RetrieveFirstBlockNumber()
if err != nil {
log.Error(err)
continue
}
if startingBlock != 1 {
log.Info("found gap at the beginning of the sync")
bfs.fillGaps(1, uint64(startingBlock-1))
}
gaps, gapErr := bfs.Retriever.RetrieveGapsInData()
if gapErr != nil {
log.Error(gapErr)
gaps, err := bfs.Retriever.RetrieveGapsInData()
if err != nil {
log.Error(err)
continue
}
for _, gap := range gaps {
bfs.fillGaps(gap[0], gap[1])
if err := bfs.fillGaps(gap.Start, gap.Stop); err != nil {
log.Error(err)
}
}
}
}
@ -117,14 +138,13 @@ func (bfs *BackFillService) FillGaps(wg *sync.WaitGroup, quitChan <-chan bool) {
log.Info("fillGaps goroutine successfully spun up")
}
func (bfs *BackFillService) fillGaps(startingBlock, endingBlock uint64) {
func (bfs *BackFillService) fillGaps(startingBlock, endingBlock uint64) error {
log.Infof("going to fill in gap from %d to %d", startingBlock, endingBlock)
errChan := make(chan error)
done := make(chan bool)
backFillInitErr := bfs.backFill(startingBlock, endingBlock, errChan, done)
if backFillInitErr != nil {
log.Error(backFillInitErr)
return
err := bfs.backFill(startingBlock, endingBlock, errChan, done)
if err != nil {
return err
}
for {
select {
@ -132,7 +152,7 @@ func (bfs *BackFillService) fillGaps(startingBlock, endingBlock uint64) {
log.Error(err)
case <-done:
log.Infof("finished filling in gap from %d to %d", startingBlock, endingBlock)
return
return nil
}
}
}
@ -165,24 +185,26 @@ func (bfs *BackFillService) backFill(startingBlock, endingBlock uint64, errChan
<-forwardDone
}
go func(blockHeights []uint64) {
payloads, fetchErr := bfs.Fetcher.FetchStateDiffsAt(blockHeights)
if fetchErr != nil {
errChan <- fetchErr
payloads, err := bfs.Fetcher.FetchAt(blockHeights)
if err != nil {
errChan <- err
}
for _, payload := range payloads {
ipldPayload, convertErr := bfs.Converter.Convert(payload)
if convertErr != nil {
errChan <- convertErr
ipldPayload, err := bfs.Converter.Convert(payload)
if err != nil {
errChan <- err
continue
}
cidPayload, publishErr := bfs.Publisher.Publish(ipldPayload)
if publishErr != nil {
errChan <- publishErr
// make the backfiller a part of the super_node service and forward these
// ipldPayloads to the regular publishAndIndex and screenAndServe channels;
// this would allow us to stream backfilled data to subscribers
cidPayload, err := bfs.Publisher.Publish(ipldPayload)
if err != nil {
errChan <- err
continue
}
indexErr := bfs.Repository.Index(cidPayload)
if indexErr != nil {
errChan <- indexErr
if err := bfs.Indexer.Index(cidPayload); err != nil {
errChan <- err
}
}
// when this goroutine is done, send out a signal

View File

@ -24,43 +24,42 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
mocks2 "github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
mocks3 "github.com/vulcanize/vulcanizedb/pkg/super_node/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
var _ = Describe("BackFiller", func() {
Describe("FillGaps", func() {
It("Periodically checks for and fills in gaps in the super node's data", func() {
mockCidRepo := &mocks3.CIDRepository{
mockCidRepo := &mocks.CIDIndexer{
ReturnErr: nil,
}
mockPublisher := &mocks.IterativeIPLDPublisher{
ReturnCIDPayload: []*ipfs.CIDPayload{mocks.MockCIDPayload, mocks.MockCIDPayload},
ReturnCIDPayload: []*eth.CIDPayload{mocks.MockCIDPayload, mocks.MockCIDPayload},
ReturnErr: nil,
}
mockConverter := &mocks.IterativePayloadConverter{
ReturnIPLDPayload: []*ipfs.IPLDPayload{mocks.MockIPLDPayload, mocks.MockIPLDPayload},
ReturnIPLDPayload: []*eth.IPLDPayload{mocks.MockIPLDPayload, mocks.MockIPLDPayload},
ReturnErr: nil,
}
mockRetriever := &mocks3.MockCIDRetriever{
mockRetriever := &mocks.MockCIDRetriever{
FirstBlockNumberToReturn: 1,
GapsToRetrieve: [][2]uint64{
GapsToRetrieve: []shared.Gap{
{
100, 101,
Start: 100, Stop: 101,
},
},
}
mockFetcher := &mocks2.StateDiffFetcher{
mockFetcher := &mocks.StateDiffFetcher{
PayloadsToReturn: map[uint64]statediff.Payload{
100: mocks.MockStateDiffPayload,
101: mocks.MockStateDiffPayload,
},
}
backfiller := &super_node.BackFillService{
Repository: mockCidRepo,
Indexer: mockCidRepo,
Publisher: mockPublisher,
Converter: mockConverter,
Fetcher: mockFetcher,
@ -88,32 +87,32 @@ var _ = Describe("BackFiller", func() {
})
It("Works for single block `ranges`", func() {
mockCidRepo := &mocks3.CIDRepository{
mockCidRepo := &mocks.CIDIndexer{
ReturnErr: nil,
}
mockPublisher := &mocks.IterativeIPLDPublisher{
ReturnCIDPayload: []*ipfs.CIDPayload{mocks.MockCIDPayload},
ReturnCIDPayload: []*eth.CIDPayload{mocks.MockCIDPayload},
ReturnErr: nil,
}
mockConverter := &mocks.IterativePayloadConverter{
ReturnIPLDPayload: []*ipfs.IPLDPayload{mocks.MockIPLDPayload},
ReturnIPLDPayload: []*eth.IPLDPayload{mocks.MockIPLDPayload},
ReturnErr: nil,
}
mockRetriever := &mocks3.MockCIDRetriever{
mockRetriever := &mocks.MockCIDRetriever{
FirstBlockNumberToReturn: 1,
GapsToRetrieve: [][2]uint64{
GapsToRetrieve: []shared.Gap{
{
100, 100,
Start: 100, Stop: 100,
},
},
}
mockFetcher := &mocks2.StateDiffFetcher{
mockFetcher := &mocks.StateDiffFetcher{
PayloadsToReturn: map[uint64]statediff.Payload{
100: mocks.MockStateDiffPayload,
},
}
backfiller := &super_node.BackFillService{
Repository: mockCidRepo,
Indexer: mockCidRepo,
Publisher: mockPublisher,
Converter: mockConverter,
Fetcher: mockFetcher,
@ -138,29 +137,29 @@ var _ = Describe("BackFiller", func() {
})
It("Finds beginning gap", func() {
mockCidRepo := &mocks3.CIDRepository{
mockCidRepo := &mocks.CIDIndexer{
ReturnErr: nil,
}
mockPublisher := &mocks.IterativeIPLDPublisher{
ReturnCIDPayload: []*ipfs.CIDPayload{mocks.MockCIDPayload, mocks.MockCIDPayload},
ReturnCIDPayload: []*eth.CIDPayload{mocks.MockCIDPayload, mocks.MockCIDPayload},
ReturnErr: nil,
}
mockConverter := &mocks.IterativePayloadConverter{
ReturnIPLDPayload: []*ipfs.IPLDPayload{mocks.MockIPLDPayload, mocks.MockIPLDPayload},
ReturnIPLDPayload: []*eth.IPLDPayload{mocks.MockIPLDPayload, mocks.MockIPLDPayload},
ReturnErr: nil,
}
mockRetriever := &mocks3.MockCIDRetriever{
mockRetriever := &mocks.MockCIDRetriever{
FirstBlockNumberToReturn: 3,
GapsToRetrieve: [][2]uint64{},
GapsToRetrieve: []shared.Gap{},
}
mockFetcher := &mocks2.StateDiffFetcher{
mockFetcher := &mocks.StateDiffFetcher{
PayloadsToReturn: map[uint64]statediff.Payload{
1: mocks.MockStateDiffPayload,
2: mocks.MockStateDiffPayload,
},
}
backfiller := &super_node.BackFillService{
Repository: mockCidRepo,
Indexer: mockCidRepo,
Publisher: mockPublisher,
Converter: mockConverter,
Fetcher: mockFetcher,

View File

@ -0,0 +1,58 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package config
import (
"errors"
"strings"
)
// ChainType enum for specifying blockchain
type ChainType int
const (
Unknown ChainType = iota
Ethereum
Bitcoin
Omni
)
func (c ChainType) String() string {
switch c {
case Ethereum:
return "Ethereum"
case Bitcoin:
return "Bitcoin"
case Omni:
return "Omni"
default:
return ""
}
}
func NewChainType(name string) (ChainType, error) {
switch strings.ToLower(name) {
case "ethereum", "eth":
return Ethereum, nil
case "bitcoin", "btc", "xbt":
return Bitcoin, nil
case "omni":
return Omni, nil
default:
return Unknown, errors.New("invalid name for chain")
}
}
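
The aliases are matched case-insensitively, so config values like "ETH" or "btc" resolve cleanly; a quick usage sketch:

package main

import (
	"fmt"
	"log"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
)

func main() {
	chain, err := config.NewChainType("ETH") // lower-cased before matching
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(chain) // prints "Ethereum" via the String method
}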

View File

@ -0,0 +1,167 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package config
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
"github.com/spf13/viper"
"github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/eth"
"github.com/vulcanize/vulcanizedb/pkg/eth/client"
vRpc "github.com/vulcanize/vulcanizedb/pkg/eth/converters/rpc"
"github.com/vulcanize/vulcanizedb/pkg/eth/node"
"github.com/vulcanize/vulcanizedb/utils"
)
// SuperNode config struct
type SuperNode struct {
// Ubiquitous fields
Chain ChainType
IPFSPath string
DB *postgres.DB
DBConfig config.Database
Quit chan bool
// Server fields
Serve bool
WSEndpoint string
HTTPEndpoint string
IPCEndpoint string
// Sync params
Sync bool
Workers int
WSClient core.RPCClient
NodeInfo core.Node
// Backfiller params
BackFill bool
HTTPClient core.RPCClient
Frequency time.Duration
BatchSize uint64
}
// NewSuperNodeConfig is used to initialize a SuperNode config from a config .toml file
func NewSuperNodeConfig() (*SuperNode, error) {
sn := new(SuperNode)
sn.DBConfig = config.Database{
Name: viper.GetString("superNode.database.name"),
Hostname: viper.GetString("superNode.database.hostname"),
Port: viper.GetInt("superNode.database.port"),
User: viper.GetString("superNode.database.user"),
Password: viper.GetString("superNode.database.password"),
}
var err error
sn.Chain, err = NewChainType(viper.GetString("superNode.chain"))
if err != nil {
return nil, err
}
ipfsPath := viper.GetString("superNode.ipfsPath")
if ipfsPath == "" {
home, err := os.UserHomeDir()
if err != nil {
return nil, err
}
ipfsPath = filepath.Join(home, ".ipfs")
}
sn.IPFSPath = ipfsPath
sn.Serve = viper.GetBool("superNode.server.on")
sn.Sync = viper.GetBool("superNode.sync.on")
if sn.Sync {
workers := viper.GetInt("superNode.sync.workers")
if workers < 1 {
workers = 1
}
sn.Workers = workers
sn.NodeInfo, sn.WSClient, err = getNodeAndClient(sn.Chain, viper.GetString("superNode.sync.wsPath"))
if err != nil {
return nil, err
}
}
if sn.Serve {
wsPath := viper.GetString("superNode.server.wsPath")
if wsPath == "" {
wsPath = "ws://127.0.0.1:8546"
}
sn.WSEndpoint = wsPath
ipcPath := viper.GetString("superNode.server.ipcPath")
if ipcPath == "" {
home, err := os.UserHomeDir()
if err != nil {
return nil, err
}
ipcPath = filepath.Join(home, ".vulcanize/vulcanize.ipc")
}
sn.IPCEndpoint = ipcPath
httpPath := viper.GetString("superNode.server.httpPath")
if httpPath == "" {
httpPath = "http://127.0.0.1:8547"
}
sn.HTTPEndpoint = httpPath
}
db := utils.LoadPostgres(sn.DBConfig, sn.NodeInfo)
sn.DB = &db
sn.Quit = make(chan bool)
if viper.GetBool("superNode.backFill.on") {
if err := sn.BackFillFields(); err != nil {
return nil, err
}
}
return sn, err
}
// BackFillFields is used to fill in the BackFill fields of the config
func (sn *SuperNode) BackFillFields() error {
sn.BackFill = true
_, httpClient, err := getNodeAndClient(sn.Chain, viper.GetString("superNode.backFill.httpPath"))
if err != nil {
return err
}
sn.HTTPClient = httpClient
freq := viper.GetInt("superNode.backFill.frequency")
var frequency time.Duration
if freq <= 0 {
frequency = time.Minute * 5
} else {
// assumption: the configured frequency is given in minutes, matching the 5 minute default
frequency = time.Duration(freq) * time.Minute
}
sn.Frequency = frequency
sn.BatchSize = uint64(viper.GetInt64("superNode.backFill.batchSize"))
return nil
}
func getNodeAndClient(chain ChainType, path string) (core.Node, core.RPCClient, error) {
switch chain {
case Ethereum:
rawRPCClient, err := rpc.Dial(path)
if err != nil {
return core.Node{}, nil, err
}
rpcClient := client.NewRPCClient(rawRPCClient, path)
ethClient := ethclient.NewClient(rawRPCClient)
vdbEthClient := client.NewEthClient(ethClient)
vdbNode := node.MakeNode(rpcClient)
transactionConverter := vRpc.NewRPCTransactionConverter(ethClient)
blockChain := eth.NewBlockChain(vdbEthClient, rpcClient, vdbNode, transactionConverter)
return blockChain.Node(), rpcClient, nil
default:
return core.Node{}, nil, fmt.Errorf("unrecognized chain type %s", chain.String())
}
}
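
For reference, the viper keys read by NewSuperNodeConfig can be set programmatically; a sketch (values are illustrative, and a reachable Postgres is required for LoadPostgres to succeed):

package main

import (
	"log"

	"github.com/spf13/viper"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
)

func main() {
	// Normally these come from a .toml file; set inline here for illustration
	viper.Set("superNode.chain", "ethereum")
	viper.Set("superNode.database.name", "vulcanize_public")
	viper.Set("superNode.database.hostname", "localhost")
	viper.Set("superNode.database.port", 5432)
	viper.Set("superNode.server.on", true) // fills in default ws/ipc/http endpoints

	settings, err := config.NewSuperNodeConfig()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("super node configured for chain %s, serving at %s", settings.Chain, settings.WSEndpoint)
}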

View File

@ -0,0 +1,151 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package config
import (
"errors"
"math/big"
"github.com/spf13/viper"
)
// EthSubscription config is used by a subscriber to specify what eth data to stream from the super node
type EthSubscription struct {
BackFill bool
BackFillOnly bool
Start *big.Int
End *big.Int // set to 0 or a negative value to have no ending block
HeaderFilter HeaderFilter
TxFilter TxFilter
ReceiptFilter ReceiptFilter
StateFilter StateFilter
StorageFilter StorageFilter
}
// HeaderFilter contains filter settings for headers
type HeaderFilter struct {
Off bool
Uncles bool
}
// TxFilter contains filter settings for txs
type TxFilter struct {
Off bool
Src []string
Dst []string
}
// ReceiptFilter contains filter settings for receipts
type ReceiptFilter struct {
Off bool
MatchTxs bool // turn on to retrieve receipts that pair with retrieved transactions
Contracts []string
Topics [][]string
}
// StateFilter contains filter settings for state
type StateFilter struct {
Off bool
Addresses []string // is converted to state key by taking its keccak256 hash
IntermediateNodes bool
}
// StorageFilter contains filter settings for storage
type StorageFilter struct {
Off bool
Addresses []string
StorageKeys []string
IntermediateNodes bool
}
// NewEthSubscriptionConfig is used to initialize an EthSubscription config from viper-bound config values
func NewEthSubscriptionConfig() (*EthSubscription, error) {
sc := new(EthSubscription)
// Below default to false, which means we do not backfill by default
sc.BackFill = viper.GetBool("superNode.ethSubscription.historicalData")
sc.BackFillOnly = viper.GetBool("superNode.ethSubscription.historicalDataOnly")
// Below default to 0
// 0 start means we start at the beginning and 0 end means we continue indefinitely
sc.Start = big.NewInt(viper.GetInt64("superNode.ethSubscription.startingBlock"))
sc.End = big.NewInt(viper.GetInt64("superNode.ethSubscription.endingBlock"))
// Below default to false, which means we get all headers and no uncles by default
sc.HeaderFilter = HeaderFilter{
Off: viper.GetBool("superNode.ethSubscription.off"),
Uncles: viper.GetBool("superNode.ethSubscription.uncles"),
}
// Below defaults to false and two slices of length 0
// Which means we get all transactions by default
sc.TxFilter = TxFilter{
Off: viper.GetBool("superNode.ethSubscription.txFilter.off"),
Src: viper.GetStringSlice("superNode.ethSubscription.txFilter.src"),
Dst: viper.GetStringSlice("superNode.ethSubscription.txFilter.dst"),
}
// Below defaults to false and one slice of length 0
// Which means we get all receipts by default
t := viper.Get("superNode.ethSubscription.receiptFilter.topics")
topics, ok := t.([][]string)
if !ok {
return nil, errors.New("superNode.ethSubscription.receiptFilter.topics needs to be a slice of string slices")
}
sc.ReceiptFilter = ReceiptFilter{
Off: viper.GetBool("superNode.ethSubscription.receiptFilter.off"),
MatchTxs: viper.GetBool("superNode.ethSubscription.receiptFilter.matchTxs"),
Contracts: viper.GetStringSlice("superNode.ethSubscription.receiptFilter.contracts"),
Topics: topics,
}
// Below defaults to two false, and a slice of length 0
// Which means we get all state leaves by default, but no intermediate nodes
sc.StateFilter = StateFilter{
Off: viper.GetBool("superNode.ethSubscription.stateFilter.off"),
IntermediateNodes: viper.GetBool("superNode.ethSubscription.stateFilter.intermediateNodes"),
Addresses: viper.GetStringSlice("superNode.ethSubscription.stateFilter.addresses"),
}
// Below defaults to two false, and two slices of length 0
// Which means we get all storage leaves by default, but no intermediate nodes
sc.StorageFilter = StorageFilter{
Off: viper.GetBool("superNode.ethSubscription.storageFilter.off"),
IntermediateNodes: viper.GetBool("superNode.ethSubscription.storageFilter.intermediateNodes"),
Addresses: viper.GetStringSlice("superNode.ethSubscription.storageFilter.addresses"),
StorageKeys: viper.GetStringSlice("superNode.ethSubscription.storageFilter.storageKeys"),
}
return sc, nil
}
// StartingBlock satisfies the SubscriptionSettings() interface
func (sc *EthSubscription) StartingBlock() *big.Int {
return sc.Start
}
// EndingBlock satisfies the SubscriptionSettings() interface
func (sc *EthSubscription) EndingBlock() *big.Int {
return sc.End
}
// HistoricalData satisfies the SubscriptionSettings() interface
func (sc *EthSubscription) HistoricalData() bool {
return sc.BackFill
}
// HistoricalDataOnly satisfies the SubscriptionSettings() interface
func (sc *EthSubscription) HistoricalDataOnly() bool {
return sc.BackFillOnly
}
// ChainType satisfies the SubscriptionSettings() interface
func (sc *EthSubscription) ChainType() ChainType {
return Ethereum
}
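
A subscriber can also build the settings struct directly rather than through viper; a sketch that requests only receipts for a single contract, including historical data (the address is illustrative):

package main

import (
	"fmt"
	"math/big"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
)

func main() {
	sub := &config.EthSubscription{
		BackFill:     true,          // also stream historical data
		Start:        big.NewInt(0), // from the beginning
		End:          big.NewInt(0), // 0 => continue indefinitely
		HeaderFilter: config.HeaderFilter{Off: true},
		TxFilter:     config.TxFilter{Off: true},
		ReceiptFilter: config.ReceiptFilter{
			Contracts: []string{"0x314159265dD8dbb310642f98f50C066173C1259b"}, // illustrative address
		},
		StateFilter:   config.StateFilter{Off: true},
		StorageFilter: config.StorageFilter{Off: true},
	}
	fmt.Printf("start=%d historical=%v\n", sub.StartingBlock(), sub.HistoricalData())
}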

View File

@ -0,0 +1,155 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package super_node
import (
"fmt"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)
// NewResponseFilterer constructs a ResponseFilterer for the provided chain type
func NewResponseFilterer(chain config.ChainType) (shared.ResponseFilterer, error) {
switch chain {
case config.Ethereum:
return eth.NewResponseFilterer(), nil
default:
return nil, fmt.Errorf("invalid chain %T for filterer constructor", chain)
}
}
// NewCIDIndexer constructs a CIDIndexer for the provided chain type
func NewCIDIndexer(chain config.ChainType, db *postgres.DB) (shared.CIDIndexer, error) {
switch chain {
case config.Ethereum:
return eth.NewCIDIndexer(db), nil
default:
return nil, fmt.Errorf("invalid chain %T for indexer constructor", chain)
}
}
// NewCIDRetriever constructs a CIDRetriever for the provided chain type
func NewCIDRetriever(chain config.ChainType, db *postgres.DB) (shared.CIDRetriever, error) {
switch chain {
case config.Ethereum:
return eth.NewCIDRetriever(db), nil
default:
return nil, fmt.Errorf("invalid chain %T for retriever constructor", chain)
}
}
// NewPayloadStreamer constructs a PayloadStreamer for the provided chain type
func NewPayloadStreamer(chain config.ChainType, client interface{}) (shared.PayloadStreamer, chan interface{}, error) {
switch chain {
case config.Ethereum:
ethClient, ok := client.(core.RPCClient)
if !ok {
var expectedClientType core.RPCClient
return nil, nil, fmt.Errorf("ethereum payload constructor expected client type %T got %T", expectedClientType, client)
}
streamChan := make(chan interface{}, eth.PayloadChanBufferSize)
return eth.NewPayloadStreamer(ethClient), streamChan, nil
default:
return nil, nil, fmt.Errorf("invalid chain %T for streamer constructor", chain)
}
}
// NewPayloadFetcher constructs a PayloadFetcher for the provided chain type
func NewPayloadFetcher(chain config.ChainType, client interface{}) (shared.PayloadFetcher, error) {
switch chain {
case config.Ethereum:
batchClient, ok := client.(eth.BatchClient)
if !ok {
var expectedClient eth.BatchClient
return nil, fmt.Errorf("ethereum fetcher constructor expected client type %T got %T", expectedClient, client)
}
return eth.NewPayloadFetcher(batchClient), nil
default:
return nil, fmt.Errorf("invalid chain %T for fetcher constructor", chain)
}
}
// NewPayloadConverter constructs a PayloadConverter for the provided chain type
func NewPayloadConverter(chain config.ChainType, settings interface{}) (shared.PayloadConverter, error) {
switch chain {
case config.Ethereum:
ethConfig, ok := settings.(*params.ChainConfig)
if !ok {
return nil, fmt.Errorf("ethereum converter constructor expected config type %T got %T", &params.ChainConfig{}, settings)
}
return eth.NewPayloadConverter(ethConfig), nil
default:
return nil, fmt.Errorf("invalid chain %T for converter constructor", chain)
}
}
// NewIPLDFetcher constructs an IPLDFetcher for the provided chain type
func NewIPLDFetcher(chain config.ChainType, ipfsPath string) (shared.IPLDFetcher, error) {
switch chain {
case config.Ethereum:
return eth.NewIPLDFetcher(ipfsPath)
default:
return nil, fmt.Errorf("invalid chain %T for fetcher constructor", chain)
}
}
// NewIPLDPublisher constructs an IPLDPublisher for the provided chain type
func NewIPLDPublisher(chain config.ChainType, ipfsPath string) (shared.IPLDPublisher, error) {
switch chain {
case config.Ethereum:
return eth.NewIPLDPublisher(ipfsPath)
default:
return nil, fmt.Errorf("invalid chain %T for publisher constructor", chain)
}
}
// NewIPLDResolver constructs an IPLDResolver for the provided chain type
func NewIPLDResolver(chain config.ChainType) (shared.IPLDResolver, error) {
switch chain {
case config.Ethereum:
return eth.NewIPLDResolver(), nil
default:
return nil, fmt.Errorf("invalid chain %T for resolver constructor", chain)
}
}
// NewPublicAPI constructs a PublicAPI for the provided chain type
func NewPublicAPI(chain config.ChainType, db *postgres.DB, ipfsPath string) (rpc.API, error) {
switch chain {
case config.Ethereum:
backend, err := eth.NewEthBackend(db, ipfsPath)
if err != nil {
return rpc.API{}, err
}
return rpc.API{
Namespace: eth.APIName,
Version: eth.APIVersion,
Service: eth.NewPublicEthAPI(backend),
Public: true,
}, nil
default:
return rpc.API{}, fmt.Errorf("invalid chain %T for public api constructor", chain)
}
}
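
Tying the constructors together, a server can mount the chain-specific API on a WS endpoint; a hedged sketch (the endpoint and IPFS path are placeholders supplied by the caller):

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/rpc"

	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
	"github.com/vulcanize/vulcanizedb/pkg/super_node"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
)

// serveEthAPI mounts the eth namespace over WS; db and ipfsPath are assumed
// to be an open Postgres handle and an initialized IPFS repo path
func serveEthAPI(db *postgres.DB, ipfsPath string) error {
	api, err := super_node.NewPublicAPI(config.Ethereum, db, ipfsPath)
	if err != nil {
		return err
	}
	_, _, err = rpc.StartWSEndpoint("127.0.0.1:8546", []rpc.API{api}, []string{api.Namespace}, nil, true)
	return err
}

func main() {
	log.Println("see serveEthAPI for the wiring sketch")
}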

pkg/super_node/eth/api.go (new file, 386 lines)
View File

@ -0,0 +1,386 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"context"
"math/big"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ipfs/go-block-format"
"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
)
// APIName is the namespace for the super node's eth api
const APIName = "eth"
// APIVersion is the version of the super node's eth api
const APIVersion = "0.0.1"
type PublicEthAPI struct {
b *Backend
}
// NewPublicEthAPI creates a new PublicEthAPI with the provided underlying Backend
func NewPublicEthAPI(b *Backend) *PublicEthAPI {
return &PublicEthAPI{
b: b,
}
}
// BlockNumber returns the block number of the chain head.
func (pea *PublicEthAPI) BlockNumber() hexutil.Uint64 {
number, _ := pea.b.retriever.RetrieveLastBlockNumber()
return hexutil.Uint64(number)
}
// GetLogs returns logs matching the given argument that are stored within the state.
//
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs
func (pea *PublicEthAPI) GetLogs(ctx context.Context, crit ethereum.FilterQuery) ([]*types.Log, error) {
// Convert FilterQuery into ReceiptFilter
addrStrs := make([]string, len(crit.Addresses))
for i, addr := range crit.Addresses {
addrStrs[i] = addr.String()
}
topicStrSets := make([][]string, 4)
for i, topicSet := range crit.Topics {
if i > 3 {
break
}
for _, topic := range topicSet {
topicStrSets[i] = append(topicStrSets[i], topic.String())
}
}
filter := config.ReceiptFilter{
Contracts: addrStrs,
Topics: topicStrSets,
}
tx, err := pea.b.db.Beginx()
if err != nil {
return nil, err
}
// If we have a blockhash to filter on, fire off single retrieval query
if crit.BlockHash != nil {
rctCIDs, err := pea.b.retriever.RetrieveRctCIDs(tx, filter, 0, crit.BlockHash, nil)
if err != nil {
tx.Rollback() // release the open tx on the error path
return nil, err
}
if err := tx.Commit(); err != nil {
return nil, err
}
rctIPLDs, err := pea.b.fetcher.FetchRcts(rctCIDs)
if err != nil {
return nil, err
}
return extractLogsOfInterest(rctIPLDs, filter.Topics)
}
// Otherwise, create block range from criteria
// nil values are filled in; to request a single block have both ToBlock and FromBlock equal that number
startingBlock := crit.FromBlock
endingBlock := crit.ToBlock
if startingBlock == nil {
startingBlockInt, err := pea.b.retriever.RetrieveFirstBlockNumber()
if err != nil {
return nil, err
}
startingBlock = big.NewInt(startingBlockInt)
}
if endingBlock == nil {
endingBlockInt, err := pea.b.retriever.RetrieveLastBlockNumber()
if err != nil {
return nil, err
}
endingBlock = big.NewInt(endingBlockInt)
}
start := startingBlock.Int64()
end := endingBlock.Int64()
allRctCIDs := make([]ReceiptModel, 0)
for i := start; i <= end; i++ {
rctCIDs, err := pea.b.retriever.RetrieveRctCIDs(tx, filter, i, nil, nil)
if err != nil {
tx.Rollback() // release the open tx on the error path
return nil, err
}
allRctCIDs = append(allRctCIDs, rctCIDs...)
}
if err := tx.Commit(); err != nil {
return nil, err
}
rctIPLDs, err := pea.b.fetcher.FetchRcts(allRctCIDs)
if err != nil {
return nil, err
}
return extractLogsOfInterest(rctIPLDs, filter.Topics)
}
// GetHeaderByNumber returns the requested canonical block header.
// * When blockNr is -1 the chain head is returned.
// * We cannot support pending block calls since we do not have an active miner
func (pea *PublicEthAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) {
header, err := pea.b.HeaderByNumber(ctx, number)
if header != nil && err == nil {
return pea.rpcMarshalHeader(header)
}
return nil, err
}
// GetBlockByNumber returns the requested canonical block.
// * When blockNr is -1 the chain head is returned.
// * We cannot support pending block calls since we do not have an active miner
// * When fullTx is true all transactions in the block are returned, otherwise
// only the transaction hash is returned.
func (pea *PublicEthAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) {
block, err := pea.b.BlockByNumber(ctx, number)
if block != nil && err == nil {
return pea.rpcMarshalBlock(block, true, fullTx)
}
return nil, err
}
// GetBlockByHash returns the requested block. When fullTx is true all transactions in the block are returned in full
// detail, otherwise only the transaction hash is returned.
func (pea *PublicEthAPI) GetBlockByHash(ctx context.Context, hash common.Hash, fullTx bool) (map[string]interface{}, error) {
block, err := pea.b.BlockByHash(ctx, hash)
if block != nil {
return pea.rpcMarshalBlock(block, true, fullTx)
}
return nil, err
}
// GetTransactionByHash returns the transaction for the given hash
// SuperNode cannot currently handle pending/tx_pool txs
func (pea *PublicEthAPI) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) {
// Try to return an already finalized transaction
tx, blockHash, blockNumber, index, err := pea.b.GetTransaction(ctx, hash)
if err != nil {
return nil, err
}
if tx != nil {
return newRPCTransaction(tx, blockHash, blockNumber, index), nil
}
// Transaction unknown, return as such
return nil, nil
}
// extractLogsOfInterest returns logs from the receipt IPLD
func extractLogsOfInterest(rctIPLDs []blocks.Block, wantedTopics [][]string) ([]*types.Log, error) {
var logs []*types.Log
for _, rctIPLD := range rctIPLDs {
rctRLP := rctIPLD.RawData()
var rct types.Receipt
if err := rlp.DecodeBytes(rctRLP, &rct); err != nil {
return nil, err
}
for _, log := range rct.Logs {
if wantedLog(wantedTopics, log.Topics) {
logs = append(logs, log)
}
}
}
return logs, nil
}
// returns true if the log matches on the filter
func wantedLog(wantedTopics [][]string, actualTopics []common.Hash) bool {
// actualTopics will always have length <= 4
// wantedTopics will always have length == 4
matches := 0
for i, actualTopic := range actualTopics {
// If we have topics in this filter slot, count as a match if the actualTopic matches one of the ones in this filter slot
if len(wantedTopics[i]) > 0 {
matches += sliceContainsHash(wantedTopics[i], actualTopic)
} else {
// Filter slot is empty, not matching any topics at this slot => counts as a match
matches++
}
}
return matches == len(actualTopics)
}
// returns 1 if the slice contains the hash, 0 if it does not
func sliceContainsHash(slice []string, hash common.Hash) int {
for _, str := range slice {
if str == hash.String() {
return 1
}
}
return 0
}
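
To make the matching rule concrete: an empty filter slot matches any topic at that position, while a populated slot must contain the log's topic. Since wantedLog is unexported, the sketch below mirrors its logic rather than calling it:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// matches mirrors wantedLog above: every actual topic position must either be
// unconstrained (empty slot) or contain the actual topic string
func matches(wantedTopics [][]string, actualTopics []common.Hash) bool {
	count := 0
	for i, actual := range actualTopics {
		if len(wantedTopics[i]) == 0 {
			count++ // unconstrained slot counts as a match
			continue
		}
		for _, want := range wantedTopics[i] {
			if want == actual.String() {
				count++
				break
			}
		}
	}
	return count == len(actualTopics)
}

func main() {
	topic0 := common.HexToHash("0x01") // illustrative event signature hash
	wanted := [][]string{{topic0.String()}, {}, {}, {}}
	fmt.Println(matches(wanted, []common.Hash{topic0}))                   // true
	fmt.Println(matches(wanted, []common.Hash{common.HexToHash("0x02")})) // false
}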
// rpcMarshalHeader uses the generalized output filler, then adds the total difficulty field, which requires
// a `PublicEthAPI`.
func (pea *PublicEthAPI) rpcMarshalHeader(header *types.Header) (map[string]interface{}, error) {
fields := RPCMarshalHeader(header)
td, err := pea.b.GetTd(header.Hash())
if err != nil {
return nil, err
}
fields["totalDifficulty"] = (*hexutil.Big)(td)
return fields, nil
}
// RPCMarshalHeader converts the given header to the RPC output.
// This function is eth/internal so we have to make our own version here...
func RPCMarshalHeader(head *types.Header) map[string]interface{} {
return map[string]interface{}{
"number": (*hexutil.Big)(head.Number),
"hash": head.Hash(),
"parentHash": head.ParentHash,
"nonce": head.Nonce,
"mixHash": head.MixDigest,
"sha3Uncles": head.UncleHash,
"logsBloom": head.Bloom,
"stateRoot": head.Root,
"miner": head.Coinbase,
"difficulty": (*hexutil.Big)(head.Difficulty),
"extraData": hexutil.Bytes(head.Extra),
"size": hexutil.Uint64(head.Size()),
"gasLimit": hexutil.Uint64(head.GasLimit),
"gasUsed": hexutil.Uint64(head.GasUsed),
"timestamp": hexutil.Uint64(head.Time),
"transactionsRoot": head.TxHash,
"receiptsRoot": head.ReceiptHash,
}
}
// rpcMarshalBlock uses the generalized output filler, then adds the total difficulty field, which requires
// a `PublicBlockchainAPI`.
func (pea *PublicEthAPI) rpcMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) {
fields, err := RPCMarshalBlock(b, inclTx, fullTx)
if err != nil {
return nil, err
}
td, err := pea.b.GetTd(b.Hash())
if err != nil {
return nil, err
}
fields["totalDifficulty"] = (*hexutil.Big)(td)
return fields, err
}
// RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are
// returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain
// transaction hashes.
func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) {
fields := RPCMarshalHeader(block.Header())
fields["size"] = hexutil.Uint64(block.Size())
if inclTx {
formatTx := func(tx *types.Transaction) (interface{}, error) {
return tx.Hash(), nil
}
if fullTx {
formatTx = func(tx *types.Transaction) (interface{}, error) {
return newRPCTransactionFromBlockHash(block, tx.Hash()), nil
}
}
txs := block.Transactions()
transactions := make([]interface{}, len(txs))
var err error
for i, tx := range txs {
if transactions[i], err = formatTx(tx); err != nil {
return nil, err
}
}
fields["transactions"] = transactions
}
uncles := block.Uncles()
uncleHashes := make([]common.Hash, len(uncles))
for i, uncle := range uncles {
uncleHashes[i] = uncle.Hash()
}
fields["uncles"] = uncleHashes
return fields, nil
}
// newRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation.
func newRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransaction {
for idx, tx := range b.Transactions() {
if tx.Hash() == hash {
return newRPCTransactionFromBlockIndex(b, uint64(idx))
}
}
return nil
}
// newRPCTransactionFromBlockIndex returns a transaction that will serialize to the RPC representation.
func newRPCTransactionFromBlockIndex(b *types.Block, index uint64) *RPCTransaction {
txs := b.Transactions()
if index >= uint64(len(txs)) {
return nil
}
return newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index)
}
// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction
type RPCTransaction struct {
BlockHash *common.Hash `json:"blockHash"`
BlockNumber *hexutil.Big `json:"blockNumber"`
From common.Address `json:"from"`
Gas hexutil.Uint64 `json:"gas"`
GasPrice *hexutil.Big `json:"gasPrice"`
Hash common.Hash `json:"hash"`
Input hexutil.Bytes `json:"input"`
Nonce hexutil.Uint64 `json:"nonce"`
To *common.Address `json:"to"`
TransactionIndex *hexutil.Uint64 `json:"transactionIndex"`
Value *hexutil.Big `json:"value"`
V *hexutil.Big `json:"v"`
R *hexutil.Big `json:"r"`
S *hexutil.Big `json:"s"`
}
// newRPCTransaction returns a transaction that will serialize to the RPC
// representation, with the given location metadata set (if available).
func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64) *RPCTransaction {
var signer types.Signer = types.FrontierSigner{}
if tx.Protected() {
signer = types.NewEIP155Signer(tx.ChainId())
}
from, _ := types.Sender(signer, tx)
v, r, s := tx.RawSignatureValues()
result := &RPCTransaction{
From: from,
Gas: hexutil.Uint64(tx.Gas()),
GasPrice: (*hexutil.Big)(tx.GasPrice()),
Hash: tx.Hash(),
Input: hexutil.Bytes(tx.Data()),
Nonce: hexutil.Uint64(tx.Nonce()),
To: tx.To(),
Value: (*hexutil.Big)(tx.Value()),
V: (*hexutil.Big)(v),
R: (*hexutil.Big)(r),
S: (*hexutil.Big)(s),
}
if blockHash != (common.Hash{}) {
result.BlockHash = &blockHash
result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber))
result.TransactionIndex = (*hexutil.Uint64)(&index)
}
return result
}
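
Once served, these methods answer standard JSON-RPC calls; a client sketch against an assumed endpoint (only the methods implemented above are available, so richer eth clients may hit unsupported calls):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// the endpoint is illustrative; point it at the super node's server
	client, err := rpc.Dial("ws://127.0.0.1:8546")
	if err != nil {
		log.Fatal(err)
	}
	var block map[string]interface{}
	// fullTx=false returns transaction hashes only, per RPCMarshalBlock above
	err = client.CallContext(context.Background(), &block, "eth_getBlockByNumber", "0xf4240", false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("hash:", block["hash"], "txs:", len(block["transactions"].([]interface{})))
}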

View File

@ -0,0 +1,311 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"context"
"errors"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
)
var (
errPendingBlockNumber = errors.New("pending block number not supported")
)
type Backend struct {
retriever *CIDRetriever
fetcher *IPLDFetcher
db *postgres.DB
}
func NewEthBackend(db *postgres.DB, ipfsPath string) (*Backend, error) {
r := NewCIDRetriever(db)
f, err := NewIPLDFetcher(ipfsPath)
if err != nil {
return nil, err
}
return &Backend{
retriever: r,
fetcher: f,
db: db,
}, nil
}
func (b *Backend) HeaderByNumber(ctx context.Context, blockNumber rpc.BlockNumber) (*types.Header, error) {
number := blockNumber.Int64()
var err error
if blockNumber == rpc.LatestBlockNumber {
number, err = b.retriever.RetrieveLastBlockNumber()
if err != nil {
return nil, err
}
}
if blockNumber == rpc.PendingBlockNumber {
return nil, errPendingBlockNumber
}
// Retrieve the CIDs for headers at this height
tx, err := b.db.Beginx()
if err != nil {
return nil, err
}
headerCids, err := b.retriever.RetrieveHeaderCIDs(tx, number)
if err != nil {
if err := tx.Rollback(); err != nil {
logrus.Error(err)
}
return nil, err
}
if err := tx.Commit(); err != nil {
return nil, err
}
// If there are none, throw an error
if len(headerCids) < 1 {
return nil, fmt.Errorf("header at block %d is not available", number)
}
// Fetch the header IPLDs for those CIDs
headerIPLDs, err := b.fetcher.FetchHeaders([]HeaderModel{headerCids[0]})
if err != nil {
return nil, err
}
// Decode the first header at this block height and return it
// We throw an error in FetchHeaders() if the number of headers does not match the number of CIDs and we already
// confirmed the number of CIDs is greater than 0 so there is no need to bound check the slice before accessing
header := new(types.Header)
if err := rlp.DecodeBytes(headerIPLDs[0].RawData(), header); err != nil {
return nil, err
}
return header, nil
}
// GetTd retrieves and returns the total difficulty at the given block hash
func (b *Backend) GetTd(blockHash common.Hash) (*big.Int, error) {
pgStr := `SELECT header_cids.td FROM header_cids
WHERE header_cids.block_hash = $1`
var tdStr string
err := b.db.Get(&tdStr, pgStr, blockHash.String())
if err != nil {
return nil, err
}
td, ok := new(big.Int).SetString(tdStr, 10)
if !ok {
return nil, errors.New("total difficulty retrieved from Postgres cannot be converted to an integer")
}
return td, nil
}
// GetLogs returns all the logs for the given block hash
func (b *Backend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
tx, err := b.db.Beginx()
if err != nil {
return nil, err
}
receiptCIDs, err := b.retriever.RetrieveRctCIDs(tx, config.ReceiptFilter{}, 0, &hash, nil)
if err != nil {
if err := tx.Rollback(); err != nil {
logrus.Error(err)
}
return nil, err
}
if err := tx.Commit(); err != nil {
return nil, err
}
if len(receiptCIDs) == 0 {
return nil, nil
}
receiptIPLDs, err := b.fetcher.FetchRcts(receiptCIDs)
if err != nil {
return nil, err
}
logs := make([][]*types.Log, len(receiptIPLDs))
for i, rctIPLD := range receiptIPLDs {
var rct types.Receipt
if err := rlp.DecodeBytes(rctIPLD.RawData(), &rct); err != nil {
return nil, err
}
logs[i] = rct.Logs
}
return logs, nil
}
// BlockByNumber returns the requested canonical block.
// Since the SuperNode can contain forked blocks, it is recommended to fetch BlockByHash as
// fetching by number can return non-deterministic results (returns the first block found at that height)
func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber) (*types.Block, error) {
number := blockNumber.Int64()
var err error
if blockNumber == rpc.LatestBlockNumber {
number, err = b.retriever.RetrieveLastBlockNumber()
if err != nil {
return nil, err
}
}
if blockNumber == rpc.PendingBlockNumber {
return nil, errPendingBlockNumber
}
// Retrieve all the CIDs for the block
headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.retriever.RetrieveBlockByNumber(number)
if err != nil {
return nil, err
}
// Fetch and decode the header IPLD
headerIPLDs, err := b.fetcher.FetchHeaders([]HeaderModel{headerCID})
if err != nil {
return nil, err
}
header := new(types.Header)
if err := rlp.DecodeBytes(headerIPLDs[0].RawData(), header); err != nil {
return nil, err
}
// Fetch and decode the uncle IPLDs
uncleIPLDs, err := b.fetcher.FetchUncles(uncleCIDs)
if err != nil {
return nil, err
}
var uncles []*types.Header
for _, uncleIPLD := range uncleIPLDs {
uncle := new(types.Header)
if err := rlp.DecodeBytes(uncleIPLD.RawData(), uncle); err != nil {
return nil, err
}
uncles = append(uncles, uncle)
}
// Fetch and decode the transaction IPLDs
txIPLDs, err := b.fetcher.FetchTrxs(txCIDs)
if err != nil {
return nil, err
}
var transactions []*types.Transaction
for _, txIPLD := range txIPLDs {
tx := new(types.Transaction)
if err := rlp.DecodeBytes(txIPLD.RawData(), tx); err != nil {
return nil, err
}
transactions = append(transactions, tx)
}
// Fetch and decode the receipt IPLDs
rctIPLDs, err := b.fetcher.FetchRcts(rctCIDs)
if err != nil {
return nil, err
}
var receipts []*types.Receipt
for _, rctIPLD := range rctIPLDs {
receipt := new(types.Receipt)
if err := rlp.DecodeBytes(rctIPLD.RawData(), receipt); err != nil {
return nil, err
}
receipts = append(receipts, receipt)
}
// Compose everything together into a complete block
return types.NewBlock(header, transactions, uncles, receipts), nil
}
// BlockByHash returns the requested block. When fullTx is true all transactions in the block are returned in full
// detail, otherwise only the transaction hash is returned.
func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
// Retrieve all the CIDs for the block
headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.retriever.RetrieveBlockByHash(hash)
if err != nil {
return nil, err
}
// Fetch and decode the header IPLD
headerIPLDs, err := b.fetcher.FetchHeaders([]HeaderModel{headerCID})
if err != nil {
return nil, err
}
header := new(types.Header)
if err := rlp.DecodeBytes(headerIPLDs[0].RawData(), header); err != nil {
return nil, err
}
// Fetch and decode the uncle IPLDs
uncleIPLDs, err := b.fetcher.FetchUncles(uncleCIDs)
if err != nil {
return nil, err
}
var uncles []*types.Header
for _, uncleIPLD := range uncleIPLDs {
uncle := new(types.Header)
if err := rlp.DecodeBytes(uncleIPLD.RawData(), uncle); err != nil {
return nil, err
}
uncles = append(uncles, uncle)
}
// Fetch and decode the transaction IPLDs
txIPLDs, err := b.fetcher.FetchTrxs(txCIDs)
if err != nil {
return nil, err
}
var transactions []*types.Transaction
for _, txIPLD := range txIPLDs {
tx := new(types.Transaction)
if err := rlp.DecodeBytes(txIPLD.RawData(), tx); err != nil {
return nil, err
}
transactions = append(transactions, tx)
}
// Fetch and decode the receipt IPLDs
rctIPLDs, err := b.fetcher.FetchRcts(rctCIDs)
if err != nil {
return nil, err
}
var receipts []*types.Receipt
for _, rctIPLD := range rctIPLDs {
receipt := new(types.Receipt)
if err := rlp.DecodeBytes(rctIPLD.RawData(), receipt); err != nil {
return nil, err
}
receipts = append(receipts, receipt)
}
// Compose everything together into a complete block
return types.NewBlock(header, transactions, uncles, receipts), nil
}
// GetTransaction retrieves a tx by hash
// It also returns the block hash, block number, and tx index associated with the transaction
func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
pgStr := `SELECT transaction_cids.cid, transaction_cids.index, header_cids.block_hash, header_cids.block_number
FROM transaction_cids, header_cids
WHERE transaction_cids.header_id = header_cids.id
AND transaction_cids.tx_hash = $1`
var txCIDWithHeaderInfo struct {
CID string `db:"cid"`
Index int64 `db:"index"`
BlockHash string `db:"block_hash"`
BlockNumber int64 `db:"block_number"`
}
if err := b.db.Get(&txCIDWithHeaderInfo, pgStr, txHash.String()); err != nil {
return nil, common.Hash{}, 0, 0, err
}
txIPLD, err := b.fetcher.FetchTrxs([]TxModel{{CID: txCIDWithHeaderInfo.CID}})
if err != nil {
return nil, common.Hash{}, 0, 0, err
}
transaction := new(types.Transaction)
if err := rlp.DecodeBytes(txIPLD[0].RawData(), transaction); err != nil {
return nil, common.Hash{}, 0, 0, err
}
return transaction, common.HexToHash(txCIDWithHeaderInfo.BlockHash), uint64(txCIDWithHeaderInfo.BlockNumber), uint64(txCIDWithHeaderInfo.Index), nil
}
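
A sketch of using the backend directly, outside the RPC layer; db, ipfsPath, and the transaction hash are assumed to be supplied by the caller:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"

	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)

// lookupTx fetches a transaction and its location metadata through the backend
func lookupTx(db *postgres.DB, ipfsPath string, txHash common.Hash) error {
	backend, err := eth.NewEthBackend(db, ipfsPath)
	if err != nil {
		return err
	}
	tx, blockHash, blockNumber, index, err := backend.GetTransaction(context.Background(), txHash)
	if err != nil {
		return err
	}
	fmt.Printf("tx %s is at index %d in block %s (#%d)\n", tx.Hash().Hex(), index, blockHash.Hex(), blockNumber)
	return nil
}

func main() {
	log.Println("see lookupTx for the wiring sketch")
}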

View File

@ -14,9 +14,11 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs
package eth
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
@ -24,59 +26,59 @@ import (
"github.com/ethereum/go-ethereum/statediff"
)
// PayloadConverter interface is used to convert a geth statediff.Payload to our IPLDPayload type
type PayloadConverter interface {
Convert(payload statediff.Payload) (*IPLDPayload, error)
}
// Converter is the underlying struct for the PayloadConverter interface
type Converter struct {
// PayloadConverter satisfies the PayloadConverter interface for ethereum
type PayloadConverter struct {
chainConfig *params.ChainConfig
}
// NewPayloadConverter creates a pointer to a new Converter which satisfies the PayloadConverter interface
func NewPayloadConverter(chainConfig *params.ChainConfig) *Converter {
return &Converter{
func NewPayloadConverter(chainConfig *params.ChainConfig) *PayloadConverter {
return &PayloadConverter{
chainConfig: chainConfig,
}
}
// Convert method is used to convert a geth statediff.Payload to a IPLDPayload
func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
func (pc *PayloadConverter) Convert(payload interface{}) (interface{}, error) {
stateDiffPayload, ok := payload.(statediff.Payload)
if !ok {
return nil, fmt.Errorf("eth converter: expected payload type %T got %T", statediff.Payload{}, payload)
}
// Unpack block rlp to access fields
block := new(types.Block)
decodeErr := rlp.DecodeBytes(payload.BlockRlp, block)
if decodeErr != nil {
return nil, decodeErr
if err := rlp.DecodeBytes(stateDiffPayload.BlockRlp, block); err != nil {
return nil, err
}
// Process and publish headers
header := block.Header()
headerRlp, encodeErr := rlp.EncodeToBytes(header)
if encodeErr != nil {
return nil, encodeErr
headerRlp, err := rlp.EncodeToBytes(header)
if err != nil {
return nil, err
}
trxLen := len(block.Transactions())
convertedPayload := &IPLDPayload{
BlockHash: block.Hash(),
BlockNumber: block.Number(),
TotalDifficulty: stateDiffPayload.TotalDifficulty,
Block: block,
HeaderRLP: headerRlp,
BlockBody: block.Body(),
TrxMetaData: make([]*TrxMetaData, 0, trxLen),
TrxMetaData: make([]TxModel, 0, trxLen),
Receipts: make(types.Receipts, 0, trxLen),
ReceiptMetaData: make([]*ReceiptMetaData, 0, trxLen),
StateNodes: make(map[common.Hash]StateNode),
StorageNodes: make(map[common.Hash][]StorageNode),
ReceiptMetaData: make([]ReceiptModel, 0, trxLen),
StateNodes: make([]TrieNode, 0),
StorageNodes: make(map[common.Hash][]TrieNode),
}
signer := types.MakeSigner(pc.chainConfig, block.Number())
transactions := block.Transactions()
for _, trx := range transactions {
for i, trx := range transactions {
// Extract to and from data from the transactions for indexing
from, senderErr := types.Sender(signer, trx)
if senderErr != nil {
return nil, senderErr
from, err := types.Sender(signer, trx)
if err != nil {
return nil, err
}
txMeta := &TrxMetaData{
Dst: handleNullAddr(trx.To()),
Src: handleNullAddr(&from),
txMeta := TxModel{
Dst: handleNullAddr(trx.To()),
Src: handleNullAddr(&from),
TxHash: trx.Hash().String(),
Index: int64(i),
}
// txMeta will have same index as its corresponding trx in the convertedPayload.BlockBody
convertedPayload.TrxMetaData = append(convertedPayload.TrxMetaData, txMeta)
@ -84,14 +86,12 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
// Decode receipts for this block
receipts := make(types.Receipts, 0)
decodeErr = rlp.DecodeBytes(payload.ReceiptsRlp, &receipts)
if decodeErr != nil {
return nil, decodeErr
if err := rlp.DecodeBytes(stateDiffPayload.ReceiptsRlp, &receipts); err != nil {
return nil, err
}
// Derive any missing fields
deriveErr := receipts.DeriveFields(pc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions())
if deriveErr != nil {
return nil, deriveErr
if err := receipts.DeriveFields(pc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions()); err != nil {
return nil, err
}
for i, receipt := range receipts {
// If the transaction for this receipt has a "to" address, the above DeriveFields() fails to assign it to the receipt's ContractAddress
@ -100,16 +100,21 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
if transactions[i].To() != nil {
receipt.ContractAddress = *transactions[i].To()
}
// Extract topic0 data from the receipt's logs for indexing
rctMeta := &ReceiptMetaData{
Topic0s: make([]string, 0, len(receipt.Logs)),
ContractAddress: receipt.ContractAddress.Hex(),
}
// Extract topic and contract data from the receipt for indexing
topicSets := make([][]string, 4)
for _, log := range receipt.Logs {
if len(log.Topics) < 1 {
continue
for i := range topicSets {
if i < len(log.Topics) {
topicSets[i] = append(topicSets[i], log.Topics[i].Hex())
}
}
rctMeta.Topic0s = append(rctMeta.Topic0s, log.Topics[0].Hex())
}
rctMeta := ReceiptModel{
Topic0s: topicSets[0],
Topic1s: topicSets[1],
Topic2s: topicSets[2],
Topic3s: topicSets[3],
Contract: receipt.ContractAddress.Hex(),
}
// receipt and rctMeta will have same indexes
convertedPayload.Receipts = append(convertedPayload.Receipts, receipt)
@ -118,18 +123,18 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
// Unpack state diff rlp to access fields
stateDiff := new(statediff.StateDiff)
decodeErr = rlp.DecodeBytes(payload.StateDiffRlp, stateDiff)
if decodeErr != nil {
return nil, decodeErr
if err := rlp.DecodeBytes(stateDiffPayload.StateDiffRlp, stateDiff); err != nil {
return nil, err
}
for _, createdAccount := range stateDiff.CreatedAccounts {
hashKey := common.BytesToHash(createdAccount.Key)
convertedPayload.StateNodes[hashKey] = StateNode{
convertedPayload.StateNodes = append(convertedPayload.StateNodes, TrieNode{
Key: hashKey,
Value: createdAccount.Value,
Leaf: createdAccount.Leaf,
}
})
for _, storageDiff := range createdAccount.Storage {
convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], StorageNode{
convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], TrieNode{
Key: common.BytesToHash(storageDiff.Key),
Value: storageDiff.Value,
Leaf: storageDiff.Leaf,
@ -138,12 +143,13 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
}
for _, deletedAccount := range stateDiff.DeletedAccounts {
hashKey := common.BytesToHash(deletedAccount.Key)
convertedPayload.StateNodes[hashKey] = StateNode{
convertedPayload.StateNodes = append(convertedPayload.StateNodes, TrieNode{
Key: hashKey,
Value: deletedAccount.Value,
Leaf: deletedAccount.Leaf,
}
})
for _, storageDiff := range deletedAccount.Storage {
convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], StorageNode{
convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], TrieNode{
Key: common.BytesToHash(storageDiff.Key),
Value: storageDiff.Value,
Leaf: storageDiff.Leaf,
@ -152,12 +158,13 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
}
for _, updatedAccount := range stateDiff.UpdatedAccounts {
hashKey := common.BytesToHash(updatedAccount.Key)
convertedPayload.StateNodes[hashKey] = StateNode{
convertedPayload.StateNodes = append(convertedPayload.StateNodes, TrieNode{
Key: hashKey,
Value: updatedAccount.Value,
Leaf: updatedAccount.Leaf,
}
})
for _, storageDiff := range updatedAccount.Storage {
convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], StorageNode{
convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], TrieNode{
Key: common.BytesToHash(storageDiff.Key),
Value: storageDiff.Value,
Leaf: storageDiff.Leaf,
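For orientation, a minimal sketch of driving the converter (assumes a statediff.Payload obtained from a geth statediff subscription; Convert returns interface{}, so the caller type-asserts back to *eth.IPLDPayload as the tests below do):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/statediff"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)

// convertPayload turns a raw statediff.Payload into the package's IPLDPayload.
func convertPayload(payload statediff.Payload) (*eth.IPLDPayload, error) {
	converter := eth.NewPayloadConverter(params.MainnetChainConfig)
	converted, err := converter.Convert(payload)
	if err != nil {
		return nil, err
	}
	ipldPayload, ok := converted.(*eth.IPLDPayload)
	if !ok {
		return nil, fmt.Errorf("unexpected payload type %T", converted)
	}
	return ipldPayload, nil
}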

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs_test
package eth_test
import (
"github.com/ethereum/go-ethereum/params"
@ -22,34 +22,31 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
)
var _ = Describe("Converter", func() {
Describe("Convert", func() {
It("Converts mock statediff.Payloads into the expected IPLDPayloads", func() {
converter := ipfs.NewPayloadConverter(params.MainnetChainConfig)
converterPayload, err := converter.Convert(mocks.MockStateDiffPayload)
converter := eth.NewPayloadConverter(params.MainnetChainConfig)
payload, err := converter.Convert(mocks.MockStateDiffPayload)
Expect(err).ToNot(HaveOccurred())
Expect(converterPayload.BlockNumber).To(Equal(mocks.BlockNumber))
Expect(converterPayload.BlockHash).To(Equal(mocks.MockBlock.Hash()))
Expect(converterPayload.StateNodes).To(Equal(mocks.MockStateNodes))
Expect(converterPayload.StorageNodes).To(Equal(mocks.MockStorageNodes))
gotBody, err := rlp.EncodeToBytes(converterPayload.BlockBody)
convertedPayload, ok := payload.(*eth.IPLDPayload)
Expect(ok).To(BeTrue())
Expect(convertedPayload.Block.Number().String()).To(Equal(mocks.BlockNumber.String()))
Expect(convertedPayload.Block.Hash().String()).To(Equal(mocks.MockBlock.Hash().String()))
Expect(convertedPayload.StateNodes).To(Equal(mocks.MockStateNodes))
Expect(convertedPayload.StorageNodes).To(Equal(mocks.MockStorageNodes))
Expect(convertedPayload.TotalDifficulty.Int64()).To(Equal(mocks.MockStateDiffPayload.TotalDifficulty.Int64()))
gotBody, err := rlp.EncodeToBytes(convertedPayload.Block.Body())
Expect(err).ToNot(HaveOccurred())
expectedBody, err := rlp.EncodeToBytes(mocks.MockBlock.Body())
Expect(err).ToNot(HaveOccurred())
Expect(gotBody).To(Equal(expectedBody))
Expect(converterPayload.HeaderRLP).To(Equal(mocks.MockHeaderRlp))
Expect(converterPayload.TrxMetaData).To(Equal(mocks.MockTrxMeta))
Expect(converterPayload.ReceiptMetaData).To(Equal(mocks.MockRctMeta))
})
It(" Throws an error if the wrong chain config is used", func() {
converter := ipfs.NewPayloadConverter(params.TestnetChainConfig)
_, err := converter.Convert(mocks.MockStateDiffPayload)
Expect(err).To(HaveOccurred())
Expect(convertedPayload.HeaderRLP).To(Equal(mocks.MockHeaderRlp))
Expect(convertedPayload.TrxMetaData).To(Equal(mocks.MockTrxMeta))
Expect(convertedPayload.ReceiptMetaData).To(Equal(mocks.MockRctMeta))
})
})
})

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs_test
package eth_test
import (
"io/ioutil"
@ -25,9 +25,9 @@ import (
"github.com/sirupsen/logrus"
)
func TestIPFS(t *testing.T) {
func TestETHSuperNode(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "IPFS Suite Test")
RunSpecs(t, "Super Node ETH Suite Test")
}
var _ = BeforeSuite(func() {

View File

@ -0,0 +1,274 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
)
// ResponseFilterer satisfies the ResponseFilterer interface for ethereum
type ResponseFilterer struct{}
// NewResponseFilterer creates a new Filterer satisfying the ResponseFilterer interface
func NewResponseFilterer() *ResponseFilterer {
return &ResponseFilterer{}
}
// Filter is used to filter through eth data to extract and package requested data into a Payload
func (s *ResponseFilterer) Filter(filter, payload interface{}) (interface{}, error) {
ethFilters, ok := filter.(*config.EthSubscription)
if !ok {
return StreamPayload{}, fmt.Errorf("eth filterer expected filter type %T got %T", &config.EthSubscription{}, filter)
}
ethPayload, ok := payload.(*IPLDPayload)
if !ok {
return StreamPayload{}, fmt.Errorf("eth filterer expected payload type %T got %T", &IPLDPayload{}, payload)
}
if checkRange(ethFilters.Start.Int64(), ethFilters.End.Int64(), ethPayload.Block.Number().Int64()) {
response := new(StreamPayload)
if err := s.filterHeaders(ethFilters.HeaderFilter, response, ethPayload); err != nil {
return StreamPayload{}, err
}
txHashes, err := s.filterTransactions(ethFilters.TxFilter, response, ethPayload)
if err != nil {
return StreamPayload{}, err
}
var filterTxs []common.Hash
if ethFilters.ReceiptFilter.MatchTxs {
filterTxs = txHashes
}
if err := s.filterReceipts(ethFilters.ReceiptFilter, response, ethPayload, filterTxs); err != nil {
return StreamPayload{}, err
}
if err := s.filterState(ethFilters.StateFilter, response, ethPayload); err != nil {
return StreamPayload{}, err
}
if err := s.filterStorage(ethFilters.StorageFilter, response, ethPayload); err != nil {
return StreamPayload{}, err
}
response.BlockNumber = ethPayload.Block.Number()
return *response, nil
}
return StreamPayload{}, nil
}
func (s *ResponseFilterer) filterHeaders(headerFilter config.HeaderFilter, response *StreamPayload, payload *IPLDPayload) error {
if !headerFilter.Off {
response.HeadersRlp = append(response.HeadersRlp, payload.HeaderRLP)
if headerFilter.Uncles {
response.UnclesRlp = make([][]byte, 0, len(payload.Block.Body().Uncles))
for _, uncle := range payload.Block.Body().Uncles {
uncleRlp, err := rlp.EncodeToBytes(uncle)
if err != nil {
return err
}
response.UnclesRlp = append(response.UnclesRlp, uncleRlp)
}
}
}
return nil
}
func checkRange(start, end, actual int64) bool {
return (end <= 0 || end >= actual) && start <= actual
}
func (s *ResponseFilterer) filterTransactions(trxFilter config.TxFilter, response *StreamPayload, payload *IPLDPayload) ([]common.Hash, error) {
trxHashes := make([]common.Hash, 0, len(payload.Block.Body().Transactions))
if !trxFilter.Off {
for i, trx := range payload.Block.Body().Transactions {
if checkTransactions(trxFilter.Src, trxFilter.Dst, payload.TrxMetaData[i].Src, payload.TrxMetaData[i].Dst) {
trxBuffer := new(bytes.Buffer)
if err := trx.EncodeRLP(trxBuffer); err != nil {
return nil, err
}
trxHashes = append(trxHashes, trx.Hash())
response.TransactionsRlp = append(response.TransactionsRlp, trxBuffer.Bytes())
}
}
}
return trxHashes, nil
}
func checkTransactions(wantedSrc, wantedDst []string, actualSrc, actualDst string) bool {
// If we aren't filtering for any addresses, every transaction is a go
if len(wantedDst) == 0 && len(wantedSrc) == 0 {
return true
}
for _, src := range wantedSrc {
if src == actualSrc {
return true
}
}
for _, dst := range wantedDst {
if dst == actualDst {
return true
}
}
return false
}
func (s *ResponseFilterer) filterReceipts(receiptFilter config.ReceiptFilter, response *StreamPayload, payload *IPLDPayload, trxHashes []common.Hash) error {
if !receiptFilter.Off {
for i, receipt := range payload.Receipts {
// topics is always length 4
topics := [][]string{payload.ReceiptMetaData[i].Topic0s, payload.ReceiptMetaData[i].Topic1s, payload.ReceiptMetaData[i].Topic2s, payload.ReceiptMetaData[i].Topic3s}
if checkReceipts(receipt, receiptFilter.Topics, topics, receiptFilter.Contracts, payload.ReceiptMetaData[i].Contract, trxHashes) {
receiptForStorage := (*types.ReceiptForStorage)(receipt)
receiptBuffer := new(bytes.Buffer)
if err := receiptForStorage.EncodeRLP(receiptBuffer); err != nil {
return err
}
response.ReceiptsRlp = append(response.ReceiptsRlp, receiptBuffer.Bytes())
}
}
}
return nil
}
func checkReceipts(rct *types.Receipt, wantedTopics, actualTopics [][]string, wantedContracts []string, actualContract string, wantedTrxHashes []common.Hash) bool {
// If we aren't filtering for any topics, contracts, or corresponding trxs then all receipts are a go
if len(wantedTopics) == 0 && len(wantedContracts) == 0 && len(wantedTrxHashes) == 0 {
return true
}
// Keep receipts that are from watched txs
for _, wantedTrxHash := range wantedTrxHashes {
if bytes.Equal(wantedTrxHash.Bytes(), rct.TxHash.Bytes()) {
return true
}
}
// If there are no wanted contract addresses, we keep all receipts that match the topic filter
if len(wantedContracts) == 0 {
if filterMatch(wantedTopics, actualTopics) {
return true
}
}
// If there are wanted contract addresses to filter on
for _, wantedAddr := range wantedContracts {
// and this is an address of interest
if wantedAddr == actualContract {
// we keep the receipt if it matches on the topic filter
if filterMatch(wantedTopics, actualTopics) {
return true
}
}
}
return false
}
func filterMatch(wantedTopics, actualTopics [][]string) bool {
// actualTopics should always be length 4, members could be nil slices though
lenWantedTopics := len(wantedTopics)
matches := 0
for i, actualTopicSet := range actualTopics {
if i < lenWantedTopics {
// If we have topics in this filter slot, count as a match if one of the topics matches
if len(wantedTopics[i]) > 0 {
matches += slicesShareString(actualTopicSet, wantedTopics[i])
} else {
// Filter slot is empty, not matching any topics at this slot => counts as a match
matches++
}
} else {
// Filter slot doesn't exist, not matching any topics at this slot => count as a match
matches++
}
}
return matches == 4
}
// returns 1 if the two slices have a string in common, 0 if they do not
func slicesShareString(slice1, slice2 []string) int {
for _, str1 := range slice1 {
for _, str2 := range slice2 {
if str1 == str2 {
return 1
}
}
}
return 0
}
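// Illustration (hypothetical values): with wantedTopics = [][]string{{"0xa"}} and
// actualTopics = [][]string{{"0xa", "0xb"}, nil, nil, nil}, slot 0 matches via
// slicesShareString and slots 1-3 count as matches because the filter has no
// entries for them, so filterMatch returns true (matches == 4).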
func (s *ResponseFilterer) filterState(stateFilter config.StateFilter, response *StreamPayload, payload *IPLDPayload) error {
if !stateFilter.Off {
response.StateNodesRlp = make(map[common.Hash][]byte)
keyFilters := make([]common.Hash, len(stateFilter.Addresses))
for i, addr := range stateFilter.Addresses {
keyFilters[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes())
}
for _, stateNode := range payload.StateNodes {
if checkNodeKeys(keyFilters, stateNode.Key) {
if stateNode.Leaf || stateFilter.IntermediateNodes {
response.StateNodesRlp[stateNode.Key] = stateNode.Value
}
}
}
}
return nil
}
func checkNodeKeys(wantedKeys []common.Hash, actualKey common.Hash) bool {
// If we aren't filtering for any specific keys, all nodes are a go
if len(wantedKeys) == 0 {
return true
}
for _, key := range wantedKeys {
if bytes.Equal(key.Bytes(), actualKey.Bytes()) {
return true
}
}
return false
}
func (s *ResponseFilterer) filterStorage(storageFilter config.StorageFilter, response *StreamPayload, payload *IPLDPayload) error {
if !storageFilter.Off {
response.StorageNodesRlp = make(map[common.Hash]map[common.Hash][]byte)
stateKeyFilters := make([]common.Hash, len(storageFilter.Addresses))
for i, addr := range storageFilter.Addresses {
stateKeyFilters[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes())
}
storageKeyFilters := make([]common.Hash, len(storageFilter.StorageKeys))
for i, store := range storageFilter.StorageKeys {
storageKeyFilters[i] = common.HexToHash(store)
}
for stateKey, storageNodes := range payload.StorageNodes {
if checkNodeKeys(stateKeyFilters, stateKey) {
response.StorageNodesRlp[stateKey] = make(map[common.Hash][]byte)
for _, storageNode := range storageNodes {
if checkNodeKeys(storageKeyFilters, storageNode.Key) {
response.StorageNodesRlp[stateKey][storageNode.Key] = storageNode.Value
}
}
}
}
}
return nil
}
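A minimal sketch of building a subscription filter and running it through the filterer (hypothetical field values; assumes the config.EthSubscription fields used above, with Start/End as *big.Int):

package main

import (
	"fmt"
	"math/big"

	"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)

// filterReceiptsByContract keeps only receipts emitted by the given contract.
func filterReceiptsByContract(payload *eth.IPLDPayload, contract string) (eth.StreamPayload, error) {
	filterer := eth.NewResponseFilterer()
	sub := &config.EthSubscription{
		Start:         big.NewInt(0),
		End:           big.NewInt(0), // end <= 0 reads as "no upper bound" in checkRange
		HeaderFilter:  config.HeaderFilter{Off: true},
		TxFilter:      config.TxFilter{Off: true},
		ReceiptFilter: config.ReceiptFilter{Contracts: []string{contract}},
		StateFilter:   config.StateFilter{Off: true},
		StorageFilter: config.StorageFilter{Off: true},
	}
	filtered, err := filterer.Filter(sub, payload)
	if err != nil {
		return eth.StreamPayload{}, err
	}
	streamPayload, ok := filtered.(eth.StreamPayload)
	if !ok {
		return eth.StreamPayload{}, fmt.Errorf("unexpected response type %T", filtered)
	}
	return streamPayload, nil
}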

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package super_node_test
package eth_test
import (
"bytes"
@ -23,12 +23,13 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
var (
filterer super_node.ResponseFilterer
filterer *eth.ResponseFilterer
expectedRctForStorageRLP1 []byte
expectedRctForStorageRLP2 []byte
)
@ -36,33 +37,38 @@ var (
var _ = Describe("Filterer", func() {
Describe("FilterResponse", func() {
BeforeEach(func() {
filterer = super_node.NewResponseFilterer()
filterer = eth.NewResponseFilterer()
expectedRctForStorageRLP1 = getReceiptForStorageRLP(mocks.MockReceipts, 0)
expectedRctForStorageRLP2 = getReceiptForStorageRLP(mocks.MockReceipts, 1)
})
It("Transcribes all the data from the IPLDPayload into the SuperNodePayload if given an open filter", func() {
superNodePayload, err := filterer.FilterResponse(openFilter, *mocks.MockIPLDPayload)
It("Transcribes all the data from the IPLDPayload into the StreamPayload if given an open filter", func() {
payload, err := filterer.Filter(openFilter, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
Expect(superNodePayload.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
Expect(superNodePayload.HeadersRlp).To(Equal(mocks.MockSeeNodePayload.HeadersRlp))
Expect(superNodePayload.UnclesRlp).To(Equal(mocks.MockSeeNodePayload.UnclesRlp))
superNodePayload, ok := payload.(eth.StreamPayload)
Expect(ok).To(BeTrue())
Expect(superNodePayload.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(superNodePayload.HeadersRlp).To(Equal(mocks.MockSeedNodePayload.HeadersRlp))
var unclesRlp [][]byte
Expect(superNodePayload.UnclesRlp).To(Equal(unclesRlp))
Expect(len(superNodePayload.TransactionsRlp)).To(Equal(2))
Expect(super_node.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
Expect(super_node.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(shared.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
Expect(shared.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(len(superNodePayload.ReceiptsRlp)).To(Equal(2))
Expect(super_node.ListContainsBytes(superNodePayload.ReceiptsRlp, expectedRctForStorageRLP1)).To(BeTrue())
Expect(super_node.ListContainsBytes(superNodePayload.ReceiptsRlp, expectedRctForStorageRLP2)).To(BeTrue())
Expect(shared.ListContainsBytes(superNodePayload.ReceiptsRlp, expectedRctForStorageRLP1)).To(BeTrue())
Expect(shared.ListContainsBytes(superNodePayload.ReceiptsRlp, expectedRctForStorageRLP2)).To(BeTrue())
Expect(len(superNodePayload.StateNodesRlp)).To(Equal(2))
Expect(superNodePayload.StateNodesRlp[mocks.ContractLeafKey]).To(Equal(mocks.ValueBytes))
Expect(superNodePayload.StateNodesRlp[mocks.AnotherContractLeafKey]).To(Equal(mocks.AnotherValueBytes))
Expect(superNodePayload.StorageNodesRlp).To(Equal(mocks.MockSeeNodePayload.StorageNodesRlp))
Expect(superNodePayload.StorageNodesRlp).To(Equal(mocks.MockSeedNodePayload.StorageNodesRlp))
})
It("Applies filters from the provided config.Subscription", func() {
superNodePayload1, err := filterer.FilterResponse(rctContractFilter, *mocks.MockIPLDPayload)
payload1, err := filterer.Filter(rctContractFilter, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
Expect(superNodePayload1.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
superNodePayload1, ok := payload1.(eth.StreamPayload)
Expect(ok).To(BeTrue())
Expect(superNodePayload1.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(len(superNodePayload1.HeadersRlp)).To(Equal(0))
Expect(len(superNodePayload1.UnclesRlp)).To(Equal(0))
Expect(len(superNodePayload1.TransactionsRlp)).To(Equal(0))
@ -71,9 +77,11 @@ var _ = Describe("Filterer", func() {
Expect(len(superNodePayload1.ReceiptsRlp)).To(Equal(1))
Expect(superNodePayload1.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP2))
superNodePayload2, err := filterer.FilterResponse(rctTopicsFilter, *mocks.MockIPLDPayload)
payload2, err := filterer.Filter(rctTopicsFilter, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
Expect(superNodePayload2.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
superNodePayload2, ok := payload2.(eth.StreamPayload)
Expect(ok).To(BeTrue())
Expect(superNodePayload2.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(len(superNodePayload2.HeadersRlp)).To(Equal(0))
Expect(len(superNodePayload2.UnclesRlp)).To(Equal(0))
Expect(len(superNodePayload2.TransactionsRlp)).To(Equal(0))
@ -82,9 +90,11 @@ var _ = Describe("Filterer", func() {
Expect(len(superNodePayload2.ReceiptsRlp)).To(Equal(1))
Expect(superNodePayload2.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP1))
superNodePayload3, err := filterer.FilterResponse(rctTopicsAndContractFilter, *mocks.MockIPLDPayload)
payload3, err := filterer.Filter(rctTopicsAndContractFilter, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
Expect(superNodePayload3.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
superNodePayload3, ok := payload3.(eth.StreamPayload)
Expect(ok).To(BeTrue())
Expect(superNodePayload3.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(len(superNodePayload3.HeadersRlp)).To(Equal(0))
Expect(len(superNodePayload3.UnclesRlp)).To(Equal(0))
Expect(len(superNodePayload3.TransactionsRlp)).To(Equal(0))
@ -93,9 +103,11 @@ var _ = Describe("Filterer", func() {
Expect(len(superNodePayload3.ReceiptsRlp)).To(Equal(1))
Expect(superNodePayload3.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP1))
superNodePayload4, err := filterer.FilterResponse(rctContractsAndTopicFilter, *mocks.MockIPLDPayload)
payload4, err := filterer.Filter(rctContractsAndTopicFilter, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
Expect(superNodePayload4.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
superNodePayload4, ok := payload4.(eth.StreamPayload)
Expect(ok).To(BeTrue())
Expect(superNodePayload4.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(len(superNodePayload4.HeadersRlp)).To(Equal(0))
Expect(len(superNodePayload4.UnclesRlp)).To(Equal(0))
Expect(len(superNodePayload4.TransactionsRlp)).To(Equal(0))
@ -104,35 +116,41 @@ var _ = Describe("Filterer", func() {
Expect(len(superNodePayload4.ReceiptsRlp)).To(Equal(1))
Expect(superNodePayload4.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP2))
superNodePayload5, err := filterer.FilterResponse(rctsForAllCollectedTrxs, *mocks.MockIPLDPayload)
payload5, err := filterer.Filter(rctsForAllCollectedTrxs, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
Expect(superNodePayload5.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
superNodePayload5, ok := payload5.(eth.StreamPayload)
Expect(ok).To(BeTrue())
Expect(superNodePayload5.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(len(superNodePayload5.HeadersRlp)).To(Equal(0))
Expect(len(superNodePayload5.UnclesRlp)).To(Equal(0))
Expect(len(superNodePayload5.TransactionsRlp)).To(Equal(2))
Expect(super_node.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
Expect(super_node.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(shared.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
Expect(shared.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(len(superNodePayload5.StorageNodesRlp)).To(Equal(0))
Expect(len(superNodePayload5.StateNodesRlp)).To(Equal(0))
Expect(len(superNodePayload5.ReceiptsRlp)).To(Equal(2))
Expect(super_node.ListContainsBytes(superNodePayload5.ReceiptsRlp, expectedRctForStorageRLP1)).To(BeTrue())
Expect(super_node.ListContainsBytes(superNodePayload5.ReceiptsRlp, expectedRctForStorageRLP2)).To(BeTrue())
Expect(shared.ListContainsBytes(superNodePayload5.ReceiptsRlp, expectedRctForStorageRLP1)).To(BeTrue())
Expect(shared.ListContainsBytes(superNodePayload5.ReceiptsRlp, expectedRctForStorageRLP2)).To(BeTrue())
superNodePayload6, err := filterer.FilterResponse(rctsForSelectCollectedTrxs, *mocks.MockIPLDPayload)
payload6, err := filterer.Filter(rctsForSelectCollectedTrxs, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
Expect(superNodePayload6.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
superNodePayload6, ok := payload6.(eth.StreamPayload)
Expect(ok).To(BeTrue())
Expect(superNodePayload6.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(len(superNodePayload6.HeadersRlp)).To(Equal(0))
Expect(len(superNodePayload6.UnclesRlp)).To(Equal(0))
Expect(len(superNodePayload6.TransactionsRlp)).To(Equal(1))
Expect(super_node.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(shared.ListContainsBytes(superNodePayload6.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(len(superNodePayload6.StorageNodesRlp)).To(Equal(0))
Expect(len(superNodePayload6.StateNodesRlp)).To(Equal(0))
Expect(len(superNodePayload6.ReceiptsRlp)).To(Equal(1))
Expect(superNodePayload6.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP2))
superNodePayload7, err := filterer.FilterResponse(stateFilter, *mocks.MockIPLDPayload)
payload7, err := filterer.Filter(stateFilter, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
Expect(superNodePayload7.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
superNodePayload7, ok := payload7.(eth.StreamPayload)
Expect(ok).To(BeTrue())
Expect(superNodePayload7.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(len(superNodePayload7.HeadersRlp)).To(Equal(0))
Expect(len(superNodePayload7.UnclesRlp)).To(Equal(0))
Expect(len(superNodePayload7.TransactionsRlp)).To(Equal(0))
@ -140,6 +158,18 @@ var _ = Describe("Filterer", func() {
Expect(len(superNodePayload7.ReceiptsRlp)).To(Equal(0))
Expect(len(superNodePayload7.StateNodesRlp)).To(Equal(1))
Expect(superNodePayload7.StateNodesRlp[mocks.ContractLeafKey]).To(Equal(mocks.ValueBytes))
payload8, err := filterer.Filter(rctTopicsAndContractFilterFail, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
superNodePayload8, ok := payload8.(eth.StreamPayload)
Expect(ok).To(BeTrue())
Expect(superNodePayload8.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(len(superNodePayload8.HeadersRlp)).To(Equal(0))
Expect(len(superNodePayload8.UnclesRlp)).To(Equal(0))
Expect(len(superNodePayload8.TransactionsRlp)).To(Equal(0))
Expect(len(superNodePayload8.StorageNodesRlp)).To(Equal(0))
Expect(len(superNodePayload8.StateNodesRlp)).To(Equal(0))
Expect(len(superNodePayload8.ReceiptsRlp)).To(Equal(0))
})
})
})

View File

@ -0,0 +1,148 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)
// CIDIndexer satisfies the CIDIndexer interface for ethereum
type CIDIndexer struct {
db *postgres.DB
}
// NewCIDIndexer creates a pointer to a new CIDIndexer which satisfies the CIDIndexer interface
func NewCIDIndexer(db *postgres.DB) *CIDIndexer {
return &CIDIndexer{
db: db,
}
}
// Index indexes a cidPayload in Postgres
func (in *CIDIndexer) Index(cids interface{}) error {
cidPayload, ok := cids.(*CIDPayload)
if !ok {
return fmt.Errorf("eth indexer expected cids type %T got %T", &CIDPayload{}, cids)
}
tx, err := in.db.Beginx()
if err != nil {
return err
}
headerID, err := in.indexHeaderCID(tx, cidPayload.HeaderCID)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
return err
}
for _, uncle := range cidPayload.UncleCIDs {
if err := in.indexUncleCID(tx, uncle, headerID); err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
return err
}
}
if err := in.indexTransactionAndReceiptCIDs(tx, cidPayload, headerID); err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
return err
}
if err := in.indexStateAndStorageCIDs(tx, cidPayload, headerID); err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
return err
}
return tx.Commit()
}
func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel) (int64, error) {
var headerID int64
err := tx.QueryRowx(`INSERT INTO public.header_cids (block_number, block_hash, parent_hash, cid, td) VALUES ($1, $2, $3, $4, $5)
ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td) = ($3, $4, $5)
RETURNING id`,
header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty).Scan(&headerID)
return headerID, err
}
func (in *CIDIndexer) indexUncleCID(tx *sqlx.Tx, uncle UncleModel, headerID int64) error {
_, err := tx.Exec(`INSERT INTO public.uncle_cids (block_hash, header_id, parent_hash, cid) VALUES ($1, $2, $3, $4)
ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid) = ($3, $4)`,
uncle.BlockHash, headerID, uncle.ParentHash, uncle.CID)
return err
}
func (in *CIDIndexer) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error {
for _, trxCidMeta := range payload.TransactionCIDs {
var txID int64
err := tx.QueryRowx(`INSERT INTO public.transaction_cids (header_id, tx_hash, cid, dst, src, index) VALUES ($1, $2, $3, $4, $5, $6)
ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index) = ($3, $4, $5, $6)
RETURNING id`,
headerID, trxCidMeta.TxHash, trxCidMeta.CID, trxCidMeta.Dst, trxCidMeta.Src, trxCidMeta.Index).Scan(&txID)
if err != nil {
return err
}
receiptCidMeta, ok := payload.ReceiptCIDs[common.HexToHash(trxCidMeta.TxHash)]
if ok {
if err := in.indexReceiptCID(tx, receiptCidMeta, txID); err != nil {
return err
}
}
}
return nil
}
func (in *CIDIndexer) indexReceiptCID(tx *sqlx.Tx, cidMeta ReceiptModel, txID int64) error {
_, err := tx.Exec(`INSERT INTO public.receipt_cids (tx_id, cid, contract, topic0s, topic1s, topic2s, topic3s) VALUES ($1, $2, $3, $4, $5, $6, $7)`,
txID, cidMeta.CID, cidMeta.Contract, pq.Array(cidMeta.Topic0s), pq.Array(cidMeta.Topic1s), pq.Array(cidMeta.Topic2s), pq.Array(cidMeta.Topic3s))
return err
}
func (in *CIDIndexer) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error {
for _, stateCID := range payload.StateNodeCIDs {
var stateID int64
err := tx.QueryRowx(`INSERT INTO public.state_cids (header_id, state_key, cid, leaf) VALUES ($1, $2, $3, $4)
ON CONFLICT (header_id, state_key) DO UPDATE SET (cid, leaf) = ($3, $4)
RETURNING id`,
headerID, stateCID.StateKey, stateCID.CID, stateCID.Leaf).Scan(&stateID)
if err != nil {
return err
}
for _, storageCID := range payload.StorageNodeCIDs[common.HexToHash(stateCID.StateKey)] {
if err := in.indexStorageCID(tx, storageCID, stateID); err != nil {
return err
}
}
}
return nil
}
func (in *CIDIndexer) indexStorageCID(tx *sqlx.Tx, storageCID StorageNodeModel, stateID int64) error {
_, err := tx.Exec(`INSERT INTO public.storage_cids (state_id, storage_key, cid, leaf) VALUES ($1, $2, $3, $4)
ON CONFLICT (state_id, storage_key) DO UPDATE SET (cid, leaf) = ($3, $4)`,
stateID, storageCID.StorageKey, storageCID.CID, storageCID.Leaf)
return err
}
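A minimal usage sketch (assumes an open *postgres.DB handle; Index runs all inserts in one transaction and rolls back on any error):

package main

import (
	"log"

	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
	"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)

// indexPayload persists a CIDPayload; on any failure the indexer has
// already rolled the enclosing transaction back.
func indexPayload(db *postgres.DB, payload *eth.CIDPayload) {
	indexer := eth.NewCIDIndexer(db)
	if err := indexer.Index(payload); err != nil {
		log.Fatal(err)
	}
}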

View File

@ -14,45 +14,50 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package super_node_test
package eth_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
eth2 "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
)
var _ = Describe("Repository", func() {
var _ = Describe("Indexer", func() {
var (
db *postgres.DB
err error
repo super_node.CIDRepository
repo *eth.CIDIndexer
)
BeforeEach(func() {
db, err = super_node.SetupDB()
db, err = eth.SetupDB()
Expect(err).ToNot(HaveOccurred())
repo = super_node.NewCIDRepository(db)
repo = eth.NewCIDIndexer(db)
})
AfterEach(func() {
super_node.TearDownDB(db)
eth.TearDownDB(db)
})
Describe("Index", func() {
It("Indexes CIDs and related metadata into vulcanizedb", func() {
err = repo.Index(mocks.MockCIDPayload)
Expect(err).ToNot(HaveOccurred())
pgStr := `SELECT cid FROM header_cids
WHERE block_number = $1 AND uncle IS FALSE`
pgStr := `SELECT cid, td FROM header_cids
WHERE block_number = $1`
// check header was properly indexed
headers := make([]string, 0)
err = db.Select(&headers, pgStr, 1)
type res struct {
CID string
TD string
}
headers := new(res)
err = db.QueryRowx(pgStr, 1).StructScan(headers)
Expect(err).ToNot(HaveOccurred())
Expect(len(headers)).To(Equal(1))
Expect(headers[0]).To(Equal("mockHeaderCID"))
Expect(headers.CID).To(Equal("mockHeaderCID"))
Expect(headers.TD).To(Equal("1337"))
// check trxs were properly indexed
trxs := make([]string, 0)
pgStr = `SELECT transaction_cids.cid FROM transaction_cids INNER JOIN header_cids ON (transaction_cids.header_id = header_cids.id)
@ -60,8 +65,8 @@ var _ = Describe("Repository", func() {
err = db.Select(&trxs, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(trxs)).To(Equal(2))
Expect(super_node.ListContainsString(trxs, "mockTrxCID1")).To(BeTrue())
Expect(super_node.ListContainsString(trxs, "mockTrxCID2")).To(BeTrue())
Expect(shared.ListContainsString(trxs, "mockTrxCID1")).To(BeTrue())
Expect(shared.ListContainsString(trxs, "mockTrxCID2")).To(BeTrue())
// check receipts were properly indexed
rcts := make([]string, 0)
pgStr = `SELECT receipt_cids.cid FROM receipt_cids, transaction_cids, header_cids
@ -71,10 +76,10 @@ var _ = Describe("Repository", func() {
err = db.Select(&rcts, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(rcts)).To(Equal(2))
Expect(super_node.ListContainsString(rcts, "mockRctCID1")).To(BeTrue())
Expect(super_node.ListContainsString(rcts, "mockRctCID2")).To(BeTrue())
Expect(shared.ListContainsString(rcts, "mockRctCID1")).To(BeTrue())
Expect(shared.ListContainsString(rcts, "mockRctCID2")).To(BeTrue())
// check that state nodes were properly indexed
stateNodes := make([]ipfs.StateNodeCID, 0)
stateNodes := make([]eth.StateNodeModel, 0)
pgStr = `SELECT state_cids.cid, state_cids.state_key, state_cids.leaf FROM state_cids INNER JOIN header_cids ON (state_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
err = db.Select(&stateNodes, pgStr, 1)
@ -83,15 +88,15 @@ var _ = Describe("Repository", func() {
for _, stateNode := range stateNodes {
if stateNode.CID == "mockStateCID1" {
Expect(stateNode.Leaf).To(Equal(true))
Expect(stateNode.Key).To(Equal(mocks.ContractLeafKey.Hex()))
Expect(stateNode.StateKey).To(Equal(mocks.ContractLeafKey.Hex()))
}
if stateNode.CID == "mockStateCID2" {
Expect(stateNode.Leaf).To(Equal(true))
Expect(stateNode.Key).To(Equal(mocks.AnotherContractLeafKey.Hex()))
Expect(stateNode.StateKey).To(Equal(mocks.AnotherContractLeafKey.Hex()))
}
}
// check that storage nodes were properly indexed
storageNodes := make([]ipfs.StorageNodeCID, 0)
storageNodes := make([]eth.StorageNodeWithStateKeyModel, 0)
pgStr = `SELECT storage_cids.cid, state_cids.state_key, storage_cids.storage_key, storage_cids.leaf FROM storage_cids, state_cids, header_cids
WHERE storage_cids.state_id = state_cids.id
AND state_cids.header_id = header_cids.id
@ -99,11 +104,11 @@ var _ = Describe("Repository", func() {
err = db.Select(&storageNodes, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(storageNodes)).To(Equal(1))
Expect(storageNodes[0]).To(Equal(ipfs.StorageNodeCID{
CID: "mockStorageCID",
Leaf: true,
Key: "0x0000000000000000000000000000000000000000000000000000000000000001",
StateKey: mocks.ContractLeafKey.Hex(),
Expect(storageNodes[0]).To(Equal(eth.StorageNodeWithStateKeyModel{
CID: "mockStorageCID",
Leaf: true,
StorageKey: "0x0000000000000000000000000000000000000000000000000000000000000001",
StateKey: mocks.ContractLeafKey.Hex(),
}))
})
})

View File

@ -0,0 +1,234 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"context"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-block-format"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)
var (
errUnexpectedNumberOfIPLDs = errors.New("ipfs batch fetch returned unexpected number of IPLDs")
)
// IPLDFetcher satisfies the IPLDFetcher interface for ethereum
type IPLDFetcher struct {
BlockService blockservice.BlockService
}
// NewIPLDFetcher creates a pointer to a new IPLDFetcher
func NewIPLDFetcher(ipfsPath string) (*IPLDFetcher, error) {
blockService, err := ipfs.InitIPFSBlockService(ipfsPath)
if err != nil {
return nil, err
}
return &IPLDFetcher{
BlockService: blockService,
}, nil
}
// Fetch is the exported method for fetching and returning all the IPLDS specified in the CIDWrapper
func (f *IPLDFetcher) Fetch(cids interface{}) (interface{}, error) {
cidWrapper, ok := cids.(*CIDWrapper)
if !ok {
return nil, fmt.Errorf("eth fetcher: expected cids type %T got %T", &CIDWrapper{}, cids)
}
log.Debug("fetching iplds")
iplds := new(IPLDWrapper)
iplds.BlockNumber = cidWrapper.BlockNumber
var err error
iplds.Headers, err = f.FetchHeaders(cidWrapper.Headers)
if err != nil {
return nil, err
}
iplds.Uncles, err = f.FetchUncles(cidWrapper.Uncles)
if err != nil {
return nil, err
}
iplds.Transactions, err = f.FetchTrxs(cidWrapper.Transactions)
if err != nil {
return nil, err
}
iplds.Receipts, err = f.FetchRcts(cidWrapper.Receipts)
if err != nil {
return nil, err
}
iplds.StateNodes, err = f.FetchState(cidWrapper.StateNodes)
if err != nil {
return nil, err
}
iplds.StorageNodes, err = f.FetchStorage(cidWrapper.StorageNodes)
if err != nil {
return nil, err
}
return iplds, nil
}
// FetchHeaders fetches headers
// It uses the f.fetchBatch method
func (f *IPLDFetcher) FetchHeaders(cids []HeaderModel) ([]blocks.Block, error) {
log.Debug("fetching header iplds")
headerCids := make([]cid.Cid, 0, len(cids))
for _, c := range cids {
dc, err := cid.Decode(c.CID)
if err != nil {
return nil, err
}
headerCids = append(headerCids, dc)
}
headers := f.fetchBatch(headerCids)
if len(headers) != len(headerCids) {
log.Errorf("ipfs fetcher: number of header blocks returned (%d) does not match number expected (%d)", len(headers), len(headerCids))
return headers, errUnexpectedNumberOfIPLDs
}
return headers, nil
}
// FetchUncles fetches uncles
// It uses the f.fetchBatch method
func (f *IPLDFetcher) FetchUncles(cids []UncleModel) ([]blocks.Block, error) {
log.Debug("fetching uncle iplds")
uncleCids := make([]cid.Cid, 0, len(cids))
for _, c := range cids {
dc, err := cid.Decode(c.CID)
if err != nil {
return nil, err
}
uncleCids = append(uncleCids, dc)
}
uncles := f.fetchBatch(uncleCids)
if len(uncles) != len(uncleCids) {
log.Errorf("ipfs fetcher: number of uncle blocks returned (%d) does not match number expected (%d)", len(uncles), len(uncleCids))
return uncles, errUnexpectedNumberOfIPLDs
}
return uncles, nil
}
// FetchTrxs fetches transactions
// It uses the f.fetchBatch method
func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([]blocks.Block, error) {
log.Debug("fetching transaction iplds")
trxCids := make([]cid.Cid, 0, len(cids))
for _, c := range cids {
dc, err := cid.Decode(c.CID)
if err != nil {
return nil, err
}
trxCids = append(trxCids, dc)
}
trxs := f.fetchBatch(trxCids)
if len(trxs) != len(trxCids) {
log.Errorf("ipfs fetcher: number of transaction blocks returned (%d) does not match number expected (%d)", len(trxs), len(trxCids))
return trxs, errUnexpectedNumberOfIPLDs
}
return trxs, nil
}
// FetchRcts fetches receipts
// It uses the f.fetchBatch method
func (f *IPLDFetcher) FetchRcts(cids []ReceiptModel) ([]blocks.Block, error) {
log.Debug("fetching receipt iplds")
rctCids := make([]cid.Cid, 0, len(cids))
for _, c := range cids {
dc, err := cid.Decode(c.CID)
if err != nil {
return nil, err
}
rctCids = append(rctCids, dc)
}
rcts := f.fetchBatch(rctCids)
if len(rcts) != len(rctCids) {
log.Errorf("ipfs fetcher: number of receipt blocks returned (%d) does not match number expected (%d)", len(rcts), len(rctCids))
return rcts, errUnexpectedNumberOfIPLDs
}
return rcts, nil
}
// FetchState fetches state nodes
// It uses the single f.fetch method instead of the batch fetch, because it
// needs to maintain the data's relation to state keys
func (f *IPLDFetcher) FetchState(cids []StateNodeModel) (map[common.Hash]blocks.Block, error) {
log.Debug("fetching state iplds")
stateNodes := make(map[common.Hash]blocks.Block)
for _, stateNode := range cids {
if stateNode.CID == "" || stateNode.StateKey == "" {
continue
}
dc, err := cid.Decode(stateNode.CID)
if err != nil {
return nil, err
}
state, err := f.fetch(dc)
if err != nil {
return nil, err
}
stateNodes[common.HexToHash(stateNode.StateKey)] = state
}
return stateNodes, nil
}
// FetchStorage fetches storage nodes
// It uses the single f.fetch method instead of the batch fetch, because it
// needs to maintain the data's relation to state and storage keys
func (f *IPLDFetcher) FetchStorage(cids []StorageNodeWithStateKeyModel) (map[common.Hash]map[common.Hash]blocks.Block, error) {
log.Debug("fetching storage iplds")
storageNodes := make(map[common.Hash]map[common.Hash]blocks.Block)
for _, storageNode := range cids {
if storageNode.CID == "" || storageNode.StorageKey == "" || storageNode.StateKey == "" {
continue
}
dc, err := cid.Decode(storageNode.CID)
if err != nil {
return nil, err
}
storage, err := f.fetch(dc)
if err != nil {
return nil, err
}
if storageNodes[common.HexToHash(storageNode.StateKey)] == nil {
storageNodes[common.HexToHash(storageNode.StateKey)] = make(map[common.Hash]blocks.Block)
}
storageNodes[common.HexToHash(storageNode.StateKey)][common.HexToHash(storageNode.StorageKey)] = storage
}
return storageNodes, nil
}
// fetch is used to fetch a single cid
func (f *IPLDFetcher) fetch(cid cid.Cid) (blocks.Block, error) {
return f.BlockService.GetBlock(context.Background(), cid)
}
// fetchBatch is used to fetch a batch of IPFS data blocks by cid
// There is no guarantee all are fetched, and no error in such a case, so
// downstream we will need to confirm which CIDs were fetched in the result set
func (f *IPLDFetcher) fetchBatch(cids []cid.Cid) []blocks.Block {
fetchedBlocks := make([]blocks.Block, 0, len(cids))
blockChan := f.BlockService.GetBlocks(context.Background(), cids)
for block := range blockChan {
fetchedBlocks = append(fetchedBlocks, block)
}
return fetchedBlocks
}
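Because fetchBatch offers no completeness guarantee, a downstream caller can diff the returned CIDs against the requested set; a sketch of that confirmation (illustrative helper, not part of the package):

package main

import (
	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
)

// missingCIDs reports which requested CIDs were absent from a fetchBatch
// result set, so the caller can retry or surface a partial-fetch error.
func missingCIDs(requested []cid.Cid, fetched []blocks.Block) []cid.Cid {
	returned := make(map[cid.Cid]struct{}, len(fetched))
	for _, blk := range fetched {
		returned[blk.Cid()] = struct{}{}
	}
	var missing []cid.Cid
	for _, c := range requested {
		if _, ok := returned[c]; !ok {
			missing = append(missing, c)
		}
	}
	return missing
}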

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs_test
package eth_test
import (
"math/big"
@ -24,8 +24,8 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
)
var (
@ -45,28 +45,44 @@ var (
mockStorageBlock2 = blocks.NewBlock(mockStorageData2)
mockBlocks = []blocks.Block{mockHeaderBlock, mockUncleBlock, mockTrxBlock, mockReceiptBlock, mockStateBlock, mockStorageBlock1, mockStorageBlock2}
mockBlockService *mocks.MockIPFSBlockService
mockCIDWrapper = ipfs.CIDWrapper{
BlockNumber: big.NewInt(9000),
Headers: []string{mockHeaderBlock.Cid().String()},
Uncles: []string{mockUncleBlock.Cid().String()},
Transactions: []string{mockTrxBlock.Cid().String()},
Receipts: []string{mockReceiptBlock.Cid().String()},
StateNodes: []ipfs.StateNodeCID{{
CID: mockStateBlock.Cid().String(),
Leaf: true,
Key: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
}},
StorageNodes: []ipfs.StorageNodeCID{{
CID: mockStorageBlock1.Cid().String(),
mockCIDWrapper = &eth.CIDWrapper{
BlockNumber: big.NewInt(9000),
Headers: []eth.HeaderModel{
{
CID: mockHeaderBlock.Cid().String(),
},
},
Uncles: []eth.UncleModel{
{
CID: mockUncleBlock.Cid().String(),
},
},
Transactions: []eth.TxModel{
{
CID: mockTrxBlock.Cid().String(),
},
},
Receipts: []eth.ReceiptModel{
{
CID: mockReceiptBlock.Cid().String(),
},
},
StateNodes: []eth.StateNodeModel{{
CID: mockStateBlock.Cid().String(),
Leaf: true,
StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
Key: "0000000000000000000000000000000000000000000000000000000000000001",
}},
StorageNodes: []eth.StorageNodeWithStateKeyModel{{
CID: mockStorageBlock1.Cid().String(),
Leaf: true,
StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
StorageKey: "0000000000000000000000000000000000000000000000000000000000000001",
},
{
CID: mockStorageBlock2.Cid().String(),
Leaf: true,
StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
Key: "0000000000000000000000000000000000000000000000000000000000000002",
CID: mockStorageBlock2.Cid().String(),
Leaf: true,
StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
StorageKey: "0000000000000000000000000000000000000000000000000000000000000002",
}},
}
)
@ -81,10 +97,12 @@ var _ = Describe("Fetcher", func() {
})
It("Fetches and returns IPLDs for the CIDs provided in the CIDWrapper", func() {
fetcher := new(ipfs.EthIPLDFetcher)
fetcher := new(eth.IPLDFetcher)
fetcher.BlockService = mockBlockService
iplds, err := fetcher.FetchIPLDs(mockCIDWrapper)
i, err := fetcher.Fetch(mockCIDWrapper)
Expect(err).ToNot(HaveOccurred())
iplds, ok := i.(*eth.IPLDWrapper)
Expect(ok).To(BeTrue())
Expect(iplds.BlockNumber).To(Equal(mockCIDWrapper.BlockNumber))
Expect(len(iplds.Headers)).To(Equal(1))
Expect(iplds.Headers[0]).To(Equal(mockHeaderBlock))

View File

@ -21,33 +21,41 @@ import (
"github.com/ethereum/go-ethereum/statediff"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)
// PayloadConverter is the underlying struct for the Converter interface
type PayloadConverter struct {
PassedStatediffPayload statediff.Payload
ReturnIPLDPayload *ipfs.IPLDPayload
ReturnIPLDPayload *eth.IPLDPayload
ReturnErr error
}
// Convert method is used to convert a geth statediff.Payload to an IPLDPayload
func (pc *PayloadConverter) Convert(payload statediff.Payload) (*ipfs.IPLDPayload, error) {
pc.PassedStatediffPayload = payload
func (pc *PayloadConverter) Convert(payload interface{}) (interface{}, error) {
stateDiffPayload, ok := payload.(statediff.Payload)
if !ok {
return nil, fmt.Errorf("convert expected payload type %T got %T", statediff.Payload{}, payload)
}
pc.PassedStatediffPayload = stateDiffPayload
return pc.ReturnIPLDPayload, pc.ReturnErr
}
// IterativePayloadConverter is the underlying struct for the Converter interface
type IterativePayloadConverter struct {
PassedStatediffPayload []statediff.Payload
ReturnIPLDPayload []*ipfs.IPLDPayload
ReturnIPLDPayload []*eth.IPLDPayload
ReturnErr error
iteration int
}
// Convert method is used to convert a geth statediff.Payload to an IPLDPayload
func (pc *IterativePayloadConverter) Convert(payload statediff.Payload) (*ipfs.IPLDPayload, error) {
pc.PassedStatediffPayload = append(pc.PassedStatediffPayload, payload)
func (pc *IterativePayloadConverter) Convert(payload interface{}) (interface{}, error) {
stateDiffPayload, ok := payload.(statediff.Payload)
if !ok {
return nil, fmt.Errorf("convert expected payload type %T got %T", statediff.Payload{}, payload)
}
pc.PassedStatediffPayload = append(pc.PassedStatediffPayload, stateDiffPayload)
if len(pc.PassedStatediffPayload) < pc.iteration+1 {
return nil, fmt.Errorf("IterativePayloadConverter does not have a payload to return at iteration %d", pc.iteration)
}

View File

@ -0,0 +1,50 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package mocks
import (
"errors"
"sync/atomic"
"github.com/ethereum/go-ethereum/statediff"
)
// StateDiffFetcher mock for tests
type StateDiffFetcher struct {
PayloadsToReturn map[uint64]statediff.Payload
FetchErrs map[uint64]error
CalledAtBlockHeights [][]uint64
CalledTimes int64
}
// FetchAt mock method
func (fetcher *StateDiffFetcher) FetchAt(blockHeights []uint64) ([]interface{}, error) {
if fetcher.PayloadsToReturn == nil {
return nil, errors.New("mock StateDiffFetcher needs to be initialized with payloads to return")
}
atomic.AddInt64(&fetcher.CalledTimes, 1) // thread-safe increment
fetcher.CalledAtBlockHeights = append(fetcher.CalledAtBlockHeights, blockHeights)
results := make([]interface{}, 0, len(blockHeights))
for _, height := range blockHeights {
if err, ok := fetcher.FetchErrs[height]; ok && err != nil {
return nil, err
}
results = append(results, fetcher.PayloadsToReturn[height])
}
return results, nil
}
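A sketch of wiring this mock in a test (hypothetical heights; payloads are keyed by block height and FetchAt fails fast on any configured error):

package mocks

import (
	"fmt"

	"github.com/ethereum/go-ethereum/statediff"
)

// demoFetchAt is a hypothetical illustration, not part of the mock's API.
func demoFetchAt() {
	fetcher := &StateDiffFetcher{
		PayloadsToReturn: map[uint64]statediff.Payload{
			100: {},
			101: {},
		},
	}
	payloads, err := fetcher.FetchAt([]uint64{100, 101})
	fmt.Println(len(payloads), err) // prints: 2 <nil>
}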

View File

@ -16,16 +16,24 @@
package mocks
import "github.com/vulcanize/vulcanizedb/pkg/ipfs"
import (
"fmt"
// CIDRepository is the underlying struct for the Repository interface
type CIDRepository struct {
PassedCIDPayload []*ipfs.CIDPayload
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)
// CIDIndexer is the underlying struct for the Indexer interface
type CIDIndexer struct {
PassedCIDPayload []*eth.CIDPayload
ReturnErr error
}
// Index indexes a cidPayload in Postgres
func (repo *CIDRepository) Index(cidPayload *ipfs.CIDPayload) error {
func (repo *CIDIndexer) Index(cids interface{}) error {
cidPayload, ok := cids.(*eth.CIDPayload)
if !ok {
return fmt.Errorf("index expected cids type %T got %T", &eth.CIDPayload{}, cids)
}
repo.PassedCIDPayload = append(repo.PassedCIDPayload, cidPayload)
return repo.ReturnErr
}

View File

@ -19,33 +19,41 @@ package mocks
import (
"fmt"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)
// IPLDPublisher is the underlying struct for the Publisher interface
type IPLDPublisher struct {
PassedIPLDPayload *ipfs.IPLDPayload
ReturnCIDPayload *ipfs.CIDPayload
PassedIPLDPayload *eth.IPLDPayload
ReturnCIDPayload *eth.CIDPayload
ReturnErr error
}
// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
func (pub *IPLDPublisher) Publish(payload *ipfs.IPLDPayload) (*ipfs.CIDPayload, error) {
pub.PassedIPLDPayload = payload
func (pub *IPLDPublisher) Publish(payload interface{}) (interface{}, error) {
ipldPayload, ok := payload.(*eth.IPLDPayload)
if !ok {
return nil, fmt.Errorf("publish expected payload type %T got %T", &eth.IPLDPayload{}, payload)
}
pub.PassedIPLDPayload = ipldPayload
return pub.ReturnCIDPayload, pub.ReturnErr
}
// IterativeIPLDPublisher is the underlying struct for the Publisher interface; used in testing
type IterativeIPLDPublisher struct {
PassedIPLDPayload []*ipfs.IPLDPayload
ReturnCIDPayload []*ipfs.CIDPayload
PassedIPLDPayload []*eth.IPLDPayload
ReturnCIDPayload []*eth.CIDPayload
ReturnErr error
iteration int
}
// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
func (pub *IterativeIPLDPublisher) Publish(payload *ipfs.IPLDPayload) (*ipfs.CIDPayload, error) {
pub.PassedIPLDPayload = append(pub.PassedIPLDPayload, payload)
func (pub *IterativeIPLDPublisher) Publish(payload interface{}) (interface{}, error) {
ipldPayload, ok := payload.(*eth.IPLDPayload)
if !ok {
return nil, fmt.Errorf("publish expected payload type %T got %T", &eth.IPLDPayload{}, payload)
}
pub.PassedIPLDPayload = append(pub.PassedIPLDPayload, ipldPayload)
if len(pub.ReturnCIDPayload) < pub.iteration+1 {
return nil, fmt.Errorf("IterativeIPLDPublisher does not have a payload to return at iteration %d", pub.iteration)
}
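Sketch of the iteration contract (hypothetical values; assumes the elided remainder of the method returns the payload at the current iteration and then advances it):
pub := &IterativeIPLDPublisher{
ReturnCIDPayload: []*eth.CIDPayload{{}, {}}, // two queued returns
}
first, _ := pub.Publish(&eth.IPLDPayload{})  // iteration 0
second, _ := pub.Publish(&eth.IPLDPayload{}) // iteration 1
_, err := pub.Publish(&eth.IPLDPayload{})    // errors: nothing queued at iteration 2
_, _, _ = first, second, err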

View File

@ -0,0 +1,64 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package mocks
import (
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
// MockCIDRetriever is a mock CID retriever for use in tests
type MockCIDRetriever struct {
GapsToRetrieve []shared.Gap
GapsToRetrieveErr error
CalledTimes int
FirstBlockNumberToReturn int64
RetrieveFirstBlockNumberErr error
}
// Retrieve mock method
func (*MockCIDRetriever) Retrieve(filter interface{}, blockNumber int64) (interface{}, bool, error) {
panic("implement me")
}
// RetrieveLastBlockNumber mock method
func (*MockCIDRetriever) RetrieveLastBlockNumber() (int64, error) {
panic("implement me")
}
// RetrieveFirstBlockNumber mock method
func (mcr *MockCIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
return mcr.FirstBlockNumberToReturn, mcr.RetrieveFirstBlockNumberErr
}
// RetrieveGapsInData mock method
func (mcr *MockCIDRetriever) RetrieveGapsInData() ([]shared.Gap, error) {
mcr.CalledTimes++
return mcr.GapsToRetrieve, mcr.GapsToRetrieveErr
}
// SetGapsToRetrieve mock method
func (mcr *MockCIDRetriever) SetGapsToRetrieve(gaps []shared.Gap) {
if mcr.GapsToRetrieve == nil {
mcr.GapsToRetrieve = make([]shared.Gap, 0)
}
mcr.GapsToRetrieve = append(mcr.GapsToRetrieve, gaps...)
}
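// Database mock method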
func (mcr *MockCIDRetriever) Database() *postgres.DB {
panic("implement me")
}

View File

@ -0,0 +1,43 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package mocks
import (
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
)
// StateDiffStreamer is the underlying struct for the Streamer interface
type StateDiffStreamer struct {
PassedPayloadChan chan interface{}
ReturnSub *rpc.ClientSubscription
ReturnErr error
StreamPayloads []statediff.Payload
}
// Stream is the main loop for subscribing to data from the Geth state diff process
func (sds *StateDiffStreamer) Stream(payloadChan chan interface{}) (*rpc.ClientSubscription, error) {
sds.PassedPayloadChan = payloadChan
go func() {
for _, payload := range sds.StreamPayloads {
sds.PassedPayloadChan <- payload
}
}()
return sds.ReturnSub, sds.ReturnErr
}
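Illustrative sketch (hypothetical wiring) of consuming this mock: every payload queued in StreamPayloads is delivered on the channel passed to Stream:
streamer := &StateDiffStreamer{
StreamPayloads: []statediff.Payload{{}, {}},
}
payloadChan := make(chan interface{}, 2)
if _, err := streamer.Stream(payloadChan); err != nil {
// handle err
}
for i := 0; i < 2; i++ {
<-payloadChan // payloads arrive in queued order
}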

View File

@ -23,24 +23,24 @@ import (
"math/big"
rand2 "math/rand"
"github.com/ipfs/go-block-format"
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff"
"github.com/ipfs/go-block-format"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
eth2 "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)
// Test variables
var (
// block data
BlockNumber = big.NewInt(rand2.Int63())
BlockNumber = big.NewInt(1)
MockHeader = types.Header{
Time: 0,
Number: BlockNumber,
@ -53,32 +53,86 @@ var (
MockBlock = types.NewBlock(&MockHeader, MockTransactions, nil, MockReceipts)
MockBlockRlp, _ = rlp.EncodeToBytes(MockBlock)
MockHeaderRlp, _ = rlp.EncodeToBytes(MockBlock.Header())
MockTrxMeta = []*ipfs.TrxMetaData{
Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
mockTopic11 = common.HexToHash("0x04")
mockTopic12 = common.HexToHash("0x06")
mockTopic21 = common.HexToHash("0x05")
mockTopic22 = common.HexToHash("0x07")
MockTrxMeta = []eth.TxModel{
{
CID: "", // This is empty until we go to publish to ipfs
Src: senderAddr.Hex(),
Dst: "0x0000000000000000000000000000000000000000",
CID: "", // This is empty until we go to publish to ipfs
Src: senderAddr.Hex(),
Dst: Address.String(),
Index: 0,
TxHash: MockTransactions[0].Hash().String(),
},
{
CID: "",
Src: senderAddr.Hex(),
Dst: "0x0000000000000000000000000000000000000001",
CID: "",
Src: senderAddr.Hex(),
Dst: AnotherAddress.String(),
Index: 1,
TxHash: MockTransactions[1].Hash().String(),
},
}
MockRctMeta = []*ipfs.ReceiptMetaData{
MockTrxMetaPostPublish = []eth.TxModel{
{
CID: "mockTrxCID1", // This is empty until we go to publish to ipfs
Src: senderAddr.Hex(),
Dst: Address.String(),
Index: 0,
TxHash: MockTransactions[0].Hash().String(),
},
{
CID: "mockTrxCID2",
Src: senderAddr.Hex(),
Dst: AnotherAddress.String(),
Index: 1,
TxHash: MockTransactions[1].Hash().String(),
},
}
MockRctMeta = []eth.ReceiptModel{
{
CID: "",
Topic0s: []string{
"0x0000000000000000000000000000000000000000000000000000000000000004",
mockTopic11.String(),
},
ContractAddress: "0x0000000000000000000000000000000000000000",
Topic1s: []string{
mockTopic12.String(),
},
Contract: Address.String(),
},
{
CID: "",
Topic0s: []string{
"0x0000000000000000000000000000000000000000000000000000000000000005",
mockTopic21.String(),
},
ContractAddress: "0x0000000000000000000000000000000000000001",
Topic1s: []string{
mockTopic22.String(),
},
Contract: AnotherAddress.String(),
},
}
MockRctMetaPostPublish = []eth.ReceiptModel{
{
CID: "mockRctCID1",
Topic0s: []string{
mockTopic11.String(),
},
Topic1s: []string{
mockTopic12.String(),
},
Contract: Address.String(),
},
{
CID: "mockRctCID2",
Topic0s: []string{
mockTopic21.String(),
},
Topic1s: []string{
mockTopic22.String(),
},
Contract: AnotherAddress.String(),
},
}
@ -100,10 +154,8 @@ var (
Leaf: true,
}}
emptyStorage = make([]statediff.StorageDiff, 0)
Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
ContractLeafKey = ipfs.AddressToKey(Address)
AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
AnotherContractLeafKey = ipfs.AddressToKey(AnotherAddress)
ContractLeafKey = crypto.Keccak256Hash(Address.Bytes())
AnotherContractLeafKey = crypto.Keccak256Hash(AnotherAddress.Bytes())
testAccount = state.Account{
Nonce: NonceValue,
Balance: big.NewInt(BalanceValue),
@ -139,17 +191,31 @@ var (
CreatedAccounts: CreatedAccountDiffs,
}
MockStateDiffBytes, _ = rlp.EncodeToBytes(MockStateDiff)
MockStateNodes = map[common.Hash]ipfs.StateNode{
ContractLeafKey: {
MockStateNodes = []eth.TrieNode{
{
Key: ContractLeafKey,
Value: ValueBytes,
Leaf: true,
},
AnotherContractLeafKey: {
{
Key: AnotherContractLeafKey,
Value: AnotherValueBytes,
Leaf: true,
},
}
MockStorageNodes = map[common.Hash][]ipfs.StorageNode{
MockStateMetaPostPublish = []eth.StateNodeModel{
{
CID: "mockStateCID1",
Leaf: true,
StateKey: ContractLeafKey.String(),
},
{
CID: "mockStateCID2",
Leaf: true,
StateKey: AnotherContractLeafKey.String(),
},
}
MockStorageNodes = map[common.Hash][]eth.TrieNode{
ContractLeafKey: {
{
Key: common.BytesToHash(StorageKey),
@ -161,131 +227,75 @@ var (
// aggregate payloads
MockStateDiffPayload = statediff.Payload{
BlockRlp: MockBlockRlp,
StateDiffRlp: MockStateDiffBytes,
ReceiptsRlp: ReceiptsRlp,
BlockRlp: MockBlockRlp,
StateDiffRlp: MockStateDiffBytes,
ReceiptsRlp: ReceiptsRlp,
TotalDifficulty: big.NewInt(1337),
}
MockIPLDPayload = &ipfs.IPLDPayload{
BlockNumber: big.NewInt(1),
BlockHash: MockBlock.Hash(),
Receipts: MockReceipts,
HeaderRLP: MockHeaderRlp,
BlockBody: MockBlock.Body(),
TrxMetaData: []*ipfs.TrxMetaData{
{
CID: "",
Src: senderAddr.Hex(),
Dst: "0x0000000000000000000000000000000000000000",
},
{
CID: "",
Src: senderAddr.Hex(),
Dst: "0x0000000000000000000000000000000000000001",
},
},
ReceiptMetaData: []*ipfs.ReceiptMetaData{
{
CID: "",
Topic0s: []string{
"0x0000000000000000000000000000000000000000000000000000000000000004",
},
ContractAddress: "0x0000000000000000000000000000000000000000",
},
{
CID: "",
Topic0s: []string{
"0x0000000000000000000000000000000000000000000000000000000000000005",
},
ContractAddress: "0x0000000000000000000000000000000000000001",
},
},
StorageNodes: MockStorageNodes,
StateNodes: MockStateNodes,
MockIPLDPayload = &eth.IPLDPayload{
TotalDifficulty: big.NewInt(1337),
Block: MockBlock,
Receipts: MockReceipts,
HeaderRLP: MockHeaderRlp,
TrxMetaData: MockTrxMeta,
ReceiptMetaData: MockRctMeta,
StorageNodes: MockStorageNodes,
StateNodes: MockStateNodes,
}
MockCIDPayload = &ipfs.CIDPayload{
BlockNumber: "1",
BlockHash: MockBlock.Hash(),
HeaderCID: "mockHeaderCID",
UncleCIDs: make(map[common.Hash]string),
TransactionCIDs: map[common.Hash]*ipfs.TrxMetaData{
MockTransactions[0].Hash(): {
CID: "mockTrxCID1",
Dst: "0x0000000000000000000000000000000000000000",
Src: senderAddr.Hex(),
},
MockTransactions[1].Hash(): {
CID: "mockTrxCID2",
Dst: "0x0000000000000000000000000000000000000001",
Src: senderAddr.Hex(),
},
MockCIDPayload = &eth.CIDPayload{
HeaderCID: eth2.HeaderModel{
BlockHash: MockBlock.Hash().String(),
BlockNumber: MockBlock.Number().String(),
CID: "mockHeaderCID",
ParentHash: MockBlock.ParentHash().String(),
TotalDifficulty: "1337",
},
ReceiptCIDs: map[common.Hash]*ipfs.ReceiptMetaData{
MockTransactions[0].Hash(): {
CID: "mockRctCID1",
Topic0s: []string{"0x0000000000000000000000000000000000000000000000000000000000000004"},
ContractAddress: "0x0000000000000000000000000000000000000000",
},
MockTransactions[1].Hash(): {
CID: "mockRctCID2",
Topic0s: []string{"0x0000000000000000000000000000000000000000000000000000000000000005"},
ContractAddress: "0x0000000000000000000000000000000000000001",
},
UncleCIDs: []eth2.UncleModel{},
TransactionCIDs: MockTrxMetaPostPublish,
ReceiptCIDs: map[common.Hash]eth.ReceiptModel{
MockTransactions[0].Hash(): MockRctMetaPostPublish[0],
MockTransactions[1].Hash(): MockRctMetaPostPublish[1],
},
StateNodeCIDs: map[common.Hash]ipfs.StateNodeCID{
ContractLeafKey: {
CID: "mockStateCID1",
Leaf: true,
Key: "",
},
AnotherContractLeafKey: {
CID: "mockStateCID2",
Leaf: true,
Key: "",
},
},
StorageNodeCIDs: map[common.Hash][]ipfs.StorageNodeCID{
StateNodeCIDs: MockStateMetaPostPublish,
StorageNodeCIDs: map[common.Hash][]eth.StorageNodeModel{
ContractLeafKey: {
{
CID: "mockStorageCID",
Key: "0x0000000000000000000000000000000000000000000000000000000000000001",
Leaf: true,
StateKey: "",
CID: "mockStorageCID",
StorageKey: "0x0000000000000000000000000000000000000000000000000000000000000001",
Leaf: true,
},
},
},
}
MockCIDWrapper = &ipfs.CIDWrapper{
BlockNumber: big.NewInt(1),
Headers: []string{"mockHeaderCID"},
Transactions: []string{"mockTrxCID1", "mockTrxCID2"},
Receipts: []string{"mockRctCID1", "mockRctCID2"},
Uncles: []string{},
StateNodes: []ipfs.StateNodeCID{
MockCIDWrapper = &eth.CIDWrapper{
BlockNumber: big.NewInt(1),
Headers: []eth2.HeaderModel{
{
CID: "mockStateCID1",
Leaf: true,
Key: ContractLeafKey.Hex(),
},
{
CID: "mockStateCID2",
Leaf: true,
Key: AnotherContractLeafKey.Hex(),
BlockNumber: "1",
BlockHash: MockBlock.Hash().String(),
ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
CID: "mockHeaderCID",
TotalDifficulty: "1337",
},
},
StorageNodes: []ipfs.StorageNodeCID{
Transactions: MockTrxMetaPostPublish,
Receipts: MockRctMetaPostPublish,
Uncles: []eth2.UncleModel{},
StateNodes: MockStateMetaPostPublish,
StorageNodes: []eth.StorageNodeWithStateKeyModel{
{
CID: "mockStorageCID",
Leaf: true,
StateKey: ContractLeafKey.Hex(),
Key: "0x0000000000000000000000000000000000000000000000000000000000000001",
CID: "mockStorageCID",
Leaf: true,
StateKey: ContractLeafKey.Hex(),
StorageKey: "0x0000000000000000000000000000000000000000000000000000000000000001",
},
},
}
MockIPLDWrapper = ipfs.IPLDWrapper{
MockIPLDWrapper = &eth.IPLDWrapper{
BlockNumber: big.NewInt(1),
Headers: []blocks.Block{
blocks.NewBlock(MockHeaderRlp),
@ -309,9 +319,10 @@ var (
},
}
MockSeeNodePayload = streamer.SuperNodePayload{
MockSeedNodePayload = eth2.StreamPayload{
BlockNumber: big.NewInt(1),
HeadersRlp: [][]byte{MockHeaderRlp},
UnclesRlp: [][]byte{},
TransactionsRlp: [][]byte{MockTransactions.GetRlp(0), MockTransactions.GetRlp(1)},
ReceiptsRlp: [][]byte{MockTransactions.GetRlp(0), MockTransactions.GetRlp(1)},
StateNodesRlp: map[common.Hash][]byte{
@ -329,8 +340,8 @@ var (
// createTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs
func createTransactionsAndReceipts() (types.Transactions, types.Receipts, common.Address) {
// make transactions
trx1 := types.NewTransaction(0, common.HexToAddress("0x0"), big.NewInt(1000), 50, big.NewInt(100), nil)
trx2 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(2000), 100, big.NewInt(200), nil)
trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), nil)
trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), nil)
transactionSigner := types.MakeSigner(params.MainnetChainConfig, BlockNumber)
mockCurve := elliptic.P256()
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
@ -350,19 +361,15 @@ func createTransactionsAndReceipts() (types.Transactions, types.Receipts, common
log.Fatal(err)
}
// make receipts
mockTopic1 := common.HexToHash("0x04")
mockReceipt1 := types.NewReceipt(common.HexToHash("0x0").Bytes(), false, 50)
mockReceipt1.ContractAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
mockLog1 := &types.Log{
Topics: []common.Hash{mockTopic1},
Topics: []common.Hash{mockTopic11, mockTopic12},
}
mockReceipt1.Logs = []*types.Log{mockLog1}
mockReceipt1.TxHash = signedTrx1.Hash()
mockTopic2 := common.HexToHash("0x05")
mockReceipt2 := types.NewReceipt(common.HexToHash("0x1").Bytes(), false, 100)
mockReceipt2.ContractAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
mockLog2 := &types.Log{
Topics: []common.Hash{mockTopic2},
Topics: []common.Hash{mockTopic21, mockTopic22},
}
mockReceipt2.Logs = []*types.Log{mockLog2}
mockReceipt2.TxHash = signedTrx2.Hash()

View File

@ -0,0 +1,82 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import "github.com/lib/pq"
type HeaderModel struct {
ID int64 `db:"id"`
BlockNumber string `db:"block_number"`
BlockHash string `db:"block_hash"`
ParentHash string `db:"parent_hash"`
CID string `db:"cid"`
TotalDifficulty string `db:"td"`
}
type UncleModel struct {
ID int64 `db:"id"`
HeaderID int64 `db:"header_id"`
BlockHash string `db:"block_hash"`
ParentHash string `db:"parent_hash"`
CID string `db:"cid"`
}
type TxModel struct {
ID int64 `db:"id"`
HeaderID int64 `db:"header_id"`
Index int64 `db:"index"`
TxHash string `db:"tx_hash"`
CID string `db:"cid"`
Dst string `db:"dst"`
Src string `db:"src"`
}
type ReceiptModel struct {
ID int64 `db:"id"`
TxID int64 `db:"tx_id"`
CID string `db:"cid"`
Contract string `db:"contract"`
Topic0s pq.StringArray `db:"topic0s"`
Topic1s pq.StringArray `db:"topic1s"`
Topic2s pq.StringArray `db:"topic2s"`
Topic3s pq.StringArray `db:"topic3s"`
}
type StateNodeModel struct {
ID int64 `db:"id"`
HeaderID int64 `db:"header_id"`
StateKey string `db:"state_key"`
Leaf bool `db:"leaf"`
CID string `db:"cid"`
}
type StorageNodeModel struct {
ID int64 `db:"id"`
StateID int64 `db:"state_id"`
StorageKey string `db:"storage_key"`
Leaf bool `db:"leaf"`
CID string `db:"cid"`
}
type StorageNodeWithStateKeyModel struct {
ID int64 `db:"id"`
StateID int64 `db:"state_id"`
StateKey string `db:"state_key"`
StorageKey string `db:"storage_key"`
Leaf bool `db:"leaf"`
CID string `db:"cid"`
}
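The db tags above are what sqlx keys on when scanning rows into these models; a minimal sketch (assumes github.com/jmoiron/sqlx; the query itself is illustrative):
// Illustrative only: sqlx populates HeaderModel fields via the db tags.
func selectHeaders(db *sqlx.DB, blockNumber int64) ([]HeaderModel, error) {
headers := make([]HeaderModel, 0)
err := db.Select(&headers, `SELECT * FROM header_cids WHERE block_number = $1`, blockNumber)
return headers, err
}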

View File

@ -0,0 +1,74 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"fmt"
"github.com/ethereum/go-ethereum/statediff"
"github.com/vulcanize/vulcanizedb/pkg/eth/client"
)
// BatchClient is an interface to a batch-fetching geth rpc client; created to allow mock insertion
type BatchClient interface {
BatchCall(batch []client.BatchElem) error
}
// PayloadFetcher satisfies the PayloadFetcher interface for ethereum
type PayloadFetcher struct {
// PayloadFetcher is thread-safe as long as the underlying client is thread-safe, since it has/modifies no other state
// http.Client is thread-safe
client BatchClient
}
const method = "statediff_stateDiffAt"
// NewPayloadFetcher returns a new PayloadFetcher
func NewPayloadFetcher(bc BatchClient) *PayloadFetcher {
return &PayloadFetcher{
client: bc,
}
}
// FetchAt fetches the statediff payloads at the given block heights
// Calls StateDiffAt(ctx context.Context, blockNumber uint64) (*Payload, error)
func (fetcher *PayloadFetcher) FetchAt(blockHeights []uint64) ([]interface{}, error) {
batch := make([]client.BatchElem, 0)
for _, height := range blockHeights {
batch = append(batch, client.BatchElem{
Method: method,
Args: []interface{}{height},
Result: new(statediff.Payload),
})
}
batchErr := fetcher.client.BatchCall(batch)
if batchErr != nil {
return nil, fmt.Errorf("PayloadFetcher err: %s", batchErr.Error())
}
results := make([]interface{}, 0, len(blockHeights))
for _, batchElem := range batch {
if batchElem.Error != nil {
return nil, fmt.Errorf("PayloadFetcher err: %s", batchElem.Error.Error())
}
payload, ok := batchElem.Result.(*statediff.Payload)
if ok {
results = append(results, *payload)
}
}
return results, nil
}
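A sketch of satisfying BatchClient without a live node; stubBatchClient is hypothetical, but the BatchElem usage mirrors FetchAt above:
// Hypothetical stub; a real rpc-backed client would forward the batch
// to the statediff_stateDiffAt endpoint.
type stubBatchClient struct{}

func (stubBatchClient) BatchCall(batch []client.BatchElem) error {
for i := range batch {
// fill each Result as the rpc layer would
*(batch[i].Result.(*statediff.Payload)) = statediff.Payload{}
}
return nil
}

// fetcher := NewPayloadFetcher(stubBatchClient{})
// payloads, err := fetcher.FetchAt([]uint64{100, 101})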

View File

@ -0,0 +1,59 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_test
import (
"github.com/ethereum/go-ethereum/statediff"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
"github.com/vulcanize/vulcanizedb/libraries/shared/test_data"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)
var _ = Describe("StateDiffFetcher", func() {
Describe("FetchStateDiffsAt", func() {
var (
mc *mocks.BackFillerClient
stateDiffFetcher *eth.PayloadFetcher
)
BeforeEach(func() {
mc = new(mocks.BackFillerClient)
setDiffAtErr1 := mc.SetReturnDiffAt(test_data.BlockNumber.Uint64(), test_data.MockStatediffPayload)
Expect(setDiffAtErr1).ToNot(HaveOccurred())
setDiffAtErr2 := mc.SetReturnDiffAt(test_data.BlockNumber2.Uint64(), test_data.MockStatediffPayload2)
Expect(setDiffAtErr2).ToNot(HaveOccurred())
stateDiffFetcher = eth.NewPayloadFetcher(mc)
})
It("Batch calls statediff_stateDiffAt", func() {
blockHeights := []uint64{
test_data.BlockNumber.Uint64(),
test_data.BlockNumber2.Uint64(),
}
stateDiffPayloads, fetchErr := stateDiffFetcher.FetchAt(blockHeights)
Expect(fetchErr).ToNot(HaveOccurred())
Expect(len(stateDiffPayloads)).To(Equal(2))
payload1, ok := stateDiffPayloads[0].(statediff.Payload)
Expect(ok).To(BeTrue())
payload2, ok := stateDiffPayloads[1].(statediff.Payload)
Expect(ok).To(BeTrue())
Expect(payload1).To(Equal(test_data.MockStatediffPayload))
Expect(payload2).To(Equal(test_data.MockStatediffPayload2))
})
})
})

View File

@ -14,14 +14,16 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs
package eth
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/vulcanize/eth-block-extractor/pkg/ipfs"
"github.com/vulcanize/eth-block-extractor/pkg/ipfs/eth_block_header"
"github.com/vulcanize/eth-block-extractor/pkg/ipfs/eth_block_receipts"
@ -31,13 +33,8 @@ import (
rlp2 "github.com/vulcanize/eth-block-extractor/pkg/wrappers/rlp"
)
// IPLDPublisher is the interface for publishing an IPLD payload
type IPLDPublisher interface {
Publish(payload *IPLDPayload) (*CIDPayload, error)
}
// Publisher is the underlying struct for the IPLDPublisher interface
type Publisher struct {
// IPLDPublisher satisfies the IPLDPublisher for ethereum
type IPLDPublisher struct {
HeaderPutter ipfs.DagPutter
TransactionPutter ipfs.DagPutter
ReceiptPutter ipfs.DagPutter
@ -46,12 +43,12 @@ type Publisher struct {
}
// NewIPLDPublisher creates a pointer to a new Publisher which satisfies the IPLDPublisher interface
func NewIPLDPublisher(ipfsPath string) (*Publisher, error) {
func NewIPLDPublisher(ipfsPath string) (*IPLDPublisher, error) {
node, err := ipfs.InitIPFSNode(ipfsPath)
if err != nil {
return nil, err
}
return &Publisher{
return &IPLDPublisher{
HeaderPutter: eth_block_header.NewBlockHeaderDagPutter(node, rlp2.RlpDecoder{}),
TransactionPutter: eth_block_transactions.NewBlockTransactionsDagPutter(node),
ReceiptPutter: eth_block_receipts.NewEthBlockReceiptDagPutter(node),
@ -61,56 +58,69 @@ func NewIPLDPublisher(ipfsPath string) (*Publisher, error) {
}
// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
func (pub *Publisher) Publish(payload *IPLDPayload) (*CIDPayload, error) {
func (pub *IPLDPublisher) Publish(payload interface{}) (interface{}, error) {
ipldPayload, ok := payload.(*IPLDPayload)
if !ok {
return nil, fmt.Errorf("eth publisher expected payload type %T got %T", &IPLDPayload{}, payload)
}
// Process and publish headers
headerCid, headersErr := pub.publishHeaders(payload.HeaderRLP)
if headersErr != nil {
return nil, headersErr
headerCid, err := pub.publishHeader(ipldPayload.HeaderRLP)
if err != nil {
return nil, err
}
header := HeaderModel{
CID: headerCid,
ParentHash: ipldPayload.Block.ParentHash().String(),
BlockNumber: ipldPayload.Block.Number().String(),
BlockHash: ipldPayload.Block.Hash().String(),
TotalDifficulty: ipldPayload.TotalDifficulty.String(),
}
// Process and publish uncles
uncleCids := make(map[common.Hash]string)
for _, uncle := range payload.BlockBody.Uncles {
uncleRlp, encodeErr := rlp.EncodeToBytes(uncle)
if encodeErr != nil {
return nil, encodeErr
uncleCids := make([]UncleModel, 0, len(ipldPayload.Block.Uncles()))
for _, uncle := range ipldPayload.Block.Uncles() {
uncleRlp, err := rlp.EncodeToBytes(uncle)
if err != nil {
return nil, err
}
cid, unclesErr := pub.publishHeaders(uncleRlp)
if unclesErr != nil {
return nil, unclesErr
uncleCid, err := pub.publishHeader(uncleRlp)
if err != nil {
return nil, err
}
uncleCids[uncle.Hash()] = cid
uncleCids = append(uncleCids, UncleModel{
CID: uncleCid,
ParentHash: uncle.ParentHash.String(),
BlockHash: uncle.Hash().String(),
})
}
// Process and publish transactions
transactionCids, trxsErr := pub.publishTransactions(payload.BlockBody, payload.TrxMetaData)
if trxsErr != nil {
return nil, trxsErr
transactionCids, err := pub.publishTransactions(ipldPayload.Block.Body(), ipldPayload.TrxMetaData)
if err != nil {
return nil, err
}
// Process and publish receipts
receiptsCids, rctsErr := pub.publishReceipts(payload.Receipts, payload.ReceiptMetaData)
if rctsErr != nil {
return nil, rctsErr
receiptsCids, err := pub.publishReceipts(ipldPayload.Receipts, ipldPayload.ReceiptMetaData)
if err != nil {
return nil, err
}
// Process and publish state leafs
stateNodeCids, stateErr := pub.publishStateNodes(payload.StateNodes)
if stateErr != nil {
return nil, stateErr
stateNodeCids, err := pub.publishStateNodes(ipldPayload.StateNodes)
if err != nil {
return nil, err
}
// Process and publish storage leafs
storageNodeCids, storageErr := pub.publishStorageNodes(payload.StorageNodes)
if storageErr != nil {
return nil, storageErr
storageNodeCids, err := pub.publishStorageNodes(ipldPayload.StorageNodes)
if err != nil {
return nil, err
}
// Package CIDs and their metadata into a single struct
return &CIDPayload{
BlockHash: payload.BlockHash,
BlockNumber: payload.BlockNumber.String(),
HeaderCID: headerCid,
HeaderCID: header,
UncleCIDs: uncleCids,
TransactionCIDs: transactionCids,
ReceiptCIDs: receiptsCids,
@ -119,7 +129,7 @@ func (pub *Publisher) Publish(payload *IPLDPayload) (*CIDPayload, error) {
}, nil
}
func (pub *Publisher) publishHeaders(headerRLP []byte) (string, error) {
func (pub *IPLDPublisher) publishHeader(headerRLP []byte) (string, error) {
headerCids, err := pub.HeaderPutter.DagPut(headerRLP)
if err != nil {
return "", err
@ -130,23 +140,28 @@ func (pub *Publisher) publishHeaders(headerRLP []byte) (string, error) {
return headerCids[0], nil
}
func (pub *Publisher) publishTransactions(blockBody *types.Body, trxMeta []*TrxMetaData) (map[common.Hash]*TrxMetaData, error) {
func (pub *IPLDPublisher) publishTransactions(blockBody *types.Body, trxMeta []TxModel) ([]TxModel, error) {
transactionCids, err := pub.TransactionPutter.DagPut(blockBody)
if err != nil {
return nil, err
}
if len(transactionCids) != len(blockBody.Transactions) {
if len(transactionCids) != len(trxMeta) {
return nil, errors.New("expected one CID for each transaction")
}
mappedTrxCids := make(map[common.Hash]*TrxMetaData, len(transactionCids))
for i, trx := range blockBody.Transactions {
mappedTrxCids[trx.Hash()] = trxMeta[i]
mappedTrxCids[trx.Hash()].CID = transactionCids[i]
mappedTrxCids := make([]TxModel, len(transactionCids))
for i, cid := range transactionCids {
mappedTrxCids[i] = TxModel{
CID: cid,
Index: trxMeta[i].Index,
TxHash: trxMeta[i].TxHash,
Src: trxMeta[i].Src,
Dst: trxMeta[i].Dst,
}
}
return mappedTrxCids, nil
}
func (pub *Publisher) publishReceipts(receipts types.Receipts, receiptMeta []*ReceiptMetaData) (map[common.Hash]*ReceiptMetaData, error) {
func (pub *IPLDPublisher) publishReceipts(receipts types.Receipts, receiptMeta []ReceiptModel) (map[common.Hash]ReceiptModel, error) {
receiptsCids, err := pub.ReceiptPutter.DagPut(receipts)
if err != nil {
return nil, err
@ -154,18 +169,24 @@ func (pub *Publisher) publishReceipts(receipts types.Receipts, receiptMeta []*Re
if len(receiptsCids) != len(receipts) {
return nil, errors.New("expected one CID for each receipt")
}
// Keep receipts associated with their transaction
mappedRctCids := make(map[common.Hash]*ReceiptMetaData, len(receiptsCids))
// Map receipt cids to their transaction hashes
mappedRctCids := make(map[common.Hash]ReceiptModel, len(receiptsCids))
for i, rct := range receipts {
mappedRctCids[rct.TxHash] = receiptMeta[i]
mappedRctCids[rct.TxHash].CID = receiptsCids[i]
mappedRctCids[rct.TxHash] = ReceiptModel{
CID: receiptsCids[i],
Contract: receiptMeta[i].Contract,
Topic0s: receiptMeta[i].Topic0s,
Topic1s: receiptMeta[i].Topic1s,
Topic2s: receiptMeta[i].Topic2s,
Topic3s: receiptMeta[i].Topic3s,
}
}
return mappedRctCids, nil
}
func (pub *Publisher) publishStateNodes(stateNodes map[common.Hash]StateNode) (map[common.Hash]StateNodeCID, error) {
stateNodeCids := make(map[common.Hash]StateNodeCID)
for addrKey, node := range stateNodes {
func (pub *IPLDPublisher) publishStateNodes(stateNodes []TrieNode) ([]StateNodeModel, error) {
stateNodeCids := make([]StateNodeModel, 0, len(stateNodes))
for _, node := range stateNodes {
stateNodeCid, err := pub.StatePutter.DagPut(node.Value)
if err != nil {
return nil, err
@ -173,18 +194,19 @@ func (pub *Publisher) publishStateNodes(stateNodes map[common.Hash]StateNode) (m
if len(stateNodeCid) != 1 {
return nil, errors.New("single CID expected to be returned for state leaf")
}
stateNodeCids[addrKey] = StateNodeCID{
CID: stateNodeCid[0],
Leaf: node.Leaf,
}
stateNodeCids = append(stateNodeCids, StateNodeModel{
StateKey: node.Key.String(),
CID: stateNodeCid[0],
Leaf: node.Leaf,
})
}
return stateNodeCids, nil
}
func (pub *Publisher) publishStorageNodes(storageNodes map[common.Hash][]StorageNode) (map[common.Hash][]StorageNodeCID, error) {
storageLeafCids := make(map[common.Hash][]StorageNodeCID)
func (pub *IPLDPublisher) publishStorageNodes(storageNodes map[common.Hash][]TrieNode) (map[common.Hash][]StorageNodeModel, error) {
storageLeafCids := make(map[common.Hash][]StorageNodeModel)
for addrKey, storageTrie := range storageNodes {
storageLeafCids[addrKey] = make([]StorageNodeCID, 0, len(storageTrie))
storageLeafCids[addrKey] = make([]StorageNodeModel, 0, len(storageTrie))
for _, node := range storageTrie {
storageNodeCid, err := pub.StoragePutter.DagPut(node.Value)
if err != nil {
@ -193,10 +215,11 @@ func (pub *Publisher) publishStorageNodes(storageNodes map[common.Hash][]Storage
if len(storageNodeCid) != 1 {
return nil, errors.New("single CID expected to be returned for storage leaf")
}
storageLeafCids[addrKey] = append(storageLeafCids[addrKey], StorageNodeCID{
Key: node.Key.Hex(),
CID: storageNodeCid[0],
Leaf: node.Leaf,
// Map storage node cids to their state key hashes
storageLeafCids[addrKey] = append(storageLeafCids[addrKey], StorageNodeModel{
StorageKey: node.Key.Hex(),
CID: storageNodeCid[0],
Leaf: node.Leaf,
})
}
}

View File

@ -14,15 +14,15 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs_test
package eth_test
import (
"github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
)
var (
@ -47,35 +47,38 @@ var _ = Describe("Publisher", func() {
mockHeaderDagPutter.CIDsToReturn = []string{"mockHeaderCID"}
mockTrxDagPutter.CIDsToReturn = []string{"mockTrxCID1", "mockTrxCID2"}
mockRctDagPutter.CIDsToReturn = []string{"mockRctCID1", "mockRctCID2"}
val1 := common.BytesToHash(mocks.MockIPLDPayload.StateNodes[mocks.ContractLeafKey].Value)
val2 := common.BytesToHash(mocks.MockIPLDPayload.StateNodes[mocks.AnotherContractLeafKey].Value)
val1 := common.BytesToHash(mocks.MockIPLDPayload.StateNodes[0].Value)
val2 := common.BytesToHash(mocks.MockIPLDPayload.StateNodes[1].Value)
mockStateDagPutter.CIDsToReturn = map[common.Hash][]string{
val1: {"mockStateCID1"},
val2: {"mockStateCID2"},
}
mockStorageDagPutter.CIDsToReturn = []string{"mockStorageCID"}
publisher := ipfs.Publisher{
publisher := eth.IPLDPublisher{
HeaderPutter: mockHeaderDagPutter,
TransactionPutter: mockTrxDagPutter,
ReceiptPutter: mockRctDagPutter,
StatePutter: mockStateDagPutter,
StoragePutter: mockStorageDagPutter,
}
cidPayload, err := publisher.Publish(mocks.MockIPLDPayload)
payload, err := publisher.Publish(mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
Expect(cidPayload.BlockNumber).To(Equal(mocks.MockCIDPayload.BlockNumber))
Expect(cidPayload.BlockHash).To(Equal(mocks.MockCIDPayload.BlockHash))
cidPayload, ok := payload.(*eth.CIDPayload)
Expect(ok).To(BeTrue())
Expect(cidPayload.HeaderCID.TotalDifficulty).To(Equal(mocks.MockIPLDPayload.TotalDifficulty.String()))
Expect(cidPayload.HeaderCID.BlockNumber).To(Equal(mocks.MockCIDPayload.HeaderCID.BlockNumber))
Expect(cidPayload.HeaderCID.BlockHash).To(Equal(mocks.MockCIDPayload.HeaderCID.BlockHash))
Expect(cidPayload.UncleCIDs).To(Equal(mocks.MockCIDPayload.UncleCIDs))
Expect(cidPayload.HeaderCID).To(Equal(mocks.MockCIDPayload.HeaderCID))
Expect(len(cidPayload.TransactionCIDs)).To(Equal(2))
Expect(cidPayload.TransactionCIDs[mocks.MockTransactions[0].Hash()]).To(Equal(mocks.MockCIDPayload.TransactionCIDs[mocks.MockTransactions[0].Hash()]))
Expect(cidPayload.TransactionCIDs[mocks.MockTransactions[1].Hash()]).To(Equal(mocks.MockCIDPayload.TransactionCIDs[mocks.MockTransactions[1].Hash()]))
Expect(cidPayload.TransactionCIDs[0]).To(Equal(mocks.MockCIDPayload.TransactionCIDs[0]))
Expect(cidPayload.TransactionCIDs[1]).To(Equal(mocks.MockCIDPayload.TransactionCIDs[1]))
Expect(len(cidPayload.ReceiptCIDs)).To(Equal(2))
Expect(cidPayload.ReceiptCIDs[mocks.MockTransactions[0].Hash()]).To(Equal(mocks.MockCIDPayload.ReceiptCIDs[mocks.MockTransactions[0].Hash()]))
Expect(cidPayload.ReceiptCIDs[mocks.MockTransactions[1].Hash()]).To(Equal(mocks.MockCIDPayload.ReceiptCIDs[mocks.MockTransactions[1].Hash()]))
Expect(len(cidPayload.StateNodeCIDs)).To(Equal(2))
Expect(cidPayload.StateNodeCIDs[mocks.ContractLeafKey]).To(Equal(mocks.MockCIDPayload.StateNodeCIDs[mocks.ContractLeafKey]))
Expect(cidPayload.StateNodeCIDs[mocks.AnotherContractLeafKey]).To(Equal(mocks.MockCIDPayload.StateNodeCIDs[mocks.AnotherContractLeafKey]))
Expect(cidPayload.StateNodeCIDs[0]).To(Equal(mocks.MockCIDPayload.StateNodeCIDs[0]))
Expect(cidPayload.StateNodeCIDs[1]).To(Equal(mocks.MockCIDPayload.StateNodeCIDs[1]))
Expect(cidPayload.StorageNodeCIDs).To(Equal(mocks.MockCIDPayload.StorageNodeCIDs))
})
})

View File

@ -0,0 +1,100 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-block-format"
)
// IPLDResolver satisfies the IPLDResolver interface for ethereum
type IPLDResolver struct{}
// NewIPLDResolver returns a pointer to an IPLDResolver which satisfies the IPLDResolver interface
func NewIPLDResolver() *IPLDResolver {
return &IPLDResolver{}
}
// Resolve is the exported method for resolving all of the ETH IPLDs packaged in an IpfsBlockWrapper
func (eir *IPLDResolver) Resolve(iplds interface{}) (interface{}, error) {
ipfsBlocks, ok := iplds.(*IPLDWrapper)
if !ok {
return StreamPayload{}, fmt.Errorf("eth resolver expected iplds type %T got %T", &IPLDWrapper{}, iplds)
}
return StreamPayload{
BlockNumber: ipfsBlocks.BlockNumber,
HeadersRlp: eir.ResolveHeaders(ipfsBlocks.Headers),
UnclesRlp: eir.ResolveUncles(ipfsBlocks.Uncles),
TransactionsRlp: eir.ResolveTransactions(ipfsBlocks.Transactions),
ReceiptsRlp: eir.ResolveReceipts(ipfsBlocks.Receipts),
StateNodesRlp: eir.ResolveState(ipfsBlocks.StateNodes),
StorageNodesRlp: eir.ResolveStorage(ipfsBlocks.StorageNodes),
}, nil
}
func (eir *IPLDResolver) ResolveHeaders(iplds []blocks.Block) [][]byte {
headerRlps := make([][]byte, 0, len(iplds))
for _, ipld := range iplds {
headerRlps = append(headerRlps, ipld.RawData())
}
return headerRlps
}
func (eir *IPLDResolver) ResolveUncles(iplds []blocks.Block) [][]byte {
uncleRlps := make([][]byte, 0, len(iplds))
for _, ipld := range iplds {
uncleRlps = append(uncleRlps, ipld.RawData())
}
return uncleRlps
}
func (eir *IPLDResolver) ResolveTransactions(iplds []blocks.Block) [][]byte {
trxs := make([][]byte, 0, len(iplds))
for _, ipld := range iplds {
trxs = append(trxs, ipld.RawData())
}
return trxs
}
func (eir *IPLDResolver) ResolveReceipts(iplds []blocks.Block) [][]byte {
rcts := make([][]byte, 0, len(iplds))
for _, ipld := range iplds {
rcts = append(rcts, ipld.RawData())
}
return rcts
}
func (eir *IPLDResolver) ResolveState(iplds map[common.Hash]blocks.Block) map[common.Hash][]byte {
stateNodes := make(map[common.Hash][]byte, len(iplds))
for key, ipld := range iplds {
stateNodes[key] = ipld.RawData()
}
return stateNodes
}
func (eir *IPLDResolver) ResolveStorage(iplds map[common.Hash]map[common.Hash]blocks.Block) map[common.Hash]map[common.Hash][]byte {
storageNodes := make(map[common.Hash]map[common.Hash][]byte)
for stateKey, storageIPLDs := range iplds {
storageNodes[stateKey] = make(map[common.Hash][]byte)
for storageKey, storageVal := range storageIPLDs {
storageNodes[stateKey][storageKey] = storageVal.RawData()
}
}
return storageNodes
}
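The resolver hands back raw RLP; a sketch (assuming go-ethereum's rlp and types packages) of decoding one HeadersRlp entry on the consumer side:
// Illustrative: decode a resolved header back into a geth type.
func decodeHeader(headerRlp []byte) (*types.Header, error) {
header := new(types.Header)
if err := rlp.DecodeBytes(headerRlp, header); err != nil {
return nil, err
}
return header, nil
}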

View File

@ -14,39 +14,42 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs_test
package eth_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
)
var (
resolver ipfs.IPLDResolver
resolver *eth.IPLDResolver
)
var _ = Describe("Resolver", func() {
Describe("ResolveIPLDs", func() {
BeforeEach(func() {
resolver = ipfs.NewIPLDResolver()
resolver = eth.NewIPLDResolver()
})
It("Resolves IPLD data to their correct geth data types and packages them to send to requesting transformers", func() {
superNodePayload := resolver.ResolveIPLDs(mocks.MockIPLDWrapper)
Expect(superNodePayload.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
Expect(superNodePayload.HeadersRlp).To(Equal(mocks.MockSeeNodePayload.HeadersRlp))
Expect(superNodePayload.UnclesRlp).To(Equal(mocks.MockSeeNodePayload.UnclesRlp))
payload, err := resolver.Resolve(mocks.MockIPLDWrapper)
Expect(err).ToNot(HaveOccurred())
superNodePayload, ok := payload.(eth.StreamPayload)
Expect(ok).To(BeTrue())
Expect(superNodePayload.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(superNodePayload.HeadersRlp).To(Equal(mocks.MockSeedNodePayload.HeadersRlp))
Expect(superNodePayload.UnclesRlp).To(Equal(mocks.MockSeedNodePayload.UnclesRlp))
Expect(len(superNodePayload.TransactionsRlp)).To(Equal(2))
Expect(super_node.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
Expect(super_node.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(shared.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
Expect(shared.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(len(superNodePayload.ReceiptsRlp)).To(Equal(2))
Expect(super_node.ListContainsBytes(superNodePayload.ReceiptsRlp, mocks.MockReceipts.GetRlp(0))).To(BeTrue())
Expect(super_node.ListContainsBytes(superNodePayload.ReceiptsRlp, mocks.MockReceipts.GetRlp(1))).To(BeTrue())
Expect(shared.ListContainsBytes(superNodePayload.ReceiptsRlp, mocks.MockReceipts.GetRlp(0))).To(BeTrue())
Expect(shared.ListContainsBytes(superNodePayload.ReceiptsRlp, mocks.MockReceipts.GetRlp(1))).To(BeTrue())
Expect(len(superNodePayload.StateNodesRlp)).To(Equal(2))
Expect(superNodePayload.StorageNodesRlp).To(Equal(mocks.MockSeeNodePayload.StorageNodesRlp))
Expect(superNodePayload.StorageNodesRlp).To(Equal(mocks.MockSeedNodePayload.StorageNodesRlp))
})
})
})

View File

@ -0,0 +1,487 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
// CIDRetriever satisfies the CIDRetriever interface for ethereum
type CIDRetriever struct {
db *postgres.DB
}
// NewCIDRetriever returns a pointer to a new CIDRetriever which supports the CIDRetriever interface
func NewCIDRetriever(db *postgres.DB) *CIDRetriever {
return &CIDRetriever{
db: db,
}
}
// RetrieveFirstBlockNumber is used to retrieve the first block number in the db
func (ecr *CIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
var blockNumber int64
err := ecr.db.Get(&blockNumber, "SELECT block_number FROM header_cids ORDER BY block_number ASC LIMIT 1")
return blockNumber, err
}
// RetrieveLastBlockNumber is used to retrieve the latest block number in the db
func (ecr *CIDRetriever) RetrieveLastBlockNumber() (int64, error) {
var blockNumber int64
err := ecr.db.Get(&blockNumber, "SELECT block_number FROM header_cids ORDER BY block_number DESC LIMIT 1 ")
return blockNumber, err
}
// Retrieve is used to retrieve all of the CIDs which conform to the passed StreamFilters
func (ecr *CIDRetriever) Retrieve(filter interface{}, blockNumber int64) (interface{}, bool, error) {
streamFilter, ok := filter.(*config.EthSubscription)
if !ok {
return nil, true, fmt.Errorf("eth retriever expected filter type %T got %T", &config.EthSubscription{}, filter)
}
log.Debug("retrieving cids")
tx, err := ecr.db.Beginx()
if err != nil {
return nil, true, err
}
cw := new(CIDWrapper)
cw.BlockNumber = big.NewInt(blockNumber)
// Retrieve cached header CIDs
if !streamFilter.HeaderFilter.Off {
cw.Headers, err = ecr.RetrieveHeaderCIDs(tx, blockNumber)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("header cid retrieval error")
return nil, true, err
}
if streamFilter.HeaderFilter.Uncles {
for _, headerCID := range cw.Headers {
uncleCIDs, err := ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID.ID)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("uncle cid retrieval error")
return nil, true, err
}
cw.Uncles = append(cw.Uncles, uncleCIDs...)
}
}
}
// Retrieve cached trx CIDs
if !streamFilter.TxFilter.Off {
cw.Transactions, err = ecr.RetrieveTxCIDs(tx, streamFilter.TxFilter, blockNumber)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("transaction cid retrieval error")
return nil, true, err
}
}
trxIds := make([]int64, 0, len(cw.Transactions))
for _, tx := range cw.Transactions {
trxIds = append(trxIds, tx.ID)
}
// Retrieve cached receipt CIDs
if !streamFilter.ReceiptFilter.Off {
cw.Receipts, err = ecr.RetrieveRctCIDs(tx, streamFilter.ReceiptFilter, blockNumber, nil, trxIds)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("receipt cid retrieval error")
return nil, true, err
}
}
// Retrieve cached state CIDs
if !streamFilter.StateFilter.Off {
cw.StateNodes, err = ecr.RetrieveStateCIDs(tx, streamFilter.StateFilter, blockNumber)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("state cid retrieval error")
return nil, true, err
}
}
// Retrieve cached storage CIDs
if !streamFilter.StorageFilter.Off {
cw.StorageNodes, err = ecr.RetrieveStorageCIDs(tx, streamFilter.StorageFilter, blockNumber)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("storage cid retrieval error")
return nil, true, err
}
}
return cw, empty(cw), tx.Commit()
}
func empty(cidWrapper *CIDWrapper) bool {
if len(cidWrapper.Transactions) > 0 || len(cidWrapper.Headers) > 0 || len(cidWrapper.Uncles) > 0 || len(cidWrapper.Receipts) > 0 || len(cidWrapper.StateNodes) > 0 || len(cidWrapper.StorageNodes) > 0 {
return false
}
return true
}
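Sketch (hypothetical filter values; field names as used above) of driving Retrieve with a subscription filter that asks only for transaction and receipt CIDs:
filter := &config.EthSubscription{
HeaderFilter:  config.HeaderFilter{Off: true},
StateFilter:   config.StateFilter{Off: true},
StorageFilter: config.StorageFilter{Off: true},
TxFilter:      config.TxFilter{Dst: []string{"0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592"}},
ReceiptFilter: config.ReceiptFilter{MatchTxs: true},
}
cids, isEmpty, err := retriever.Retrieve(filter, 1)
_, _, _ = cids, isEmpty, err // cids is a *CIDWrapper when err == nil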
// RetrieveHeaderCIDs retrieves and returns all of the header cids at the provided block height
func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]HeaderModel, error) {
log.Debug("retrieving header cids for block ", blockNumber)
headers := make([]HeaderModel, 0)
pgStr := `SELECT * FROM header_cids
WHERE block_number = $1`
return headers, tx.Select(&headers, pgStr, blockNumber)
}
// RetrieveUncleCIDsByHeaderID retrieves and returns all of the uncle cids for the provided header
func (ecr *CIDRetriever) RetrieveUncleCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]UncleModel, error) {
log.Debug("retrieving uncle cids for block id ", headerID)
uncles := make([]UncleModel, 0)
pgStr := `SELECT * FROM uncle_cids
WHERE header_id = $1`
return uncles, tx.Select(&uncles, pgStr, headerID)
}
// RetrieveTxCIDs retrieves and returns all of the tx cids at the provided block height that conform to the provided filter parameters;
// the returned TxModels carry the ids needed to fetch their corresponding receipt cids
func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter config.TxFilter, blockNumber int64) ([]TxModel, error) {
log.Debug("retrieving transaction cids for block ", blockNumber)
// Use a running placeholder index so optional filters stay aligned with args
// (a fixed $3 would misnumber when only the src filter is set)
id := 1
args := make([]interface{}, 0, 3)
results := make([]TxModel, 0)
pgStr := fmt.Sprintf(`SELECT transaction_cids.id, transaction_cids.header_id,
transaction_cids.tx_hash, transaction_cids.cid,
transaction_cids.dst, transaction_cids.src, transaction_cids.index
FROM transaction_cids INNER JOIN header_cids ON (transaction_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $%d`, id)
args = append(args, blockNumber)
id++
if len(txFilter.Dst) > 0 {
pgStr += fmt.Sprintf(` AND transaction_cids.dst = ANY($%d::VARCHAR(66)[])`, id)
args = append(args, pq.Array(txFilter.Dst))
id++
}
if len(txFilter.Src) > 0 {
pgStr += fmt.Sprintf(` AND transaction_cids.src = ANY($%d::VARCHAR(66)[])`, id)
args = append(args, pq.Array(txFilter.Src))
}
return results, tx.Select(&results, pgStr, args...)
}
// RetrieveRctCIDs retrieves and returns all of the rct cids at the provided blockheight that conform to the provided
// filter parameters and correspond to the provided tx ids
func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter config.ReceiptFilter, blockNumber int64, blockHash *common.Hash, trxIds []int64) ([]ReceiptModel, error) {
log.Debug("retrieving receipt cids for block ", blockNumber)
id := 1
args := make([]interface{}, 0, 4)
pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid,
receipt_cids.contract, receipt_cids.topic0s, receipt_cids.topic1s,
receipt_cids.topic2s, receipt_cids.topic3s
FROM receipt_cids, transaction_cids, header_cids
WHERE receipt_cids.tx_id = transaction_cids.id
AND transaction_cids.header_id = header_cids.id`
if blockNumber > 0 {
pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
args = append(args, blockNumber)
id++
}
if blockHash != nil {
pgStr += fmt.Sprintf(` AND header_cids.block_hash = $%d`, id)
args = append(args, blockHash.String())
id++
}
if len(rctFilter.Contracts) > 0 {
// Filter on contract addresses if there are any
pgStr += fmt.Sprintf(` AND ((receipt_cids.contract = ANY($%d::VARCHAR(66)[])`, id)
args = append(args, pq.Array(rctFilter.Contracts))
id++
// Filter on topics if there are any
if len(rctFilter.Topics) > 0 {
pgStr += " AND ("
first := true
for i, topicSet := range rctFilter.Topics {
if i < 4 && len(topicSet) > 0 {
if first {
pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
first = false
} else {
pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
}
args = append(args, pq.Array(topicSet))
id++
}
}
pgStr += ")"
}
pgStr += ")"
// Filter on txIDs if there are any and we are matching txs
if rctFilter.MatchTxs && len(trxIds) > 0 {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
args = append(args, pq.Array(trxIds))
}
pgStr += ")"
} else { // If there are no contract addresses to filter on
// Filter on topics if there are any
if len(rctFilter.Topics) > 0 {
pgStr += " AND (("
first := true
for i, topicSet := range rctFilter.Topics {
if i < 4 && len(topicSet) > 0 {
if first {
pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
first = false
} else {
pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
}
args = append(args, pq.Array(topicSet))
id++
}
}
pgStr += ")"
// Filter on txIDs if there are any and we are matching txs
if rctFilter.MatchTxs && len(trxIds) > 0 {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
args = append(args, pq.Array(trxIds))
}
pgStr += ")"
} else if rctFilter.MatchTxs && len(trxIds) > 0 {
// If there are no contract addresses or topics to filter on,
// Filter on txIDs if there are any and we are matching txs
pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
args = append(args, pq.Array(trxIds))
}
}
receiptCids := make([]ReceiptModel, 0)
return receiptCids, tx.Select(&receiptCids, pgStr, args...)
}
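As a worked example of the placeholder bookkeeping above (illustrative; whitespace compressed):
// With blockNumber > 0, one contract filter, a topic0 set, MatchTxs = true
// and trxIds present, the builder yields
// (args: blockNumber, contracts, topic0s, trxIds):
//
//   ... AND header_cids.block_number = $1
//   AND ((receipt_cids.contract = ANY($2::VARCHAR(66)[])
//   AND (receipt_cids.topic0s && $3::VARCHAR(66)[]))
//   OR receipt_cids.tx_id = ANY($4::INTEGER[]))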
// RetrieveStateCIDs retrieves and returns all of the state node cids at the provided block height that conform to the provided filter parameters
func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter config.StateFilter, blockNumber int64) ([]StateNodeModel, error) {
log.Debug("retrieving state cids for block ", blockNumber)
args := make([]interface{}, 0, 2)
pgStr := `SELECT state_cids.id, state_cids.header_id,
state_cids.state_key, state_cids.leaf, state_cids.cid
FROM state_cids INNER JOIN header_cids ON (state_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
args = append(args, blockNumber)
addrLen := len(stateFilter.Addresses)
if addrLen > 0 {
keys := make([]string, addrLen)
for i, addr := range stateFilter.Addresses {
keys[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes()).String()
}
pgStr += ` AND state_cids.state_key = ANY($2::VARCHAR(66)[])`
args = append(args, pq.Array(keys))
}
if !stateFilter.IntermediateNodes {
pgStr += ` AND state_cids.leaf = TRUE`
}
stateNodeCIDs := make([]StateNodeModel, 0)
return stateNodeCIDs, tx.Select(&stateNodeCIDs, pgStr, args...)
}
// RetrieveStorageCIDs retrieves and returns all of the storage node cids at the provided block height that conform to the provided filter parameters
func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter config.StorageFilter, blockNumber int64) ([]StorageNodeWithStateKeyModel, error) {
log.Debug("retrieving storage cids for block ", blockNumber)
args := make([]interface{}, 0, 3)
pgStr := `SELECT storage_cids.id, storage_cids.state_id, storage_cids.storage_key,
storage_cids.leaf, storage_cids.cid, state_cids.state_key FROM storage_cids, state_cids, header_cids
WHERE storage_cids.state_id = state_cids.id
AND state_cids.header_id = header_cids.id
AND header_cids.block_number = $1`
args = append(args, blockNumber)
addrLen := len(storageFilter.Addresses)
if addrLen > 0 {
keys := make([]string, addrLen)
for i, addr := range storageFilter.Addresses {
keys[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes()).String()
}
pgStr += ` AND state_cids.state_key = ANY($2::VARCHAR(66)[])`
args = append(args, pq.Array(keys))
if len(storageFilter.StorageKeys) > 0 {
pgStr += ` AND storage_cids.storage_key = ANY($3::VARCHAR(66)[])`
args = append(args, pq.Array(storageFilter.StorageKeys))
}
} else if len(storageFilter.StorageKeys) > 0 {
pgStr += ` AND storage_cids.storage_key = ANY($2::VARCHAR(66)[])`
args = append(args, pq.Array(storageFilter.StorageKeys))
}
if !storageFilter.IntermediateNodes {
pgStr += ` AND storage_cids.leaf = TRUE`
}
storageNodeCIDs := make([]StorageNodeWithStateKeyModel, 0)
return storageNodeCIDs, tx.Select(&storageNodeCIDs, pgStr, args...)
}
// RetrieveGapsInData is used to find the block numbers at which we are missing data in the db
func (ecr *CIDRetriever) RetrieveGapsInData() ([]shared.Gap, error) {
pgStr := `SELECT header_cids.block_number + 1 AS start, min(fr.block_number) - 1 AS stop FROM header_cids
LEFT JOIN header_cids r on header_cids.block_number = r.block_number - 1
LEFT JOIN header_cids fr on header_cids.block_number < fr.block_number
WHERE r.block_number is NULL and fr.block_number IS NOT NULL
GROUP BY header_cids.block_number, r.block_number`
results := make([]struct {
Start uint64 `db:"start"`
Stop uint64 `db:"stop"`
}, 0)
err := ecr.db.Select(&results, pgStr)
if err != nil {
return nil, err
}
gaps := make([]shared.Gap, len(results))
for i, res := range results {
gaps[i] = shared.Gap{
Start: res.Start,
Stop: res.Stop,
}
}
return gaps, nil
}
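Worked example (hypothetical table contents): with header_cids holding block_numbers {1, 2, 5, 6}, only block 2 lacks a successor row while later rows exist, so the query emits start = 3, stop = 4:
// gaps, _ := retriever.RetrieveGapsInData() // => []shared.Gap{{Start: 3, Stop: 4}}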
// RetrieveBlockByHash returns all of the CIDs needed to compose an entire block, for a given block hash
func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (HeaderModel, []UncleModel, []TxModel, []ReceiptModel, error) {
log.Debug("retrieving block cids for block hash ", blockHash.String())
tx, err := ecr.db.Beginx()
if err != nil {
return HeaderModel{}, nil, nil, nil, err
}
headerCID, err := ecr.RetrieveHeaderCIDByHash(tx, blockHash)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("header cid retrieval error")
return HeaderModel{}, nil, nil, nil, err
}
uncleCIDs, err := ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID.ID)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("uncle cid retrieval error")
return HeaderModel{}, nil, nil, nil, err
}
txCIDs, err := ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.ID)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("tx cid retrieval error")
return HeaderModel{}, nil, nil, nil, err
}
txIDs := make([]int64, len(txCIDs))
for i, txCID := range txCIDs {
txIDs[i] = txCID.ID
}
rctCIDs, err := ecr.RetrieveReceiptCIDsByTxIDs(tx, txIDs)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("rct cid retrieval error")
return HeaderModel{}, nil, nil, nil, err
}
return headerCID, uncleCIDs, txCIDs, rctCIDs, tx.Commit()
}
// RetrieveBlockByNumber returns all of the CIDs needed to compose an entire block, for a given block number
func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (HeaderModel, []UncleModel, []TxModel, []ReceiptModel, error) {
log.Debug("retrieving block cids for block number ", blockNumber)
tx, err := ecr.db.Beginx()
if err != nil {
return HeaderModel{}, nil, nil, nil, err
}
headerCID, err := ecr.RetrieveHeaderCIDs(tx, blockNumber)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("header cid retrieval error")
return HeaderModel{}, nil, nil, nil, err
}
if len(headerCID) < 1 {
return HeaderModel{}, nil, nil, nil, fmt.Errorf("header cid retrieval error, no header CIDs found at block %d", blockNumber)
}
uncleCIDs, err := ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID[0].ID)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("uncle cid retrieval error")
return HeaderModel{}, nil, nil, nil, err
}
txCIDs, err := ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].ID)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("tx cid retrieval error")
return HeaderModel{}, nil, nil, nil, err
}
txIDs := make([]int64, len(txCIDs))
for i, txCID := range txCIDs {
txIDs[i] = txCID.ID
}
rctCIDs, err := ecr.RetrieveReceiptCIDsByTxIDs(tx, txIDs)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("rct cid retrieval error")
return HeaderModel{}, nil, nil, nil, err
}
return headerCID[0], uncleCIDs, txCIDs, rctCIDs, tx.Commit()
}
// RetrieveHeaderCIDByHash returns the header CID for the given block hash
func (ecr *CIDRetriever) RetrieveHeaderCIDByHash(tx *sqlx.Tx, blockHash common.Hash) (HeaderModel, error) {
log.Debug("retrieving header cids for block hash ", blockHash.String())
pgStr := `SELECT * FROM header_cids
WHERE block_hash = $1`
var headerCID HeaderModel
return headerCID, tx.Get(&headerCID, pgStr, blockHash.String())
}
// RetrieveTxCIDsByHeaderID retrieves all tx CIDs for the given header id
func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]TxModel, error) {
log.Debug("retrieving tx cids for block id ", headerID)
pgStr := `SELECT * FROM transaction_cids
WHERE transaction_cids.header_id = $1`
var txCIDs []TxModel
return txCIDs, tx.Select(&txCIDs, pgStr, headerID)
}
// RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs
func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txIDs []int64) ([]ReceiptModel, error) {
log.Debugf("retrieving receipt cids for tx ids %v", txIDs)
pgStr := `SELECT * FROM receipt_cids
WHERE receipt_cids.tx_id = ANY($1::INTEGER[])`
var rctCIDs []ReceiptModel
return rctCIDs, tx.Select(&rctCIDs, pgStr, pq.Array(txIDs))
}
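As throughout these retrieval methods, the slice-to-array binding goes through pq.Array; a minimal usage sketch under assumed names (db is a *sqlx.DB opened with the lib/pq driver; not part of this change):

// Assumes: import ( "github.com/jmoiron/sqlx"; "github.com/lib/pq" )
func receiptCIDsForTxs(db *sqlx.DB, txIDs []int64) ([]string, error) {
	var cids []string
	// pq.Array adapts the Go slice into a Postgres array parameter for ANY().
	err := db.Select(&cids,
		`SELECT cid FROM receipt_cids WHERE tx_id = ANY($1::INTEGER[])`,
		pq.Array(txIDs))
	return cids, err
}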

View File

@ -0,0 +1,535 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_test
import (
"math/big"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
eth2 "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
var (
openFilter = &config.EthSubscription{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{},
TxFilter: config.TxFilter{},
ReceiptFilter: config.ReceiptFilter{},
StateFilter: config.StateFilter{},
StorageFilter: config.StorageFilter{},
}
rctContractFilter = &config.EthSubscription{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
TxFilter: config.TxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
Contracts: []string{mocks.AnotherAddress.String()},
},
StateFilter: config.StateFilter{
Off: true,
},
StorageFilter: config.StorageFilter{
Off: true,
},
}
rctTopicsFilter = &config.EthSubscription{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
TxFilter: config.TxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
Topics: [][]string{{"0x0000000000000000000000000000000000000000000000000000000000000004"}},
},
StateFilter: config.StateFilter{
Off: true,
},
StorageFilter: config.StorageFilter{
Off: true,
},
}
rctTopicsAndContractFilter = &config.EthSubscription{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
TxFilter: config.TxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
Topics: [][]string{
{"0x0000000000000000000000000000000000000000000000000000000000000004"},
{"0x0000000000000000000000000000000000000000000000000000000000000006"},
},
Contracts: []string{mocks.Address.String()},
},
StateFilter: config.StateFilter{
Off: true,
},
StorageFilter: config.StorageFilter{
Off: true,
},
}
rctTopicsAndContractFilterFail = &config.EthSubscription{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
TxFilter: config.TxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
Topics: [][]string{
{"0x0000000000000000000000000000000000000000000000000000000000000004"},
{"0x0000000000000000000000000000000000000000000000000000000000000007"}, // This topic won't match on the mocks.Address.String() contract receipt
},
Contracts: []string{mocks.Address.String()},
},
StateFilter: config.StateFilter{
Off: true,
},
StorageFilter: config.StorageFilter{
Off: true,
},
}
rctContractsAndTopicFilter = &config.EthSubscription{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
TxFilter: config.TxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
Topics: [][]string{{"0x0000000000000000000000000000000000000000000000000000000000000005"}},
Contracts: []string{mocks.Address.String(), mocks.AnotherAddress.String()},
},
StateFilter: config.StateFilter{
Off: true,
},
StorageFilter: config.StorageFilter{
Off: true,
},
}
rctsForAllCollectedTrxs = &config.EthSubscription{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
TxFilter: config.TxFilter{}, // Trx filter open so we will collect all trxs, therefore we will also collect all corresponding rcts despite rct filter
ReceiptFilter: config.ReceiptFilter{
MatchTxs: true,
Topics: [][]string{{"0x0000000000000000000000000000000000000000000000000000000000000006"}}, // Topic0 isn't one of the topic0s we have
Contracts: []string{"0x0000000000000000000000000000000000000002"}, // Contract isn't one of the contracts we have
},
StateFilter: config.StateFilter{
Off: true,
},
StorageFilter: config.StorageFilter{
Off: true,
},
}
rctsForSelectCollectedTrxs = &config.EthSubscription{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
TxFilter: config.TxFilter{
Dst: []string{mocks.AnotherAddress.String()}, // We only filter for one of the trxs so we will only get the one corresponding receipt
},
ReceiptFilter: config.ReceiptFilter{
MatchTxs: true,
Topics: [][]string{{"0x0000000000000000000000000000000000000000000000000000000000000006"}}, // Topic0 isn't one of the topic0s we have
Contracts: []string{"0x0000000000000000000000000000000000000002"}, // Contract isn't one of the contracts we have
},
StateFilter: config.StateFilter{
Off: true,
},
StorageFilter: config.StorageFilter{
Off: true,
},
}
stateFilter = &config.EthSubscription{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
TxFilter: config.TxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
Off: true,
},
StateFilter: config.StateFilter{
Addresses: []string{mocks.Address.Hex()},
},
StorageFilter: config.StorageFilter{
Off: true,
},
}
)
var _ = Describe("Retriever", func() {
var (
db *postgres.DB
repo *eth2.CIDIndexer
retriever *eth2.CIDRetriever
)
BeforeEach(func() {
var err error
db, err = eth.SetupDB()
Expect(err).ToNot(HaveOccurred())
repo = eth2.NewCIDIndexer(db)
retriever = eth2.NewCIDRetriever(db)
})
AfterEach(func() {
eth.TearDownDB(db)
})
Describe("Retrieve", func() {
BeforeEach(func() {
err := repo.Index(mocks.MockCIDPayload)
Expect(err).ToNot(HaveOccurred())
})
It("Retrieves all CIDs for the given blocknumber when provided an open filter", func() {
cids, empty, err := retriever.Retrieve(openFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
cidWrapper, ok := cids.(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper.Headers)).To(Equal(1))
expectedHeaderCIDs := mocks.MockCIDWrapper.Headers
expectedHeaderCIDs[0].ID = cidWrapper.Headers[0].ID
Expect(cidWrapper.Headers).To(Equal(expectedHeaderCIDs))
Expect(len(cidWrapper.Transactions)).To(Equal(2))
Expect(eth.TxModelsContainsCID(cidWrapper.Transactions, mocks.MockCIDWrapper.Transactions[0].CID)).To(BeTrue())
Expect(eth.TxModelsContainsCID(cidWrapper.Transactions, mocks.MockCIDWrapper.Transactions[1].CID)).To(BeTrue())
Expect(len(cidWrapper.Receipts)).To(Equal(2))
Expect(eth.ReceiptModelsContainsCID(cidWrapper.Receipts, mocks.MockCIDWrapper.Receipts[0].CID)).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(cidWrapper.Receipts, mocks.MockCIDWrapper.Receipts[1].CID)).To(BeTrue())
Expect(len(cidWrapper.StateNodes)).To(Equal(2))
for _, stateNode := range cidWrapper.StateNodes {
if stateNode.CID == "mockStateCID1" {
Expect(stateNode.StateKey).To(Equal(mocks.ContractLeafKey.Hex()))
Expect(stateNode.Leaf).To(Equal(true))
}
if stateNode.CID == "mockStateCID2" {
Expect(stateNode.StateKey).To(Equal(mocks.AnotherContractLeafKey.Hex()))
Expect(stateNode.Leaf).To(Equal(true))
}
}
Expect(len(cidWrapper.StorageNodes)).To(Equal(1))
expectedStorageNodeCIDs := mocks.MockCIDWrapper.StorageNodes
expectedStorageNodeCIDs[0].ID = cidWrapper.StorageNodes[0].ID
expectedStorageNodeCIDs[0].StateID = cidWrapper.StorageNodes[0].StateID
Expect(cidWrapper.StorageNodes).To(Equal(expectedStorageNodeCIDs))
})
It("Applies filters from the provided config.Subscription", func() {
cids1, empty, err := retriever.Retrieve(rctContractFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
cidWrapper1, ok := cids1.(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper1.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper1.Headers)).To(Equal(0))
Expect(len(cidWrapper1.Transactions)).To(Equal(0))
Expect(len(cidWrapper1.StateNodes)).To(Equal(0))
Expect(len(cidWrapper1.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper1.Receipts)).To(Equal(1))
expectedReceiptCID := mocks.MockCIDWrapper.Receipts[1]
expectedReceiptCID.ID = cidWrapper1.Receipts[0].ID
expectedReceiptCID.TxID = cidWrapper1.Receipts[0].TxID
Expect(cidWrapper1.Receipts[0]).To(Equal(expectedReceiptCID))
cids2, empty, err := retriever.Retrieve(rctTopicsFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
cidWrapper2, ok := cids2.(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper2.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper2.Headers)).To(Equal(0))
Expect(len(cidWrapper2.Transactions)).To(Equal(0))
Expect(len(cidWrapper2.StateNodes)).To(Equal(0))
Expect(len(cidWrapper2.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper2.Receipts)).To(Equal(1))
expectedReceiptCID = mocks.MockCIDWrapper.Receipts[0]
expectedReceiptCID.ID = cidWrapper2.Receipts[0].ID
expectedReceiptCID.TxID = cidWrapper2.Receipts[0].TxID
Expect(cidWrapper2.Receipts[0]).To(Equal(expectedReceiptCID))
cids3, empty, err := retriever.Retrieve(rctTopicsAndContractFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
cidWrapper3, ok := cids3.(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper3.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper3.Headers)).To(Equal(0))
Expect(len(cidWrapper3.Transactions)).To(Equal(0))
Expect(len(cidWrapper3.StateNodes)).To(Equal(0))
Expect(len(cidWrapper3.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper3.Receipts)).To(Equal(1))
expectedReceiptCID = mocks.MockCIDWrapper.Receipts[0]
expectedReceiptCID.ID = cidWrapper3.Receipts[0].ID
expectedReceiptCID.TxID = cidWrapper3.Receipts[0].TxID
Expect(cidWrapper3.Receipts[0]).To(Equal(expectedReceiptCID))
cids4, empty, err := retriever.Retrieve(rctContractsAndTopicFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
cidWrapper4, ok := cids4.(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper4.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper4.Headers)).To(Equal(0))
Expect(len(cidWrapper4.Transactions)).To(Equal(0))
Expect(len(cidWrapper4.StateNodes)).To(Equal(0))
Expect(len(cidWrapper4.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper4.Receipts)).To(Equal(1))
expectedReceiptCID = mocks.MockCIDWrapper.Receipts[1]
expectedReceiptCID.ID = cidWrapper4.Receipts[0].ID
expectedReceiptCID.TxID = cidWrapper4.Receipts[0].TxID
Expect(cidWrapper4.Receipts[0]).To(Equal(expectedReceiptCID))
cids5, empty, err := retriever.Retrieve(rctsForAllCollectedTrxs, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
cidWrapper5, ok := cids5.(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper5.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper5.Headers)).To(Equal(0))
Expect(len(cidWrapper5.Transactions)).To(Equal(2))
Expect(eth.TxModelsContainsCID(cidWrapper5.Transactions, "mockTrxCID1")).To(BeTrue())
Expect(eth.TxModelsContainsCID(cidWrapper5.Transactions, "mockTrxCID2")).To(BeTrue())
Expect(len(cidWrapper5.StateNodes)).To(Equal(0))
Expect(len(cidWrapper5.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper5.Receipts)).To(Equal(2))
Expect(eth.ReceiptModelsContainsCID(cidWrapper5.Receipts, "mockRctCID1")).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(cidWrapper5.Receipts, "mockRctCID2")).To(BeTrue())
cids6, empty, err := retriever.Retrieve(rctsForSelectCollectedTrxs, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
cidWrapper6, ok := cids6.(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper6.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper6.Headers)).To(Equal(0))
Expect(len(cidWrapper6.Transactions)).To(Equal(1))
expectedTxCID := mocks.MockCIDWrapper.Transactions[1]
expectedTxCID.ID = cidWrapper6.Transactions[0].ID
expectedTxCID.HeaderID = cidWrapper6.Transactions[0].HeaderID
Expect(cidWrapper6.Transactions[0]).To(Equal(expectedTxCID))
Expect(len(cidWrapper6.StateNodes)).To(Equal(0))
Expect(len(cidWrapper6.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper6.Receipts)).To(Equal(1))
expectedReceiptCID = mocks.MockCIDWrapper.Receipts[1]
expectedReceiptCID.ID = cidWrapper6.Receipts[0].ID
expectedReceiptCID.TxID = cidWrapper6.Receipts[0].TxID
Expect(cidWrapper6.Receipts[0]).To(Equal(expectedReceiptCID))
cids7, empty, err := retriever.Retrieve(stateFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
cidWrapper7, ok := cids7.(*eth.CIDWrapper)
Expect(ok).To(BeTrue())
Expect(cidWrapper7.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper7.Headers)).To(Equal(0))
Expect(len(cidWrapper7.Transactions)).To(Equal(0))
Expect(len(cidWrapper7.Receipts)).To(Equal(0))
Expect(len(cidWrapper7.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper7.StateNodes)).To(Equal(1))
Expect(cidWrapper7.StateNodes[0]).To(Equal(eth.StateNodeModel{
ID: cidWrapper7.StateNodes[0].ID,
HeaderID: cidWrapper7.StateNodes[0].HeaderID,
Leaf: true,
StateKey: mocks.ContractLeafKey.Hex(),
CID: "mockStateCID1",
}))
_, empty, err = retriever.Retrieve(rctTopicsAndContractFilterFail, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).To(BeTrue())
})
})
Describe("RetrieveFirstBlockNumber", func() {
It("Gets the number of the first block that has data in the database", func() {
err := repo.Index(mocks.MockCIDPayload)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1)))
})
It("Gets the number of the first block that has data in the database", func() {
payload := *mocks.MockCIDPayload
payload.HeaderCID.BlockNumber = "1010101"
err := repo.Index(&payload)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101)))
})
It("Gets the number of the first block that has data in the database", func() {
payload1 := *mocks.MockCIDPayload
payload1.HeaderCID.BlockNumber = "1010101"
payload2 := payload1
payload2.HeaderCID.BlockNumber = "5"
err := repo.Index(&payload1)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload2)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(5)))
})
})
Describe("RetrieveLastBlockNumber", func() {
It("Gets the number of the latest block that has data in the database", func() {
err := repo.Index(mocks.MockCIDPayload)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1)))
})
It("Gets the number of the latest block that has data in the database", func() {
payload := *mocks.MockCIDPayload
payload.HeaderCID.BlockNumber = "1010101"
err := repo.Index(&payload)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101)))
})
It("Gets the number of the latest block that has data in the database", func() {
payload1 := *mocks.MockCIDPayload
payload1.HeaderCID.BlockNumber = "1010101"
payload2 := payload1
payload2.HeaderCID.BlockNumber = "5"
err := repo.Index(&payload1)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload2)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101)))
})
})
Describe("RetrieveGapsInData", func() {
It("Doesn't return gaps if there are none", func() {
payload1 := *mocks.MockCIDPayload
payload1.HeaderCID.BlockNumber = "2"
payload2 := payload1
payload2.HeaderCID.BlockNumber = "3"
err := repo.Index(mocks.MockCIDPayload)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload1)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload2)
Expect(err).ToNot(HaveOccurred())
gaps, err := retriever.RetrieveGapsInData()
Expect(err).ToNot(HaveOccurred())
Expect(len(gaps)).To(Equal(0))
})
It("Doesn't return the gap from 0 to the earliest block", func() {
payload := *mocks.MockCIDPayload
payload.HeaderCID.BlockNumber = "5"
err := repo.Index(&payload)
Expect(err).ToNot(HaveOccurred())
gaps, err := retriever.RetrieveGapsInData()
Expect(err).ToNot(HaveOccurred())
Expect(len(gaps)).To(Equal(0))
})
It("Finds gap between two entries", func() {
payload1 := *mocks.MockCIDPayload
payload1.HeaderCID.BlockNumber = "1010101"
payload2 := payload1
payload2.HeaderCID.BlockNumber = "5"
err := repo.Index(&payload1)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload2)
Expect(err).ToNot(HaveOccurred())
gaps, err := retriever.RetrieveGapsInData()
Expect(err).ToNot(HaveOccurred())
Expect(len(gaps)).To(Equal(1))
Expect(gaps[0].Start).To(Equal(uint64(6)))
Expect(gaps[0].Stop).To(Equal(uint64(1010100)))
})
It("Finds gaps between multiple entries", func() {
payload1 := *mocks.MockCIDPayload
payload1.HeaderCID.BlockNumber = "1010101"
payload2 := payload1
payload2.HeaderCID.BlockNumber = "5"
payload3 := payload2
payload3.HeaderCID.BlockNumber = "100"
payload4 := payload3
payload4.HeaderCID.BlockNumber = "101"
payload5 := payload4
payload5.HeaderCID.BlockNumber = "102"
payload6 := payload5
payload6.HeaderCID.BlockNumber = "1000"
err := repo.Index(&payload1)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload2)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload3)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload4)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload5)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload6)
Expect(err).ToNot(HaveOccurred())
gaps, err := retriever.RetrieveGapsInData()
Expect(err).ToNot(HaveOccurred())
Expect(len(gaps)).To(Equal(3))
Expect(shared.ListContainsGap(gaps, shared.Gap{Start: 6, Stop: 99})).To(BeTrue())
Expect(shared.ListContainsGap(gaps, shared.Gap{Start: 103, Stop: 999})).To(BeTrue())
Expect(shared.ListContainsGap(gaps, shared.Gap{Start: 1001, Stop: 1010100})).To(BeTrue())
})
})
})

View File

@ -0,0 +1,46 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"github.com/ethereum/go-ethereum/rpc"
"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/core"
)
const (
PayloadChanBufferSize = 20000 // the max eth sub buffer size
)
// PayloadStreamer satisfies the PayloadStreamer interface for ethereum
type PayloadStreamer struct {
Client core.RPCClient
}
// NewPayloadStreamer creates a pointer to a new PayloadStreamer which satisfies the PayloadStreamer interface
func NewPayloadStreamer(client core.RPCClient) *PayloadStreamer {
return &PayloadStreamer{
Client: client,
}
}
// Stream is the main loop for subscribing to data from the Geth state diff process
func (sds *PayloadStreamer) Stream(payloadChan chan interface{}) (*rpc.ClientSubscription, error) {
logrus.Info("streaming diffs from geth")
return sds.Client.Subscribe("statediff", payloadChan, "stream")
}
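A hedged usage sketch of a consuming loop (assuming an already-constructed core.RPCClient named rpcClient pointed at a statediff-enabled geth node; the names here are illustrative, not part of this change):

streamer := eth.NewPayloadStreamer(rpcClient)
payloadChan := make(chan interface{}, eth.PayloadChanBufferSize)
sub, err := streamer.Stream(payloadChan)
if err != nil {
	logrus.Fatal(err)
}
defer sub.Unsubscribe()
for {
	select {
	case payload := <-payloadChan:
		logrus.Debugf("received statediff payload: %v", payload)
	case err := <-sub.Err():
		logrus.Error("statediff subscription error: ", err)
		return
	}
}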

View File

@ -0,0 +1,34 @@
// Copyright 2019 Vulcanize
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package eth_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/fakes"
"github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)
var _ = Describe("StateDiff Streamer", func() {
It("subscribes to the geth statediff service", func() {
client := &fakes.MockRPCClient{}
streamer := eth.NewPayloadStreamer(client)
payloadChan := make(chan interface{})
_, err := streamer.Stream(payloadChan)
Expect(err).NotTo(HaveOccurred())
client.AssertSubscribeCalledWith("statediff", payloadChan, []interface{}{"stream"})
})
})

View File

@ -14,11 +14,9 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package super_node
package eth
import (
"bytes"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/config"
@ -57,10 +55,10 @@ func TearDownDB(db *postgres.DB) {
Expect(err).NotTo(HaveOccurred())
}
// ListContainsString used to check if a list of strings contains a particular string
func ListContainsString(sss []string, s string) bool {
for _, str := range sss {
if s == str {
// TxModelsContainsCID used to check if a list of TxModels contains a specific cid string
func TxModelsContainsCID(txs []TxModel, cid string) bool {
for _, tx := range txs {
if tx.CID == cid {
return true
}
}
@ -68,19 +66,9 @@ func ListContainsString(sss []string, s string) bool {
}
// ListContainsBytes used to check if a list of byte arrays contains a particular byte array
func ListContainsBytes(bbb [][]byte, b []byte) bool {
for _, by := range bbb {
if bytes.Equal(by, b) {
return true
}
}
return false
}
// ListContainsRange used to check if a list of [2]uint64 contains a particular [2]uint64
func ListContainsRange(rangeList [][2]uint64, rng [2]uint64) bool {
for _, rangeInList := range rangeList {
if rangeInList == rng {
func ReceiptModelsContainsCID(rcts []ReceiptModel, cid string) bool {
for _, rct := range rcts {
if rct.CID == cid {
return true
}
}

119
pkg/super_node/eth/types.go Normal file
View File

@ -0,0 +1,119 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"encoding/json"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ipfs/go-block-format"
)
// IPLDPayload is a custom type which packages raw ETH data for publishing to IPFS and filtering to subscribers
// Returned by PayloadConverter
// Passed to IPLDPublisher and ResponseFilterer
type IPLDPayload struct {
TotalDifficulty *big.Int
Block *types.Block
HeaderRLP []byte
TrxMetaData []TxModel
Receipts types.Receipts
ReceiptMetaData []ReceiptModel
StateNodes []TrieNode
StorageNodes map[common.Hash][]TrieNode
}
// TrieNode struct used to flag a node as a leaf or not
type TrieNode struct {
Key common.Hash
Value []byte
Leaf bool
}
// CIDPayload is a struct to hold all the CIDs and their associated meta data for indexing in Postgres
// Returned by IPLDPublisher
// Passed to CIDIndexer
type CIDPayload struct {
HeaderCID HeaderModel
UncleCIDs []UncleModel
TransactionCIDs []TxModel
ReceiptCIDs map[common.Hash]ReceiptModel
StateNodeCIDs []StateNodeModel
StorageNodeCIDs map[common.Hash][]StorageNodeModel
}
// CIDWrapper is used to direct fetching of IPLDs from IPFS
// Returned by CIDRetriever
// Passed to IPLDFetcher
type CIDWrapper struct {
BlockNumber *big.Int
Headers []HeaderModel
Uncles []UncleModel
Transactions []TxModel
Receipts []ReceiptModel
StateNodes []StateNodeModel
StorageNodes []StorageNodeWithStateKeyModel
}
// IPLDWrapper is used to package raw IPLD block data fetched from IPFS
// Returned by IPLDFetcher
// Passed to IPLDResolver
type IPLDWrapper struct {
BlockNumber *big.Int
Headers []blocks.Block
Uncles []blocks.Block
Transactions []blocks.Block
Receipts []blocks.Block
StateNodes map[common.Hash]blocks.Block
StorageNodes map[common.Hash]map[common.Hash]blocks.Block
}
// StreamPayload holds the data streamed from the super node eth service to the requesting clients
// Returned by IPLDResolver and ResponseFilterer
// Passed to client subscriptions
type StreamPayload struct {
BlockNumber *big.Int `json:"blockNumber"`
HeadersRlp [][]byte `json:"headersRlp"`
UnclesRlp [][]byte `json:"unclesRlp"`
TransactionsRlp [][]byte `json:"transactionsRlp"`
ReceiptsRlp [][]byte `json:"receiptsRlp"`
StateNodesRlp map[common.Hash][]byte `json:"stateNodesRlp"`
StorageNodesRlp map[common.Hash]map[common.Hash][]byte `json:"storageNodesRlp"`
encoded []byte
err error
}
func (sd *StreamPayload) ensureEncoded() {
if sd.encoded == nil && sd.err == nil {
sd.encoded, sd.err = json.Marshal(sd)
}
}
// Length to implement Encoder interface for StreamPayload
func (sd *StreamPayload) Length() int {
sd.ensureEncoded()
return len(sd.encoded)
}
// Encode to implement Encoder interface for StreamPayload
func (sd *StreamPayload) Encode() ([]byte, error) {
sd.ensureEncoded()
return sd.encoded, sd.err
}
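The unexported encoded/err pair memoizes a single json.Marshal so Length and Encode stay consistent; a hypothetical consumer sketch (payload assumed to be a populated *StreamPayload, not part of this change):

if n := payload.Length(); n > 0 { // first call performs the one-time json.Marshal
	data, err := payload.Encode() // later calls reuse the memoized bytes
	if err != nil {
		logrus.Error(err) // a marshal failure surfaces here, not in Length
	}
	_ = data
}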

View File

@ -1,246 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package super_node
import (
"bytes"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
"github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)
// ResponseFilterer is the interface used to screen eth data and package appropriate data into a response payload
type ResponseFilterer interface {
FilterResponse(streamFilters config.Subscription, payload ipfs.IPLDPayload) (streamer.SuperNodePayload, error)
}
// Filterer is the underlying struct for the ResponseFilterer interface
type Filterer struct{}
// NewResponseFilterer creates a new Filterer satisfying the ResponseFilterer interface
func NewResponseFilterer() *Filterer {
return &Filterer{}
}
// FilterResponse is used to filter through eth data to extract and package requested data into a Payload
func (s *Filterer) FilterResponse(streamFilters config.Subscription, payload ipfs.IPLDPayload) (streamer.SuperNodePayload, error) {
response := new(streamer.SuperNodePayload)
headersErr := s.filterHeaders(streamFilters, response, payload)
if headersErr != nil {
return streamer.SuperNodePayload{}, headersErr
}
txHashes, trxsErr := s.filterTransactions(streamFilters, response, payload)
if trxsErr != nil {
return streamer.SuperNodePayload{}, trxsErr
}
rctsErr := s.filerReceipts(streamFilters, response, payload, txHashes)
if rctsErr != nil {
return streamer.SuperNodePayload{}, rctsErr
}
stateErr := s.filterState(streamFilters, response, payload)
if stateErr != nil {
return streamer.SuperNodePayload{}, stateErr
}
storageErr := s.filterStorage(streamFilters, response, payload)
if storageErr != nil {
return streamer.SuperNodePayload{}, storageErr
}
response.BlockNumber = payload.BlockNumber
return *response, nil
}
func (s *Filterer) filterHeaders(streamFilters config.Subscription, response *streamer.SuperNodePayload, payload ipfs.IPLDPayload) error {
if !streamFilters.HeaderFilter.Off && checkRange(streamFilters.StartingBlock.Int64(), streamFilters.EndingBlock.Int64(), payload.BlockNumber.Int64()) {
response.HeadersRlp = append(response.HeadersRlp, payload.HeaderRLP)
if streamFilters.HeaderFilter.Uncles {
for _, uncle := range payload.BlockBody.Uncles {
uncleRlp, err := rlp.EncodeToBytes(uncle)
if err != nil {
return err
}
response.UnclesRlp = append(response.UnclesRlp, uncleRlp)
}
}
}
return nil
}
func checkRange(start, end, actual int64) bool {
if (end <= 0 || end >= actual) && start <= actual {
return true
}
return false
}
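Spelled out, the predicate treats a non-positive end as an open-ended subscription; some hedged examples (not in the diff):

// checkRange(0, 0, 5)  == true   // end <= 0: no upper bound
// checkRange(0, 10, 5) == true   // start <= 5 <= end
// checkRange(6, 10, 5) == false  // actual is below the starting block
// checkRange(0, 4, 5)  == false  // actual is above the ending block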
func (s *Filterer) filterTransactions(streamFilters config.Subscription, response *streamer.SuperNodePayload, payload ipfs.IPLDPayload) ([]common.Hash, error) {
trxHashes := make([]common.Hash, 0, len(payload.BlockBody.Transactions))
if !streamFilters.TrxFilter.Off && checkRange(streamFilters.StartingBlock.Int64(), streamFilters.EndingBlock.Int64(), payload.BlockNumber.Int64()) {
for i, trx := range payload.BlockBody.Transactions {
if checkTransactions(streamFilters.TrxFilter.Src, streamFilters.TrxFilter.Dst, payload.TrxMetaData[i].Src, payload.TrxMetaData[i].Dst) {
trxBuffer := new(bytes.Buffer)
err := trx.EncodeRLP(trxBuffer)
if err != nil {
return nil, err
}
trxHashes = append(trxHashes, trx.Hash())
response.TransactionsRlp = append(response.TransactionsRlp, trxBuffer.Bytes())
}
}
}
return trxHashes, nil
}
func checkTransactions(wantedSrc, wantedDst []string, actualSrc, actualDst string) bool {
// If we aren't filtering for any addresses, every transaction is a go
if len(wantedDst) == 0 && len(wantedSrc) == 0 {
return true
}
for _, src := range wantedSrc {
if src == actualSrc {
return true
}
}
for _, dst := range wantedDst {
if dst == actualDst {
return true
}
}
return false
}
func (s *Filterer) filerReceipts(streamFilters config.Subscription, response *streamer.SuperNodePayload, payload ipfs.IPLDPayload, trxHashes []common.Hash) error {
if !streamFilters.ReceiptFilter.Off && checkRange(streamFilters.StartingBlock.Int64(), streamFilters.EndingBlock.Int64(), payload.BlockNumber.Int64()) {
for i, receipt := range payload.Receipts {
if checkReceipts(receipt, streamFilters.ReceiptFilter.Topic0s, payload.ReceiptMetaData[i].Topic0s, streamFilters.ReceiptFilter.Contracts, payload.ReceiptMetaData[i].ContractAddress, trxHashes) {
receiptForStorage := (*types.ReceiptForStorage)(receipt)
receiptBuffer := new(bytes.Buffer)
err := receiptForStorage.EncodeRLP(receiptBuffer)
if err != nil {
return err
}
response.ReceiptsRlp = append(response.ReceiptsRlp, receiptBuffer.Bytes())
}
}
}
return nil
}
func checkReceipts(rct *types.Receipt, wantedTopics, actualTopics, wantedContracts []string, actualContract string, wantedTrxHashes []common.Hash) bool {
// If we aren't filtering for any topics or contracts, all receipts are a go
if len(wantedTopics) == 0 && len(wantedContracts) == 0 {
return true
}
// No matter what filters we have, we keep receipts for the trxs we are interested in
for _, wantedTrxHash := range wantedTrxHashes {
if bytes.Equal(wantedTrxHash.Bytes(), rct.TxHash.Bytes()) {
return true
}
}
if len(wantedContracts) == 0 {
// We keep all receipts that have logs we are interested in
for _, wantedTopic := range wantedTopics {
for _, actualTopic := range actualTopics {
if wantedTopic == actualTopic {
return true
}
}
}
} else { // Otherwise keep receipts that belong to one of the specified contracts; if topics are also given, both contract and topic must match
for _, wantedContract := range wantedContracts {
if wantedContract == actualContract {
if len(wantedTopics) == 0 {
return true
}
// Or if we have contracts and topics to filter on we only keep receipts that satisfy both conditions
for _, wantedTopic := range wantedTopics {
for _, actualTopic := range actualTopics {
if wantedTopic == actualTopic {
return true
}
}
}
}
}
}
return false
}
func (s *Filterer) filterState(streamFilters config.Subscription, response *streamer.SuperNodePayload, payload ipfs.IPLDPayload) error {
if !streamFilters.StateFilter.Off && checkRange(streamFilters.StartingBlock.Int64(), streamFilters.EndingBlock.Int64(), payload.BlockNumber.Int64()) {
response.StateNodesRlp = make(map[common.Hash][]byte)
keyFilters := make([]common.Hash, 0, len(streamFilters.StateFilter.Addresses))
for _, addr := range streamFilters.StateFilter.Addresses {
keyFilter := ipfs.AddressToKey(common.HexToAddress(addr))
keyFilters = append(keyFilters, keyFilter)
}
for key, stateNode := range payload.StateNodes {
if checkNodeKeys(keyFilters, key) {
if stateNode.Leaf || streamFilters.StateFilter.IntermediateNodes {
response.StateNodesRlp[key] = stateNode.Value
}
}
}
}
return nil
}
func checkNodeKeys(wantedKeys []common.Hash, actualKey common.Hash) bool {
// If we aren't filtering for any specific keys, all nodes are a go
if len(wantedKeys) == 0 {
return true
}
for _, key := range wantedKeys {
if bytes.Equal(key.Bytes(), actualKey.Bytes()) {
return true
}
}
return false
}
func (s *Filterer) filterStorage(streamFilters config.Subscription, response *streamer.SuperNodePayload, payload ipfs.IPLDPayload) error {
if !streamFilters.StorageFilter.Off && checkRange(streamFilters.StartingBlock.Int64(), streamFilters.EndingBlock.Int64(), payload.BlockNumber.Int64()) {
response.StorageNodesRlp = make(map[common.Hash]map[common.Hash][]byte)
stateKeyFilters := make([]common.Hash, 0, len(streamFilters.StorageFilter.Addresses))
for _, addr := range streamFilters.StorageFilter.Addresses {
keyFilter := ipfs.AddressToKey(common.HexToAddress(addr))
stateKeyFilters = append(stateKeyFilters, keyFilter)
}
storageKeyFilters := make([]common.Hash, 0, len(streamFilters.StorageFilter.StorageKeys))
for _, store := range streamFilters.StorageFilter.StorageKeys {
keyFilter := ipfs.HexToKey(store)
storageKeyFilters = append(storageKeyFilters, keyFilter)
}
for stateKey, storageNodes := range payload.StorageNodes {
if checkNodeKeys(stateKeyFilters, stateKey) {
response.StorageNodesRlp[stateKey] = make(map[common.Hash][]byte)
for _, storageNode := range storageNodes {
if checkNodeKeys(storageKeyFilters, storageNode.Key) {
response.StorageNodesRlp[stateKey][storageNode.Key] = storageNode.Value
}
}
}
}
}
return nil
}

37
pkg/super_node/helpers.go Normal file
View File

@ -0,0 +1,37 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package super_node
import log "github.com/sirupsen/logrus"
func sendNonBlockingErr(sub Subscription, err error) {
log.Error(err)
select {
case sub.PayloadChan <- Payload{nil, err.Error()}:
default:
log.Infof("unable to send error to subscription %s", sub.ID)
}
}
func sendNonBlockingQuit(sub Subscription) {
select {
case sub.QuitChan <- true:
log.Infof("closing subscription %s", sub.ID)
default:
log.Infof("unable to close subscription %s; channel has no receiver", sub.ID)
}
}
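Both helpers rely on Go's select-with-default idiom for non-blocking channel sends; a generic, self-contained sketch of the same pattern (names hypothetical, not part of this change):

// trySend delivers msg only if the channel can accept it right now; otherwise
// it returns false instead of blocking the calling goroutine.
func trySend(ch chan<- string, msg string) bool {
	select {
	case ch <- msg:
		return true
	default:
		return false
	}
}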

View File

@ -1,44 +0,0 @@
package mocks
import (
"github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)
// MockCIDRetriever is a mock CID retriever for use in tests
type MockCIDRetriever struct {
GapsToRetrieve [][2]uint64
GapsToRetrieveErr error
CalledTimes int
FirstBlockNumberToReturn int64
RetrieveFirstBlockNumberErr error
}
// RetrieveCIDs mock method
func (*MockCIDRetriever) RetrieveCIDs(streamFilters config.Subscription, blockNumber int64) (*ipfs.CIDWrapper, error) {
panic("implement me")
}
// RetrieveLastBlockNumber mock method
func (*MockCIDRetriever) RetrieveLastBlockNumber() (int64, error) {
panic("implement me")
}
// RetrieveFirstBlockNumber mock method
func (mcr *MockCIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
return mcr.FirstBlockNumberToReturn, mcr.RetrieveFirstBlockNumberErr
}
// RetrieveGapsInData mock method
func (mcr *MockCIDRetriever) RetrieveGapsInData() ([][2]uint64, error) {
mcr.CalledTimes++
return mcr.GapsToRetrieve, mcr.GapsToRetrieveErr
}
// SetGapsToRetrieve mock method
func (mcr *MockCIDRetriever) SetGapsToRetrieve(gaps [][2]uint64) {
if mcr.GapsToRetrieve == nil {
mcr.GapsToRetrieve = make([][2]uint64, 0)
}
mcr.GapsToRetrieve = append(mcr.GapsToRetrieve, gaps...)
}

View File

@ -1,156 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package super_node
import (
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)
// CIDRepository is an interface for indexing ipfs.CIDPayloads
type CIDRepository interface {
Index(cidPayload *ipfs.CIDPayload) error
}
// Repository is the underlying struct for the CIDRepository interface
type Repository struct {
db *postgres.DB
}
// NewCIDRepository creates a new pointer to a Repository which satisfies the CIDRepository interface
func NewCIDRepository(db *postgres.DB) *Repository {
return &Repository{
db: db,
}
}
// Index indexes a cidPayload in Postgres
func (repo *Repository) Index(cidPayload *ipfs.CIDPayload) error {
tx, beginErr := repo.db.Beginx()
if beginErr != nil {
return beginErr
}
headerID, headerErr := repo.indexHeaderCID(tx, cidPayload.HeaderCID, cidPayload.BlockNumber, cidPayload.BlockHash.Hex())
if headerErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Error(rollbackErr)
}
return headerErr
}
for uncleHash, cid := range cidPayload.UncleCIDs {
uncleErr := repo.indexUncleCID(tx, cid, cidPayload.BlockNumber, uncleHash.Hex())
if uncleErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Error(rollbackErr)
}
return uncleErr
}
}
trxAndRctErr := repo.indexTransactionAndReceiptCIDs(tx, cidPayload, headerID)
if trxAndRctErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Error(rollbackErr)
}
return trxAndRctErr
}
stateAndStorageErr := repo.indexStateAndStorageCIDs(tx, cidPayload, headerID)
if stateAndStorageErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Error(rollbackErr)
}
return stateAndStorageErr
}
return tx.Commit()
}
func (repo *Repository) indexHeaderCID(tx *sqlx.Tx, cid, blockNumber, hash string) (int64, error) {
var headerID int64
err := tx.QueryRowx(`INSERT INTO public.header_cids (block_number, block_hash, cid, uncle) VALUES ($1, $2, $3, $4)
ON CONFLICT (block_number, block_hash) DO UPDATE SET (cid, uncle) = ($3, $4)
RETURNING id`,
blockNumber, hash, cid, false).Scan(&headerID)
return headerID, err
}
func (repo *Repository) indexUncleCID(tx *sqlx.Tx, cid, blockNumber, hash string) error {
_, err := tx.Exec(`INSERT INTO public.header_cids (block_number, block_hash, cid, uncle) VALUES ($1, $2, $3, $4)
ON CONFLICT (block_number, block_hash) DO UPDATE SET (cid, uncle) = ($3, $4)`,
blockNumber, hash, cid, true)
return err
}
func (repo *Repository) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *ipfs.CIDPayload, headerID int64) error {
for hash, trxCidMeta := range payload.TransactionCIDs {
var txID int64
queryErr := tx.QueryRowx(`INSERT INTO public.transaction_cids (header_id, tx_hash, cid, dst, src) VALUES ($1, $2, $3, $4, $5)
ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src) = ($3, $4, $5)
RETURNING id`,
headerID, hash.Hex(), trxCidMeta.CID, trxCidMeta.Dst, trxCidMeta.Src).Scan(&txID)
if queryErr != nil {
return queryErr
}
receiptCidMeta, ok := payload.ReceiptCIDs[hash]
if ok {
rctErr := repo.indexReceiptCID(tx, receiptCidMeta, txID)
if rctErr != nil {
return rctErr
}
}
}
return nil
}
func (repo *Repository) indexReceiptCID(tx *sqlx.Tx, cidMeta *ipfs.ReceiptMetaData, txID int64) error {
_, err := tx.Exec(`INSERT INTO public.receipt_cids (tx_id, cid, contract, topic0s) VALUES ($1, $2, $3, $4)`,
txID, cidMeta.CID, cidMeta.ContractAddress, pq.Array(cidMeta.Topic0s))
return err
}
func (repo *Repository) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *ipfs.CIDPayload, headerID int64) error {
for accountKey, stateCID := range payload.StateNodeCIDs {
var stateID int64
queryErr := tx.QueryRowx(`INSERT INTO public.state_cids (header_id, state_key, cid, leaf) VALUES ($1, $2, $3, $4)
ON CONFLICT (header_id, state_key) DO UPDATE SET (cid, leaf) = ($3, $4)
RETURNING id`,
headerID, accountKey.Hex(), stateCID.CID, stateCID.Leaf).Scan(&stateID)
if queryErr != nil {
return queryErr
}
for _, storageCID := range payload.StorageNodeCIDs[accountKey] {
storageErr := repo.indexStorageCID(tx, storageCID, stateID)
if storageErr != nil {
return storageErr
}
}
}
return nil
}
func (repo *Repository) indexStorageCID(tx *sqlx.Tx, storageCID ipfs.StorageNodeCID, stateID int64) error {
_, err := tx.Exec(`INSERT INTO public.storage_cids (state_id, storage_key, cid, leaf) VALUES ($1, $2, $3, $4)
ON CONFLICT (state_id, storage_key) DO UPDATE SET (cid, leaf) = ($3, $4)`,
stateID, storageCID.Key, storageCID.CID, storageCID.Leaf)
return err
}
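Because every INSERT in this repository carries an ON CONFLICT ... DO UPDATE clause, indexing is idempotent; a hedged illustration (repo and cidPayload assumed from the surrounding code, not part of this change):

if err := repo.Index(cidPayload); err != nil {
	log.Fatal(err)
}
// Indexing the same payload again upserts in place rather than violating the
// unique (block_number, block_hash), (header_id, tx_hash), etc. constraints.
if err := repo.Index(cidPayload); err != nil {
	log.Fatal(err)
}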

View File

@ -1,336 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package super_node
import (
"math/big"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)
// CIDRetriever is the interface for retrieving CIDs from the Postgres cache
type CIDRetriever interface {
RetrieveCIDs(streamFilters config.Subscription, blockNumber int64) (*ipfs.CIDWrapper, error)
RetrieveLastBlockNumber() (int64, error)
RetrieveFirstBlockNumber() (int64, error)
RetrieveGapsInData() ([][2]uint64, error)
}
// EthCIDRetriever is the underlying struct supporting the CIDRetriever interface
type EthCIDRetriever struct {
db *postgres.DB
}
// NewCIDRetriever returns a pointer to a new EthCIDRetriever which supports the CIDRetriever interface
func NewCIDRetriever(db *postgres.DB) *EthCIDRetriever {
return &EthCIDRetriever{
db: db,
}
}
// RetrieveFirstBlockNumber is used to retrieve the first block number in the db
func (ecr *EthCIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
var blockNumber int64
err := ecr.db.Get(&blockNumber, "SELECT block_number FROM header_cids ORDER BY block_number ASC LIMIT 1")
return blockNumber, err
}
// RetrieveLastBlockNumber is used to retrieve the latest block number in the db
func (ecr *EthCIDRetriever) RetrieveLastBlockNumber() (int64, error) {
var blockNumber int64
err := ecr.db.Get(&blockNumber, "SELECT block_number FROM header_cids ORDER BY block_number DESC LIMIT 1 ")
return blockNumber, err
}
// RetrieveCIDs is used to retrieve all of the CIDs which conform to the passed StreamFilters
func (ecr *EthCIDRetriever) RetrieveCIDs(streamFilters config.Subscription, blockNumber int64) (*ipfs.CIDWrapper, error) {
log.Debug("retrieving cids")
tx, beginErr := ecr.db.Beginx()
if beginErr != nil {
return nil, beginErr
}
// THIS IS SUPER EXPENSIVE HAVING TO CYCLE THROUGH EACH BLOCK, NEED BETTER WAY TO FETCH CIDS
// WHILE STILL MAINTAINING RELATION INFO ABOUT WHAT BLOCK THE CIDS BELONG TO
cw := new(ipfs.CIDWrapper)
cw.BlockNumber = big.NewInt(blockNumber)
// Retrieve cached header CIDs
if !streamFilters.HeaderFilter.Off {
var headersErr error
cw.Headers, headersErr = ecr.retrieveHeaderCIDs(tx, streamFilters, blockNumber)
if headersErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Error(rollbackErr)
}
log.Error("header cid retrieval error")
return nil, headersErr
}
if streamFilters.HeaderFilter.Uncles {
var unclesErr error
cw.Uncles, unclesErr = ecr.retrieveUncleCIDs(tx, streamFilters, blockNumber)
if unclesErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Error(rollbackErr)
}
log.Error("uncle cid retrieval error")
return nil, unclesErr
}
}
}
// Retrieve cached trx CIDs
var trxIds []int64
if !streamFilters.TrxFilter.Off {
var trxsErr error
cw.Transactions, trxIds, trxsErr = ecr.retrieveTrxCIDs(tx, streamFilters, blockNumber)
if trxsErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Error(rollbackErr)
}
log.Error("transaction cid retrieval error")
return nil, trxsErr
}
}
// Retrieve cached receipt CIDs
if !streamFilters.ReceiptFilter.Off {
var rctsErr error
cw.Receipts, rctsErr = ecr.retrieveRctCIDs(tx, streamFilters, blockNumber, trxIds)
if rctsErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Error(rollbackErr)
}
log.Error("receipt cid retrieval error")
return nil, rctsErr
}
}
// Retrieve cached state CIDs
if !streamFilters.StateFilter.Off {
var stateErr error
cw.StateNodes, stateErr = ecr.retrieveStateCIDs(tx, streamFilters, blockNumber)
if stateErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Error(rollbackErr)
}
log.Error("state cid retrieval error")
return nil, stateErr
}
}
// Retrieve cached storage CIDs
if !streamFilters.StorageFilter.Off {
var storageErr error
cw.StorageNodes, storageErr = ecr.retrieveStorageCIDs(tx, streamFilters, blockNumber)
if storageErr != nil {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
log.Error(rollbackErr)
}
log.Error("storage cid retrieval error")
return nil, storageErr
}
}
return cw, tx.Commit()
}
func (ecr *EthCIDRetriever) retrieveHeaderCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64) ([]string, error) {
log.Debug("retrieving header cids for block ", blockNumber)
headers := make([]string, 0)
pgStr := `SELECT cid FROM header_cids
WHERE block_number = $1 AND uncle IS FALSE`
err := tx.Select(&headers, pgStr, blockNumber)
return headers, err
}
func (ecr *EthCIDRetriever) retrieveUncleCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64) ([]string, error) {
log.Debug("retrieving header cids for block ", blockNumber)
headers := make([]string, 0)
pgStr := `SELECT cid FROM header_cids
WHERE block_number = $1 AND uncle IS TRUE`
err := tx.Select(&headers, pgStr, blockNumber)
return headers, err
}
func (ecr *EthCIDRetriever) retrieveTrxCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64) ([]string, []int64, error) {
log.Debug("retrieving transaction cids for block ", blockNumber)
args := make([]interface{}, 0, 3)
type result struct {
ID int64 `db:"id"`
Cid string `db:"cid"`
}
results := make([]result, 0)
pgStr := `SELECT transaction_cids.id, transaction_cids.cid FROM transaction_cids INNER JOIN header_cids ON (transaction_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
args = append(args, blockNumber)
if len(streamFilters.TrxFilter.Dst) > 0 {
pgStr += ` AND transaction_cids.dst = ANY($2::VARCHAR(66)[])`
args = append(args, pq.Array(streamFilters.TrxFilter.Dst))
}
if len(streamFilters.TrxFilter.Src) > 0 {
pgStr += ` AND transaction_cids.src = ANY($3::VARCHAR(66)[])`
args = append(args, pq.Array(streamFilters.TrxFilter.Src))
}
err := tx.Select(&results, pgStr, args...)
if err != nil {
return nil, nil, err
}
ids := make([]int64, 0, len(results))
cids := make([]string, 0, len(results))
for _, res := range results {
cids = append(cids, res.Cid)
ids = append(ids, res.ID)
}
return cids, ids, nil
}
func (ecr *EthCIDRetriever) retrieveRctCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64, trxIds []int64) ([]string, error) {
log.Debug("retrieving receipt cids for block ", blockNumber)
args := make([]interface{}, 0, 4)
pgStr := `SELECT receipt_cids.cid FROM receipt_cids, transaction_cids, header_cids
WHERE receipt_cids.tx_id = transaction_cids.id
AND transaction_cids.header_id = header_cids.id
AND header_cids.block_number = $1`
args = append(args, blockNumber)
if len(streamFilters.ReceiptFilter.Topic0s) > 0 {
pgStr += ` AND ((receipt_cids.topic0s && $2::VARCHAR(66)[]`
args = append(args, pq.Array(streamFilters.ReceiptFilter.Topic0s))
if len(streamFilters.ReceiptFilter.Contracts) > 0 {
pgStr += ` AND receipt_cids.contract = ANY($3::VARCHAR(66)[]))`
args = append(args, pq.Array(streamFilters.ReceiptFilter.Contracts))
if len(trxIds) > 0 {
pgStr += ` OR receipt_cids.tx_id = ANY($4::INTEGER[]))`
args = append(args, pq.Array(trxIds))
} else {
pgStr += `)`
}
} else {
pgStr += `)`
if len(trxIds) > 0 {
pgStr += ` OR receipt_cids.tx_id = ANY($3::INTEGER[]))`
args = append(args, pq.Array(trxIds))
} else {
pgStr += `)`
}
}
} else {
if len(streamFilters.ReceiptFilter.Contracts) > 0 {
pgStr += ` AND (receipt_cids.contract = ANY($2::VARCHAR(66)[])`
args = append(args, pq.Array(streamFilters.ReceiptFilter.Contracts))
if len(trxIds) > 0 {
pgStr += ` OR receipt_cids.tx_id = ANY($3::INTEGER[]))`
args = append(args, pq.Array(trxIds))
} else {
pgStr += `)`
}
} else if len(trxIds) > 0 {
pgStr += ` AND receipt_cids.tx_id = ANY($2::INTEGER[])`
args = append(args, pq.Array(trxIds))
}
}
receiptCids := make([]string, 0)
err := tx.Select(&receiptCids, pgStr, args...)
return receiptCids, err
}
func (ecr *EthCIDRetriever) retrieveStateCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64) ([]ipfs.StateNodeCID, error) {
log.Debug("retrieving state cids for block ", blockNumber)
args := make([]interface{}, 0, 2)
pgStr := `SELECT state_cids.cid, state_cids.state_key, state_cids.leaf FROM state_cids INNER JOIN header_cids ON (state_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
args = append(args, blockNumber)
addrLen := len(streamFilters.StateFilter.Addresses)
if addrLen > 0 {
keys := make([]string, 0, addrLen)
for _, addr := range streamFilters.StateFilter.Addresses {
keys = append(keys, ipfs.HexToKey(addr).Hex())
}
pgStr += ` AND state_cids.state_key = ANY($2::VARCHAR(66)[])`
args = append(args, pq.Array(keys))
}
if !streamFilters.StorageFilter.IntermediateNodes {
pgStr += ` AND state_cids.leaf = TRUE`
}
stateNodeCIDs := make([]ipfs.StateNodeCID, 0)
err := tx.Select(&stateNodeCIDs, pgStr, args...)
return stateNodeCIDs, err
}
func (ecr *EthCIDRetriever) retrieveStorageCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64) ([]ipfs.StorageNodeCID, error) {
log.Debug("retrieving storage cids for block ", blockNumber)
args := make([]interface{}, 0, 3)
pgStr := `SELECT storage_cids.cid, state_cids.state_key, storage_cids.storage_key, storage_cids.leaf FROM storage_cids, state_cids, header_cids
WHERE storage_cids.state_id = state_cids.id
AND state_cids.header_id = header_cids.id
AND header_cids.block_number = $1`
args = append(args, blockNumber)
addrLen := len(streamFilters.StorageFilter.Addresses)
if addrLen > 0 {
keys := make([]string, 0, addrLen)
for _, addr := range streamFilters.StorageFilter.Addresses {
keys = append(keys, ipfs.HexToKey(addr).Hex())
}
pgStr += ` AND state_cids.state_key = ANY($2::VARCHAR(66)[])`
args = append(args, pq.Array(keys))
}
if len(streamFilters.StorageFilter.StorageKeys) > 0 {
pgStr += ` AND storage_cids.storage_key = ANY($3::VARCHAR(66)[])`
args = append(args, pq.Array(streamFilters.StorageFilter.StorageKeys))
}
if !streamFilters.StorageFilter.IntermediateNodes {
pgStr += ` AND storage_cids.leaf = TRUE`
}
storageNodeCIDs := make([]ipfs.StorageNodeCID, 0)
err := tx.Select(&storageNodeCIDs, pgStr, args...)
return storageNodeCIDs, err
}
type gap struct {
Start uint64 `db:"start"`
Stop uint64 `db:"stop"`
}
// RetrieveGapsInData is used to find the block numbers at which we are missing data in the db
func (ecr *EthCIDRetriever) RetrieveGapsInData() ([][2]uint64, error) {
pgStr := `SELECT header_cids.block_number + 1 AS start, min(fr.block_number) - 1 AS stop FROM header_cids
LEFT JOIN header_cids r on header_cids.block_number = r.block_number - 1
LEFT JOIN header_cids fr on header_cids.block_number < fr.block_number
WHERE r.block_number is NULL and fr.block_number IS NOT NULL
GROUP BY header_cids.block_number, r.block_number`
gaps := make([]gap, 0)
err := ecr.db.Select(&gaps, pgStr)
if err != nil {
return nil, err
}
gapRanges := make([][2]uint64, 0)
for _, gap := range gaps {
gapRanges = append(gapRanges, [2]uint64{gap.Start, gap.Stop})
}
return gapRanges, nil
}

View File

@ -1,453 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package super_node_test
import (
"math/big"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
)
var (
retriever super_node.CIDRetriever
openFilter = config.Subscription{
StartingBlock: big.NewInt(0),
EndingBlock: big.NewInt(1),
HeaderFilter: config.HeaderFilter{},
TrxFilter: config.TrxFilter{},
ReceiptFilter: config.ReceiptFilter{},
StateFilter: config.StateFilter{},
StorageFilter: config.StorageFilter{},
}
rctContractFilter = config.Subscription{
StartingBlock: big.NewInt(0),
EndingBlock: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
TrxFilter: config.TrxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
Contracts: []string{"0x0000000000000000000000000000000000000001"},
},
StateFilter: config.StateFilter{
Off: true,
},
StorageFilter: config.StorageFilter{
Off: true,
},
}
rctTopicsFilter = config.Subscription{
StartingBlock: big.NewInt(0),
EndingBlock: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
TrxFilter: config.TrxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
Topic0s: []string{"0x0000000000000000000000000000000000000000000000000000000000000004"},
},
StateFilter: config.StateFilter{
Off: true,
},
StorageFilter: config.StorageFilter{
Off: true,
},
}
rctTopicsAndContractFilter = config.Subscription{
StartingBlock: big.NewInt(0),
EndingBlock: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
TrxFilter: config.TrxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
Topic0s: []string{"0x0000000000000000000000000000000000000000000000000000000000000004", "0x0000000000000000000000000000000000000000000000000000000000000005"},
Contracts: []string{"0x0000000000000000000000000000000000000000"},
},
StateFilter: config.StateFilter{
Off: true,
},
StorageFilter: config.StorageFilter{
Off: true,
},
}
rctContractsAndTopicFilter = config.Subscription{
StartingBlock: big.NewInt(0),
EndingBlock: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
TrxFilter: config.TrxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
Topic0s: []string{"0x0000000000000000000000000000000000000000000000000000000000000005"},
Contracts: []string{"0x0000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000001"},
},
StateFilter: config.StateFilter{
Off: true,
},
StorageFilter: config.StorageFilter{
Off: true,
},
}
rctsForAllCollectedTrxs = config.Subscription{
StartingBlock: big.NewInt(0),
EndingBlock: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
TrxFilter: config.TrxFilter{}, // Trx filter open so we will collect all trxs, therefore we will also collect all corresponding rcts despite rct filter
ReceiptFilter: config.ReceiptFilter{
Topic0s: []string{"0x0000000000000000000000000000000000000000000000000000000000000006"}, // Topic isn't one of the topics we have
Contracts: []string{"0x0000000000000000000000000000000000000002"}, // Contract isn't one of the contracts we have
},
StateFilter: config.StateFilter{
Off: true,
},
StorageFilter: config.StorageFilter{
Off: true,
},
}
rctsForSelectCollectedTrxs = config.Subscription{
StartingBlock: big.NewInt(0),
EndingBlock: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
TrxFilter: config.TrxFilter{
Dst: []string{"0x0000000000000000000000000000000000000001"}, // We only filter for one of the trxs so we will only get the one corresponding receipt
},
ReceiptFilter: config.ReceiptFilter{
Topic0s: []string{"0x0000000000000000000000000000000000000000000000000000000000000006"}, // Topic isn't one of the topics we have
Contracts: []string{"0x0000000000000000000000000000000000000002"}, // Contract isn't one of the contracts we have
},
StateFilter: config.StateFilter{
Off: true,
},
StorageFilter: config.StorageFilter{
Off: true,
},
}
stateFilter = config.Subscription{
StartingBlock: big.NewInt(0),
EndingBlock: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
TrxFilter: config.TrxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
Off: true,
},
StateFilter: config.StateFilter{
Addresses: []string{mocks.Address.Hex()},
},
StorageFilter: config.StorageFilter{
Off: true,
},
}
)
var _ = Describe("Retriever", func() {
var (
db *postgres.DB
repo super_node.CIDRepository
)
BeforeEach(func() {
var err error
db, err = super_node.SetupDB()
Expect(err).ToNot(HaveOccurred())
repo = super_node.NewCIDRepository(db)
retriever = super_node.NewCIDRetriever(db)
})
AfterEach(func() {
super_node.TearDownDB(db)
})
Describe("RetrieveCIDs", func() {
BeforeEach(func() {
indexErr := repo.Index(mocks.MockCIDPayload)
Expect(indexErr).ToNot(HaveOccurred())
})
It("Retrieves all CIDs for the given blocknumber when provided an open filter", func() {
cidWrapper, err := retriever.RetrieveCIDs(openFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(cidWrapper.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper.Headers)).To(Equal(1))
Expect(cidWrapper.Headers).To(Equal(mocks.MockCIDWrapper.Headers))
Expect(len(cidWrapper.Transactions)).To(Equal(2))
Expect(super_node.ListContainsString(cidWrapper.Transactions, mocks.MockCIDWrapper.Transactions[0])).To(BeTrue())
Expect(super_node.ListContainsString(cidWrapper.Transactions, mocks.MockCIDWrapper.Transactions[1])).To(BeTrue())
Expect(len(cidWrapper.Receipts)).To(Equal(2))
Expect(super_node.ListContainsString(cidWrapper.Receipts, mocks.MockCIDWrapper.Receipts[0])).To(BeTrue())
Expect(super_node.ListContainsString(cidWrapper.Receipts, mocks.MockCIDWrapper.Receipts[1])).To(BeTrue())
Expect(len(cidWrapper.StateNodes)).To(Equal(2))
for _, stateNode := range cidWrapper.StateNodes {
if stateNode.CID == "mockStateCID1" {
Expect(stateNode.Key).To(Equal(mocks.ContractLeafKey.Hex()))
Expect(stateNode.Leaf).To(Equal(true))
}
if stateNode.CID == "mockStateCID2" {
Expect(stateNode.Key).To(Equal(mocks.AnotherContractLeafKey.Hex()))
Expect(stateNode.Leaf).To(Equal(true))
}
}
Expect(len(cidWrapper.StorageNodes)).To(Equal(1))
Expect(cidWrapper.StorageNodes).To(Equal(mocks.MockCIDWrapper.StorageNodes))
})
It("Applies filters from the provided config.Subscription", func() {
cidWrapper1, err1 := retriever.RetrieveCIDs(rctContractFilter, 1)
Expect(err1).ToNot(HaveOccurred())
Expect(cidWrapper1.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper1.Headers)).To(Equal(0))
Expect(len(cidWrapper1.Transactions)).To(Equal(0))
Expect(len(cidWrapper1.StateNodes)).To(Equal(0))
Expect(len(cidWrapper1.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper1.Receipts)).To(Equal(1))
Expect(cidWrapper1.Receipts[0]).To(Equal("mockRctCID2"))
cidWrapper2, err2 := retriever.RetrieveCIDs(rctTopicsFilter, 1)
Expect(err2).ToNot(HaveOccurred())
Expect(cidWrapper2.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper2.Headers)).To(Equal(0))
Expect(len(cidWrapper2.Transactions)).To(Equal(0))
Expect(len(cidWrapper2.StateNodes)).To(Equal(0))
Expect(len(cidWrapper2.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper2.Receipts)).To(Equal(1))
Expect(cidWrapper2.Receipts[0]).To(Equal("mockRctCID1"))
cidWrapper3, err3 := retriever.RetrieveCIDs(rctTopicsAndContractFilter, 1)
Expect(err3).ToNot(HaveOccurred())
Expect(cidWrapper3.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper3.Headers)).To(Equal(0))
Expect(len(cidWrapper3.Transactions)).To(Equal(0))
Expect(len(cidWrapper3.StateNodes)).To(Equal(0))
Expect(len(cidWrapper3.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper3.Receipts)).To(Equal(1))
Expect(cidWrapper3.Receipts[0]).To(Equal("mockRctCID1"))
cidWrapper4, err4 := retriever.RetrieveCIDs(rctContractsAndTopicFilter, 1)
Expect(err4).ToNot(HaveOccurred())
Expect(cidWrapper4.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper4.Headers)).To(Equal(0))
Expect(len(cidWrapper4.Transactions)).To(Equal(0))
Expect(len(cidWrapper4.StateNodes)).To(Equal(0))
Expect(len(cidWrapper4.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper4.Receipts)).To(Equal(1))
Expect(cidWrapper4.Receipts[0]).To(Equal("mockRctCID2"))
cidWrapper5, err5 := retriever.RetrieveCIDs(rctsForAllCollectedTrxs, 1)
Expect(err5).ToNot(HaveOccurred())
Expect(cidWrapper5.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper5.Headers)).To(Equal(0))
Expect(len(cidWrapper5.Transactions)).To(Equal(2))
Expect(super_node.ListContainsString(cidWrapper5.Transactions, "mockTrxCID1")).To(BeTrue())
Expect(super_node.ListContainsString(cidWrapper5.Transactions, "mockTrxCID2")).To(BeTrue())
Expect(len(cidWrapper5.StateNodes)).To(Equal(0))
Expect(len(cidWrapper5.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper5.Receipts)).To(Equal(2))
Expect(super_node.ListContainsString(cidWrapper5.Receipts, "mockRctCID1")).To(BeTrue())
Expect(super_node.ListContainsString(cidWrapper5.Receipts, "mockRctCID2")).To(BeTrue())
cidWrapper6, err6 := retriever.RetrieveCIDs(rctsForSelectCollectedTrxs, 1)
Expect(err6).ToNot(HaveOccurred())
Expect(cidWrapper6.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper6.Headers)).To(Equal(0))
Expect(len(cidWrapper6.Transactions)).To(Equal(1))
Expect(cidWrapper6.Transactions[0]).To(Equal("mockTrxCID2"))
Expect(len(cidWrapper6.StateNodes)).To(Equal(0))
Expect(len(cidWrapper6.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper6.Receipts)).To(Equal(1))
Expect(cidWrapper6.Receipts[0]).To(Equal("mockRctCID2"))
cidWrapper7, err7 := retriever.RetrieveCIDs(stateFilter, 1)
Expect(err7).ToNot(HaveOccurred())
Expect(cidWrapper7.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper7.Headers)).To(Equal(0))
Expect(len(cidWrapper7.Transactions)).To(Equal(0))
Expect(len(cidWrapper7.Receipts)).To(Equal(0))
Expect(len(cidWrapper7.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper7.StateNodes)).To(Equal(1))
Expect(cidWrapper7.StateNodes[0]).To(Equal(ipfs.StateNodeCID{
Leaf: true,
Key: mocks.ContractLeafKey.Hex(),
CID: "mockStateCID1",
}))
})
})
Describe("RetrieveFirstBlockNumber", func() {
It("Gets the number of the first block that has data in the database", func() {
indexErr := repo.Index(mocks.MockCIDPayload)
Expect(indexErr).ToNot(HaveOccurred())
num, retrieveErr := retriever.RetrieveFirstBlockNumber()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1)))
})
It("Gets the number of the first block that has data in the database", func() {
payload := *mocks.MockCIDPayload
payload.BlockNumber = "1010101"
indexErr := repo.Index(&payload)
Expect(indexErr).ToNot(HaveOccurred())
num, retrieveErr := retriever.RetrieveFirstBlockNumber()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101)))
})
It("Gets the number of the first block that has data in the database", func() {
payload1 := *mocks.MockCIDPayload
payload1.BlockNumber = "1010101"
payload2 := payload1
payload2.BlockNumber = "5"
indexErr := repo.Index(&payload1)
Expect(indexErr).ToNot(HaveOccurred())
indexErr2 := repo.Index(&payload2)
Expect(indexErr2).ToNot(HaveOccurred())
num, retrieveErr := retriever.RetrieveFirstBlockNumber()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(5)))
})
})
Describe("RetrieveLastBlockNumber", func() {
It("Gets the number of the latest block that has data in the database", func() {
indexErr := repo.Index(mocks.MockCIDPayload)
Expect(indexErr).ToNot(HaveOccurred())
num, retrieveErr := retriever.RetrieveLastBlockNumber()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1)))
})
It("Gets the number of the latest block that has data in the database", func() {
payload := *mocks.MockCIDPayload
payload.BlockNumber = "1010101"
indexErr := repo.Index(&payload)
Expect(indexErr).ToNot(HaveOccurred())
num, retrieveErr := retriever.RetrieveLastBlockNumber()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101)))
})
It("Gets the number of the latest block that has data in the database", func() {
payload1 := *mocks.MockCIDPayload
payload1.BlockNumber = "1010101"
payload2 := payload1
payload2.BlockNumber = "5"
indexErr := repo.Index(&payload1)
Expect(indexErr).ToNot(HaveOccurred())
indexErr2 := repo.Index(&payload2)
Expect(indexErr2).ToNot(HaveOccurred())
num, retrieveErr := retriever.RetrieveLastBlockNumber()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101)))
})
})
Describe("RetrieveGapsInData", func() {
It("Doesn't return gaps if there are none", func() {
payload1 := *mocks.MockCIDPayload
payload1.BlockNumber = "2"
payload2 := payload1
payload2.BlockNumber = "3"
indexErr1 := repo.Index(mocks.MockCIDPayload)
Expect(indexErr1).ToNot(HaveOccurred())
indexErr2 := repo.Index(&payload1)
Expect(indexErr2).ToNot(HaveOccurred())
indexErr3 := repo.Index(&payload2)
Expect(indexErr3).ToNot(HaveOccurred())
gaps, retrieveErr := retriever.RetrieveGapsInData()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(len(gaps)).To(Equal(0))
})
It("Doesn't return the gap from 0 to the earliest block", func() {
payload := *mocks.MockCIDPayload
payload.BlockNumber = "5"
indexErr := repo.Index(&payload)
Expect(indexErr).ToNot(HaveOccurred())
gaps, retrieveErr := retriever.RetrieveGapsInData()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(len(gaps)).To(Equal(0))
})
It("Finds gap between two entries", func() {
payload1 := *mocks.MockCIDPayload
payload1.BlockNumber = "1010101"
payload2 := payload1
payload2.BlockNumber = "5"
indexErr := repo.Index(&payload1)
Expect(indexErr).ToNot(HaveOccurred())
indexErr2 := repo.Index(&payload2)
Expect(indexErr2).ToNot(HaveOccurred())
gaps, retrieveErr := retriever.RetrieveGapsInData()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(len(gaps)).To(Equal(1))
Expect(gaps[0][0]).To(Equal(uint64(6)))
Expect(gaps[0][1]).To(Equal(uint64(1010100)))
})
It("Finds gaps between multiple entries", func() {
payload1 := *mocks.MockCIDPayload
payload1.BlockNumber = "1010101"
payload2 := payload1
payload2.BlockNumber = "5"
payload3 := payload2
payload3.BlockNumber = "100"
payload4 := payload3
payload4.BlockNumber = "101"
payload5 := payload4
payload5.BlockNumber = "102"
payload6 := payload5
payload6.BlockNumber = "1000"
indexErr := repo.Index(&payload1)
Expect(indexErr).ToNot(HaveOccurred())
indexErr2 := repo.Index(&payload2)
Expect(indexErr2).ToNot(HaveOccurred())
indexErr3 := repo.Index(&payload3)
Expect(indexErr3).ToNot(HaveOccurred())
indexErr4 := repo.Index(&payload4)
Expect(indexErr4).ToNot(HaveOccurred())
indexErr5 := repo.Index(&payload5)
Expect(indexErr5).ToNot(HaveOccurred())
indexErr6 := repo.Index(&payload6)
Expect(indexErr6).ToNot(HaveOccurred())
gaps, retrieveErr := retriever.RetrieveGapsInData()
Expect(retrieveErr).ToNot(HaveOccurred())
Expect(len(gaps)).To(Equal(3))
Expect(super_node.ListContainsRange(gaps, [2]uint64{6, 99})).To(BeTrue())
Expect(super_node.ListContainsRange(gaps, [2]uint64{103, 999})).To(BeTrue())
Expect(super_node.ListContainsRange(gaps, [2]uint64{1001, 1010100})).To(BeTrue())
})
})
})

View File

@ -17,6 +17,7 @@
package super_node
import (
"fmt"
"sync"
"github.com/ethereum/go-ethereum/common"
@ -26,35 +27,34 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
"github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
"github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
const (
payloadChanBufferSize = 20000 // the max eth sub buffer size
PayloadChanBufferSize = 20000
)
// NodeInterface is the top level interface for streaming, converting to IPLDs, publishing,
// SuperNode is the top level interface for streaming, converting to IPLDs, publishing,
// and indexing all Ethereum data; screening this data; and serving it up to subscribed clients
// This service is compatible with the Ethereum service interface (node.Service)
type NodeInterface interface {
type SuperNode interface {
// APIs(), Protocols(), Start() and Stop()
node.Service
// Main event loop for syncAndPublish processes
SyncAndPublish(wg *sync.WaitGroup, forwardPayloadChan chan<- ipfs.IPLDPayload, forwardQuitchan chan<- bool) error
SyncAndPublish(wg *sync.WaitGroup, forwardPayloadChan chan<- interface{}, forwardQuitchan chan<- bool) error
// Main event loop for handling client pub-sub
ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan ipfs.IPLDPayload, screenAndServeQuit <-chan bool)
ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan interface{}, screenAndServeQuit <-chan bool)
// Method to subscribe to receive state diff processing output
Subscribe(id rpc.ID, sub chan<- streamer.SuperNodePayload, quitChan chan<- bool, streamFilters config.Subscription)
Subscribe(id rpc.ID, sub chan<- Payload, quitChan chan<- bool, params SubscriptionSettings)
// Method to unsubscribe from state diff processing
Unsubscribe(id rpc.ID)
// Method to access the Geth node info for this service
// Method to access the node info for this service
Node() core.Node
}
@ -62,66 +62,96 @@ type NodeInterface interface {
type Service struct {
// Used to sync access to the Subscriptions
sync.Mutex
// Interface for streaming statediff payloads over a geth rpc subscription
Streamer streamer.Streamer
// Interface for converting statediff payloads into ETH-IPLD object payloads
Converter ipfs.PayloadConverter
// Interface for publishing the ETH-IPLD payloads to IPFS
Publisher ipfs.IPLDPublisher
// Interface for indexing the CIDs of the published ETH-IPLDs in Postgres
Repository CIDRepository
// Interface for streaming payloads over an rpc subscription
Streamer shared.PayloadStreamer
// Interface for converting raw payloads into IPLD object payloads
Converter shared.PayloadConverter
// Interface for publishing the IPLD payloads to IPFS
Publisher shared.IPLDPublisher
// Interface for indexing the CIDs of the published IPLDs in Postgres
Indexer shared.CIDIndexer
// Interface for filtering and serving data to subscribed clients according to their specifications
Filterer ResponseFilterer
// Interface for fetching ETH-IPLD objects from IPFS
IPLDFetcher ipfs.IPLDFetcher
Filterer shared.ResponseFilterer
// Interface for fetching IPLD objects from IPFS
IPLDFetcher shared.IPLDFetcher
// Interface for searching and retrieving CIDs from Postgres index
Retriever CIDRetriever
// Interface for resolving ipfs blocks to their data types
Resolver ipfs.IPLDResolver
// Chan the processor uses to subscribe to state diff payloads from the Streamer
PayloadChan chan statediff.Payload
Retriever shared.CIDRetriever
// Interface for resolving IPLDs to their data types
Resolver shared.IPLDResolver
// Chan the processor uses to subscribe to payloads from the Streamer
PayloadChan chan interface{}
// Used to signal shutdown of the service
QuitChan chan bool
// A mapping of rpc.IDs to their subscription channels, mapped to their subscription type (hash of the StreamFilters)
Subscriptions map[common.Hash]map[rpc.ID]Subscription
// A mapping of subscription hash type to the corresponding StreamFilters
SubscriptionTypes map[common.Hash]config.Subscription
// Number of workers
WorkerPoolSize int
// A mapping of subscription params hash to the corresponding subscription params
SubscriptionTypes map[common.Hash]SubscriptionSettings
// Info for the Geth node that this super node is working with
GethNode core.Node
NodeInfo core.Node
// Number of publishAndIndex workers
WorkerPoolSize int
// chain type for this service
chain config.ChainType
// Path to ipfs data dir
ipfsPath string
// Underlying db
db *postgres.DB
}
// NewSuperNode creates a new super_node.Interface using an underlying super_node.Service struct
func NewSuperNode(ipfsPath string, db *postgres.DB, rpcClient core.RPCClient, qc chan bool, workers int, node core.Node) (NodeInterface, error) {
ipfsInitErr := ipfs.InitIPFSPlugins()
if ipfsInitErr != nil {
return nil, ipfsInitErr
func NewSuperNode(settings *config.SuperNode) (SuperNode, error) {
if err := ipfs.InitIPFSPlugins(); err != nil {
return nil, err
}
publisher, newPublisherErr := ipfs.NewIPLDPublisher(ipfsPath)
if newPublisherErr != nil {
return nil, newPublisherErr
sn := new(Service)
var err error
// If we are syncing, initialize the needed interfaces
if settings.Sync {
sn.Streamer, sn.PayloadChan, err = NewPayloadStreamer(settings.Chain, settings.WSClient)
if err != nil {
return nil, err
}
sn.Converter, err = NewPayloadConverter(settings.Chain, params.MainnetChainConfig)
if err != nil {
return nil, err
}
sn.Publisher, err = NewIPLDPublisher(settings.Chain, settings.IPFSPath)
if err != nil {
return nil, err
}
sn.Indexer, err = NewCIDIndexer(settings.Chain, settings.DB)
if err != nil {
return nil, err
}
sn.Filterer, err = NewResponseFilterer(settings.Chain)
if err != nil {
return nil, err
}
}
ipldFetcher, newFetcherErr := ipfs.NewIPLDFetcher(ipfsPath)
if newFetcherErr != nil {
return nil, newFetcherErr
// If we are serving, initialize the needed interfaces
if settings.Serve {
sn.Retriever, err = NewCIDRetriever(settings.Chain, settings.DB)
if err != nil {
return nil, err
}
sn.IPLDFetcher, err = NewIPLDFetcher(settings.Chain, settings.IPFSPath)
if err != nil {
return nil, err
}
sn.Resolver, err = NewIPLDResolver(settings.Chain)
if err != nil {
return nil, err
}
}
return &Service{
Streamer: streamer.NewStateDiffStreamer(rpcClient),
Repository: NewCIDRepository(db),
Converter: ipfs.NewPayloadConverter(params.MainnetChainConfig),
Publisher: publisher,
Filterer: NewResponseFilterer(),
IPLDFetcher: ipldFetcher,
Retriever: NewCIDRetriever(db),
Resolver: ipfs.NewIPLDResolver(),
PayloadChan: make(chan statediff.Payload, payloadChanBufferSize),
QuitChan: qc,
Subscriptions: make(map[common.Hash]map[rpc.ID]Subscription),
SubscriptionTypes: make(map[common.Hash]config.Subscription),
WorkerPoolSize: workers,
GethNode: node,
}, nil
sn.QuitChan = settings.Quit
sn.Subscriptions = make(map[common.Hash]map[rpc.ID]Subscription)
sn.SubscriptionTypes = make(map[common.Hash]SubscriptionSettings)
sn.WorkerPoolSize = settings.Workers
sn.NodeInfo = settings.NodeInfo
sn.ipfsPath = settings.IPFSPath
sn.chain = settings.Chain
sn.db = settings.DB
return sn, nil
}
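// An illustrative sketch, not part of this commit, of calling the constructor above; the field
// names follow the settings usage in NewSuperNode, while the values, the core.RPCClient type for
// WSClient, the config.Ethereum constant, and the newSuperNode helper are all assumptions.
func newSuperNode(db *postgres.DB, wsClient core.RPCClient, nodeInfo core.Node) (SuperNode, error) {
	settings := &config.SuperNode{
		Chain:    config.Ethereum, // assumed ChainType constant
		Sync:     true,            // stream, convert, publish, and index new data
		Serve:    true,            // filter and serve indexed data to subscribers
		Workers:  4,
		IPFSPath: "/root/.ipfs",
		DB:       db,
		WSClient: wsClient,
		NodeInfo: nodeInfo,
		Quit:     make(chan bool),
	}
	return NewSuperNode(settings)
}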
// Protocols exports the services p2p protocols, this service has none
@ -131,7 +161,7 @@ func (sap *Service) Protocols() []p2p.Protocol {
// APIs returns the RPC descriptors the super node service offers
func (sap *Service) APIs() []rpc.API {
return []rpc.API{
apis := []rpc.API{
{
Namespace: APIName,
Version: APIVersion,
@ -139,20 +169,26 @@ func (sap *Service) APIs() []rpc.API {
Public: true,
},
}
chainAPI, err := NewPublicAPI(sap.chain, sap.db, sap.ipfsPath)
if err != nil {
log.Error(err)
return apis
}
return append(apis, chainAPI)
}
// SyncAndPublish is the backend processing loop which streams data from geth, converts it to IPLDs, publishes them to IPFS, and indexes their CIDs
// This continues regardless of whether or how many subscribers there are; it also forwards the data to the ScreenAndServe() loop,
// which filters and sends relevant data to client subscriptions, if there are any
func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload chan<- ipfs.IPLDPayload, screenAndServeQuit chan<- bool) error {
sub, streamErr := sap.Streamer.Stream(sap.PayloadChan)
if streamErr != nil {
return streamErr
func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload chan<- interface{}, screenAndServeQuit chan<- bool) error {
sub, err := sap.Streamer.Stream(sap.PayloadChan)
if err != nil {
return err
}
wg.Add(1)
// Channels for forwarding data to the publishAndIndex workers
publishAndIndexPayload := make(chan ipfs.IPLDPayload, payloadChanBufferSize)
publishAndIndexPayload := make(chan interface{}, PayloadChanBufferSize)
publishAndIndexQuit := make(chan bool, sap.WorkerPoolSize)
// publishAndIndex worker pool to handle publishing and indexing concurrently, while
// limiting the number of Postgres connections we can possibly open so as to prevent error
@ -163,30 +199,30 @@ func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload cha
for {
select {
case payload := <-sap.PayloadChan:
ipldPayload, convertErr := sap.Converter.Convert(payload)
if convertErr != nil {
log.Error(convertErr)
ipldPayload, err := sap.Converter.Convert(payload)
if err != nil {
log.Error(err)
continue
}
// If we have a ScreenAndServe process running, forward the payload to it
select {
case screenAndServePayload <- *ipldPayload:
case screenAndServePayload <- ipldPayload:
default:
}
// Forward the payload to the publishAndIndex workers
select {
case publishAndIndexPayload <- *ipldPayload:
case publishAndIndexPayload <- ipldPayload:
default:
}
case subErr := <-sub.Err():
log.Error(subErr)
case err := <-sub.Err():
log.Error(err)
case <-sap.QuitChan:
// If we have a ScreenAndServe process running, forward the quit signal to it
select {
case screenAndServeQuit <- true:
default:
}
// Also forward a quit signal for each of the workers
// Also forward a quit signal for each of the publishAndIndex workers
for i := 0; i < sap.WorkerPoolSize; i++ {
select {
case publishAndIndexQuit <- true:
@ -203,19 +239,18 @@ func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload cha
return nil
}
func (sap *Service) publishAndIndex(id int, publishAndIndexPayload <-chan ipfs.IPLDPayload, publishAndIndexQuit <-chan bool) {
func (sap *Service) publishAndIndex(id int, publishAndIndexPayload <-chan interface{}, publishAndIndexQuit <-chan bool) {
go func() {
for {
select {
case payload := <-publishAndIndexPayload:
cidPayload, publishErr := sap.Publisher.Publish(&payload)
if publishErr != nil {
log.Errorf("worker %d error: %v", id, publishErr)
cidPayload, err := sap.Publisher.Publish(payload)
if err != nil {
log.Errorf("worker %d error: %v", id, err)
continue
}
indexErr := sap.Repository.Index(cidPayload)
if indexErr != nil {
log.Errorf("worker %d error: %v", id, indexErr)
if err := sap.Indexer.Index(cidPayload); err != nil {
log.Errorf("worker %d error: %v", id, err)
}
case <-publishAndIndexQuit:
log.Infof("quiting publishAndIndex worker %d", id)
@ -228,16 +263,13 @@ func (sap *Service) publishAndIndex(id int, publishAndIndexPayload <-chan ipfs.I
// ScreenAndServe is the loop used to screen data streamed from the state diffing eth node
// and send the appropriate portions of it to requesting client subscriptions, according to their subscription configurations
func (sap *Service) ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan ipfs.IPLDPayload, screenAndServeQuit <-chan bool) {
func (sap *Service) ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan interface{}, screenAndServeQuit <-chan bool) {
wg.Add(1)
go func() {
for {
select {
case payload := <-screenAndServePayload:
sendErr := sap.sendResponse(payload)
if sendErr != nil {
log.Error(sendErr)
}
sap.sendResponse(payload)
case <-screenAndServeQuit:
log.Info("quiting ScreenAndServe process")
wg.Done()
@ -248,23 +280,25 @@ func (sap *Service) ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-c
log.Info("screenAndServe goroutine successfully spun up")
}
func (sap *Service) sendResponse(payload ipfs.IPLDPayload) error {
func (sap *Service) sendResponse(payload interface{}) {
sap.Lock()
for ty, subs := range sap.Subscriptions {
// Retrieve the subscription parameters for this subscription type
subConfig, ok := sap.SubscriptionTypes[ty]
if !ok {
log.Errorf("subscription configuration for subscription type %s not available", ty.Hex())
sap.closeType(ty)
continue
}
response, filterErr := sap.Filterer.FilterResponse(subConfig, payload)
if filterErr != nil {
log.Error(filterErr)
response, err := sap.Filterer.Filter(subConfig, payload)
if err != nil {
log.Error(err)
sap.closeType(ty)
continue
}
for id, sub := range subs {
select {
case sub.PayloadChan <- response:
case sub.PayloadChan <- Payload{response, ""}:
log.Infof("sending super node payload to subscription %s", id)
default:
log.Infof("unable to send payload to subscription %s; channel has no receiver", id)
@ -272,99 +306,102 @@ func (sap *Service) sendResponse(payload ipfs.IPLDPayload) error {
}
}
sap.Unlock()
return nil
}
// Subscribe is used by the API to subscribe to the service loop
func (sap *Service) Subscribe(id rpc.ID, sub chan<- streamer.SuperNodePayload, quitChan chan<- bool, streamFilters config.Subscription) {
// The params must be rlp serializable and satisfy the SubscriptionSettings interface
func (sap *Service) Subscribe(id rpc.ID, sub chan<- Payload, quitChan chan<- bool, params SubscriptionSettings) {
log.Info("Subscribing to the super node service")
// Subscription type is defined as the hash of its content
// Group subscriptions by type and screen payloads once for subs of the same type
by, encodeErr := rlp.EncodeToBytes(streamFilters)
if encodeErr != nil {
log.Error(encodeErr)
}
subscriptionHash := crypto.Keccak256(by)
subscriptionType := common.BytesToHash(subscriptionHash)
subscription := Subscription{
ID: id,
PayloadChan: sub,
QuitChan: quitChan,
}
if params.ChainType() != sap.chain {
sendNonBlockingErr(subscription, fmt.Errorf("subscription %s is for chain %s, service supports chain %s", id, params.ChainType().String(), sap.chain.String()))
sendNonBlockingQuit(subscription)
return
}
// Subscription type is defined as the hash of the subscription settings
by, err := rlp.EncodeToBytes(params)
if err != nil {
sendNonBlockingErr(subscription, err)
sendNonBlockingQuit(subscription)
return
}
subscriptionType := crypto.Keccak256Hash(by)
// If the subscription requests a backfill, use the Postgres index to look up and retrieve historical data
// Otherwise we only filter new data as it is streamed in from the state diffing geth node
if streamFilters.BackFill || streamFilters.BackFillOnly {
sap.backFill(subscription, id, streamFilters)
if params.HistoricalData() || params.HistoricalDataOnly() {
if err := sap.backFill(subscription, id, params); err != nil {
sendNonBlockingErr(subscription, err)
sendNonBlockingQuit(subscription)
return
}
}
if !streamFilters.BackFillOnly {
if !params.HistoricalDataOnly() {
// Add subscriber
sap.Lock()
if sap.Subscriptions[subscriptionType] == nil {
sap.Subscriptions[subscriptionType] = make(map[rpc.ID]Subscription)
}
sap.Subscriptions[subscriptionType][id] = subscription
sap.SubscriptionTypes[subscriptionType] = streamFilters
sap.SubscriptionTypes[subscriptionType] = params
sap.Unlock()
}
}
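// An illustrative sketch, not part of this commit, of consuming Subscribe in-process; params is
// assumed to be a chain-appropriate SubscriptionSettings implementation.
func consume(sn SuperNode, params SubscriptionSettings) {
	payloadChan := make(chan Payload, PayloadChanBufferSize)
	quitChan := make(chan bool, 1)
	sn.Subscribe(rpc.NewID(), payloadChan, quitChan, params)
	for {
		select {
		case payload := <-payloadChan:
			if payload.Err != "" {
				log.Error(payload.Err)
				continue
			}
			// payload.Data carries the chain-specific response, e.g. eth.StreamPayload
		case <-quitChan:
			return
		}
	}
}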
func (sap *Service) backFill(sub Subscription, id rpc.ID, con config.Subscription) {
log.Debug("back-filling data for id", id)
func (sap *Service) backFill(sub Subscription, id rpc.ID, params SubscriptionSettings) error {
log.Debug("sending historical data for subscriber", id)
// Retrieve cached CIDs relevant to this subscriber
var endingBlock int64
var startingBlock int64
var retrieveFirstBlockErr error
var retrieveLastBlockErr error
startingBlock, retrieveFirstBlockErr = sap.Retriever.RetrieveFirstBlockNumber()
if retrieveFirstBlockErr != nil {
sub.PayloadChan <- streamer.SuperNodePayload{
ErrMsg: "unable to set block range start; error: " + retrieveFirstBlockErr.Error(),
}
var err error
startingBlock, err = sap.Retriever.RetrieveFirstBlockNumber()
if err != nil {
return err
}
if startingBlock < con.StartingBlock.Int64() {
startingBlock = con.StartingBlock.Int64()
if startingBlock < params.StartingBlock().Int64() {
startingBlock = params.StartingBlock().Int64()
}
endingBlock, retrieveLastBlockErr = sap.Retriever.RetrieveLastBlockNumber()
if retrieveLastBlockErr != nil {
sub.PayloadChan <- streamer.SuperNodePayload{
ErrMsg: "unable to set block range end; error: " + retrieveLastBlockErr.Error(),
}
endingBlock, err = sap.Retriever.RetrieveLastBlockNumber()
if err != nil {
return err
}
if endingBlock > con.EndingBlock.Int64() && con.EndingBlock.Int64() > 0 && con.EndingBlock.Int64() > startingBlock {
endingBlock = con.EndingBlock.Int64()
if endingBlock > params.EndingBlock().Int64() && params.EndingBlock().Int64() > 0 && params.EndingBlock().Int64() > startingBlock {
endingBlock = params.EndingBlock().Int64()
}
log.Debug("backfill starting block:", con.StartingBlock)
log.Debug("backfill ending block:", endingBlock)
// Backfilled payloads are sent concurrently with the streamed payloads, so the receiver needs to pay attention to
// the block numbers in the payloads it receives to keep things in order
// TODO: separate backfill into a different rpc subscription method altogether?
log.Debug("historical data starting block:", params.StartingBlock())
log.Debug("histocial data ending block:", endingBlock)
go func() {
for i := startingBlock; i <= endingBlock; i++ {
cidWrapper, retrieveCIDsErr := sap.Retriever.RetrieveCIDs(con, i)
if retrieveCIDsErr != nil {
sub.PayloadChan <- streamer.SuperNodePayload{
ErrMsg: "CID retrieval error: " + retrieveCIDsErr.Error(),
}
cidWrapper, empty, err := sap.Retriever.Retrieve(params, i)
if err != nil {
sendNonBlockingErr(sub, fmt.Errorf("CID Retrieval error at block %d\r%s", i, err.Error()))
continue
}
if ipfs.EmptyCIDWrapper(*cidWrapper) {
if empty {
continue
}
blocksWrapper, fetchIPLDsErr := sap.IPLDFetcher.FetchIPLDs(*cidWrapper)
if fetchIPLDsErr != nil {
log.Error(fetchIPLDsErr)
sub.PayloadChan <- streamer.SuperNodePayload{
ErrMsg: "IPLD fetching error: " + fetchIPLDsErr.Error(),
}
blocksWrapper, err := sap.IPLDFetcher.Fetch(cidWrapper)
if err != nil {
sendNonBlockingErr(sub, fmt.Errorf("IPLD Fetching error at block %d\r%s", i, err.Error()))
continue
}
backFillIplds, err := sap.Resolver.Resolve(blocksWrapper)
if err != nil {
sendNonBlockingErr(sub, fmt.Errorf("IPLD Resolving error at block %d\r%s", i, err.Error()))
continue
}
backFillIplds := sap.Resolver.ResolveIPLDs(*blocksWrapper)
select {
case sub.PayloadChan <- backFillIplds:
log.Infof("sending super node back-fill payload to subscription %s", id)
case sub.PayloadChan <- Payload{backFillIplds, ""}:
log.Infof("sending super node historical data payload to subscription %s", id)
default:
log.Infof("unable to send back-fill payload to subscription %s; channel has no receiver", id)
}
}
}()
return nil
}
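// An illustrative sketch, not part of this commit, of the client-side ordering the comment above
// calls for: buffer out-of-order payloads by block number and release them sequentially.
// blockNumber is a hypothetical chain-specific accessor for a payload's height.
func orderPayloads(in <-chan Payload, out chan<- Payload, next int64) {
	pending := make(map[int64]Payload)
	for payload := range in {
		pending[blockNumber(payload)] = payload
		// flush the run of consecutive blocks that is now complete
		for {
			buffered, ok := pending[next]
			if !ok {
				break
			}
			out <- buffered
			delete(pending, next)
			next++
		}
	}
	close(out)
}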
// Unsubscribe is used to unsubscribe from the StateDiffingService loop
@ -386,7 +423,7 @@ func (sap *Service) Unsubscribe(id rpc.ID) {
func (sap *Service) Start(*p2p.Server) error {
log.Info("Starting super node service")
wg := new(sync.WaitGroup)
payloadChan := make(chan ipfs.IPLDPayload, payloadChanBufferSize)
payloadChan := make(chan interface{}, PayloadChanBufferSize)
quitChan := make(chan bool, 1)
if err := sap.SyncAndPublish(wg, payloadChan, quitChan); err != nil {
return err
@ -398,29 +435,37 @@ func (sap *Service) Start(*p2p.Server) error {
// Stop is used to close down the service
func (sap *Service) Stop() error {
log.Info("Stopping super node service")
sap.Lock()
close(sap.QuitChan)
sap.close()
sap.Unlock()
return nil
}
// Node returns the Geth node info for this service
// Node returns the node info for this service
func (sap *Service) Node() core.Node {
return sap.GethNode
return sap.NodeInfo
}
// close is used to close all listening subscriptions
// close needs to be called with subscription access locked
func (sap *Service) close() {
sap.Lock()
for ty, subs := range sap.Subscriptions {
for id, sub := range subs {
select {
case sub.QuitChan <- true:
log.Infof("closing subscription %s", id)
default:
log.Infof("unable to close subscription %s; channel has no receiver", id)
}
for subType, subs := range sap.Subscriptions {
for _, sub := range subs {
sendNonBlockingQuit(sub)
}
delete(sap.Subscriptions, ty)
delete(sap.SubscriptionTypes, ty)
delete(sap.Subscriptions, subType)
delete(sap.SubscriptionTypes, subType)
}
sap.Unlock()
}
// closeType is used to close all subscriptions of given type
// closeType needs to be called with subscription access locked
func (sap *Service) closeType(subType common.Hash) {
subs := sap.Subscriptions[subType]
for _, sub := range subs {
sendNonBlockingQuit(sub)
}
delete(sap.Subscriptions, subType)
delete(sap.SubscriptionTypes, subType)
}

View File

@ -25,38 +25,36 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
mocks2 "github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
mocks3 "github.com/vulcanize/vulcanizedb/pkg/super_node/mocks"
mocks2 "github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
)
var _ = Describe("Service", func() {
Describe("SyncAndPublish", func() {
It("Streams statediff.Payloads, converts them to IPLDPayloads, publishes IPLDPayloads, and indexes CIDPayloads", func() {
wg := new(sync.WaitGroup)
payloadChan := make(chan statediff.Payload, 1)
payloadChan := make(chan interface{}, 1)
quitChan := make(chan bool, 1)
mockCidRepo := &mocks3.CIDRepository{
mockCidIndexer := &mocks2.CIDIndexer{
ReturnErr: nil,
}
mockPublisher := &mocks.IPLDPublisher{
ReturnCIDPayload: mocks.MockCIDPayload,
mockPublisher := &mocks2.IPLDPublisher{
ReturnCIDPayload: mocks2.MockCIDPayload,
ReturnErr: nil,
}
mockStreamer := &mocks2.StateDiffStreamer{
ReturnSub: &rpc.ClientSubscription{},
StreamPayloads: []statediff.Payload{
mocks.MockStateDiffPayload,
mocks2.MockStateDiffPayload,
},
ReturnErr: nil,
}
mockConverter := &mocks.PayloadConverter{
ReturnIPLDPayload: mocks.MockIPLDPayload,
mockConverter := &mocks2.PayloadConverter{
ReturnIPLDPayload: mocks2.MockIPLDPayload,
ReturnErr: nil,
}
processor := &super_node.Service{
Repository: mockCidRepo,
Indexer: mockCidIndexer,
Publisher: mockPublisher,
Streamer: mockStreamer,
Converter: mockConverter,
@ -69,10 +67,10 @@ var _ = Describe("Service", func() {
time.Sleep(2 * time.Second)
quitChan <- true
wg.Wait()
Expect(mockConverter.PassedStatediffPayload).To(Equal(mocks.MockStateDiffPayload))
Expect(len(mockCidRepo.PassedCIDPayload)).To(Equal(1))
Expect(mockCidRepo.PassedCIDPayload[0]).To(Equal(mocks.MockCIDPayload))
Expect(mockPublisher.PassedIPLDPayload).To(Equal(mocks.MockIPLDPayload))
Expect(mockConverter.PassedStatediffPayload).To(Equal(mocks2.MockStateDiffPayload))
Expect(len(mockCidIndexer.PassedCIDPayload)).To(Equal(1))
Expect(mockCidIndexer.PassedCIDPayload[0]).To(Equal(mocks2.MockCIDPayload))
Expect(mockPublisher.PassedIPLDPayload).To(Equal(mocks2.MockIPLDPayload))
Expect(mockStreamer.PassedPayloadChan).To(Equal(payloadChan))
})
})

View File

@ -0,0 +1,49 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package shared
import "bytes"
// ListContainsString is used to check if a list of strings contains a particular string
func ListContainsString(sss []string, s string) bool {
for _, str := range sss {
if s == str {
return true
}
}
return false
}
// ListContainsBytes is used to check if a list of byte arrays contains a particular byte array
func ListContainsBytes(bbb [][]byte, b []byte) bool {
for _, by := range bbb {
if bytes.Equal(by, b) {
return true
}
}
return false
}
// ListContainsGap is used to check if a list of Gaps contains a particular Gap
func ListContainsGap(gapList []Gap, gap Gap) bool {
for _, listGap := range gapList {
if listGap == gap {
return true
}
}
return false
}

View File

@ -0,0 +1,63 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package shared
import (
"github.com/ethereum/go-ethereum/rpc"
)
// ResponseFilterer applies a filter to the streamed payload and returns a subscription response packet
type ResponseFilterer interface {
Filter(filter, payload interface{}) (response interface{}, err error)
}
// CIDIndexer indexes a set of cids with their associated meta data in Postgres
type CIDIndexer interface {
Index(cids interface{}) error
}
// CIDRetriever retrieves cids according to a provided filter and returns a wrapper for the matching cids, a flag for whether the result is empty, and an error
type CIDRetriever interface {
Retrieve(filter interface{}, blockNumber int64) (interface{}, bool, error)
RetrieveFirstBlockNumber() (int64, error)
RetrieveLastBlockNumber() (int64, error)
RetrieveGapsInData() ([]Gap, error)
}
// PayloadStreamer streams raw chain payloads over an rpc subscription into the provided channel
type PayloadStreamer interface {
Stream(payloadChan chan interface{}) (*rpc.ClientSubscription, error)
}
// PayloadFetcher fetches raw chain payloads at the provided block heights
type PayloadFetcher interface {
FetchAt(blockHeights []uint64) ([]interface{}, error)
}
// IPLDFetcher fetches the IPLD objects for a set of cids
type IPLDFetcher interface {
Fetch(cids interface{}) (interface{}, error)
}
// PayloadConverter converts raw chain payloads into IPLD object payloads
type PayloadConverter interface {
Convert(payload interface{}) (interface{}, error)
}
// IPLDPublisher publishes IPLD object payloads to IPFS and returns the corresponding cid payloads
type IPLDPublisher interface {
Publish(payload interface{}) (interface{}, error)
}
// IPLDResolver resolves fetched IPLD blocks to their chain-specific data types
type IPLDResolver interface {
Resolve(iplds interface{}) (interface{}, error)
}
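// An illustrative sketch, not part of this commit, of satisfying the chain-agnostic
// PayloadConverter interface for a hypothetical new chain; rawPayload, ipldPayload, and
// demoConverter are placeholder types (assumes "fmt" is imported).
type rawPayload struct{ BlockRLP []byte }
type ipldPayload struct{ Blocks [][]byte }
type demoConverter struct{}

func (dc *demoConverter) Convert(payload interface{}) (interface{}, error) {
	raw, ok := payload.(rawPayload)
	if !ok {
		return nil, fmt.Errorf("convert: expected rawPayload, got %T", payload)
	}
	return ipldPayload{Blocks: [][]byte{raw.BlockRLP}}, nil
}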

View File

@ -0,0 +1,22 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package shared
type Gap struct {
Start uint64
Stop uint64
}

View File

@ -17,11 +17,35 @@
package super_node
import (
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
"math/big"
"github.com/ethereum/go-ethereum/rpc"
"github.com/vulcanize/vulcanizedb/pkg/super_node/config"
)
// Subscription holds the information for an individual client subscription to the super node
type Subscription struct {
PayloadChan chan<- streamer.SuperNodePayload
ID rpc.ID
PayloadChan chan<- Payload
QuitChan chan<- bool
}
// Payload is the struct for a super node stream payload
// It carries data of a type specific to the chain being supported/queried and an error message
type Payload struct {
Data interface{} `json:"data"` // e.g. for Ethereum eth.StreamPayload
Err string `json:"err"`
}
// SubscriptionSettings is the interface every subscription filter type needs to satisfy, no matter the chain
// Further specifics of the underlying filter type depend on the internal needs of the types
// which satisfy the ResponseFilterer and CIDRetriever interfaces for a specific chain
// The underlying type needs to be rlp serializable
type SubscriptionSettings interface {
StartingBlock() *big.Int
EndingBlock() *big.Int
ChainType() config.ChainType
HistoricalData() bool
HistoricalDataOnly() bool
}
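// An illustrative sketch, not part of this commit, of a filter type satisfying
// SubscriptionSettings; the field names and the config.Ethereum constant are assumptions.
// Exported fields of rlp-friendly types keep the struct rlp-serializable, as required.
type ethSubscription struct {
	Start        *big.Int
	End          *big.Int
	BackFill     bool
	BackFillOnly bool
}

func (es *ethSubscription) StartingBlock() *big.Int     { return es.Start }
func (es *ethSubscription) EndingBlock() *big.Int       { return es.End }
func (es *ethSubscription) ChainType() config.ChainType { return config.Ethereum }
func (es *ethSubscription) HistoricalData() bool        { return es.BackFill }
func (es *ethSubscription) HistoricalDataOnly() bool    { return es.BackFillOnly }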