diff --git a/cmd/root.go b/cmd/root.go
index 17922b30..39a374a1 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -34,13 +34,14 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/eth/client"
vRpc "github.com/vulcanize/vulcanizedb/pkg/eth/converters/rpc"
"github.com/vulcanize/vulcanizedb/pkg/eth/node"
+ config2 "github.com/vulcanize/vulcanizedb/pkg/super_node/config"
)
var (
cfgFile string
databaseConfig config.Database
genConfig config.Plugin
- subscriptionConfig config.Subscription
+ subscriptionConfig *config2.EthSubscription
ipc string
levelDbPath string
queueRecheckInterval time.Duration
diff --git a/cmd/screenAndServe.go b/cmd/screenAndServe.go
index ddd7e8e8..9a28d383 100644
--- a/cmd/screenAndServe.go
+++ b/cmd/screenAndServe.go
@@ -26,10 +26,10 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
- "github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/config"
"github.com/vulcanize/vulcanizedb/utils"
)
@@ -52,18 +52,17 @@ func init() {
}
func screenAndServe() {
- superNode, newNodeErr := newSuperNodeWithoutPairedGethNode()
- if newNodeErr != nil {
- logWithCommand.Fatal(newNodeErr)
+ superNode, err := newSuperNodeWithoutPairedGethNode()
+ if err != nil {
+ logWithCommand.Fatal(err)
}
wg := &syn.WaitGroup{}
quitChan := make(chan bool, 1)
- emptyPayloadChan := make(chan ipfs.IPLDPayload)
+ emptyPayloadChan := make(chan interface{})
superNode.ScreenAndServe(wg, emptyPayloadChan, quitChan)
- serverErr := startServers(superNode)
- if serverErr != nil {
- logWithCommand.Fatal(serverErr)
+ if err := startServers(superNode); err != nil {
+ logWithCommand.Fatal(err)
}
wg.Wait()
}
@@ -72,15 +71,15 @@ func startServers(superNode super_node.NodeInterface) error {
var ipcPath string
ipcPath = viper.GetString("server.ipcPath")
if ipcPath == "" {
- home, homeDirErr := os.UserHomeDir()
- if homeDirErr != nil {
- return homeDirErr
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return err
}
ipcPath = filepath.Join(home, ".vulcanize/vulcanize.ipc")
}
- _, _, ipcErr := rpc.StartIPCEndpoint(ipcPath, superNode.APIs())
- if ipcErr != nil {
- return ipcErr
+ _, _, err := rpc.StartIPCEndpoint(ipcPath, superNode.APIs())
+ if err != nil {
+ return err
}
var wsEndpoint string
@@ -90,9 +89,9 @@ func startServers(superNode super_node.NodeInterface) error {
}
var exposeAll = true
var wsOrigins []string
- _, _, wsErr := rpc.StartWSEndpoint(wsEndpoint, superNode.APIs(), []string{"vdb"}, wsOrigins, exposeAll)
- if wsErr != nil {
- return wsErr
+ _, _, err = rpc.StartWSEndpoint(wsEndpoint, superNode.APIs(), []string{"vdb"}, wsOrigins, exposeAll)
+ if err != nil {
+ return err
}
return nil
}
@@ -100,27 +99,34 @@ func startServers(superNode super_node.NodeInterface) error {
func newSuperNodeWithoutPairedGethNode() (super_node.NodeInterface, error) {
ipfsPath = viper.GetString("client.ipfsPath")
if ipfsPath == "" {
- home, homeDirErr := os.UserHomeDir()
- if homeDirErr != nil {
- return nil, homeDirErr
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return nil, err
}
ipfsPath = filepath.Join(home, ".ipfs")
}
- ipfsInitErr := ipfs.InitIPFSPlugins()
- if ipfsInitErr != nil {
- return nil, ipfsInitErr
+ if err := ipfs.InitIPFSPlugins(); err != nil {
+ return nil, err
}
- ipldFetcher, newFetcherErr := ipfs.NewIPLDFetcher(ipfsPath)
- if newFetcherErr != nil {
- return nil, newFetcherErr
+ ipldFetcher, err := super_node.NewIPLDFetcher(config.Ethereum, ipfsPath)
+ if err != nil {
+ return nil, err
}
db := utils.LoadPostgres(databaseConfig, core.Node{})
+ retriever, err := super_node.NewCIDRetriever(config.Ethereum, &db)
+ if err != nil {
+ return nil, err
+ }
+ resolver, err := super_node.NewIPLDResolver(config.Ethereum)
+ if err != nil {
+ return nil, err
+ }
return &super_node.Service{
IPLDFetcher: ipldFetcher,
- Retriever: super_node.NewCIDRetriever(&db),
- Resolver: ipfs.NewIPLDResolver(),
+ Retriever: retriever,
+ Resolver: resolver,
Subscriptions: make(map[common.Hash]map[rpc.ID]super_node.Subscription),
- SubscriptionTypes: make(map[common.Hash]config.Subscription),
- GethNode: core.Node{},
+ SubscriptionTypes: make(map[common.Hash]super_node.SubscriptionSettings),
+ NodeInfo: core.Node{},
}, nil
}
diff --git a/cmd/streamSubscribe.go b/cmd/streamSubscribe.go
index e6f97e89..9ae75a61 100644
--- a/cmd/streamSubscribe.go
+++ b/cmd/streamSubscribe.go
@@ -30,9 +30,11 @@ import (
"github.com/spf13/viper"
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
- "github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/eth/client"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/config"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)
// streamSubscribeCmd represents the streamSubscribe command
@@ -61,10 +63,10 @@ func streamSubscribe() {
str := streamer.NewSuperNodeStreamer(rpcClient)
// Buffered channel for reading subscription payloads
- payloadChan := make(chan streamer.SuperNodePayload, 20000)
+ payloadChan := make(chan super_node.Payload, 20000)
// Subscribe to the super node service with the given config/filter parameters
- sub, err := str.Stream(payloadChan, subscriptionConfig)
+ sub, err := str.StreamETH(payloadChan, subscriptionConfig)
if err != nil {
logWithCommand.Fatal(err)
}
@@ -73,11 +75,16 @@ func streamSubscribe() {
for {
select {
case payload := <-payloadChan:
- if payload.ErrMsg != "" {
- logWithCommand.Error(payload.ErrMsg)
+ if payload.Err != "" {
+ logWithCommand.Error(payload.Err)
continue
}
- for _, headerRlp := range payload.HeadersRlp {
+ data, ok := payload.Data.(eth.StreamPayload)
+ if !ok {
+ logWithCommand.Warnf("payload data expected type %T got %T", eth.StreamPayload{}, payload.Data)
+ continue
+ }
+ for _, headerRlp := range data.HeadersRlp {
var header types.Header
err = rlp.Decode(bytes.NewBuffer(headerRlp), &header)
if err != nil {
@@ -87,7 +94,7 @@ func streamSubscribe() {
fmt.Printf("Header number %d, hash %s\n", header.Number.Int64(), header.Hash().Hex())
fmt.Printf("header: %v\n", header)
}
- for _, trxRlp := range payload.TransactionsRlp {
+ for _, trxRlp := range data.TransactionsRlp {
var trx types.Transaction
buff := bytes.NewBuffer(trxRlp)
stream := rlp.NewStream(buff, 0)
@@ -99,7 +106,7 @@ func streamSubscribe() {
fmt.Printf("Transaction with hash %s\n", trx.Hash().Hex())
fmt.Printf("trx: %v\n", trx)
}
- for _, rctRlp := range payload.ReceiptsRlp {
+ for _, rctRlp := range data.ReceiptsRlp {
var rct types.ReceiptForStorage
buff := bytes.NewBuffer(rctRlp)
stream := rlp.NewStream(buff, 0)
@@ -121,7 +128,7 @@ func streamSubscribe() {
}
}
// This assumes leafs only
- for key, stateRlp := range payload.StateNodesRlp {
+ for key, stateRlp := range data.StateNodesRlp {
var acct state.Account
err = rlp.Decode(bytes.NewBuffer(stateRlp), &acct)
if err != nil {
@@ -132,7 +139,7 @@ func streamSubscribe() {
key.Hex(), acct.Root.Hex(), acct.Balance.Int64())
fmt.Printf("state account: %v\n", acct)
}
- for stateKey, mappedRlp := range payload.StorageNodesRlp {
+ for stateKey, mappedRlp := range data.StorageNodesRlp {
fmt.Printf("Storage for state key %s ", stateKey.Hex())
for storageKey, storageRlp := range mappedRlp {
fmt.Printf("with storage key %s\n", storageKey.Hex())
@@ -165,15 +172,15 @@ func streamSubscribe() {
func configureSubscription() {
logWithCommand.Info("loading subscription config")
- subscriptionConfig = config.Subscription{
+ subscriptionConfig = &config.EthSubscription{
// Below default to false, which means we do not backfill by default
BackFill: viper.GetBool("subscription.backfill"),
BackFillOnly: viper.GetBool("subscription.backfillOnly"),
// Below default to 0
// 0 start means we start at the beginning and 0 end means we continue indefinitely
- StartingBlock: big.NewInt(viper.GetInt64("subscription.startingBlock")),
- EndingBlock: big.NewInt(viper.GetInt64("subscription.endingBlock")),
+ Start: big.NewInt(viper.GetInt64("subscription.startingBlock")),
+ End: big.NewInt(viper.GetInt64("subscription.endingBlock")),
// Below default to false, which means we get all headers by default
HeaderFilter: config.HeaderFilter{
diff --git a/cmd/syncAndPublish.go b/cmd/syncAndPublish.go
index 08519d12..389d8152 100644
--- a/cmd/syncAndPublish.go
+++ b/cmd/syncAndPublish.go
@@ -33,6 +33,7 @@ import (
vRpc "github.com/vulcanize/vulcanizedb/pkg/eth/converters/rpc"
"github.com/vulcanize/vulcanizedb/pkg/eth/node"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/config"
"github.com/vulcanize/vulcanizedb/utils"
)
@@ -107,7 +108,7 @@ func newSuperNode() (super_node.NodeInterface, error) {
if workers < 1 {
workers = 1
}
- return super_node.NewSuperNode(ipfsPath, &db, rpcClient, quitChan, workers, blockChain.Node())
+ return super_node.NewSuperNode(config.Ethereum, ipfsPath, &db, rpcClient, quitChan, workers, blockChain.Node())
}
func newBackFiller() (super_node.BackFillInterface, error) {
@@ -120,5 +121,5 @@ func newBackFiller() (super_node.BackFillInterface, error) {
} else {
frequency = time.Duration(freq)
}
- return super_node.NewBackFillService(ipfsPath, &db, archivalRPCClient, time.Minute*frequency, super_node.DefaultMaxBatchSize)
+ return super_node.NewBackFillService(config.Ethereum, ipfsPath, &db, archivalRPCClient, time.Minute*frequency, super_node.DefaultMaxBatchSize)
}
diff --git a/cmd/syncPublishScreenAndServe.go b/cmd/syncPublishScreenAndServe.go
index 1d7f22d1..e181e84b 100644
--- a/cmd/syncPublishScreenAndServe.go
+++ b/cmd/syncPublishScreenAndServe.go
@@ -21,8 +21,6 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
-
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
)
// syncPublishScreenAndServeCmd represents the syncPublishScreenAndServe command
@@ -52,7 +50,7 @@ func syncPublishScreenAndServe() {
}
wg := &syn.WaitGroup{}
- forwardPayloadChan := make(chan ipfs.IPLDPayload, 20000)
+ forwardPayloadChan := make(chan interface{}, 20000)
forwardQuitChan := make(chan bool, 1)
syncAndPubErr := superNode.SyncAndPublish(wg, forwardPayloadChan, forwardQuitChan)
if syncAndPubErr != nil {
diff --git a/db/migrations/00032_create_header_cids_table.sql b/db/migrations/00032_create_header_cids_table.sql
index f66c4b45..ca4c65a4 100644
--- a/db/migrations/00032_create_header_cids_table.sql
+++ b/db/migrations/00032_create_header_cids_table.sql
@@ -3,9 +3,10 @@ CREATE TABLE public.header_cids (
id SERIAL PRIMARY KEY,
block_number BIGINT NOT NULL,
block_hash VARCHAR(66) NOT NULL,
+ parent_hash VARCHAR(66) NOT NULL,
cid TEXT NOT NULL,
uncle BOOLEAN NOT NULL,
- td BIGINT NOT NULL,
+ td BIGINT,
UNIQUE (block_number, block_hash)
);
diff --git a/db/schema.sql b/db/schema.sql
index abeb214b..8371d69e 100644
--- a/db/schema.sql
+++ b/db/schema.sql
@@ -313,9 +313,10 @@ CREATE TABLE public.header_cids (
id integer NOT NULL,
block_number bigint NOT NULL,
block_hash character varying(66) NOT NULL,
+ parent_hash character varying(66) NOT NULL,
cid text NOT NULL,
uncle boolean NOT NULL,
- td bigint NOT NULL
+ td bigint
);
diff --git a/environments/superNode.toml b/environments/superNode.toml
new file mode 100644
index 00000000..72481603
--- /dev/null
+++ b/environments/superNode.toml
@@ -0,0 +1,24 @@
+[superNode]
+ chain = "ethereum"
+ ipfsPath = "/root/.ipfs"
+
+ [superNode.database]
+ name = "vulcanize_public"
+ hostname = "localhost"
+ port = 5432
+ user = "ec2-user"
+
+ [superNode.sync]
+ on = true
+ wsPath = "ws://127.0.0.1:8546"
+ workers = 1
+
+ [superNode.server]
+ on = true
+ ipcPath = "/root/.vulcanize/vulcanize.ipc"
+ wsPath = "127.0.0.1:8080"
+
+ [superNode.backFill]
+ on = false
+ httpPath = ""
+ frequency = 5
\ No newline at end of file
diff --git a/environments/superNodeSubscription.toml b/environments/superNodeSubscription.toml
index eb1a3021..7a384ef9 100644
--- a/environments/superNodeSubscription.toml
+++ b/environments/superNodeSubscription.toml
@@ -1,35 +1,36 @@
-[subscription]
- path = "ws://127.0.0.1:8080"
- backfill = true
- backfillOnly = false
- startingBlock = 0
- endingBlock = 0
- [subscription.headerFilter]
- off = false
- uncles = false
- [subscription.trxFilter]
- off = false
- src = [
- "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
- ]
- dst = [
- "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
- ]
- [subscription.receiptFilter]
- off = false
- contracts = []
- topic0s = [
- "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
- "0x930a61a57a70a73c2a503615b87e2e54fe5b9cdeacda518270b852296ab1a377"
- ]
- [subscription.stateFilter]
- off = false
- addresses = [
- "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"
- ]
- intermediateNodes = false
- [subscription.storageFilter]
- off = true
- addresses = []
- storageKeys = []
- intermediateNodes = false
\ No newline at end of file
+[superNode]
+ [superNode.ethSubscription]
+ historicalData = true
+ historicalDataOnly = false
+ startingBlock = 0
+ endingBlock = 0
+ wsPath = "ws://127.0.0.1:8080"
+ [superNode.ethSubscription.headerFilter]
+ off = false
+ uncles = false
+ [superNode.ethSubscription.trxFilter]
+ off = false
+ src = [
+ "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
+ ]
+ dst = [
+ "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
+ ]
+ [superNode.ethSubscription.receiptFilter]
+ off = false
+ contracts = []
+ topic0s = [
+ "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
+ "0x930a61a57a70a73c2a503615b87e2e54fe5b9cdeacda518270b852296ab1a377"
+ ]
+ [superNode.ethSubscription.stateFilter]
+ off = false
+ addresses = [
+ "0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"
+ ]
+ intermediateNodes = false
+ [superNode.ethSubscription.storageFilter]
+ off = true
+ addresses = []
+ storageKeys = []
+ intermediateNodes = false
\ No newline at end of file
diff --git a/environments/syncPublishScreenAndServe.toml b/environments/syncPublishScreenAndServe.toml
deleted file mode 100644
index 32a06ead..00000000
--- a/environments/syncPublishScreenAndServe.toml
+++ /dev/null
@@ -1,18 +0,0 @@
-[database]
- name = "vulcanize_public"
- hostname = "localhost"
- port = 5432
- user = "ec2-user"
-
-[client]
- ipcPath = "ws://127.0.0.1:8546"
- ipfsPath = "/root/.ipfs"
-
-[server]
- ipcPath = "/root/.vulcanize/vulcanize.ipc"
- wsEndpoint = "127.0.0.1:8080"
-
-[superNodeBackFill]
- on = false
- rpcPath = ""
- frequency = 5
diff --git a/libraries/shared/streamer/statediff_streamer_test.go b/libraries/shared/streamer/statediff_streamer_test.go
index ab96a0b9..ef00a459 100644
--- a/libraries/shared/streamer/statediff_streamer_test.go
+++ b/libraries/shared/streamer/statediff_streamer_test.go
@@ -18,6 +18,7 @@ import (
"github.com/ethereum/go-ethereum/statediff"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
"github.com/vulcanize/vulcanizedb/pkg/fakes"
)
diff --git a/libraries/shared/streamer/super_node_streamer.go b/libraries/shared/streamer/super_node_streamer.go
index f82921f2..56fe34a4 100644
--- a/libraries/shared/streamer/super_node_streamer.go
+++ b/libraries/shared/streamer/super_node_streamer.go
@@ -18,19 +18,15 @@
package streamer
import (
- "encoding/json"
- "math/big"
-
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rpc"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node"
- "github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/core"
)
// ISuperNodeStreamer is the interface for streaming SuperNodePayloads from a vulcanizeDB super node
type ISuperNodeStreamer interface {
- Stream(payloadChan chan SuperNodePayload, streamFilters config.Subscription) (*rpc.ClientSubscription, error)
+ Stream(payloadChan chan super_node.Payload, params super_node.SubscriptionSettings) (*rpc.ClientSubscription, error)
}
// SuperNodeStreamer is the underlying struct for the ISuperNodeStreamer interface
@@ -46,39 +42,6 @@ func NewSuperNodeStreamer(client core.RPCClient) *SuperNodeStreamer {
}
// Stream is the main loop for subscribing to data from a vulcanizedb super node
-func (sds *SuperNodeStreamer) Stream(payloadChan chan SuperNodePayload, streamFilters config.Subscription) (*rpc.ClientSubscription, error) {
- return sds.Client.Subscribe("vdb", payloadChan, "stream", streamFilters)
-}
-
-// Payload holds the data returned from the super node to the requesting client
-type SuperNodePayload struct {
- BlockNumber *big.Int `json:"blockNumber"`
- HeadersRlp [][]byte `json:"headersRlp"`
- UnclesRlp [][]byte `json:"unclesRlp"`
- TransactionsRlp [][]byte `json:"transactionsRlp"`
- ReceiptsRlp [][]byte `json:"receiptsRlp"`
- StateNodesRlp map[common.Hash][]byte `json:"stateNodesRlp"`
- StorageNodesRlp map[common.Hash]map[common.Hash][]byte `json:"storageNodesRlp"`
- ErrMsg string `json:"errMsg"`
-
- encoded []byte
- err error
-}
-
-func (sd *SuperNodePayload) ensureEncoded() {
- if sd.encoded == nil && sd.err == nil {
- sd.encoded, sd.err = json.Marshal(sd)
- }
-}
-
-// Length to implement Encoder interface for StateDiff
-func (sd *SuperNodePayload) Length() int {
- sd.ensureEncoded()
- return len(sd.encoded)
-}
-
-// Encode to implement Encoder interface for StateDiff
-func (sd *SuperNodePayload) Encode() ([]byte, error) {
- sd.ensureEncoded()
- return sd.encoded, sd.err
+func (sds *SuperNodeStreamer) Stream(payloadChan chan super_node.Payload, params super_node.SubscriptionSettings) (*rpc.ClientSubscription, error) {
+ return sds.Client.Subscribe("vdb", payloadChan, "stream", params)
}
diff --git a/libraries/shared/transformer/super_node_transformer.go b/libraries/shared/transformer/super_node_transformer.go
index 9e28cf9c..ceec9874 100644
--- a/libraries/shared/transformer/super_node_transformer.go
+++ b/libraries/shared/transformer/super_node_transformer.go
@@ -17,15 +17,15 @@
package transformer
import (
- "github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node"
)
type SuperNodeTransformer interface {
Init() error
Execute() error
- GetConfig() config.Subscription
+ GetConfig() super_node.SubscriptionSettings
}
-type SuperNodeTransformerInitializer func(db *postgres.DB, subCon config.Subscription, client core.RPCClient) SuperNodeTransformer
+type SuperNodeTransformerInitializer func(db *postgres.DB, subCon super_node.SubscriptionSettings, client core.RPCClient) SuperNodeTransformer
diff --git a/libraries/shared/watcher/contract_watcher.go b/libraries/shared/watcher/contract_watcher.go
index 500a388c..3c56124d 100644
--- a/libraries/shared/watcher/contract_watcher.go
+++ b/libraries/shared/watcher/contract_watcher.go
@@ -22,6 +22,7 @@ package watcher
import (
"fmt"
+
"github.com/sirupsen/logrus"
"github.com/vulcanize/vulcanizedb/libraries/shared/transformer"
diff --git a/pkg/config/subscription.go b/pkg/config/subscription.go
deleted file mode 100644
index 53e83ca3..00000000
--- a/pkg/config/subscription.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package config
-
-import "math/big"
-
-// Subscription config is used by a subscribing transformer to specifiy which data to receive from the super node
-type Subscription struct {
- BackFill bool
- BackFillOnly bool
- StartingBlock *big.Int
- EndingBlock *big.Int // set to 0 or a negative value to have no ending block
- HeaderFilter HeaderFilter
- TrxFilter TrxFilter
- ReceiptFilter ReceiptFilter
- StateFilter StateFilter
- StorageFilter StorageFilter
-}
-
-type HeaderFilter struct {
- Off bool
- Uncles bool
-}
-
-type TrxFilter struct {
- Off bool
- Src []string
- Dst []string
-}
-
-type ReceiptFilter struct {
- Off bool
- MatchTxs bool // turn on to retrieve receipts that pair with retrieved transactions
- Contracts []string
- Topic0s []string
-}
-
-type StateFilter struct {
- Off bool
- Addresses []string // is converted to state key by taking its keccak256 hash
- IntermediateNodes bool
-}
-
-type StorageFilter struct {
- Off bool
- Addresses []string
- StorageKeys []string
- IntermediateNodes bool
-}
diff --git a/pkg/fakes/mock_rpc_client.go b/pkg/fakes/mock_rpc_client.go
index d384b0b3..3b199c13 100644
--- a/pkg/fakes/mock_rpc_client.go
+++ b/pkg/fakes/mock_rpc_client.go
@@ -18,13 +18,11 @@ package fakes
import (
"context"
- "errors"
"math/big"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rpc"
- "github.com/ethereum/go-ethereum/statediff"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/core"
@@ -40,7 +38,7 @@ type MockRPCClient struct {
passedResult interface{}
passedBatch []client.BatchElem
passedNamespace string
- passedPayloadChan chan statediff.Payload
+ passedPayloadChan interface{}
passedSubscribeArgs []interface{}
lengthOfBatch int
returnPOAHeader core.POAHeader
@@ -51,12 +49,7 @@ type MockRPCClient struct {
func (client *MockRPCClient) Subscribe(namespace string, payloadChan interface{}, args ...interface{}) (*rpc.ClientSubscription, error) {
client.passedNamespace = namespace
-
- passedPayloadChan, ok := payloadChan.(chan statediff.Payload)
- if !ok {
- return nil, errors.New("passed in channel is not of the correct type")
- }
- client.passedPayloadChan = passedPayloadChan
+ client.passedPayloadChan = payloadChan
for _, arg := range args {
client.passedSubscribeArgs = append(client.passedSubscribeArgs, arg)
@@ -66,7 +59,7 @@ func (client *MockRPCClient) Subscribe(namespace string, payloadChan interface{}
return &subscription, nil
}
-func (client *MockRPCClient) AssertSubscribeCalledWith(namespace string, payloadChan chan statediff.Payload, args []interface{}) {
+func (client *MockRPCClient) AssertSubscribeCalledWith(namespace string, payloadChan interface{}, args []interface{}) {
Expect(client.passedNamespace).To(Equal(namespace))
Expect(client.passedPayloadChan).To(Equal(payloadChan))
Expect(client.passedSubscribeArgs).To(Equal(args))
diff --git a/pkg/ipfs/helpers.go b/pkg/ipfs/helpers.go
index cb004918..8baf22b3 100644
--- a/pkg/ipfs/helpers.go
+++ b/pkg/ipfs/helpers.go
@@ -19,8 +19,6 @@ package ipfs
import (
"context"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/crypto"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-ipfs/core"
"github.com/ipfs/go-ipfs/plugin/loader"
@@ -58,22 +56,3 @@ func InitIPFSBlockService(ipfsPath string) (blockservice.BlockService, error) {
}
return ipfsNode.Blocks, nil
}
-
-// AddressToKey hashes an address
-func AddressToKey(address common.Address) common.Hash {
- return crypto.Keccak256Hash(address[:])
-}
-
-// HexToKey hashes a hex (0x leading or not) string
-func HexToKey(hex string) common.Hash {
- addr := common.FromHex(hex)
- return crypto.Keccak256Hash(addr[:])
-}
-
-// EmptyCIDWrapper returns whether or not the provided CIDWrapper has any Cids we need to process
-func EmptyCIDWrapper(cids CIDWrapper) bool {
- if len(cids.Transactions) > 0 || len(cids.Headers) > 0 || len(cids.Uncles) > 0 || len(cids.Receipts) > 0 || len(cids.StateNodes) > 0 || len(cids.StorageNodes) > 0 {
- return false
- }
- return true
-}
diff --git a/pkg/ipfs/models.go b/pkg/ipfs/models.go
new file mode 100644
index 00000000..62093ae0
--- /dev/null
+++ b/pkg/ipfs/models.go
@@ -0,0 +1,22 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package ipfs
+
+type IPLDModel struct {
+ Key string `db:"key"`
+ Data []byte `db:"data"`
+}
diff --git a/pkg/ipfs/types.go b/pkg/ipfs/types.go
deleted file mode 100644
index f3e88b3f..00000000
--- a/pkg/ipfs/types.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package ipfs
-
-import (
- "math/big"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ipfs/go-block-format"
-)
-
-// CIDWrapper is used to package CIDs retrieved from the local Postgres cache and direct fetching of IPLDs
-type CIDWrapper struct {
- BlockNumber *big.Int
- Headers []string
- Uncles []string
- Transactions []string
- Receipts []string
- StateNodes []StateNodeCID
- StorageNodes []StorageNodeCID
-}
-
-// IPLDWrapper is used to package raw IPLD block data fetched from IPFS
-type IPLDWrapper struct {
- BlockNumber *big.Int
- Headers []blocks.Block
- Uncles []blocks.Block
- Transactions []blocks.Block
- Receipts []blocks.Block
- StateNodes map[common.Hash]blocks.Block
- StorageNodes map[common.Hash]map[common.Hash]blocks.Block
-}
-
-// IPLDPayload is a custom type which packages raw ETH data for the IPFS publisher
-type IPLDPayload struct {
- HeaderRLP []byte
- TotalDifficulty *big.Int
- BlockNumber *big.Int
- BlockHash common.Hash
- BlockBody *types.Body
- TrxMetaData []*TrxMetaData
- Receipts types.Receipts
- ReceiptMetaData []*ReceiptMetaData
- StateNodes map[common.Hash]StateNode
- StorageNodes map[common.Hash][]StorageNode
-}
-
-// StateNode struct used to flag node as leaf or not
-type StateNode struct {
- Value []byte
- Leaf bool
-}
-
-// StorageNode struct used to flag node as leaf or not
-type StorageNode struct {
- Key common.Hash
- Value []byte
- Leaf bool
-}
-
-// CIDPayload is a struct to hold all the CIDs and their meta data
-type CIDPayload struct {
- BlockNumber string
- BlockHash common.Hash
- TotalDifficulty string
- HeaderCID string
- UncleCIDs map[common.Hash]string
- TransactionCIDs map[common.Hash]*TrxMetaData
- ReceiptCIDs map[common.Hash]*ReceiptMetaData
- StateNodeCIDs map[common.Hash]StateNodeCID
- StorageNodeCIDs map[common.Hash][]StorageNodeCID
-}
-
-// StateNodeCID is used to associate a leaf flag with a state node cid
-type StateNodeCID struct {
- CID string
- Leaf bool
- Key string `db:"state_key"`
-}
-
-// StorageNodeCID is used to associate a leaf flag with a storage node cid
-type StorageNodeCID struct {
- Key string `db:"storage_key"`
- CID string
- Leaf bool
- StateKey string `db:"state_key"`
-}
-
-// ReceiptMetaData wraps some additional data around our receipt CIDs for indexing
-type ReceiptMetaData struct {
- CID string
- Topic0s []string
- ContractAddress string
-}
-
-// TrxMetaData wraps some additional data around our transaction CID for indexing
-type TrxMetaData struct {
- CID string
- Src string
- Dst string
-}
diff --git a/pkg/super_node/api.go b/pkg/super_node/api.go
index c3ba5a3f..39a6f148 100644
--- a/pkg/super_node/api.go
+++ b/pkg/super_node/api.go
@@ -22,8 +22,6 @@ import (
"github.com/ethereum/go-ethereum/rpc"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
- "github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/core"
)
@@ -35,47 +33,47 @@ const APIVersion = "0.0.1"
// PublicSuperNodeAPI is the public api for the super node
type PublicSuperNodeAPI struct {
- sni NodeInterface
+ sn SuperNode
}
// NewPublicSuperNodeAPI creates a new PublicSuperNodeAPI with the provided underlying SyncPublishScreenAndServe process
-func NewPublicSuperNodeAPI(superNodeInterface NodeInterface) *PublicSuperNodeAPI {
+func NewPublicSuperNodeAPI(superNodeInterface SuperNode) *PublicSuperNodeAPI {
return &PublicSuperNodeAPI{
- sni: superNodeInterface,
+ sn: superNodeInterface,
}
}
-// Stream is the public method to setup a subscription that fires off SyncPublishScreenAndServe payloads as they are created
-func (api *PublicSuperNodeAPI) Stream(ctx context.Context, streamFilters config.Subscription) (*rpc.Subscription, error) {
+// Stream is the public method to set up a subscription that fires off super node payloads as they are processed
+func (api *PublicSuperNodeAPI) Stream(ctx context.Context, params SubscriptionSettings) (*rpc.Subscription, error) {
// ensure that the RPC connection supports subscriptions
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return nil, rpc.ErrNotificationsUnsupported
}
- // create subscription and start waiting for statediff events
+ // create subscription and start waiting for stream events
rpcSub := notifier.CreateSubscription()
go func() {
// subscribe to events from the SyncPublishScreenAndServe service
- payloadChannel := make(chan streamer.SuperNodePayload, payloadChanBufferSize)
+ payloadChannel := make(chan Payload, PayloadChanBufferSize)
quitChan := make(chan bool, 1)
- go api.sni.Subscribe(rpcSub.ID, payloadChannel, quitChan, streamFilters)
+ go api.sn.Subscribe(rpcSub.ID, payloadChannel, quitChan, params)
- // loop and await state diff payloads and relay them to the subscriber with then notifier
+ // loop and await payloads, relaying them to the subscriber using the notifier
for {
select {
case packet := <-payloadChannel:
- if notifyErr := notifier.Notify(rpcSub.ID, packet); notifyErr != nil {
- log.Error("Failed to send state diff packet", "err", notifyErr)
- api.sni.Unsubscribe(rpcSub.ID)
+ if err := notifier.Notify(rpcSub.ID, packet); err != nil {
+ log.Error("Failed to send super node packet", "err", err)
+ api.sn.Unsubscribe(rpcSub.ID)
return
}
case <-rpcSub.Err():
- api.sni.Unsubscribe(rpcSub.ID)
+ api.sn.Unsubscribe(rpcSub.ID)
return
case <-quitChan:
- // don't need to unsubscribe, SyncPublishScreenAndServe service does so before sending the quit signal
+ // no need to unsubscribe from the super node; the service unsubscribes before sending the quit signal
return
}
}
@@ -84,7 +82,7 @@ func (api *PublicSuperNodeAPI) Stream(ctx context.Context, streamFilters config.
return rpcSub, nil
}
-// Node is a public rpc method to allow transformers to fetch the Geth node info for the super node
+// Node is a public rpc method to allow transformers to fetch the node info for the super node
func (api *PublicSuperNodeAPI) Node() core.Node {
- return api.sni.Node()
+ return api.sn.Node()
}
diff --git a/pkg/super_node/backfiller.go b/pkg/super_node/backfiller.go
index 55e10d41..d736c601 100644
--- a/pkg/super_node/backfiller.go
+++ b/pkg/super_node/backfiller.go
@@ -22,14 +22,14 @@ import (
"sync/atomic"
"time"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/config"
+
"github.com/ethereum/go-ethereum/params"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
- "github.com/vulcanize/vulcanizedb/pkg/core"
- "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
)
const (
@@ -45,36 +45,52 @@ type BackFillInterface interface {
// BackFillService for filling in gaps in the super node
type BackFillService struct {
- // Interface for converting statediff payloads into ETH-IPLD object payloads
- Converter ipfs.PayloadConverter
- // Interface for publishing the ETH-IPLD payloads to IPFS
- Publisher ipfs.IPLDPublisher
- // Interface for indexing the CIDs of the published ETH-IPLDs in Postgres
- Repository CIDRepository
+ // Interface for converting payloads into IPLD object payloads
+ Converter shared.PayloadConverter
+ // Interface for publishing the IPLD payloads to IPFS
+ Publisher shared.IPLDPublisher
+ // Interface for indexing the CIDs of the published IPLDs in Postgres
+ Indexer shared.CIDIndexer
// Interface for searching and retrieving CIDs from Postgres index
- Retriever CIDRetriever
- // State-diff fetcher; needs to be configured with an archival core.RpcClient
- Fetcher fetcher.StateDiffFetcher
+ Retriever shared.CIDRetriever
+ // Interface for fetching payloads over at historical blocks; over http
+ Fetcher shared.PayloadFetcher
// Check frequency
GapCheckFrequency time.Duration
- // size of batch fetches
+ // Size of batch fetches
BatchSize uint64
}
// NewBackFillService returns a new BackFillInterface
-func NewBackFillService(ipfsPath string, db *postgres.DB, archivalNodeRPCClient core.RPCClient, freq time.Duration, batchSize uint64) (BackFillInterface, error) {
- publisher, err := ipfs.NewIPLDPublisher(ipfsPath)
+func NewBackFillService(settings *config.BackFill) (BackFillInterface, error) {
+ publisher, err := NewIPLDPublisher(settings.Chain, settings.IPFSPath)
+ if err != nil {
+ return nil, err
+ }
+ indexer, err := NewCIDIndexer(settings.Chain, settings.DB)
+ if err != nil {
+ return nil, err
+ }
+ converter, err := NewPayloadConverter(settings.Chain, params.MainnetChainConfig)
+ if err != nil {
+ return nil, err
+ }
+ retriever, err := NewCIDRetriever(settings.Chain, settings.DB)
+ if err != nil {
+ return nil, err
+ }
+ fetcher, err := NewPaylaodFetcher(settings.Chain, settings.HTTPClient)
if err != nil {
return nil, err
}
return &BackFillService{
- Repository: NewCIDRepository(db),
- Converter: ipfs.NewPayloadConverter(params.MainnetChainConfig),
+ Indexer: indexer,
+ Converter: converter,
Publisher: publisher,
- Retriever: NewCIDRetriever(db),
- Fetcher: fetcher.NewStateDiffFetcher(archivalNodeRPCClient),
- GapCheckFrequency: freq,
- BatchSize: batchSize,
+ Retriever: retriever,
+ Fetcher: fetcher,
+ GapCheckFrequency: settings.Frequency,
+ BatchSize: settings.BatchSize,
}, nil
}
@@ -93,23 +109,24 @@ func (bfs *BackFillService) FillGaps(wg *sync.WaitGroup, quitChan <-chan bool) {
return
case <-ticker.C:
log.Info("searching for gaps in the super node database")
- startingBlock, firstBlockErr := bfs.Retriever.RetrieveFirstBlockNumber()
- if firstBlockErr != nil {
- log.Error(firstBlockErr)
+ startingBlock, err := bfs.Retriever.RetrieveFirstBlockNumber()
+ if err != nil {
+ log.Error(err)
continue
}
if startingBlock != 1 {
log.Info("found gap at the beginning of the sync")
bfs.fillGaps(1, uint64(startingBlock-1))
}
-
- gaps, gapErr := bfs.Retriever.RetrieveGapsInData()
- if gapErr != nil {
- log.Error(gapErr)
+ gaps, err := bfs.Retriever.RetrieveGapsInData()
+ if err != nil {
+ log.Error(err)
continue
}
for _, gap := range gaps {
- bfs.fillGaps(gap[0], gap[1])
+ if err := bfs.fillGaps(gap.Start, gap.Stop); err != nil {
+ log.Error(err)
+ }
}
}
}
@@ -117,14 +134,13 @@ func (bfs *BackFillService) FillGaps(wg *sync.WaitGroup, quitChan <-chan bool) {
log.Info("fillGaps goroutine successfully spun up")
}
-func (bfs *BackFillService) fillGaps(startingBlock, endingBlock uint64) {
+func (bfs *BackFillService) fillGaps(startingBlock, endingBlock uint64) error {
log.Infof("going to fill in gap from %d to %d", startingBlock, endingBlock)
errChan := make(chan error)
done := make(chan bool)
- backFillInitErr := bfs.backFill(startingBlock, endingBlock, errChan, done)
- if backFillInitErr != nil {
- log.Error(backFillInitErr)
- return
+ err := bfs.backFill(startingBlock, endingBlock, errChan, done)
+ if err != nil {
+ return err
}
for {
select {
@@ -132,7 +148,7 @@ func (bfs *BackFillService) fillGaps(startingBlock, endingBlock uint64) {
log.Error(err)
case <-done:
log.Infof("finished filling in gap from %d to %d", startingBlock, endingBlock)
- return
+ return nil
}
}
}
@@ -165,24 +181,26 @@ func (bfs *BackFillService) backFill(startingBlock, endingBlock uint64, errChan
<-forwardDone
}
go func(blockHeights []uint64) {
- payloads, fetchErr := bfs.Fetcher.FetchStateDiffsAt(blockHeights)
- if fetchErr != nil {
- errChan <- fetchErr
+ payloads, err := bfs.Fetcher.FetchAt(blockHeights)
+ if err != nil {
+ errChan <- err
}
for _, payload := range payloads {
- ipldPayload, convertErr := bfs.Converter.Convert(payload)
- if convertErr != nil {
- errChan <- convertErr
+ ipldPayload, err := bfs.Converter.Convert(payload)
+ if err != nil {
+ errChan <- err
continue
}
- cidPayload, publishErr := bfs.Publisher.Publish(ipldPayload)
- if publishErr != nil {
- errChan <- publishErr
+ // make backfiller a part of super_node service and forward these
+ // ipldPayload to the regular publishAndIndex and screenAndServe channels
+ // this would allow us to stream backfilled data to subscribers
+ cidPayload, err := bfs.Publisher.Publish(ipldPayload)
+ if err != nil {
+ errChan <- err
continue
}
- indexErr := bfs.Repository.Index(cidPayload)
- if indexErr != nil {
- errChan <- indexErr
+ if err := bfs.Indexer.Index(cidPayload); err != nil {
+ errChan <- err
}
}
// when this goroutine is done, send out a signal
diff --git a/pkg/super_node/backfiller_test.go b/pkg/super_node/backfiller_test.go
index cfa4e571..595cb9ff 100644
--- a/pkg/super_node/backfiller_test.go
+++ b/pkg/super_node/backfiller_test.go
@@ -24,43 +24,42 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- mocks2 "github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
- mocks3 "github.com/vulcanize/vulcanizedb/pkg/super_node/mocks"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
var _ = Describe("BackFiller", func() {
Describe("FillGaps", func() {
It("Periodically checks for and fills in gaps in the super node's data", func() {
- mockCidRepo := &mocks3.CIDRepository{
+ mockCidRepo := &mocks.CIDIndexer{
ReturnErr: nil,
}
mockPublisher := &mocks.IterativeIPLDPublisher{
- ReturnCIDPayload: []*ipfs.CIDPayload{mocks.MockCIDPayload, mocks.MockCIDPayload},
+ ReturnCIDPayload: []*eth.CIDPayload{mocks.MockCIDPayload, mocks.MockCIDPayload},
ReturnErr: nil,
}
mockConverter := &mocks.IterativePayloadConverter{
- ReturnIPLDPayload: []*ipfs.IPLDPayload{mocks.MockIPLDPayload, mocks.MockIPLDPayload},
+ ReturnIPLDPayload: []*eth.IPLDPayload{mocks.MockIPLDPayload, mocks.MockIPLDPayload},
ReturnErr: nil,
}
- mockRetriever := &mocks3.MockCIDRetriever{
+ mockRetriever := &mocks.MockCIDRetriever{
FirstBlockNumberToReturn: 1,
- GapsToRetrieve: [][2]uint64{
+ GapsToRetrieve: []shared.Gap{
{
- 100, 101,
+ Start: 100, Stop: 101,
},
},
}
- mockFetcher := &mocks2.StateDiffFetcher{
+ mockFetcher := &mocks.StateDiffFetcher{
PayloadsToReturn: map[uint64]statediff.Payload{
100: mocks.MockStateDiffPayload,
101: mocks.MockStateDiffPayload,
},
}
backfiller := &super_node.BackFillService{
- Repository: mockCidRepo,
+ Indexer: mockCidRepo,
Publisher: mockPublisher,
Converter: mockConverter,
Fetcher: mockFetcher,
@@ -88,32 +87,32 @@ var _ = Describe("BackFiller", func() {
})
It("Works for single block `ranges`", func() {
- mockCidRepo := &mocks3.CIDRepository{
+ mockCidRepo := &mocks.CIDIndexer{
ReturnErr: nil,
}
mockPublisher := &mocks.IterativeIPLDPublisher{
- ReturnCIDPayload: []*ipfs.CIDPayload{mocks.MockCIDPayload},
+ ReturnCIDPayload: []*eth.CIDPayload{mocks.MockCIDPayload},
ReturnErr: nil,
}
mockConverter := &mocks.IterativePayloadConverter{
- ReturnIPLDPayload: []*ipfs.IPLDPayload{mocks.MockIPLDPayload},
+ ReturnIPLDPayload: []*eth.IPLDPayload{mocks.MockIPLDPayload},
ReturnErr: nil,
}
- mockRetriever := &mocks3.MockCIDRetriever{
+ mockRetriever := &mocks.MockCIDRetriever{
FirstBlockNumberToReturn: 1,
- GapsToRetrieve: [][2]uint64{
+ GapsToRetrieve: []shared.Gap{
{
- 100, 100,
+ Start: 100, Stop: 100,
},
},
}
- mockFetcher := &mocks2.StateDiffFetcher{
+ mockFetcher := &mocks.StateDiffFetcher{
PayloadsToReturn: map[uint64]statediff.Payload{
100: mocks.MockStateDiffPayload,
},
}
backfiller := &super_node.BackFillService{
- Repository: mockCidRepo,
+ Indexer: mockCidRepo,
Publisher: mockPublisher,
Converter: mockConverter,
Fetcher: mockFetcher,
@@ -138,29 +137,29 @@ var _ = Describe("BackFiller", func() {
})
It("Finds beginning gap", func() {
- mockCidRepo := &mocks3.CIDRepository{
+ mockCidRepo := &mocks.CIDIndexer{
ReturnErr: nil,
}
mockPublisher := &mocks.IterativeIPLDPublisher{
- ReturnCIDPayload: []*ipfs.CIDPayload{mocks.MockCIDPayload, mocks.MockCIDPayload},
+ ReturnCIDPayload: []*eth.CIDPayload{mocks.MockCIDPayload, mocks.MockCIDPayload},
ReturnErr: nil,
}
mockConverter := &mocks.IterativePayloadConverter{
- ReturnIPLDPayload: []*ipfs.IPLDPayload{mocks.MockIPLDPayload, mocks.MockIPLDPayload},
+ ReturnIPLDPayload: []*eth.IPLDPayload{mocks.MockIPLDPayload, mocks.MockIPLDPayload},
ReturnErr: nil,
}
- mockRetriever := &mocks3.MockCIDRetriever{
+ mockRetriever := &mocks.MockCIDRetriever{
FirstBlockNumberToReturn: 3,
- GapsToRetrieve: [][2]uint64{},
+ GapsToRetrieve: []shared.Gap{},
}
- mockFetcher := &mocks2.StateDiffFetcher{
+ mockFetcher := &mocks.StateDiffFetcher{
PayloadsToReturn: map[uint64]statediff.Payload{
1: mocks.MockStateDiffPayload,
2: mocks.MockStateDiffPayload,
},
}
backfiller := &super_node.BackFillService{
- Repository: mockCidRepo,
+ Indexer: mockCidRepo,
Publisher: mockPublisher,
Converter: mockConverter,
Fetcher: mockFetcher,
diff --git a/pkg/super_node/config/chain_type.go b/pkg/super_node/config/chain_type.go
new file mode 100644
index 00000000..3b4305a9
--- /dev/null
+++ b/pkg/super_node/config/chain_type.go
@@ -0,0 +1,58 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package config
+
+import (
+ "errors"
+ "strings"
+)
+
+// ChainType enum for specifying blockchain
+type ChainType int
+
+const (
+ Unknown ChainType = iota
+ Ethereum
+ Bitcoin
+ Omni
+)
+
+func (c ChainType) String() string {
+ switch c {
+ case Ethereum:
+ return "Ethereum"
+ case Bitcoin:
+ return "Bitcoin"
+ case Omni:
+ return "Omni"
+ default:
+ return ""
+ }
+}
+
+func NewChainType(name string) (ChainType, error) {
+ switch strings.ToLower(name) {
+ case "ethereum", "eth":
+ return Ethereum, nil
+ case "bitcoin", "btc", "xbt":
+ return Bitcoin, nil
+ case "omni":
+ return Omni, nil
+ default:
+ return Unknown, errors.New("invalid name for chain")
+ }
+}
diff --git a/pkg/super_node/config/config.go b/pkg/super_node/config/config.go
new file mode 100644
index 00000000..608fa827
--- /dev/null
+++ b/pkg/super_node/config/config.go
@@ -0,0 +1,179 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package config
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/ethereum/go-ethereum/ethclient"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/spf13/viper"
+
+ "github.com/vulcanize/vulcanizedb/pkg/config"
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/eth/client"
+ vRpc "github.com/vulcanize/vulcanizedb/pkg/eth/converters/rpc"
+ "github.com/vulcanize/vulcanizedb/pkg/eth/node"
+ "github.com/vulcanize/vulcanizedb/utils"
+)
+
+// SuperNode config struct
+type SuperNode struct {
+ // Ubiquitous fields
+ Chain ChainType
+ IPFSPath string
+ DB *postgres.DB
+ Quit chan bool
+ // Server fields
+ Serve bool
+ WSEndpoint string
+ IPCEndpoint string
+ // Sync params
+ Sync bool
+ Workers int
+ WSClient core.RPCClient
+ NodeInfo core.Node
+ // Backfiller params
+ BackFill bool
+ BackFillSettings *BackFill
+}
+
+// NewSuperNodeConfig is used to initialize a SuperNode config
+func NewSuperNodeConfig() (*SuperNode, error) {
+ dbConfig := config.Database{
+ Name: viper.GetString("superNode.database.name"),
+ Hostname: viper.GetString("superNode.database.hostname"),
+ Port: viper.GetInt("superNode.database.port"),
+ User: viper.GetString("superNode.database.user"),
+ Password: viper.GetString("superNode.database.password"),
+ }
+ sn := new(SuperNode)
+ var err error
+ sn.Chain, err = NewChainType(viper.GetString("superNode.chain"))
+ if err != nil {
+ return nil, err
+ }
+ ipfsPath := viper.GetString("superNode.ipfsPath")
+ if ipfsPath == "" {
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return nil, err
+ }
+ ipfsPath = filepath.Join(home, ".ipfs")
+ }
+ sn.IPFSPath = ipfsPath
+ sn.Serve = viper.GetBool("superNode.server.on")
+ sn.Sync = viper.GetBool("superNode.sync.on")
+ if sn.Sync {
+ workers := viper.GetInt("superNode.sync.workers")
+ if workers < 1 {
+ workers = 1
+ }
+ sn.Workers = workers
+ sn.NodeInfo, sn.WSClient, err = getNodeAndClient(sn.Chain, viper.GetString("superNode.sync.wsPath"))
+ }
+ if sn.Serve {
+ wsPath := viper.GetString("superNode.server.wsPath")
+ if wsPath == "" {
+ wsPath = "127.0.0.1:8080"
+ }
+ sn.WSEndpoint = wsPath
+ ipcPath := viper.GetString("superNode.server.ipcPath")
+ if ipcPath == "" {
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return nil, err
+ }
+ ipcPath = filepath.Join(home, ".vulcanize/vulcanize.ipc")
+ }
+ sn.IPCEndpoint = ipcPath
+ }
+ db := utils.LoadPostgres(dbConfig, sn.NodeInfo)
+ sn.DB = &db
+ sn.Quit = make(chan bool)
+ if viper.GetBool("superNode.backFill.on") {
+ sn.BackFill = true
+ sn.BackFillSettings, err = NewBackFillerConfig(dbConfig)
+ }
+ return sn, err
+}
+
+// BackFill config struct
+type BackFill struct {
+ Chain ChainType
+ IPFSPath string
+ DB *postgres.DB
+ HTTPClient core.RPCClient
+ Frequency time.Duration
+ BatchSize uint64
+}
+
+// NewBackFillerConfig is used to initialize a backfiller config
+func NewBackFillerConfig(dbConfig config.Database) (*BackFill, error) {
+ bf := new(BackFill)
+ var err error
+ bf.Chain, err = NewChainType(viper.GetString("superNode.chain"))
+ if err != nil {
+ return nil, err
+ }
+ ipfsPath := viper.GetString("superNode.ipfsPath")
+ if ipfsPath == "" {
+ home, homeDirErr := os.UserHomeDir()
+ if homeDirErr != nil {
+ return nil, homeDirErr
+ }
+ ipfsPath = filepath.Join(home, ".ipfs")
+ }
+ bf.IPFSPath = ipfsPath
+ node, httpClient, err := getNodeAndClient(bf.Chain, viper.GetString("superNode.backFill.httpPath"))
+ db := utils.LoadPostgres(dbConfig, node)
+ bf.DB = &db
+ bf.HTTPClient = httpClient
+ freq := viper.GetInt("superNode.backFill.frequency")
+ var frequency time.Duration
+ if freq <= 0 {
+ frequency = time.Minute * 5
+ } else {
+ frequency = time.Duration(freq) // NOTE(review): raw int converts to nanoseconds — confirm intended unit
+ }
+ bf.Frequency = frequency
+ return bf, err // propagate any error from getNodeAndClient above
+}
+
+func getNodeAndClient(chain ChainType, path string) (core.Node, core.RPCClient, error) {
+ switch chain {
+ case Ethereum:
+ rawRPCClient, err := rpc.Dial(path)
+ if err != nil {
+ return core.Node{}, nil, err
+ }
+ rpcClient := client.NewRPCClient(rawRPCClient, path)
+ ethClient := ethclient.NewClient(rawRPCClient)
+ vdbEthClient := client.NewEthClient(ethClient)
+ vdbNode := node.MakeNode(rpcClient)
+ transactionConverter := vRpc.NewRPCTransactionConverter(ethClient)
+ blockChain := eth.NewBlockChain(vdbEthClient, rpcClient, vdbNode, transactionConverter)
+ return blockChain.Node(), rpcClient, nil
+ default:
+ return core.Node{}, nil, fmt.Errorf("unrecognized chain type %s", chain.String())
+ }
+}
diff --git a/pkg/super_node/config/eth_subscription.go b/pkg/super_node/config/eth_subscription.go
new file mode 100644
index 00000000..161efa89
--- /dev/null
+++ b/pkg/super_node/config/eth_subscription.go
@@ -0,0 +1,144 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package config
+
+import (
+ "math/big"
+
+ "github.com/spf13/viper"
+)
+
+// EthSubscription config is used by a subscriber to specify what eth data to stream from the super node
+type EthSubscription struct {
+ BackFill bool
+ BackFillOnly bool
+ Start *big.Int
+ End *big.Int // set to 0 or a negative value to have no ending block
+ HeaderFilter HeaderFilter
+ TxFilter TxFilter
+ ReceiptFilter ReceiptFilter
+ StateFilter StateFilter
+ StorageFilter StorageFilter
+}
+
+// HeaderFilter contains filter settings for headers
+type HeaderFilter struct {
+ Off bool
+ Uncles bool
+}
+
+// TxFilter contains filter settings for txs
+type TxFilter struct {
+ Off bool
+ Src []string
+ Dst []string
+}
+
+// ReceiptFilter contains filter settings for receipts
+type ReceiptFilter struct {
+ Off bool
+ MatchTxs bool // turn on to retrieve receipts that pair with retrieved transactions
+ Contracts []string
+ Topic0s []string
+}
+
+// StateFilter contains filter settings for state
+type StateFilter struct {
+ Off bool
+ Addresses []string // is converted to state key by taking its keccak256 hash
+ IntermediateNodes bool
+}
+
+// StorageFilter contains filter settings for storage
+type StorageFilter struct {
+ Off bool
+ Addresses []string
+ StorageKeys []string
+ IntermediateNodes bool
+}
+
+// NewEthSubscriptionConfig is used to initialize an EthSubscription struct with env variables
+func NewEthSubscriptionConfig() *EthSubscription {
+ sc := new(EthSubscription)
+ // Below default to false, which means we do not backfill by default
+ sc.BackFill = viper.GetBool("superNode.ethSubscription.historicalData")
+ sc.BackFillOnly = viper.GetBool("superNode.ethSubscription.historicalDataOnly")
+ // Below default to 0
+ // 0 start means we start at the beginning and 0 end means we continue indefinitely
+ sc.Start = big.NewInt(viper.GetInt64("superNode.ethSubscription.startingBlock"))
+ sc.End = big.NewInt(viper.GetInt64("superNode.ethSubscription.endingBlock"))
+ // Below default to false, which means we get all headers and no uncles by default
+ sc.HeaderFilter = HeaderFilter{
+ Off: viper.GetBool("superNode.ethSubscription.off"), // NOTE(review): other filters nest keys (e.g. trxFilter.off) — should this be headerFilter.off?
+ Uncles: viper.GetBool("superNode.ethSubscription.uncles"),
+ }
+ // Below defaults to false and two slices of length 0
+ // Which means we get all transactions by default
+ sc.TxFilter = TxFilter{
+ Off: viper.GetBool("superNode.ethSubscription.trxFilter.off"),
+ Src: viper.GetStringSlice("superNode.ethSubscription.trxFilter.src"),
+ Dst: viper.GetStringSlice("superNode.ethSubscription.trxFilter.dst"),
+ }
+ // Below defaults to false and one slice of length 0
+ // Which means we get all receipts by default
+ sc.ReceiptFilter = ReceiptFilter{
+ Off: viper.GetBool("superNode.ethSubscription.receiptFilter.off"),
+ Contracts: viper.GetStringSlice("superNode.ethSubscription.receiptFilter.contracts"),
+ Topic0s: viper.GetStringSlice("superNode.ethSubscription.receiptFilter.topic0s"),
+ }
+ // Below defaults to two false, and a slice of length 0
+ // Which means we get all state leafs by default, but no intermediate nodes
+ sc.StateFilter = StateFilter{
+ Off: viper.GetBool("superNode.ethSubscription.stateFilter.off"),
+ IntermediateNodes: viper.GetBool("superNode.ethSubscription.stateFilter.intermediateNodes"),
+ Addresses: viper.GetStringSlice("superNode.ethSubscription.stateFilter.addresses"),
+ }
+ // Below defaults to two false, and two slices of length 0
+ // Which means we get all storage leafs by default, but no intermediate nodes
+ sc.StorageFilter = StorageFilter{
+ Off: viper.GetBool("superNode.ethSubscription.storageFilter.off"),
+ IntermediateNodes: viper.GetBool("superNode.ethSubscription.storageFilter.intermediateNodes"),
+ Addresses: viper.GetStringSlice("superNode.ethSubscription.storageFilter.addresses"),
+ StorageKeys: viper.GetStringSlice("superNode.ethSubscription.storageFilter.storageKeys"),
+ }
+ return sc
+}
+
+// StartingBlock satisfies the SubscriptionSettings() interface
+func (sc *EthSubscription) StartingBlock() *big.Int {
+ return sc.Start
+}
+
+// EndingBlock satisfies the SubscriptionSettings() interface
+func (sc *EthSubscription) EndingBlock() *big.Int {
+ return sc.End
+}
+
+// HistoricalData satisfies the SubscriptionSettings() interface
+func (sc *EthSubscription) HistoricalData() bool {
+ return sc.BackFill
+}
+
+// HistoricalDataOnly satisfies the SubscriptionSettings() interface
+func (sc *EthSubscription) HistoricalDataOnly() bool {
+ return sc.BackFillOnly
+}
+
+// ChainType satisfies the SubscriptionSettings() interface
+func (sc *EthSubscription) ChainType() ChainType {
+ return Ethereum
+}
diff --git a/pkg/super_node/constructors.go b/pkg/super_node/constructors.go
new file mode 100644
index 00000000..d7cc5ae9
--- /dev/null
+++ b/pkg/super_node/constructors.go
@@ -0,0 +1,155 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package super_node
+
+import (
+ "fmt"
+
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rpc"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/config"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+)
+
+// NewResponseFilterer constructs a ResponseFilterer for the provided chain type
+func NewResponseFilterer(chain config.ChainType) (shared.ResponseFilterer, error) {
+ switch chain {
+ case config.Ethereum:
+ return eth.NewResponseFilterer(), nil
+ default:
+ return nil, fmt.Errorf("invalid chain %T for filterer constructor", chain)
+ }
+}
+
+// NewCIDIndexer constructs a CIDIndexer for the provided chain type
+func NewCIDIndexer(chain config.ChainType, db *postgres.DB) (shared.CIDIndexer, error) {
+ switch chain {
+ case config.Ethereum:
+ return eth.NewCIDIndexer(db), nil
+ default:
+ return nil, fmt.Errorf("invalid chain %T for indexer constructor", chain)
+ }
+}
+
+// NewCIDRetriever constructs a CIDRetriever for the provided chain type
+func NewCIDRetriever(chain config.ChainType, db *postgres.DB) (shared.CIDRetriever, error) {
+ switch chain {
+ case config.Ethereum:
+ return eth.NewCIDRetriever(db), nil
+ default:
+ return nil, fmt.Errorf("invalid chain %T for retriever constructor", chain)
+ }
+}
+
+// NewPayloadStreamer constructs a PayloadStreamer for the provided chain type
+func NewPayloadStreamer(chain config.ChainType, client interface{}) (shared.PayloadStreamer, chan interface{}, error) {
+ switch chain {
+ case config.Ethereum:
+ ethClient, ok := client.(core.RPCClient)
+ if !ok {
+ var expectedClientType core.RPCClient
+ return nil, nil, fmt.Errorf("ethereum payload constructor expected client type %T got %T", expectedClientType, client)
+ }
+ streamChan := make(chan interface{}, eth.PayloadChanBufferSize)
+ return eth.NewPayloadStreamer(ethClient), streamChan, nil
+ default:
+ return nil, nil, fmt.Errorf("invalid chain %T for streamer constructor", chain)
+ }
+}
+
+// NewPaylaodFetcher constructs a PayloadFetcher for the provided chain type
+func NewPaylaodFetcher(chain config.ChainType, client interface{}) (shared.PayloadFetcher, error) {
+ switch chain {
+ case config.Ethereum:
+ batchClient, ok := client.(eth.BatchClient)
+ if !ok {
+ var expectedClient eth.BatchClient
+ return nil, fmt.Errorf("ethereum fetcher constructor expected client type %T got %T", expectedClient, client)
+ }
+ return eth.NewPayloadFetcher(batchClient), nil
+ default:
+ return nil, fmt.Errorf("invalid chain %T for fetcher constructor", chain)
+ }
+}
+
+// NewPayloadConverter constructs a PayloadConverter for the provided chain type
+func NewPayloadConverter(chain config.ChainType, settings interface{}) (shared.PayloadConverter, error) {
+ switch chain {
+ case config.Ethereum:
+ ethConfig, ok := settings.(*params.ChainConfig)
+ if !ok {
+ return nil, fmt.Errorf("ethereum converter constructor expected config type %T got %T", &params.ChainConfig{}, settings)
+ }
+ return eth.NewPayloadConverter(ethConfig), nil
+ default:
+ return nil, fmt.Errorf("invalid chain %T for converter constructor", chain)
+ }
+}
+
+// NewIPLDFetcher constructs an IPLDFetcher for the provided chain type
+func NewIPLDFetcher(chain config.ChainType, ipfsPath string) (shared.IPLDFetcher, error) {
+ switch chain {
+ case config.Ethereum:
+ return eth.NewIPLDFetcher(ipfsPath)
+ default:
+ return nil, fmt.Errorf("invalid chain %T for fetcher constructor", chain)
+ }
+}
+
+// NewIPLDPublisher constructs an IPLDPublisher for the provided chain type
+func NewIPLDPublisher(chain config.ChainType, ipfsPath string) (shared.IPLDPublisher, error) {
+ switch chain {
+ case config.Ethereum:
+ return eth.NewIPLDPublisher(ipfsPath)
+ default:
+ return nil, fmt.Errorf("invalid chain %T for publisher constructor", chain)
+ }
+}
+
+// NewIPLDResolver constructs an IPLDResolver for the provided chain type
+func NewIPLDResolver(chain config.ChainType) (shared.IPLDResolver, error) {
+ switch chain {
+ case config.Ethereum:
+ return eth.NewIPLDResolver(), nil
+ default:
+ return nil, fmt.Errorf("invalid chain %T for resolver constructor", chain)
+ }
+}
+
+// NewPublicAPI constructs a PublicAPI for the provided chain type
+func NewPublicAPI(chain config.ChainType, db *postgres.DB, ipfsPath string) (rpc.API, error) {
+ switch chain {
+ case config.Ethereum:
+ backend, err := eth.NewEthBackend(db, ipfsPath)
+ if err != nil {
+ return rpc.API{}, err
+ }
+ return rpc.API{
+ Namespace: eth.APIName,
+ Version: eth.APIVersion,
+ Service: eth.NewPublicEthAPI(backend),
+ Public: true,
+ }, nil
+ default:
+ return rpc.API{}, fmt.Errorf("invalid chain %T for public api constructor", chain)
+ }
+}
diff --git a/pkg/super_node/eth/api.go b/pkg/super_node/eth/api.go
index 1537c7b2..668e96e6 100644
--- a/pkg/super_node/eth/api.go
+++ b/pkg/super_node/eth/api.go
@@ -1,3 +1,19 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
package eth
import (
@@ -9,18 +25,18 @@ import (
"github.com/ethereum/go-ethereum/rpc"
)
-// APIName is the namespace used for the state diffing service API
+// APIName is the namespace for the super node's eth api
const APIName = "eth"
-// APIVersion is the version of the state diffing service API
+// APIVersion is the version of the super node's eth api
const APIVersion = "0.0.1"
type PublicEthAPI struct {
- b Backend
+ b *Backend
}
// NewPublicEthAPI creates a new PublicEthAPI with the provided underlying Backend
-func NewPublicEthAPI(b Backend) *PublicEthAPI {
+func NewPublicEthAPI(b *Backend) *PublicEthAPI {
return &PublicEthAPI{
b: b,
}
diff --git a/pkg/super_node/eth/backend.go b/pkg/super_node/eth/backend.go
index 8cc46a75..f1d6f5cd 100644
--- a/pkg/super_node/eth/backend.go
+++ b/pkg/super_node/eth/backend.go
@@ -1,3 +1,19 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
package eth
import (
@@ -6,16 +22,13 @@ import (
"fmt"
"math/big"
- "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
-
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/hashicorp/golang-lru"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
- "github.com/vulcanize/vulcanizedb/pkg/super_node"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)
var (
@@ -23,8 +36,8 @@ var (
)
type Backend struct {
- retriever super_node.CIDRetriever
- fetcher ipfs.IPLDFetcher
+ retriever *CIDRetriever
+ fetcher *IPLDFetcher
db *postgres.DB
headerCache *lru.Cache // Cache for the most recent block headers
@@ -32,12 +45,17 @@ type Backend struct {
numberCache *lru.Cache // Cache for the most recent block numbers
}
-func NewEthBackend(r super_node.CIDRetriever, f ipfs.IPLDFetcher) *Backend {
+func NewEthBackend(db *postgres.DB, ipfsPath string) (*Backend, error) {
+ r := NewCIDRetriever(db)
+ f, err := NewIPLDFetcher(ipfsPath)
+ if err != nil {
+ return nil, err
+ }
return &Backend{
retriever: r,
fetcher: f,
db: r.Database(),
- }
+ }, nil
}
func (b *Backend) HeaderByNumber(ctx context.Context, blockNumber rpc.BlockNumber) (*types.Header, error) {
diff --git a/pkg/ipfs/converter.go b/pkg/super_node/eth/converter.go
similarity index 74%
rename from pkg/ipfs/converter.go
rename to pkg/super_node/eth/converter.go
index e47f8d70..bc1c3141 100644
--- a/pkg/ipfs/converter.go
+++ b/pkg/super_node/eth/converter.go
@@ -14,9 +14,11 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package ipfs
+package eth
import (
+ "fmt"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
@@ -24,31 +26,30 @@ import (
"github.com/ethereum/go-ethereum/statediff"
)
-// PayloadConverter interface is used to convert a geth statediff.Payload to our IPLDPayload type
-type PayloadConverter interface {
- Convert(payload statediff.Payload) (*IPLDPayload, error)
-}
-
-// Converter is the underlying struct for the PayloadConverter interface
-type Converter struct {
+// PayloadConverter satisfies the PayloadConverter interface for ethereum
+type PayloadConverter struct {
chainConfig *params.ChainConfig
}
// NewPayloadConverter creates a pointer to a new Converter which satisfies the PayloadConverter interface
-func NewPayloadConverter(chainConfig *params.ChainConfig) *Converter {
- return &Converter{
+func NewPayloadConverter(chainConfig *params.ChainConfig) *PayloadConverter {
+ return &PayloadConverter{
chainConfig: chainConfig,
}
}
// Convert method is used to convert a geth statediff.Payload to a IPLDPayload
-func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
+func (pc *PayloadConverter) Convert(payload interface{}) (interface{}, error) {
+ stateDiffPayload, ok := payload.(statediff.Payload)
+ if !ok {
+ return nil, fmt.Errorf("eth converter: expected payload type %T got %T", statediff.Payload{}, payload)
+ }
// Unpack block rlp to access fields
block := new(types.Block)
- err := rlp.DecodeBytes(payload.BlockRlp, block)
- if err != nil {
+ if err := rlp.DecodeBytes(stateDiffPayload.BlockRlp, block); err != nil {
return nil, err
}
+ // Process and publish headers
header := block.Header()
headerRlp, err := rlp.EncodeToBytes(header)
if err != nil {
@@ -56,16 +57,14 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
}
trxLen := len(block.Transactions())
convertedPayload := &IPLDPayload{
- TotalDifficulty: payload.TotalDifficulty,
- BlockHash: block.Hash(),
- BlockNumber: block.Number(),
+ TotalDifficulty: stateDiffPayload.TotalDifficulty,
+ Block: block,
HeaderRLP: headerRlp,
- BlockBody: block.Body(),
- TrxMetaData: make([]*TrxMetaData, 0, trxLen),
+ TrxMetaData: make([]TxModel, 0, trxLen),
Receipts: make(types.Receipts, 0, trxLen),
- ReceiptMetaData: make([]*ReceiptMetaData, 0, trxLen),
- StateNodes: make(map[common.Hash]StateNode),
- StorageNodes: make(map[common.Hash][]StorageNode),
+ ReceiptMetaData: make([]ReceiptModel, 0, trxLen),
+ StateNodes: make([]TrieNode, 0),
+ StorageNodes: make(map[common.Hash][]TrieNode),
}
signer := types.MakeSigner(pc.chainConfig, block.Number())
transactions := block.Transactions()
@@ -75,7 +74,7 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
if err != nil {
return nil, err
}
- txMeta := &TrxMetaData{
+ txMeta := TxModel{
Dst: handleNullAddr(trx.To()),
Src: handleNullAddr(&from),
}
@@ -85,7 +84,7 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
// Decode receipts for this block
receipts := make(types.Receipts, 0)
- if err := rlp.DecodeBytes(payload.ReceiptsRlp, &receipts); err != nil {
+ if err := rlp.DecodeBytes(stateDiffPayload.ReceiptsRlp, &receipts); err != nil {
return nil, err
}
// Derive any missing fields
@@ -100,9 +99,9 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
receipt.ContractAddress = *transactions[i].To()
}
// Extract topic0 data from the receipt's logs for indexing
- rctMeta := &ReceiptMetaData{
- Topic0s: make([]string, 0, len(receipt.Logs)),
- ContractAddress: receipt.ContractAddress.Hex(),
+ rctMeta := ReceiptModel{
+ Topic0s: make([]string, 0, len(receipt.Logs)),
+ Contract: receipt.ContractAddress.Hex(),
}
for _, log := range receipt.Logs {
if len(log.Topics) < 1 {
@@ -117,17 +116,18 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
// Unpack state diff rlp to access fields
stateDiff := new(statediff.StateDiff)
- if err = rlp.DecodeBytes(payload.StateDiffRlp, stateDiff); err != nil {
+ if err := rlp.DecodeBytes(stateDiffPayload.StateDiffRlp, stateDiff); err != nil {
return nil, err
}
for _, createdAccount := range stateDiff.CreatedAccounts {
hashKey := common.BytesToHash(createdAccount.Key)
- convertedPayload.StateNodes[hashKey] = StateNode{
+ convertedPayload.StateNodes = append(convertedPayload.StateNodes, TrieNode{
+ Key: hashKey,
Value: createdAccount.Value,
Leaf: createdAccount.Leaf,
- }
+ })
for _, storageDiff := range createdAccount.Storage {
- convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], StorageNode{
+ convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], TrieNode{
Key: common.BytesToHash(storageDiff.Key),
Value: storageDiff.Value,
Leaf: storageDiff.Leaf,
@@ -136,12 +136,13 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
}
for _, deletedAccount := range stateDiff.DeletedAccounts {
hashKey := common.BytesToHash(deletedAccount.Key)
- convertedPayload.StateNodes[hashKey] = StateNode{
+ convertedPayload.StateNodes = append(convertedPayload.StateNodes, TrieNode{
+ Key: hashKey,
Value: deletedAccount.Value,
Leaf: deletedAccount.Leaf,
- }
+ })
for _, storageDiff := range deletedAccount.Storage {
- convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], StorageNode{
+ convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], TrieNode{
Key: common.BytesToHash(storageDiff.Key),
Value: storageDiff.Value,
Leaf: storageDiff.Leaf,
@@ -150,12 +151,13 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
}
for _, updatedAccount := range stateDiff.UpdatedAccounts {
hashKey := common.BytesToHash(updatedAccount.Key)
- convertedPayload.StateNodes[hashKey] = StateNode{
+ convertedPayload.StateNodes = append(convertedPayload.StateNodes, TrieNode{
+ Key: hashKey,
Value: updatedAccount.Value,
Leaf: updatedAccount.Leaf,
- }
+ })
for _, storageDiff := range updatedAccount.Storage {
- convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], StorageNode{
+ convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], TrieNode{
Key: common.BytesToHash(storageDiff.Key),
Value: storageDiff.Value,
Leaf: storageDiff.Leaf,
diff --git a/pkg/ipfs/converter_test.go b/pkg/super_node/eth/converter_test.go
similarity index 55%
rename from pkg/ipfs/converter_test.go
rename to pkg/super_node/eth/converter_test.go
index fc346b40..a9b04902 100644
--- a/pkg/ipfs/converter_test.go
+++ b/pkg/super_node/eth/converter_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package ipfs_test
+package eth_test
import (
"github.com/ethereum/go-ethereum/params"
@@ -22,35 +22,31 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
)
var _ = Describe("Converter", func() {
Describe("Convert", func() {
It("Converts mock statediff.Payloads into the expected IPLDPayloads", func() {
- converter := ipfs.NewPayloadConverter(params.MainnetChainConfig)
- converterPayload, err := converter.Convert(mocks.MockStateDiffPayload)
+ converter := eth.NewPayloadConverter(params.MainnetChainConfig)
+ payload, err := converter.Convert(mocks.MockStateDiffPayload)
Expect(err).ToNot(HaveOccurred())
- Expect(converterPayload.BlockNumber).To(Equal(mocks.BlockNumber))
- Expect(converterPayload.BlockHash).To(Equal(mocks.MockBlock.Hash()))
- Expect(converterPayload.StateNodes).To(Equal(mocks.MockStateNodes))
- Expect(converterPayload.StorageNodes).To(Equal(mocks.MockStorageNodes))
- Expect(converterPayload.TotalDifficulty.Int64()).To(Equal(mocks.MockStateDiffPayload.TotalDifficulty.Int64()))
- gotBody, err := rlp.EncodeToBytes(converterPayload.BlockBody)
+ convertedPayload, ok := payload.(*eth.IPLDPayload)
+ Expect(ok).To(BeTrue())
+ Expect(convertedPayload.Block.Number().String()).To(Equal(mocks.BlockNumber.String()))
+ Expect(convertedPayload.Block.Hash().String()).To(Equal(mocks.MockBlock.Hash().String()))
+ Expect(convertedPayload.StateNodes).To(Equal(mocks.MockStateNodes))
+ Expect(convertedPayload.StorageNodes).To(Equal(mocks.MockStorageNodes))
+ Expect(convertedPayload.TotalDifficulty.Int64()).To(Equal(mocks.MockStateDiffPayload.TotalDifficulty.Int64()))
+ gotBody, err := rlp.EncodeToBytes(convertedPayload.Block.Body())
Expect(err).ToNot(HaveOccurred())
expectedBody, err := rlp.EncodeToBytes(mocks.MockBlock.Body())
Expect(err).ToNot(HaveOccurred())
Expect(gotBody).To(Equal(expectedBody))
- Expect(converterPayload.HeaderRLP).To(Equal(mocks.MockHeaderRlp))
- Expect(converterPayload.TrxMetaData).To(Equal(mocks.MockTrxMeta))
- Expect(converterPayload.ReceiptMetaData).To(Equal(mocks.MockRctMeta))
- })
-
- It(" Throws an error if the wrong chain config is used", func() {
- converter := ipfs.NewPayloadConverter(params.TestnetChainConfig)
- _, err := converter.Convert(mocks.MockStateDiffPayload)
- Expect(err).To(HaveOccurred())
+ Expect(convertedPayload.HeaderRLP).To(Equal(mocks.MockHeaderRlp))
+ Expect(convertedPayload.TrxMetaData).To(Equal(mocks.MockTrxMeta))
+ Expect(convertedPayload.ReceiptMetaData).To(Equal(mocks.MockRctMeta))
})
})
})
diff --git a/pkg/ipfs/ipfs_suite_test.go b/pkg/super_node/eth/eth_suite_test.go
similarity index 90%
rename from pkg/ipfs/ipfs_suite_test.go
rename to pkg/super_node/eth/eth_suite_test.go
index f62834c1..a2831e54 100644
--- a/pkg/ipfs/ipfs_suite_test.go
+++ b/pkg/super_node/eth/eth_suite_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package ipfs_test
+package eth_test
import (
"io/ioutil"
@@ -25,9 +25,9 @@ import (
"github.com/sirupsen/logrus"
)
-func TestIPFS(t *testing.T) {
+func TestETHSuperNode(t *testing.T) {
RegisterFailHandler(Fail)
- RunSpecs(t, "IPFS Suite Test")
+ RunSpecs(t, "Super Node ETH Suite Test")
}
var _ = BeforeSuite(func() {
diff --git a/pkg/super_node/filterer.go b/pkg/super_node/eth/filterer.go
similarity index 65%
rename from pkg/super_node/filterer.go
rename to pkg/super_node/eth/filterer.go
index d99179ca..4148006d 100644
--- a/pkg/super_node/filterer.go
+++ b/pkg/super_node/eth/filterer.go
@@ -14,65 +14,67 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package super_node
+package eth
import (
"bytes"
+ "fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
- "github.com/vulcanize/vulcanizedb/pkg/config"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/config"
)
-// ResponseFilterer is the inteface used to screen eth data and package appropriate data into a response payload
-type ResponseFilterer interface {
- FilterResponse(streamFilters config.Subscription, payload ipfs.IPLDPayload) (streamer.SuperNodePayload, error)
-}
-
-// Filterer is the underlying struct for the ResponseFilterer interface
-type Filterer struct{}
+// ResponseFilterer satisfies the ResponseFilterer interface for ethereum
+type ResponseFilterer struct{}
// NewResponseFilterer creates a new Filterer satisfying the ResponseFilterer interface
-func NewResponseFilterer() *Filterer {
- return &Filterer{}
+func NewResponseFilterer() *ResponseFilterer {
+ return &ResponseFilterer{}
}
-// FilterResponse is used to filter through eth data to extract and package requested data into a Payload
-func (s *Filterer) FilterResponse(streamFilters config.Subscription, payload ipfs.IPLDPayload) (streamer.SuperNodePayload, error) {
- if checkRange(streamFilters.StartingBlock.Int64(), streamFilters.EndingBlock.Int64(), payload.BlockNumber.Int64()) {
- response := new(streamer.SuperNodePayload)
- if err := s.filterHeaders(streamFilters.HeaderFilter, response, payload); err != nil {
- return streamer.SuperNodePayload{}, err
+// Filter is used to filter through eth data to extract and package requested data into a Payload
+func (s *ResponseFilterer) Filter(filter, payload interface{}) (interface{}, error) {
+ ethFilters, ok := filter.(*config.EthSubscription)
+ if !ok {
+ return StreamPayload{}, fmt.Errorf("eth filterer expected filter type %T got %T", &config.EthSubscription{}, filter)
+ }
+ ethPayload, ok := payload.(*IPLDPayload)
+ if !ok {
+ return StreamPayload{}, fmt.Errorf("eth filterer expected payload type %T got %T", &IPLDPayload{}, payload)
+ }
+ if checkRange(ethFilters.Start.Int64(), ethFilters.End.Int64(), ethPayload.Block.Number().Int64()) {
+ response := new(StreamPayload)
+ if err := s.filterHeaders(ethFilters.HeaderFilter, response, ethPayload); err != nil {
+ return StreamPayload{}, err
}
- txHashes, err := s.filterTransactions(streamFilters.TrxFilter, response, payload)
+ txHashes, err := s.filterTransactions(ethFilters.TxFilter, response, ethPayload)
if err != nil {
- return streamer.SuperNodePayload{}, err
+ return StreamPayload{}, err
}
- if err := s.filerReceipts(streamFilters.ReceiptFilter, response, payload, txHashes); err != nil {
- return streamer.SuperNodePayload{}, err
+ if err := s.filerReceipts(ethFilters.ReceiptFilter, response, ethPayload, txHashes); err != nil {
+ return StreamPayload{}, err
}
- if err := s.filterState(streamFilters.StateFilter, response, payload); err != nil {
- return streamer.SuperNodePayload{}, err
+ if err := s.filterState(ethFilters.StateFilter, response, ethPayload); err != nil {
+ return StreamPayload{}, err
}
- if err := s.filterStorage(streamFilters.StorageFilter, response, payload); err != nil {
- return streamer.SuperNodePayload{}, err
+ if err := s.filterStorage(ethFilters.StorageFilter, response, ethPayload); err != nil {
+ return StreamPayload{}, err
}
- response.BlockNumber = payload.BlockNumber
+ response.BlockNumber = ethPayload.Block.Number()
return *response, nil
}
- return streamer.SuperNodePayload{}, nil
+ return StreamPayload{}, nil
}
-func (s *Filterer) filterHeaders(headerFilter config.HeaderFilter, response *streamer.SuperNodePayload, payload ipfs.IPLDPayload) error {
+func (s *ResponseFilterer) filterHeaders(headerFilter config.HeaderFilter, response *StreamPayload, payload *IPLDPayload) error {
if !headerFilter.Off {
response.HeadersRlp = append(response.HeadersRlp, payload.HeaderRLP)
if headerFilter.Uncles {
- response.UnclesRlp = make([][]byte, 0, len(payload.BlockBody.Uncles))
- for _, uncle := range payload.BlockBody.Uncles {
+ response.UnclesRlp = make([][]byte, 0, len(payload.Block.Body().Uncles))
+ for _, uncle := range payload.Block.Body().Uncles {
uncleRlp, err := rlp.EncodeToBytes(uncle)
if err != nil {
return err
@@ -91,10 +93,10 @@ func checkRange(start, end, actual int64) bool {
return false
}
-func (s *Filterer) filterTransactions(trxFilter config.TrxFilter, response *streamer.SuperNodePayload, payload ipfs.IPLDPayload) ([]common.Hash, error) {
- trxHashes := make([]common.Hash, 0, len(payload.BlockBody.Transactions))
+func (s *ResponseFilterer) filterTransactions(trxFilter config.TxFilter, response *StreamPayload, payload *IPLDPayload) ([]common.Hash, error) {
+ trxHashes := make([]common.Hash, 0, len(payload.Block.Body().Transactions))
if !trxFilter.Off {
- for i, trx := range payload.BlockBody.Transactions {
+ for i, trx := range payload.Block.Body().Transactions {
if checkTransactions(trxFilter.Src, trxFilter.Dst, payload.TrxMetaData[i].Src, payload.TrxMetaData[i].Dst) {
trxBuffer := new(bytes.Buffer)
err := trx.EncodeRLP(trxBuffer)
@@ -127,10 +129,10 @@ func checkTransactions(wantedSrc, wantedDst []string, actualSrc, actualDst strin
return false
}
-func (s *Filterer) filerReceipts(receiptFilter config.ReceiptFilter, response *streamer.SuperNodePayload, payload ipfs.IPLDPayload, trxHashes []common.Hash) error {
+func (s *ResponseFilterer) filerReceipts(receiptFilter config.ReceiptFilter, response *StreamPayload, payload *IPLDPayload, trxHashes []common.Hash) error {
if !receiptFilter.Off {
for i, receipt := range payload.Receipts {
- if checkReceipts(receipt, receiptFilter.Topic0s, payload.ReceiptMetaData[i].Topic0s, receiptFilter.Contracts, payload.ReceiptMetaData[i].ContractAddress, trxHashes, receiptFilter.MatchTxs) {
+ if checkReceipts(receipt, receiptFilter.Topic0s, payload.ReceiptMetaData[i].Topic0s, receiptFilter.Contracts, payload.ReceiptMetaData[i].Contract, trxHashes, receiptFilter.MatchTxs) {
receiptForStorage := (*types.ReceiptForStorage)(receipt)
receiptBuffer := new(bytes.Buffer)
err := receiptForStorage.EncodeRLP(receiptBuffer)
@@ -188,18 +190,18 @@ func checkReceipts(rct *types.Receipt, wantedTopics, actualTopics, wantedContrac
return false
}
-func (s *Filterer) filterState(stateFilter config.StateFilter, response *streamer.SuperNodePayload, payload ipfs.IPLDPayload) error {
+func (s *ResponseFilterer) filterState(stateFilter config.StateFilter, response *StreamPayload, payload *IPLDPayload) error {
if !stateFilter.Off {
response.StateNodesRlp = make(map[common.Hash][]byte)
keyFilters := make([]common.Hash, 0, len(stateFilter.Addresses))
for _, addr := range stateFilter.Addresses {
- keyFilter := ipfs.AddressToKey(common.HexToAddress(addr))
+ keyFilter := AddressToKey(common.HexToAddress(addr))
keyFilters = append(keyFilters, keyFilter)
}
- for key, stateNode := range payload.StateNodes {
- if checkNodeKeys(keyFilters, key) {
+ for _, stateNode := range payload.StateNodes {
+ if checkNodeKeys(keyFilters, stateNode.Key) {
if stateNode.Leaf || stateFilter.IntermediateNodes {
- response.StateNodesRlp[key] = stateNode.Value
+ response.StateNodesRlp[stateNode.Key] = stateNode.Value
}
}
}
@@ -220,17 +222,17 @@ func checkNodeKeys(wantedKeys []common.Hash, actualKey common.Hash) bool {
return false
}
-func (s *Filterer) filterStorage(storageFilter config.StorageFilter, response *streamer.SuperNodePayload, payload ipfs.IPLDPayload) error {
+func (s *ResponseFilterer) filterStorage(storageFilter config.StorageFilter, response *StreamPayload, payload *IPLDPayload) error {
if !storageFilter.Off {
response.StorageNodesRlp = make(map[common.Hash]map[common.Hash][]byte)
stateKeyFilters := make([]common.Hash, 0, len(storageFilter.Addresses))
for _, addr := range storageFilter.Addresses {
- keyFilter := ipfs.AddressToKey(common.HexToAddress(addr))
+ keyFilter := AddressToKey(common.HexToAddress(addr))
stateKeyFilters = append(stateKeyFilters, keyFilter)
}
storageKeyFilters := make([]common.Hash, 0, len(storageFilter.StorageKeys))
for _, store := range storageFilter.StorageKeys {
- keyFilter := ipfs.HexToKey(store)
+ keyFilter := HexToKey(store)
storageKeyFilters = append(storageKeyFilters, keyFilter)
}
for stateKey, storageNodes := range payload.StorageNodes {
diff --git a/pkg/super_node/filterer_test.go b/pkg/super_node/eth/filterer_test.go
similarity index 65%
rename from pkg/super_node/filterer_test.go
rename to pkg/super_node/eth/filterer_test.go
index b24b900c..ea285f0e 100644
--- a/pkg/super_node/filterer_test.go
+++ b/pkg/super_node/eth/filterer_test.go
@@ -14,21 +14,23 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package super_node_test
+package eth_test
import (
"bytes"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+
"github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
- "github.com/vulcanize/vulcanizedb/pkg/super_node"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
)
var (
- filterer super_node.ResponseFilterer
+ filterer *eth.ResponseFilterer
expectedRctForStorageRLP1 []byte
expectedRctForStorageRLP2 []byte
)
@@ -36,34 +38,38 @@ var (
var _ = Describe("Filterer", func() {
Describe("FilterResponse", func() {
BeforeEach(func() {
- filterer = super_node.NewResponseFilterer()
+ filterer = eth.NewResponseFilterer()
expectedRctForStorageRLP1 = getReceiptForStorageRLP(mocks.MockReceipts, 0)
expectedRctForStorageRLP2 = getReceiptForStorageRLP(mocks.MockReceipts, 1)
})
- It("Transcribes all the data from the IPLDPayload into the SuperNodePayload if given an open filter", func() {
- superNodePayload, err := filterer.FilterResponse(openFilter, *mocks.MockIPLDPayload)
+ It("Transcribes all the data from the IPLDPayload into the StreamPayload if given an open filter", func() {
+ payload, err := filterer.Filter(openFilter, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
- Expect(superNodePayload.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
- Expect(superNodePayload.HeadersRlp).To(Equal(mocks.MockSeeNodePayload.HeadersRlp))
+ superNodePayload, ok := payload.(eth.StreamPayload)
+ Expect(ok).To(BeTrue())
+ Expect(superNodePayload.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
+ Expect(superNodePayload.HeadersRlp).To(Equal(mocks.MockSeedNodePayload.HeadersRlp))
var unclesRlp [][]byte
Expect(superNodePayload.UnclesRlp).To(Equal(unclesRlp))
Expect(len(superNodePayload.TransactionsRlp)).To(Equal(2))
- Expect(super_node.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
- Expect(super_node.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
+ Expect(shared.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
+ Expect(shared.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(len(superNodePayload.ReceiptsRlp)).To(Equal(2))
- Expect(super_node.ListContainsBytes(superNodePayload.ReceiptsRlp, expectedRctForStorageRLP1)).To(BeTrue())
- Expect(super_node.ListContainsBytes(superNodePayload.ReceiptsRlp, expectedRctForStorageRLP2)).To(BeTrue())
+ Expect(shared.ListContainsBytes(superNodePayload.ReceiptsRlp, expectedRctForStorageRLP1)).To(BeTrue())
+ Expect(shared.ListContainsBytes(superNodePayload.ReceiptsRlp, expectedRctForStorageRLP2)).To(BeTrue())
Expect(len(superNodePayload.StateNodesRlp)).To(Equal(2))
Expect(superNodePayload.StateNodesRlp[mocks.ContractLeafKey]).To(Equal(mocks.ValueBytes))
Expect(superNodePayload.StateNodesRlp[mocks.AnotherContractLeafKey]).To(Equal(mocks.AnotherValueBytes))
- Expect(superNodePayload.StorageNodesRlp).To(Equal(mocks.MockSeeNodePayload.StorageNodesRlp))
+ Expect(superNodePayload.StorageNodesRlp).To(Equal(mocks.MockSeedNodePayload.StorageNodesRlp))
})
It("Applies filters from the provided config.Subscription", func() {
- superNodePayload1, err := filterer.FilterResponse(rctContractFilter, *mocks.MockIPLDPayload)
+ payload1, err := filterer.Filter(rctContractFilter, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
- Expect(superNodePayload1.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
+ superNodePayload1, ok := payload1.(eth.StreamPayload)
+ Expect(ok).To(BeTrue())
+ Expect(superNodePayload1.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(len(superNodePayload1.HeadersRlp)).To(Equal(0))
Expect(len(superNodePayload1.UnclesRlp)).To(Equal(0))
Expect(len(superNodePayload1.TransactionsRlp)).To(Equal(0))
@@ -72,9 +78,11 @@ var _ = Describe("Filterer", func() {
Expect(len(superNodePayload1.ReceiptsRlp)).To(Equal(1))
Expect(superNodePayload1.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP2))
- superNodePayload2, err := filterer.FilterResponse(rctTopicsFilter, *mocks.MockIPLDPayload)
+ payload2, err := filterer.Filter(rctTopicsFilter, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
- Expect(superNodePayload2.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
+ superNodePayload2, ok := payload2.(eth.StreamPayload)
+ Expect(ok).To(BeTrue())
+ Expect(superNodePayload2.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(len(superNodePayload2.HeadersRlp)).To(Equal(0))
Expect(len(superNodePayload2.UnclesRlp)).To(Equal(0))
Expect(len(superNodePayload2.TransactionsRlp)).To(Equal(0))
@@ -83,9 +91,11 @@ var _ = Describe("Filterer", func() {
Expect(len(superNodePayload2.ReceiptsRlp)).To(Equal(1))
Expect(superNodePayload2.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP1))
- superNodePayload3, err := filterer.FilterResponse(rctTopicsAndContractFilter, *mocks.MockIPLDPayload)
+ payload3, err := filterer.Filter(rctTopicsAndContractFilter, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
- Expect(superNodePayload3.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
+ superNodePayload3, ok := payload3.(eth.StreamPayload)
+ Expect(ok).To(BeTrue())
+ Expect(superNodePayload3.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(len(superNodePayload3.HeadersRlp)).To(Equal(0))
Expect(len(superNodePayload3.UnclesRlp)).To(Equal(0))
Expect(len(superNodePayload3.TransactionsRlp)).To(Equal(0))
@@ -94,9 +104,11 @@ var _ = Describe("Filterer", func() {
Expect(len(superNodePayload3.ReceiptsRlp)).To(Equal(1))
Expect(superNodePayload3.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP1))
- superNodePayload4, err := filterer.FilterResponse(rctContractsAndTopicFilter, *mocks.MockIPLDPayload)
+ payload4, err := filterer.Filter(rctContractsAndTopicFilter, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
- Expect(superNodePayload4.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
+ superNodePayload4, ok := payload4.(eth.StreamPayload)
+ Expect(ok).To(BeTrue())
+ Expect(superNodePayload4.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(len(superNodePayload4.HeadersRlp)).To(Equal(0))
Expect(len(superNodePayload4.UnclesRlp)).To(Equal(0))
Expect(len(superNodePayload4.TransactionsRlp)).To(Equal(0))
@@ -105,35 +117,41 @@ var _ = Describe("Filterer", func() {
Expect(len(superNodePayload4.ReceiptsRlp)).To(Equal(1))
Expect(superNodePayload4.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP2))
- superNodePayload5, err := filterer.FilterResponse(rctsForAllCollectedTrxs, *mocks.MockIPLDPayload)
+ payload5, err := filterer.Filter(rctsForAllCollectedTrxs, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
- Expect(superNodePayload5.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
+ superNodePayload5, ok := payload5.(eth.StreamPayload)
+ Expect(ok).To(BeTrue())
+ Expect(superNodePayload5.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(len(superNodePayload5.HeadersRlp)).To(Equal(0))
Expect(len(superNodePayload5.UnclesRlp)).To(Equal(0))
Expect(len(superNodePayload5.TransactionsRlp)).To(Equal(2))
- Expect(super_node.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
- Expect(super_node.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
+ Expect(shared.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
+ Expect(shared.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(len(superNodePayload5.StorageNodesRlp)).To(Equal(0))
Expect(len(superNodePayload5.StateNodesRlp)).To(Equal(0))
Expect(len(superNodePayload5.ReceiptsRlp)).To(Equal(2))
- Expect(super_node.ListContainsBytes(superNodePayload5.ReceiptsRlp, expectedRctForStorageRLP1)).To(BeTrue())
- Expect(super_node.ListContainsBytes(superNodePayload5.ReceiptsRlp, expectedRctForStorageRLP2)).To(BeTrue())
+ Expect(shared.ListContainsBytes(superNodePayload5.ReceiptsRlp, expectedRctForStorageRLP1)).To(BeTrue())
+ Expect(shared.ListContainsBytes(superNodePayload5.ReceiptsRlp, expectedRctForStorageRLP2)).To(BeTrue())
- superNodePayload6, err := filterer.FilterResponse(rctsForSelectCollectedTrxs, *mocks.MockIPLDPayload)
+ payload6, err := filterer.Filter(rctsForSelectCollectedTrxs, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
- Expect(superNodePayload6.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
+ superNodePayload6, ok := payload6.(eth.StreamPayload)
+ Expect(ok).To(BeTrue())
+ Expect(superNodePayload6.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(len(superNodePayload6.HeadersRlp)).To(Equal(0))
Expect(len(superNodePayload6.UnclesRlp)).To(Equal(0))
Expect(len(superNodePayload6.TransactionsRlp)).To(Equal(1))
- Expect(super_node.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
+ Expect(shared.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(len(superNodePayload6.StorageNodesRlp)).To(Equal(0))
Expect(len(superNodePayload6.StateNodesRlp)).To(Equal(0))
Expect(len(superNodePayload6.ReceiptsRlp)).To(Equal(1))
Expect(superNodePayload4.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP2))
- superNodePayload7, err := filterer.FilterResponse(stateFilter, *mocks.MockIPLDPayload)
+ payload7, err := filterer.Filter(stateFilter, mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
- Expect(superNodePayload7.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
+ superNodePayload7, ok := payload7.(eth.StreamPayload)
+ Expect(ok).To(BeTrue())
+ Expect(superNodePayload7.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
Expect(len(superNodePayload7.HeadersRlp)).To(Equal(0))
Expect(len(superNodePayload7.UnclesRlp)).To(Equal(0))
Expect(len(superNodePayload7.TransactionsRlp)).To(Equal(0))
diff --git a/pkg/super_node/eth/helpers.go b/pkg/super_node/eth/helpers.go
new file mode 100644
index 00000000..fec021b7
--- /dev/null
+++ b/pkg/super_node/eth/helpers.go
@@ -0,0 +1,33 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package eth
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+)
+
+// AddressToKey hashes an address
+func AddressToKey(address common.Address) common.Hash {
+ return crypto.Keccak256Hash(address[:])
+}
+
+// HexToKey hashes a hex (0x leading or not) string
+func HexToKey(hex string) common.Hash {
+ addr := common.FromHex(hex)
+ return crypto.Keccak256Hash(addr[:])
+}
diff --git a/pkg/super_node/eth/indexer.go b/pkg/super_node/eth/indexer.go
new file mode 100644
index 00000000..e94061b8
--- /dev/null
+++ b/pkg/super_node/eth/indexer.go
@@ -0,0 +1,150 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package eth
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/jmoiron/sqlx"
+ "github.com/lib/pq"
+ log "github.com/sirupsen/logrus"
+
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+)
+
+// Indexer satisfies the Indexer interface for ethereum
+type CIDIndexer struct {
+ db *postgres.DB
+}
+
+// NewCIDIndexer creates a new pointer to a Indexer which satisfies the CIDIndexer interface
+func NewCIDIndexer(db *postgres.DB) *CIDIndexer {
+ return &CIDIndexer{
+ db: db,
+ }
+}
+
+// Index indexes a cidPayload in Postgres
+func (in *CIDIndexer) Index(cids interface{}) error {
+ cidPayload, ok := cids.(*CIDPayload)
+ if !ok {
+ return fmt.Errorf("eth indexer expected cids type %T got %T", &CIDPayload{}, cids)
+ }
+ tx, err := in.db.Beginx()
+ if err != nil {
+ return err
+ }
+ headerID, err := in.indexHeaderCID(tx, cidPayload.HeaderCID)
+ if err != nil {
+ if err := tx.Rollback(); err != nil {
+ log.Error(err)
+ }
+ return err
+ }
+ for _, uncle := range cidPayload.UncleCIDs {
+ err := in.indexUncleCID(tx, uncle)
+ if err != nil {
+ if err := tx.Rollback(); err != nil {
+ log.Error(err)
+ }
+ return err
+ }
+ }
+ if err := in.indexTransactionAndReceiptCIDs(tx, cidPayload, headerID); err != nil {
+ if err := tx.Rollback(); err != nil {
+ log.Error(err)
+ }
+ return err
+ }
+ if err := in.indexStateAndStorageCIDs(tx, cidPayload, headerID); err != nil {
+ if err := tx.Rollback(); err != nil {
+ log.Error(err)
+ }
+ return err
+ }
+ return tx.Commit()
+}
+
+func (repo *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel) (int64, error) {
+ var headerID int64
+ err := tx.QueryRowx(`INSERT INTO public.header_cids (block_number, block_hash, parent_hash, cid, uncle, td) VALUES ($1, $2, $3, $4, $5, $6)
+ ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, uncle, td) = ($3, $4, $5, $6)
+ RETURNING id`,
+ header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, false, header.TotalDifficulty).Scan(&headerID)
+ return headerID, err
+}
+
+func (in *CIDIndexer) indexUncleCID(tx *sqlx.Tx, uncle HeaderModel) error {
+ _, err := tx.Exec(`INSERT INTO public.header_cids (block_number, block_hash, parent_hash, cid, uncle) VALUES ($1, $2, $3, $4, $5)
+ ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, uncle) = ($3, $4, $5)`,
+ uncle.BlockNumber, uncle.BlockHash, uncle.ParentHash, uncle.CID, true)
+ return err
+}
+
+func (in *CIDIndexer) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error {
+ for _, trxCidMeta := range payload.TransactionCIDs {
+ var txID int64
+ err := tx.QueryRowx(`INSERT INTO public.transaction_cids (header_id, tx_hash, cid, dst, src) VALUES ($1, $2, $3, $4, $5)
+ ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src) = ($3, $4, $5)
+ RETURNING id`,
+ headerID, trxCidMeta.TxHash, trxCidMeta.CID, trxCidMeta.Dst, trxCidMeta.Src).Scan(&txID)
+ if err != nil {
+ return err
+ }
+ receiptCidMeta, ok := payload.ReceiptCIDs[common.HexToHash(trxCidMeta.TxHash)]
+ if ok {
+ if err := in.indexReceiptCID(tx, receiptCidMeta, txID); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (in *CIDIndexer) indexReceiptCID(tx *sqlx.Tx, cidMeta ReceiptModel, txID int64) error {
+ _, err := tx.Exec(`INSERT INTO public.receipt_cids (tx_id, cid, contract, topic0s) VALUES ($1, $2, $3, $4)`,
+ txID, cidMeta.CID, cidMeta.Contract, pq.Array(cidMeta.Topic0s))
+ return err
+}
+
+func (in *CIDIndexer) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error {
+ for _, stateCID := range payload.StateNodeCIDs {
+ var stateID int64
+ err := tx.QueryRowx(`INSERT INTO public.state_cids (header_id, state_key, cid, leaf) VALUES ($1, $2, $3, $4)
+ ON CONFLICT (header_id, state_key) DO UPDATE SET (cid, leaf) = ($3, $4)
+ RETURNING id`,
+ headerID, stateCID.StateKey, stateCID.CID, stateCID.Leaf).Scan(&stateID)
+ if err != nil {
+ return err
+ }
+ for _, storageCID := range payload.StorageNodeCIDs[common.HexToHash(stateCID.StateKey)] {
+ if err := in.indexStorageCID(tx, storageCID, stateID); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (in *CIDIndexer) indexStorageCID(tx *sqlx.Tx, storageCID StorageNodeModel, stateID int64) error {
+ _, err := tx.Exec(`INSERT INTO public.storage_cids (state_id, storage_key, cid, leaf) VALUES ($1, $2, $3, $4)
+ ON CONFLICT (state_id, storage_key) DO UPDATE SET (cid, leaf) = ($3, $4)`,
+ stateID, storageCID.StorageKey, storageCID.CID, storageCID.Leaf)
+ return err
+}
diff --git a/pkg/super_node/repository_test.go b/pkg/super_node/eth/indexer_test.go
similarity index 73%
rename from pkg/super_node/repository_test.go
rename to pkg/super_node/eth/indexer_test.go
index a21ef665..a6bf6c39 100644
--- a/pkg/super_node/repository_test.go
+++ b/pkg/super_node/eth/indexer_test.go
@@ -14,30 +14,32 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
-package super_node_test
+package eth_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
- "github.com/vulcanize/vulcanizedb/pkg/super_node"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ eth2 "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
)
-var _ = Describe("Repository", func() {
+var _ = Describe("Indexer", func() {
var (
db *postgres.DB
err error
- repo super_node.CIDRepository
+ repo *eth2.CIDIndexer
)
BeforeEach(func() {
- db, err = super_node.SetupDB()
+ db, err = eth.SetupDB()
Expect(err).ToNot(HaveOccurred())
- repo = super_node.NewCIDRepository(db)
+ repo = eth2.NewCIDIndexer(db)
})
AfterEach(func() {
- super_node.TearDownDB(db)
+ eth.TearDownDB(db)
})
Describe("Index", func() {
@@ -63,8 +65,8 @@ var _ = Describe("Repository", func() {
err = db.Select(&trxs, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(trxs)).To(Equal(2))
- Expect(super_node.ListContainsString(trxs, "mockTrxCID1")).To(BeTrue())
- Expect(super_node.ListContainsString(trxs, "mockTrxCID2")).To(BeTrue())
+ Expect(shared.ListContainsString(trxs, "mockTrxCID1")).To(BeTrue())
+ Expect(shared.ListContainsString(trxs, "mockTrxCID2")).To(BeTrue())
// check receipts were properly indexed
rcts := make([]string, 0)
pgStr = `SELECT receipt_cids.cid FROM receipt_cids, transaction_cids, header_cids
@@ -74,10 +76,10 @@ var _ = Describe("Repository", func() {
err = db.Select(&rcts, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(rcts)).To(Equal(2))
- Expect(super_node.ListContainsString(rcts, "mockRctCID1")).To(BeTrue())
- Expect(super_node.ListContainsString(rcts, "mockRctCID2")).To(BeTrue())
+ Expect(shared.ListContainsString(rcts, "mockRctCID1")).To(BeTrue())
+ Expect(shared.ListContainsString(rcts, "mockRctCID2")).To(BeTrue())
// check that state nodes were properly indexed
- stateNodes := make([]ipfs.StateNodeCID, 0)
+ stateNodes := make([]eth.StateNodeModel, 0)
pgStr = `SELECT state_cids.cid, state_cids.state_key, state_cids.leaf FROM state_cids INNER JOIN header_cids ON (state_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
err = db.Select(&stateNodes, pgStr, 1)
@@ -86,15 +88,15 @@ var _ = Describe("Repository", func() {
for _, stateNode := range stateNodes {
if stateNode.CID == "mockStateCID1" {
Expect(stateNode.Leaf).To(Equal(true))
- Expect(stateNode.Key).To(Equal(mocks.ContractLeafKey.Hex()))
+ Expect(stateNode.StateKey).To(Equal(mocks.ContractLeafKey.Hex()))
}
if stateNode.CID == "mockStateCID2" {
Expect(stateNode.Leaf).To(Equal(true))
- Expect(stateNode.Key).To(Equal(mocks.AnotherContractLeafKey.Hex()))
+ Expect(stateNode.StateKey).To(Equal(mocks.AnotherContractLeafKey.Hex()))
}
}
// check that storage nodes were properly indexed
- storageNodes := make([]ipfs.StorageNodeCID, 0)
+ storageNodes := make([]eth.StorageNodeWithStateKeyModel, 0)
pgStr = `SELECT storage_cids.cid, state_cids.state_key, storage_cids.storage_key, storage_cids.leaf FROM storage_cids, state_cids, header_cids
WHERE storage_cids.state_id = state_cids.id
AND state_cids.header_id = header_cids.id
@@ -102,11 +104,11 @@ var _ = Describe("Repository", func() {
err = db.Select(&storageNodes, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(storageNodes)).To(Equal(1))
- Expect(storageNodes[0]).To(Equal(ipfs.StorageNodeCID{
- CID: "mockStorageCID",
- Leaf: true,
- Key: "0x0000000000000000000000000000000000000000000000000000000000000001",
- StateKey: mocks.ContractLeafKey.Hex(),
+ Expect(storageNodes[0]).To(Equal(eth.StorageNodeWithStateKeyModel{
+ CID: "mockStorageCID",
+ Leaf: true,
+ StorageKey: "0x0000000000000000000000000000000000000000000000000000000000000001",
+ StateKey: mocks.ContractLeafKey.Hex(),
}))
})
})
diff --git a/pkg/ipfs/fetcher.go b/pkg/super_node/eth/ipld_fetcher.go
similarity index 70%
rename from pkg/ipfs/fetcher.go
rename to pkg/super_node/eth/ipld_fetcher.go
index 84f74ccb..ea46db92 100644
--- a/pkg/ipfs/fetcher.go
+++ b/pkg/super_node/eth/ipld_fetcher.go
@@ -14,78 +14,73 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
-package ipfs
+package eth
import (
"context"
"errors"
+ "fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-block-format"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
log "github.com/sirupsen/logrus"
+
+ "github.com/vulcanize/vulcanizedb/pkg/ipfs"
)
var (
errUnexpectedNumberOfIPLDs = errors.New("ipfs batch fetch returned unexpected number of IPLDs")
)
-// IPLDFetcher is an interface for fetching IPLDs
-type IPLDFetcher interface {
- FetchIPLDs(cids CIDWrapper) (*IPLDWrapper, error)
- FetchHeaders(cids []string) ([]blocks.Block, error)
- FetchUncles(cids []string) ([]blocks.Block, error)
- FetchTrxs(cids []string) ([]blocks.Block, error)
- FetchRcts(cids []string) ([]blocks.Block, error)
- FetchState(cids []StateNodeCID) (map[common.Hash]blocks.Block, error)
- FetchStorage(cids []StorageNodeCID) (map[common.Hash]map[common.Hash]blocks.Block, error)
-}
-
-// EthIPLDFetcher is used to fetch ETH IPLD objects from IPFS
-type EthIPLDFetcher struct {
+// IPLDFetcher satisfies the IPLDFetcher interface for ethereum
+type IPLDFetcher struct {
BlockService blockservice.BlockService
}
// NewIPLDFetcher creates a pointer to a new IPLDFetcher
-func NewIPLDFetcher(ipfsPath string) (*EthIPLDFetcher, error) {
- blockService, err := InitIPFSBlockService(ipfsPath)
+func NewIPLDFetcher(ipfsPath string) (*IPLDFetcher, error) {
+ blockService, err := ipfs.InitIPFSBlockService(ipfsPath)
if err != nil {
return nil, err
}
- return &EthIPLDFetcher{
+ return &IPLDFetcher{
BlockService: blockService,
}, nil
}
-// FetchIPLDs is the exported method for fetching and returning all the IPLDS specified in the CIDWrapper
-func (f *EthIPLDFetcher) FetchIPLDs(cids CIDWrapper) (*IPLDWrapper, error) {
-
+// Fetch is the exported method for fetching and returning all the IPLDS specified in the CIDWrapper
+func (f *IPLDFetcher) Fetch(cids interface{}) (interface{}, error) {
+ cidWrapper, ok := cids.(*CIDWrapper)
+ if !ok {
+ return nil, fmt.Errorf("eth fetcher: expected cids type %T got %T", &CIDWrapper{}, cids)
+ }
log.Debug("fetching iplds")
iplds := new(IPLDWrapper)
- iplds.BlockNumber = cids.BlockNumber
+ iplds.BlockNumber = cidWrapper.BlockNumber
var err error
- iplds.Headers, err = f.FetchHeaders(cids.Headers)
+ iplds.Headers, err = f.FetchHeaders(cidWrapper.Headers)
if err != nil {
return nil, err
}
- iplds.Uncles, err = f.FetchUncles(cids.Uncles)
+ iplds.Uncles, err = f.FetchUncles(cidWrapper.Uncles)
if err != nil {
return nil, err
}
- iplds.Transactions, err = f.FetchTrxs(cids.Transactions)
+ iplds.Transactions, err = f.FetchTrxs(cidWrapper.Transactions)
if err != nil {
return nil, err
}
- iplds.Receipts, err = f.FetchRcts(cids.Receipts)
+ iplds.Receipts, err = f.FetchRcts(cidWrapper.Receipts)
if err != nil {
return nil, err
}
- iplds.StateNodes, err = f.FetchState(cids.StateNodes)
+ iplds.StateNodes, err = f.FetchState(cidWrapper.StateNodes)
if err != nil {
return nil, err
}
- iplds.StorageNodes, err = f.FetchStorage(cids.StorageNodes)
+ iplds.StorageNodes, err = f.FetchStorage(cidWrapper.StorageNodes)
if err != nil {
return nil, err
}
@@ -94,11 +89,11 @@ func (f *EthIPLDFetcher) FetchIPLDs(cids CIDWrapper) (*IPLDWrapper, error) {
// FetchHeaders fetches headers
// It uses the f.fetchBatch method
-func (f *EthIPLDFetcher) FetchHeaders(cids []string) ([]blocks.Block, error) {
+func (f *IPLDFetcher) FetchHeaders(cids []HeaderModel) ([]blocks.Block, error) {
log.Debug("fetching header iplds")
headerCids := make([]cid.Cid, 0, len(cids))
for _, c := range cids {
- dc, err := cid.Decode(c)
+ dc, err := cid.Decode(c.CID)
if err != nil {
return nil, err
}
@@ -114,11 +109,11 @@ func (f *EthIPLDFetcher) FetchHeaders(cids []string) ([]blocks.Block, error) {
// FetchUncles fetches uncles
// It uses the f.fetchBatch method
-func (f *EthIPLDFetcher) FetchUncles(cids []string) ([]blocks.Block, error) {
+func (f *IPLDFetcher) FetchUncles(cids []HeaderModel) ([]blocks.Block, error) {
log.Debug("fetching uncle iplds")
uncleCids := make([]cid.Cid, 0, len(cids))
for _, c := range cids {
- dc, err := cid.Decode(c)
+ dc, err := cid.Decode(c.CID)
if err != nil {
return nil, err
}
@@ -134,11 +129,11 @@ func (f *EthIPLDFetcher) FetchUncles(cids []string) ([]blocks.Block, error) {
// FetchTrxs fetches transactions
// It uses the f.fetchBatch method
-func (f *EthIPLDFetcher) FetchTrxs(cids []string) ([]blocks.Block, error) {
+func (f *IPLDFetcher) FetchTrxs(cids []TxModel) ([]blocks.Block, error) {
log.Debug("fetching transaction iplds")
trxCids := make([]cid.Cid, 0, len(cids))
for _, c := range cids {
- dc, err := cid.Decode(c)
+ dc, err := cid.Decode(c.CID)
if err != nil {
return nil, err
}
@@ -154,11 +149,11 @@ func (f *EthIPLDFetcher) FetchTrxs(cids []string) ([]blocks.Block, error) {
// FetchRcts fetches receipts
// It uses the f.fetchBatch method
-func (f *EthIPLDFetcher) FetchRcts(cids []string) ([]blocks.Block, error) {
+func (f *IPLDFetcher) FetchRcts(cids []ReceiptModel) ([]blocks.Block, error) {
log.Debug("fetching receipt iplds")
rctCids := make([]cid.Cid, 0, len(cids))
for _, c := range cids {
- dc, err := cid.Decode(c)
+ dc, err := cid.Decode(c.CID)
if err != nil {
return nil, err
}
@@ -175,11 +170,11 @@ func (f *EthIPLDFetcher) FetchRcts(cids []string) ([]blocks.Block, error) {
// FetchState fetches state nodes
// It uses the single f.fetch method instead of the batch fetch, because it
// needs to maintain the data's relation to state keys
-func (f *EthIPLDFetcher) FetchState(cids []StateNodeCID) (map[common.Hash]blocks.Block, error) {
+func (f *IPLDFetcher) FetchState(cids []StateNodeModel) (map[common.Hash]blocks.Block, error) {
log.Debug("fetching state iplds")
stateNodes := make(map[common.Hash]blocks.Block)
for _, stateNode := range cids {
- if stateNode.CID == "" || stateNode.Key == "" {
+ if stateNode.CID == "" || stateNode.StateKey == "" {
continue
}
dc, err := cid.Decode(stateNode.CID)
@@ -190,7 +185,7 @@ func (f *EthIPLDFetcher) FetchState(cids []StateNodeCID) (map[common.Hash]blocks
if err != nil {
return nil, err
}
- stateNodes[common.HexToHash(stateNode.Key)] = state
+ stateNodes[common.HexToHash(stateNode.StateKey)] = state
}
return stateNodes, nil
}
@@ -198,11 +193,11 @@ func (f *EthIPLDFetcher) FetchState(cids []StateNodeCID) (map[common.Hash]blocks
// FetchStorage fetches storage nodes
// It uses the single f.fetch method instead of the batch fetch, because it
// needs to maintain the data's relation to state and storage keys
-func (f *EthIPLDFetcher) FetchStorage(cids []StorageNodeCID) (map[common.Hash]map[common.Hash]blocks.Block, error) {
+func (f *IPLDFetcher) FetchStorage(cids []StorageNodeWithStateKeyModel) (map[common.Hash]map[common.Hash]blocks.Block, error) {
log.Debug("fetching storage iplds")
storageNodes := make(map[common.Hash]map[common.Hash]blocks.Block)
for _, storageNode := range cids {
- if storageNode.CID == "" || storageNode.Key == "" || storageNode.StateKey == "" {
+ if storageNode.CID == "" || storageNode.StorageKey == "" || storageNode.StateKey == "" {
continue
}
dc, err := cid.Decode(storageNode.CID)
@@ -216,20 +211,20 @@ func (f *EthIPLDFetcher) FetchStorage(cids []StorageNodeCID) (map[common.Hash]ma
if storageNodes[common.HexToHash(storageNode.StateKey)] == nil {
storageNodes[common.HexToHash(storageNode.StateKey)] = make(map[common.Hash]blocks.Block)
}
- storageNodes[common.HexToHash(storageNode.StateKey)][common.HexToHash(storageNode.Key)] = storage
+ storageNodes[common.HexToHash(storageNode.StateKey)][common.HexToHash(storageNode.StorageKey)] = storage
}
return storageNodes, nil
}
// fetch is used to fetch a single cid
-func (f *EthIPLDFetcher) fetch(cid cid.Cid) (blocks.Block, error) {
+func (f *IPLDFetcher) fetch(cid cid.Cid) (blocks.Block, error) {
return f.BlockService.GetBlock(context.Background(), cid)
}
// fetchBatch is used to fetch a batch of IPFS data blocks by cid
// There is no guarantee all are fetched, and no error in such a case, so
// downstream we will need to confirm which CIDs were fetched in the result set
-func (f *EthIPLDFetcher) fetchBatch(cids []cid.Cid) []blocks.Block {
+func (f *IPLDFetcher) fetchBatch(cids []cid.Cid) []blocks.Block {
fetchedBlocks := make([]blocks.Block, 0, len(cids))
blockChan := f.BlockService.GetBlocks(context.Background(), cids)
for block := range blockChan {
diff --git a/pkg/ipfs/fetcher_test.go b/pkg/super_node/eth/ipld_fetcher_test.go
similarity index 73%
rename from pkg/ipfs/fetcher_test.go
rename to pkg/super_node/eth/ipld_fetcher_test.go
index 8205f86b..9b15daa3 100644
--- a/pkg/ipfs/fetcher_test.go
+++ b/pkg/super_node/eth/ipld_fetcher_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
-package ipfs_test
+package eth_test
import (
"math/big"
@@ -24,8 +24,8 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
)
var (
@@ -45,28 +45,44 @@ var (
mockStorageBlock2 = blocks.NewBlock(mockStorageData2)
mockBlocks = []blocks.Block{mockHeaderBlock, mockUncleBlock, mockTrxBlock, mockReceiptBlock, mockStateBlock, mockStorageBlock1, mockStorageBlock2}
mockBlockService *mocks.MockIPFSBlockService
- mockCIDWrapper = ipfs.CIDWrapper{
- BlockNumber: big.NewInt(9000),
- Headers: []string{mockHeaderBlock.Cid().String()},
- Uncles: []string{mockUncleBlock.Cid().String()},
- Transactions: []string{mockTrxBlock.Cid().String()},
- Receipts: []string{mockReceiptBlock.Cid().String()},
- StateNodes: []ipfs.StateNodeCID{{
- CID: mockStateBlock.Cid().String(),
- Leaf: true,
- Key: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
- }},
- StorageNodes: []ipfs.StorageNodeCID{{
- CID: mockStorageBlock1.Cid().String(),
+ mockCIDWrapper = &eth.CIDWrapper{
+ BlockNumber: big.NewInt(9000),
+ Headers: []eth.HeaderModel{
+ {
+ CID: mockHeaderBlock.Cid().String(),
+ },
+ },
+ Uncles: []eth.HeaderModel{
+ {
+ CID: mockUncleBlock.Cid().String(),
+ },
+ },
+ Transactions: []eth.TxModel{
+ {
+ CID: mockTrxBlock.Cid().String(),
+ },
+ },
+ Receipts: []eth.ReceiptModel{
+ {
+ CID: mockReceiptBlock.Cid().String(),
+ },
+ },
+ StateNodes: []eth.StateNodeModel{{
+ CID: mockStateBlock.Cid().String(),
Leaf: true,
StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
- Key: "0000000000000000000000000000000000000000000000000000000000000001",
+ }},
+ StorageNodes: []eth.StorageNodeWithStateKeyModel{{
+ CID: mockStorageBlock1.Cid().String(),
+ Leaf: true,
+ StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
+ StorageKey: "0000000000000000000000000000000000000000000000000000000000000001",
},
{
- CID: mockStorageBlock2.Cid().String(),
- Leaf: true,
- StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
- Key: "0000000000000000000000000000000000000000000000000000000000000002",
+ CID: mockStorageBlock2.Cid().String(),
+ Leaf: true,
+ StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
+ StorageKey: "0000000000000000000000000000000000000000000000000000000000000002",
}},
}
)
@@ -81,10 +97,12 @@ var _ = Describe("Fetcher", func() {
})
It("Fetches and returns IPLDs for the CIDs provided in the CIDWrapper", func() {
- fetcher := new(ipfs.EthIPLDFetcher)
+ fetcher := new(eth.IPLDFetcher)
fetcher.BlockService = mockBlockService
- iplds, err := fetcher.FetchIPLDs(mockCIDWrapper)
+ i, err := fetcher.Fetch(mockCIDWrapper)
Expect(err).ToNot(HaveOccurred())
+ iplds, ok := i.(*eth.IPLDWrapper)
+ Expect(ok).To(BeTrue())
Expect(iplds.BlockNumber).To(Equal(mockCIDWrapper.BlockNumber))
Expect(len(iplds.Headers)).To(Equal(1))
Expect(iplds.Headers[0]).To(Equal(mockHeaderBlock))
diff --git a/pkg/ipfs/mocks/blockservice.go b/pkg/super_node/eth/mocks/blockservice.go
similarity index 100%
rename from pkg/ipfs/mocks/blockservice.go
rename to pkg/super_node/eth/mocks/blockservice.go
diff --git a/pkg/ipfs/mocks/converter.go b/pkg/super_node/eth/mocks/converter.go
similarity index 68%
rename from pkg/ipfs/mocks/converter.go
rename to pkg/super_node/eth/mocks/converter.go
index 0aaa9b53..65761f3f 100644
--- a/pkg/ipfs/mocks/converter.go
+++ b/pkg/super_node/eth/mocks/converter.go
@@ -21,33 +21,41 @@ import (
"github.com/ethereum/go-ethereum/statediff"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)
// PayloadConverter is the underlying struct for the Converter interface
type PayloadConverter struct {
PassedStatediffPayload statediff.Payload
- ReturnIPLDPayload *ipfs.IPLDPayload
+ ReturnIPLDPayload *eth.IPLDPayload
ReturnErr error
}
// Convert method is used to convert a geth statediff.Payload to a IPLDPayload
-func (pc *PayloadConverter) Convert(payload statediff.Payload) (*ipfs.IPLDPayload, error) {
- pc.PassedStatediffPayload = payload
+func (pc *PayloadConverter) Convert(payload interface{}) (interface{}, error) {
+ stateDiffPayload, ok := payload.(statediff.Payload)
+ if !ok {
+ return nil, fmt.Errorf("convert expected payload type %T got %T", statediff.Payload{}, payload)
+ }
+ pc.PassedStatediffPayload = stateDiffPayload
return pc.ReturnIPLDPayload, pc.ReturnErr
}
// IterativePayloadConverter is the underlying struct for the Converter interface
type IterativePayloadConverter struct {
PassedStatediffPayload []statediff.Payload
- ReturnIPLDPayload []*ipfs.IPLDPayload
+ ReturnIPLDPayload []*eth.IPLDPayload
ReturnErr error
iteration int
}
// Convert method is used to convert a geth statediff.Payload to a IPLDPayload
-func (pc *IterativePayloadConverter) Convert(payload statediff.Payload) (*ipfs.IPLDPayload, error) {
- pc.PassedStatediffPayload = append(pc.PassedStatediffPayload, payload)
+func (pc *IterativePayloadConverter) Convert(payload interface{}) (interface{}, error) {
+ stateDiffPayload, ok := payload.(statediff.Payload)
+ if !ok {
+ return nil, fmt.Errorf("convert expected payload type %T got %T", statediff.Payload{}, payload)
+ }
+ pc.PassedStatediffPayload = append(pc.PassedStatediffPayload, stateDiffPayload)
if len(pc.PassedStatediffPayload) < pc.iteration+1 {
return nil, fmt.Errorf("IterativePayloadConverter does not have a payload to return at iteration %d", pc.iteration)
}
diff --git a/pkg/ipfs/mocks/dag_putters.go b/pkg/super_node/eth/mocks/dag_putters.go
similarity index 100%
rename from pkg/ipfs/mocks/dag_putters.go
rename to pkg/super_node/eth/mocks/dag_putters.go
diff --git a/pkg/super_node/eth/mocks/fetcher.go b/pkg/super_node/eth/mocks/fetcher.go
new file mode 100644
index 00000000..542ca0b8
--- /dev/null
+++ b/pkg/super_node/eth/mocks/fetcher.go
@@ -0,0 +1,50 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package mocks
+
+import (
+ "errors"
+ "sync/atomic"
+
+ "github.com/ethereum/go-ethereum/statediff"
+)
+
+// StateDiffFetcher mock for tests
+type StateDiffFetcher struct {
+ PayloadsToReturn map[uint64]statediff.Payload
+ FetchErrs map[uint64]error
+ CalledAtBlockHeights [][]uint64
+ CalledTimes int64
+}
+
+// FetchStateDiffsAt mock method
+func (fetcher *StateDiffFetcher) FetchAt(blockHeights []uint64) ([]interface{}, error) {
+ if fetcher.PayloadsToReturn == nil {
+ return nil, errors.New("mock StateDiffFetcher needs to be initialized with payloads to return")
+ }
+ atomic.AddInt64(&fetcher.CalledTimes, 1) // thread-safe increment
+ fetcher.CalledAtBlockHeights = append(fetcher.CalledAtBlockHeights, blockHeights)
+ results := make([]interface{}, 0, len(blockHeights))
+ for _, height := range blockHeights {
+ results = append(results, fetcher.PayloadsToReturn[height])
+ err, ok := fetcher.FetchErrs[height]
+ if ok && err != nil {
+ return nil, err
+ }
+ }
+ return results, nil
+}
diff --git a/pkg/super_node/mocks/repository.go b/pkg/super_node/eth/mocks/indexer.go
similarity index 69%
rename from pkg/super_node/mocks/repository.go
rename to pkg/super_node/eth/mocks/indexer.go
index 4c37f468..05e6cd38 100644
--- a/pkg/super_node/mocks/repository.go
+++ b/pkg/super_node/eth/mocks/indexer.go
@@ -16,16 +16,24 @@
package mocks
-import "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+import (
+ "fmt"
-// CIDRepository is the underlying struct for the Repository interface
-type CIDRepository struct {
- PassedCIDPayload []*ipfs.CIDPayload
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+)
+
+// CIDIndexer is the underlying struct for the Indexer interface
+type CIDIndexer struct {
+ PassedCIDPayload []*eth.CIDPayload
ReturnErr error
}
// Index indexes a cidPayload in Postgres
-func (repo *CIDRepository) Index(cidPayload *ipfs.CIDPayload) error {
+func (repo *CIDIndexer) Index(cids interface{}) error {
+ cidPayload, ok := cids.(*eth.CIDPayload)
+ if !ok {
+ return fmt.Errorf("index expected cids type %T got %T", &eth.CIDPayload{}, cids)
+ }
repo.PassedCIDPayload = append(repo.PassedCIDPayload, cidPayload)
return repo.ReturnErr
}
diff --git a/pkg/ipfs/mocks/publisher.go b/pkg/super_node/eth/mocks/publisher.go
similarity index 66%
rename from pkg/ipfs/mocks/publisher.go
rename to pkg/super_node/eth/mocks/publisher.go
index c8f64449..6b85ff66 100644
--- a/pkg/ipfs/mocks/publisher.go
+++ b/pkg/super_node/eth/mocks/publisher.go
@@ -19,33 +19,41 @@ package mocks
import (
"fmt"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)
// IPLDPublisher is the underlying struct for the Publisher interface
type IPLDPublisher struct {
- PassedIPLDPayload *ipfs.IPLDPayload
- ReturnCIDPayload *ipfs.CIDPayload
+ PassedIPLDPayload *eth.IPLDPayload
+ ReturnCIDPayload *eth.CIDPayload
ReturnErr error
}
// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
-func (pub *IPLDPublisher) Publish(payload *ipfs.IPLDPayload) (*ipfs.CIDPayload, error) {
- pub.PassedIPLDPayload = payload
+func (pub *IPLDPublisher) Publish(payload interface{}) (interface{}, error) {
+ ipldPayload, ok := payload.(*eth.IPLDPayload)
+ if !ok {
+ return nil, fmt.Errorf("publish expected payload type %T got %T", &eth.IPLDPayload{}, payload)
+ }
+ pub.PassedIPLDPayload = ipldPayload
return pub.ReturnCIDPayload, pub.ReturnErr
}
// IterativeIPLDPublisher is the underlying struct for the Publisher interface; used in testing
type IterativeIPLDPublisher struct {
- PassedIPLDPayload []*ipfs.IPLDPayload
- ReturnCIDPayload []*ipfs.CIDPayload
+ PassedIPLDPayload []*eth.IPLDPayload
+ ReturnCIDPayload []*eth.CIDPayload
ReturnErr error
iteration int
}
// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
-func (pub *IterativeIPLDPublisher) Publish(payload *ipfs.IPLDPayload) (*ipfs.CIDPayload, error) {
- pub.PassedIPLDPayload = append(pub.PassedIPLDPayload, payload)
+func (pub *IterativeIPLDPublisher) Publish(payload interface{}) (interface{}, error) {
+ ipldPayload, ok := payload.(*eth.IPLDPayload)
+ if !ok {
+ return nil, fmt.Errorf("publish expected payload type %T got %T", &eth.IPLDPayload{}, payload)
+ }
+ pub.PassedIPLDPayload = append(pub.PassedIPLDPayload, ipldPayload)
if len(pub.ReturnCIDPayload) < pub.iteration+1 {
return nil, fmt.Errorf("IterativeIPLDPublisher does not have a payload to return at iteration %d", pub.iteration)
}
diff --git a/pkg/super_node/eth/mocks/retriever.go b/pkg/super_node/eth/mocks/retriever.go
new file mode 100644
index 00000000..3620514e
--- /dev/null
+++ b/pkg/super_node/eth/mocks/retriever.go
@@ -0,0 +1,64 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package mocks
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+)
+
+// MockCIDRetriever is a mock CID retriever for use in tests
+type MockCIDRetriever struct {
+ GapsToRetrieve []shared.Gap
+ GapsToRetrieveErr error
+ CalledTimes int
+ FirstBlockNumberToReturn int64
+ RetrieveFirstBlockNumberErr error
+}
+
+// RetrieveCIDs mock method
+func (*MockCIDRetriever) Retrieve(filter interface{}, blockNumber int64) (interface{}, bool, error) {
+ panic("implement me")
+}
+
+// RetrieveLastBlockNumber mock method
+func (*MockCIDRetriever) RetrieveLastBlockNumber() (int64, error) {
+ panic("implement me")
+}
+
+// RetrieveFirstBlockNumber mock method
+func (mcr *MockCIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
+ return mcr.FirstBlockNumberToReturn, mcr.RetrieveFirstBlockNumberErr
+}
+
+// RetrieveGapsInData mock method
+func (mcr *MockCIDRetriever) RetrieveGapsInData() ([]shared.Gap, error) {
+ mcr.CalledTimes++
+ return mcr.GapsToRetrieve, mcr.GapsToRetrieveErr
+}
+
+// SetGapsToRetrieve mock method
+func (mcr *MockCIDRetriever) SetGapsToRetrieve(gaps []shared.Gap) {
+ if mcr.GapsToRetrieve == nil {
+ mcr.GapsToRetrieve = make([]shared.Gap, 0)
+ }
+ mcr.GapsToRetrieve = append(mcr.GapsToRetrieve, gaps...)
+}
+
+func (mcr *MockCIDRetriever) Database() *postgres.DB {
+ panic("implement me")
+}
diff --git a/pkg/super_node/eth/mocks/streamer.go b/pkg/super_node/eth/mocks/streamer.go
new file mode 100644
index 00000000..83186b16
--- /dev/null
+++ b/pkg/super_node/eth/mocks/streamer.go
@@ -0,0 +1,43 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package mocks
+
+import (
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/statediff"
+)
+
+// StateDiffStreamer is the underlying struct for the Streamer interface
+type StateDiffStreamer struct {
+ PassedPayloadChan chan interface{}
+ ReturnSub *rpc.ClientSubscription
+ ReturnErr error
+ StreamPayloads []statediff.Payload
+}
+
+// Stream is the main loop for subscribing to data from the Geth state diff process
+func (sds *StateDiffStreamer) Stream(payloadChan chan interface{}) (*rpc.ClientSubscription, error) {
+ sds.PassedPayloadChan = payloadChan
+
+ go func() {
+ for _, payload := range sds.StreamPayloads {
+ sds.PassedPayloadChan <- payload
+ }
+ }()
+
+ return sds.ReturnSub, sds.ReturnErr
+}
diff --git a/pkg/ipfs/mocks/test_data.go b/pkg/super_node/eth/mocks/test_data.go
similarity index 68%
rename from pkg/ipfs/mocks/test_data.go
rename to pkg/super_node/eth/mocks/test_data.go
index 9c42df8f..71d6feed 100644
--- a/pkg/ipfs/mocks/test_data.go
+++ b/pkg/super_node/eth/mocks/test_data.go
@@ -23,24 +23,23 @@ import (
"math/big"
rand2 "math/rand"
- "github.com/ipfs/go-block-format"
- "github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
-
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff"
+ "github.com/ipfs/go-block-format"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ eth2 "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
)
// Test variables
var (
// block data
- BlockNumber = big.NewInt(rand2.Int63())
+ BlockNumber = big.NewInt(1)
MockHeader = types.Header{
Time: 0,
Number: BlockNumber,
@@ -53,7 +52,7 @@ var (
MockBlock = types.NewBlock(&MockHeader, MockTransactions, nil, MockReceipts)
MockBlockRlp, _ = rlp.EncodeToBytes(MockBlock)
MockHeaderRlp, _ = rlp.EncodeToBytes(MockBlock.Header())
- MockTrxMeta = []*ipfs.TrxMetaData{
+ MockTrxMeta = []eth.TxModel{
{
CID: "", // This is empty until we go to publish to ipfs
Src: senderAddr.Hex(),
@@ -65,20 +64,20 @@ var (
Dst: "0x0000000000000000000000000000000000000001",
},
}
- MockRctMeta = []*ipfs.ReceiptMetaData{
+ MockRctMeta = []eth.ReceiptModel{
{
CID: "",
Topic0s: []string{
"0x0000000000000000000000000000000000000000000000000000000000000004",
},
- ContractAddress: "0x0000000000000000000000000000000000000000",
+ Contract: "0x0000000000000000000000000000000000000000",
},
{
CID: "",
Topic0s: []string{
"0x0000000000000000000000000000000000000000000000000000000000000005",
},
- ContractAddress: "0x0000000000000000000000000000000000000001",
+ Contract: "0x0000000000000000000000000000000000000001",
},
}
@@ -101,9 +100,9 @@ var (
}}
emptyStorage = make([]statediff.StorageDiff, 0)
Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
- ContractLeafKey = ipfs.AddressToKey(Address)
+ ContractLeafKey = eth.AddressToKey(Address)
AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
- AnotherContractLeafKey = ipfs.AddressToKey(AnotherAddress)
+ AnotherContractLeafKey = eth.AddressToKey(AnotherAddress)
testAccount = state.Account{
Nonce: NonceValue,
Balance: big.NewInt(BalanceValue),
@@ -139,17 +138,19 @@ var (
CreatedAccounts: CreatedAccountDiffs,
}
MockStateDiffBytes, _ = rlp.EncodeToBytes(MockStateDiff)
- MockStateNodes = map[common.Hash]ipfs.StateNode{
- ContractLeafKey: {
+ MockStateNodes = []eth.TrieNode{
+ {
+ Key: ContractLeafKey,
Value: ValueBytes,
Leaf: true,
},
- AnotherContractLeafKey: {
+ {
+ Key: AnotherContractLeafKey,
Value: AnotherValueBytes,
Leaf: true,
},
}
- MockStorageNodes = map[common.Hash][]ipfs.StorageNode{
+ MockStorageNodes = map[common.Hash][]eth.TrieNode{
ContractLeafKey: {
{
Key: common.BytesToHash(StorageKey),
@@ -167,14 +168,12 @@ var (
TotalDifficulty: big.NewInt(1337),
}
- MockIPLDPayload = &ipfs.IPLDPayload{
+ MockIPLDPayload = &eth.IPLDPayload{
TotalDifficulty: big.NewInt(1337),
- BlockNumber: big.NewInt(1),
- BlockHash: MockBlock.Hash(),
+ Block: MockBlock,
Receipts: MockReceipts,
HeaderRLP: MockHeaderRlp,
- BlockBody: MockBlock.Body(),
- TrxMetaData: []*ipfs.TrxMetaData{
+ TrxMetaData: []eth.TxModel{
{
CID: "",
Src: senderAddr.Hex(),
@@ -186,109 +185,148 @@ var (
Dst: "0x0000000000000000000000000000000000000001",
},
},
- ReceiptMetaData: []*ipfs.ReceiptMetaData{
+ ReceiptMetaData: []eth.ReceiptModel{
{
CID: "",
Topic0s: []string{
"0x0000000000000000000000000000000000000000000000000000000000000004",
},
- ContractAddress: "0x0000000000000000000000000000000000000000",
+ Contract: "0x0000000000000000000000000000000000000000",
},
{
CID: "",
Topic0s: []string{
"0x0000000000000000000000000000000000000000000000000000000000000005",
},
- ContractAddress: "0x0000000000000000000000000000000000000001",
+ Contract: "0x0000000000000000000000000000000000000001",
},
},
StorageNodes: MockStorageNodes,
StateNodes: MockStateNodes,
}
- MockCIDPayload = &ipfs.CIDPayload{
- TotalDifficulty: "1337",
- BlockNumber: "1",
- BlockHash: MockBlock.Hash(),
- HeaderCID: "mockHeaderCID",
- UncleCIDs: make(map[common.Hash]string),
- TransactionCIDs: map[common.Hash]*ipfs.TrxMetaData{
+ MockCIDPayload = &eth.CIDPayload{
+ HeaderCID: eth2.HeaderModel{
+ BlockHash: MockBlock.Hash().String(),
+ BlockNumber: MockBlock.Number().String(),
+ CID: "mockHeaderCID",
+ Uncle: false,
+ ParentHash: MockBlock.ParentHash().String(),
+ TotalDifficulty: "1337",
+ },
+ UncleCIDs: []eth2.HeaderModel{},
+ TransactionCIDs: []eth.TxModel{
+ {
+ TxHash: MockTransactions[0].Hash().String(),
+ CID: "mockTrxCID1",
+ Dst: "0x0000000000000000000000000000000000000000",
+ Src: senderAddr.Hex(),
+ },
+ {
+ TxHash: MockTransactions[1].Hash().String(),
+ CID: "mockTrxCID2",
+ Dst: "0x0000000000000000000000000000000000000001",
+ Src: senderAddr.Hex(),
+ },
+ },
+ ReceiptCIDs: map[common.Hash]eth.ReceiptModel{
MockTransactions[0].Hash(): {
- CID: "mockTrxCID1",
- Dst: "0x0000000000000000000000000000000000000000",
- Src: senderAddr.Hex(),
+ CID: "mockRctCID1",
+ Topic0s: []string{"0x0000000000000000000000000000000000000000000000000000000000000004"},
+ Contract: "0x0000000000000000000000000000000000000000",
},
MockTransactions[1].Hash(): {
- CID: "mockTrxCID2",
- Dst: "0x0000000000000000000000000000000000000001",
- Src: senderAddr.Hex(),
+ CID: "mockRctCID2",
+ Topic0s: []string{"0x0000000000000000000000000000000000000000000000000000000000000005"},
+ Contract: "0x0000000000000000000000000000000000000001",
},
},
- ReceiptCIDs: map[common.Hash]*ipfs.ReceiptMetaData{
- MockTransactions[0].Hash(): {
- CID: "mockRctCID1",
- Topic0s: []string{"0x0000000000000000000000000000000000000000000000000000000000000004"},
- ContractAddress: "0x0000000000000000000000000000000000000000",
+ StateNodeCIDs: []eth.StateNodeModel{
+ {
+ CID: "mockStateCID1",
+ Leaf: true,
+ StateKey: ContractLeafKey.String(),
},
- MockTransactions[1].Hash(): {
- CID: "mockRctCID2",
- Topic0s: []string{"0x0000000000000000000000000000000000000000000000000000000000000005"},
- ContractAddress: "0x0000000000000000000000000000000000000001",
+ {
+ CID: "mockStateCID2",
+ Leaf: true,
+ StateKey: AnotherContractLeafKey.String(),
},
},
- StateNodeCIDs: map[common.Hash]ipfs.StateNodeCID{
- ContractLeafKey: {
- CID: "mockStateCID1",
- Leaf: true,
- Key: "",
- },
- AnotherContractLeafKey: {
- CID: "mockStateCID2",
- Leaf: true,
- Key: "",
- },
- },
- StorageNodeCIDs: map[common.Hash][]ipfs.StorageNodeCID{
+ StorageNodeCIDs: map[common.Hash][]eth.StorageNodeModel{
ContractLeafKey: {
{
- CID: "mockStorageCID",
- Key: "0x0000000000000000000000000000000000000000000000000000000000000001",
- Leaf: true,
- StateKey: "",
+ CID: "mockStorageCID",
+ StorageKey: "0x0000000000000000000000000000000000000000000000000000000000000001",
+ Leaf: true,
},
},
},
}
- MockCIDWrapper = &ipfs.CIDWrapper{
- BlockNumber: big.NewInt(1),
- Headers: []string{"mockHeaderCID"},
- Transactions: []string{"mockTrxCID1", "mockTrxCID2"},
- Receipts: []string{"mockRctCID1", "mockRctCID2"},
- Uncles: []string{},
- StateNodes: []ipfs.StateNodeCID{
+ MockCIDWrapper = &eth.CIDWrapper{
+ BlockNumber: big.NewInt(1),
+ Headers: []eth2.HeaderModel{
{
- CID: "mockStateCID1",
- Leaf: true,
- Key: ContractLeafKey.Hex(),
- },
- {
- CID: "mockStateCID2",
- Leaf: true,
- Key: AnotherContractLeafKey.Hex(),
+ BlockNumber: "1",
+ BlockHash: MockBlock.Hash().String(),
+ ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
+ CID: "mockHeaderCID",
+ Uncle: false,
+ TotalDifficulty: "1337",
},
},
- StorageNodes: []ipfs.StorageNodeCID{
+ Transactions: []eth2.TxModel{
{
- CID: "mockStorageCID",
+ CID: "mockTrxCID1",
+ },
+ {
+ TxHash: MockTransactions[1].Hash().String(),
+ CID: "mockTrxCID2",
+ Dst: "0x0000000000000000000000000000000000000001",
+ Src: senderAddr.String(),
+ },
+ },
+ Receipts: []eth2.ReceiptModel{
+ {
+ CID: "mockRctCID1",
+ Contract: "0x0000000000000000000000000000000000000000",
+ Topic0s: []string{
+ "0x0000000000000000000000000000000000000000000000000000000000000004",
+ },
+ },
+ {
+ CID: "mockRctCID2",
+ Contract: "0x0000000000000000000000000000000000000001",
+ Topic0s: []string{
+ "0x0000000000000000000000000000000000000000000000000000000000000005",
+ },
+ },
+ },
+ Uncles: []eth2.HeaderModel{},
+ StateNodes: []eth.StateNodeModel{
+ {
+ CID: "mockStateCID1",
Leaf: true,
StateKey: ContractLeafKey.Hex(),
- Key: "0x0000000000000000000000000000000000000000000000000000000000000001",
+ },
+ {
+ CID: "mockStateCID2",
+ Leaf: true,
+ StateKey: AnotherContractLeafKey.Hex(),
+ },
+ },
+ StorageNodes: []eth.StorageNodeWithStateKeyModel{
+ {
+ CID: "mockStorageCID",
+ Leaf: true,
+ StateKey: ContractLeafKey.Hex(),
+ StorageKey: "0x0000000000000000000000000000000000000000000000000000000000000001",
},
},
}
- MockIPLDWrapper = ipfs.IPLDWrapper{
+ MockIPLDWrapper = &eth.IPLDWrapper{
BlockNumber: big.NewInt(1),
Headers: []blocks.Block{
blocks.NewBlock(MockHeaderRlp),
@@ -312,7 +350,7 @@ var (
},
}
- MockSeeNodePayload = streamer.SuperNodePayload{
+ MockSeedNodePayload = eth2.StreamPayload{
BlockNumber: big.NewInt(1),
HeadersRlp: [][]byte{MockHeaderRlp},
UnclesRlp: [][]byte{},
diff --git a/pkg/super_node/eth/models.go b/pkg/super_node/eth/models.go
new file mode 100644
index 00000000..0530bb86
--- /dev/null
+++ b/pkg/super_node/eth/models.go
@@ -0,0 +1,71 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package eth
+
+import "github.com/lib/pq"
+
+type HeaderModel struct {
+ ID int64 `db:"id"`
+ BlockNumber string `db:"block_number"`
+ BlockHash string `db:"block_hash"`
+ ParentHash string `db:"parent_hash"`
+ CID string `db:"cid"`
+ Uncle bool `db:"uncle"`
+ TotalDifficulty string `db:"td"`
+}
+
+type TxModel struct {
+ ID int64 `db:"id"`
+ HeaderID int64 `db:"header_id"`
+ TxHash string `db:"tx_hash"`
+ CID string `db:"cid"`
+ Dst string `db:"dst"`
+ Src string `db:"src"`
+}
+
+type ReceiptModel struct {
+ ID int64 `db:"id"`
+ TxID int64 `db:"tx_id"`
+ CID string `db:"cid"`
+ Contract string `db:"contract"`
+ Topic0s pq.StringArray `db:"topic0s"`
+}
+
+type StateNodeModel struct {
+ ID int64 `db:"id"`
+ HeaderID int64 `db:"header_id"`
+ StateKey string `db:"state_key"`
+ Leaf bool `db:"leaf"`
+ CID string `db:"cid"`
+}
+
+type StorageNodeModel struct {
+ ID int64 `db:"id"`
+ StateID int64 `db:"state_id"`
+ StorageKey string `db:"storage_key"`
+ Leaf bool `db:"leaf"`
+ CID string `db:"cid"`
+}
+
+type StorageNodeWithStateKeyModel struct {
+ ID int64 `db:"id"`
+ StateID int64 `db:"state_id"`
+ StateKey string `db:"state_key"`
+ StorageKey string `db:"storage_key"`
+ Leaf bool `db:"leaf"`
+ CID string `db:"cid"`
+}
diff --git a/pkg/super_node/eth/payload_fetcher.go b/pkg/super_node/eth/payload_fetcher.go
new file mode 100644
index 00000000..2029fd07
--- /dev/null
+++ b/pkg/super_node/eth/payload_fetcher.go
@@ -0,0 +1,74 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package eth
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/statediff"
+
+ "github.com/vulcanize/vulcanizedb/pkg/eth/client"
+)
+
+// BatchClient is an interface to a batch-fetching geth rpc client; created to allow mock insertion
+type BatchClient interface {
+ BatchCall(batch []client.BatchElem) error
+}
+
+// PayloadFetcher satisfies the PayloadFetcher interface for ethereum
+type PayloadFetcher struct {
+ // PayloadFetcher is thread-safe as long as the underlying client is thread-safe, since it has/modifies no other state
+ // http.Client is thread-safe
+ client BatchClient
+}
+
+const method = "statediff_stateDiffAt"
+
+// NewPayloadFetcher returns a PayloadFetcher
+func NewPayloadFetcher(bc BatchClient) *PayloadFetcher {
+ return &PayloadFetcher{
+ client: bc,
+ }
+}
+
+// FetchAt fetches the statediff payloads at the given block heights
+// Calls StateDiffAt(ctx context.Context, blockNumber uint64) (*Payload, error)
+func (fetcher *PayloadFetcher) FetchAt(blockHeights []uint64) ([]interface{}, error) {
+ batch := make([]client.BatchElem, 0)
+ for _, height := range blockHeights {
+ batch = append(batch, client.BatchElem{
+ Method: method,
+ Args: []interface{}{height},
+ Result: new(statediff.Payload),
+ })
+ }
+ batchErr := fetcher.client.BatchCall(batch)
+ if batchErr != nil {
+ return nil, fmt.Errorf("PayloadFetcher err: %s", batchErr.Error())
+ }
+ results := make([]interface{}, 0, len(blockHeights))
+ for _, batchElem := range batch {
+ if batchElem.Error != nil {
+ return nil, fmt.Errorf("PayloadFetcher err: %s", batchElem.Error.Error())
+ }
+ payload, ok := batchElem.Result.(*statediff.Payload)
+ if ok {
+ results = append(results, *payload)
+ }
+ }
+ return results, nil
+}
diff --git a/pkg/super_node/eth/payload_fetcher_test.go b/pkg/super_node/eth/payload_fetcher_test.go
new file mode 100644
index 00000000..0451aae3
--- /dev/null
+++ b/pkg/super_node/eth/payload_fetcher_test.go
@@ -0,0 +1,59 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package eth_test
+
+import (
+ "github.com/ethereum/go-ethereum/statediff"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
+ "github.com/vulcanize/vulcanizedb/libraries/shared/test_data"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+)
+
+var _ = Describe("StateDiffFetcher", func() {
+ Describe("FetchStateDiffsAt", func() {
+ var (
+ mc *mocks.BackFillerClient
+ stateDiffFetcher *eth.PayloadFetcher
+ )
+ BeforeEach(func() {
+ mc = new(mocks.BackFillerClient)
+ setDiffAtErr1 := mc.SetReturnDiffAt(test_data.BlockNumber.Uint64(), test_data.MockStatediffPayload)
+ Expect(setDiffAtErr1).ToNot(HaveOccurred())
+ setDiffAtErr2 := mc.SetReturnDiffAt(test_data.BlockNumber2.Uint64(), test_data.MockStatediffPayload2)
+ Expect(setDiffAtErr2).ToNot(HaveOccurred())
+ stateDiffFetcher = eth.NewPayloadFetcher(mc)
+ })
+ It("Batch calls statediff_stateDiffAt", func() {
+ blockHeights := []uint64{
+ test_data.BlockNumber.Uint64(),
+ test_data.BlockNumber2.Uint64(),
+ }
+ stateDiffPayloads, fetchErr := stateDiffFetcher.FetchAt(blockHeights)
+ Expect(fetchErr).ToNot(HaveOccurred())
+ Expect(len(stateDiffPayloads)).To(Equal(2))
+ payload1, ok := stateDiffPayloads[0].(statediff.Payload)
+ Expect(ok).To(BeTrue())
+ payload2, ok := stateDiffPayloads[1].(statediff.Payload)
+ Expect(ok).To(BeTrue())
+ Expect(payload1).To(Equal(test_data.MockStatediffPayload))
+ Expect(payload2).To(Equal(test_data.MockStatediffPayload2))
+ })
+ })
+})
diff --git a/pkg/ipfs/publisher.go b/pkg/super_node/eth/publisher.go
similarity index 58%
rename from pkg/ipfs/publisher.go
rename to pkg/super_node/eth/publisher.go
index f44c09ca..74a580e6 100644
--- a/pkg/ipfs/publisher.go
+++ b/pkg/super_node/eth/publisher.go
@@ -14,14 +14,16 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package ipfs
+package eth
import (
"errors"
+ "fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
+
"github.com/vulcanize/eth-block-extractor/pkg/ipfs"
"github.com/vulcanize/eth-block-extractor/pkg/ipfs/eth_block_header"
"github.com/vulcanize/eth-block-extractor/pkg/ipfs/eth_block_receipts"
@@ -31,13 +33,8 @@ import (
rlp2 "github.com/vulcanize/eth-block-extractor/pkg/wrappers/rlp"
)
-// IPLDPublisher is the interface for publishing an IPLD payload
-type IPLDPublisher interface {
- Publish(payload *IPLDPayload) (*CIDPayload, error)
-}
-
-// Publisher is the underlying struct for the IPLDPublisher interface
-type Publisher struct {
+// IPLDPublisher satisfies the IPLDPublisher interface for ethereum
+type IPLDPublisher struct {
HeaderPutter ipfs.DagPutter
TransactionPutter ipfs.DagPutter
ReceiptPutter ipfs.DagPutter
@@ -46,12 +43,12 @@ type Publisher struct {
}
// NewIPLDPublisher creates a pointer to a new Publisher which satisfies the IPLDPublisher interface
-func NewIPLDPublisher(ipfsPath string) (*Publisher, error) {
+func NewIPLDPublisher(ipfsPath string) (*IPLDPublisher, error) {
node, err := ipfs.InitIPFSNode(ipfsPath)
if err != nil {
return nil, err
}
- return &Publisher{
+ return &IPLDPublisher{
HeaderPutter: eth_block_header.NewBlockHeaderDagPutter(node, rlp2.RlpDecoder{}),
TransactionPutter: eth_block_transactions.NewBlockTransactionsDagPutter(node),
ReceiptPutter: eth_block_receipts.NewEthBlockReceiptDagPutter(node),
@@ -61,57 +58,72 @@ func NewIPLDPublisher(ipfsPath string) (*Publisher, error) {
}
// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
-func (pub *Publisher) Publish(payload *IPLDPayload) (*CIDPayload, error) {
+func (pub *IPLDPublisher) Publish(payload interface{}) (interface{}, error) {
+ ipldPayload, ok := payload.(*IPLDPayload)
+ if !ok {
+ return nil, fmt.Errorf("eth publisher expected payload type %T got %T", &IPLDPayload{}, payload)
+ }
// Process and publish headers
- headerCid, err := pub.publishHeaders(payload.HeaderRLP)
+ headerCid, err := pub.publishHeader(ipldPayload.HeaderRLP)
if err != nil {
return nil, err
}
+ header := HeaderModel{
+ CID: headerCid,
+ ParentHash: ipldPayload.Block.ParentHash().String(),
+ BlockNumber: ipldPayload.Block.Number().String(),
+ Uncle: false,
+ BlockHash: ipldPayload.Block.Hash().String(),
+ TotalDifficulty: ipldPayload.TotalDifficulty.String(),
+ }
// Process and publish uncles
- uncleCids := make(map[common.Hash]string)
- for _, uncle := range payload.BlockBody.Uncles {
+ uncleCids := make([]HeaderModel, 0, len(ipldPayload.Block.Uncles()))
+ for _, uncle := range ipldPayload.Block.Uncles() {
uncleRlp, err := rlp.EncodeToBytes(uncle)
if err != nil {
return nil, err
}
- cid, err := pub.publishHeaders(uncleRlp)
+ uncleCid, err := pub.publishHeader(uncleRlp)
if err != nil {
return nil, err
}
- uncleCids[uncle.Hash()] = cid
+ uncleCids = append(uncleCids, HeaderModel{
+ CID: uncleCid,
+ ParentHash: uncle.ParentHash.String(),
+ Uncle: true,
+ BlockHash: uncle.Hash().String(),
+ BlockNumber: uncle.Number.String(),
+ })
}
// Process and publish transactions
- transactionCids, err := pub.publishTransactions(payload.BlockBody, payload.TrxMetaData)
+ transactionCids, err := pub.publishTransactions(ipldPayload.Block.Body(), ipldPayload.TrxMetaData)
if err != nil {
return nil, err
}
// Process and publish receipts
- receiptsCids, err := pub.publishReceipts(payload.Receipts, payload.ReceiptMetaData)
+ receiptsCids, err := pub.publishReceipts(ipldPayload.Receipts, ipldPayload.ReceiptMetaData)
if err != nil {
return nil, err
}
// Process and publish state leafs
- stateNodeCids, err := pub.publishStateNodes(payload.StateNodes)
+ stateNodeCids, err := pub.publishStateNodes(ipldPayload.StateNodes)
if err != nil {
return nil, err
}
// Process and publish storage leafs
- storageNodeCids, err := pub.publishStorageNodes(payload.StorageNodes)
+ storageNodeCids, err := pub.publishStorageNodes(ipldPayload.StorageNodes)
if err != nil {
return nil, err
}
// Package CIDs and their metadata into a single struct
return &CIDPayload{
- TotalDifficulty: payload.TotalDifficulty.String(),
- BlockHash: payload.BlockHash,
- BlockNumber: payload.BlockNumber.String(),
- HeaderCID: headerCid,
+ HeaderCID: header,
UncleCIDs: uncleCids,
TransactionCIDs: transactionCids,
ReceiptCIDs: receiptsCids,
@@ -120,7 +132,7 @@ func (pub *Publisher) Publish(payload *IPLDPayload) (*CIDPayload, error) {
}, nil
}
-func (pub *Publisher) publishHeaders(headerRLP []byte) (string, error) {
+func (pub *IPLDPublisher) publishHeader(headerRLP []byte) (string, error) {
headerCids, err := pub.HeaderPutter.DagPut(headerRLP)
if err != nil {
return "", err
@@ -131,7 +143,7 @@ func (pub *Publisher) publishHeaders(headerRLP []byte) (string, error) {
return headerCids[0], nil
}
-func (pub *Publisher) publishTransactions(blockBody *types.Body, trxMeta []*TrxMetaData) (map[common.Hash]*TrxMetaData, error) {
+func (pub *IPLDPublisher) publishTransactions(blockBody *types.Body, trxMeta []TxModel) ([]TxModel, error) {
transactionCids, err := pub.TransactionPutter.DagPut(blockBody)
if err != nil {
return nil, err
@@ -139,15 +151,19 @@ func (pub *Publisher) publishTransactions(blockBody *types.Body, trxMeta []*TrxM
if len(transactionCids) != len(blockBody.Transactions) {
return nil, errors.New("expected one CID for each transaction")
}
- mappedTrxCids := make(map[common.Hash]*TrxMetaData, len(transactionCids))
+ mappedTrxCids := make([]TxModel, len(transactionCids))
for i, trx := range blockBody.Transactions {
- mappedTrxCids[trx.Hash()] = trxMeta[i]
- mappedTrxCids[trx.Hash()].CID = transactionCids[i]
+ mappedTrxCids[i] = TxModel{
+ CID: transactionCids[i],
+ TxHash: trx.Hash().Hex(),
+ Src: trxMeta[i].Src,
+ Dst: trxMeta[i].Dst,
+ }
}
return mappedTrxCids, nil
}
-func (pub *Publisher) publishReceipts(receipts types.Receipts, receiptMeta []*ReceiptMetaData) (map[common.Hash]*ReceiptMetaData, error) {
+func (pub *IPLDPublisher) publishReceipts(receipts types.Receipts, receiptMeta []ReceiptModel) (map[common.Hash]ReceiptModel, error) {
receiptsCids, err := pub.ReceiptPutter.DagPut(receipts)
if err != nil {
return nil, err
@@ -155,18 +171,21 @@ func (pub *Publisher) publishReceipts(receipts types.Receipts, receiptMeta []*Re
if len(receiptsCids) != len(receipts) {
return nil, errors.New("expected one CID for each receipt")
}
- // Keep receipts associated with their transaction
- mappedRctCids := make(map[common.Hash]*ReceiptMetaData, len(receiptsCids))
+ // Map receipt cids to their transaction hashes
+ mappedRctCids := make(map[common.Hash]ReceiptModel, len(receiptsCids))
for i, rct := range receipts {
- mappedRctCids[rct.TxHash] = receiptMeta[i]
- mappedRctCids[rct.TxHash].CID = receiptsCids[i]
+ mappedRctCids[rct.TxHash] = ReceiptModel{
+ CID: receiptsCids[i],
+ Contract: receiptMeta[i].Contract,
+ Topic0s: receiptMeta[i].Topic0s,
+ }
}
return mappedRctCids, nil
}
-func (pub *Publisher) publishStateNodes(stateNodes map[common.Hash]StateNode) (map[common.Hash]StateNodeCID, error) {
- stateNodeCids := make(map[common.Hash]StateNodeCID)
- for addrKey, node := range stateNodes {
+func (pub *IPLDPublisher) publishStateNodes(stateNodes []TrieNode) ([]StateNodeModel, error) {
+ stateNodeCids := make([]StateNodeModel, 0, len(stateNodes))
+ for _, node := range stateNodes {
stateNodeCid, err := pub.StatePutter.DagPut(node.Value)
if err != nil {
return nil, err
@@ -174,18 +193,19 @@ func (pub *Publisher) publishStateNodes(stateNodes map[common.Hash]StateNode) (m
if len(stateNodeCid) != 1 {
return nil, errors.New("single CID expected to be returned for state leaf")
}
- stateNodeCids[addrKey] = StateNodeCID{
- CID: stateNodeCid[0],
- Leaf: node.Leaf,
- }
+ stateNodeCids = append(stateNodeCids, StateNodeModel{
+ StateKey: node.Key.String(),
+ CID: stateNodeCid[0],
+ Leaf: node.Leaf,
+ })
}
return stateNodeCids, nil
}
-func (pub *Publisher) publishStorageNodes(storageNodes map[common.Hash][]StorageNode) (map[common.Hash][]StorageNodeCID, error) {
- storageLeafCids := make(map[common.Hash][]StorageNodeCID)
+func (pub *IPLDPublisher) publishStorageNodes(storageNodes map[common.Hash][]TrieNode) (map[common.Hash][]StorageNodeModel, error) {
+ storageLeafCids := make(map[common.Hash][]StorageNodeModel)
for addrKey, storageTrie := range storageNodes {
- storageLeafCids[addrKey] = make([]StorageNodeCID, 0, len(storageTrie))
+ storageLeafCids[addrKey] = make([]StorageNodeModel, 0, len(storageTrie))
for _, node := range storageTrie {
storageNodeCid, err := pub.StoragePutter.DagPut(node.Value)
if err != nil {
@@ -194,10 +214,11 @@ func (pub *Publisher) publishStorageNodes(storageNodes map[common.Hash][]Storage
if len(storageNodeCid) != 1 {
return nil, errors.New("single CID expected to be returned for storage leaf")
}
- storageLeafCids[addrKey] = append(storageLeafCids[addrKey], StorageNodeCID{
- Key: node.Key.Hex(),
- CID: storageNodeCid[0],
- Leaf: node.Leaf,
+ // Map storage node cids to their state key hashes
+ storageLeafCids[addrKey] = append(storageLeafCids[addrKey], StorageNodeModel{
+ StorageKey: node.Key.Hex(),
+ CID: storageNodeCid[0],
+ Leaf: node.Leaf,
})
}
}
diff --git a/pkg/ipfs/publisher_test.go b/pkg/super_node/eth/publisher_test.go
similarity index 69%
rename from pkg/ipfs/publisher_test.go
rename to pkg/super_node/eth/publisher_test.go
index 6e34c13c..65d9872f 100644
--- a/pkg/ipfs/publisher_test.go
+++ b/pkg/super_node/eth/publisher_test.go
@@ -14,15 +14,15 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package ipfs_test
+package eth_test
import (
"github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
)
var (
@@ -47,36 +47,38 @@ var _ = Describe("Publisher", func() {
mockHeaderDagPutter.CIDsToReturn = []string{"mockHeaderCID"}
mockTrxDagPutter.CIDsToReturn = []string{"mockTrxCID1", "mockTrxCID2"}
mockRctDagPutter.CIDsToReturn = []string{"mockRctCID1", "mockRctCID2"}
- val1 := common.BytesToHash(mocks.MockIPLDPayload.StateNodes[mocks.ContractLeafKey].Value)
- val2 := common.BytesToHash(mocks.MockIPLDPayload.StateNodes[mocks.AnotherContractLeafKey].Value)
+ val1 := common.BytesToHash(mocks.MockIPLDPayload.StateNodes[0].Value)
+ val2 := common.BytesToHash(mocks.MockIPLDPayload.StateNodes[1].Value)
mockStateDagPutter.CIDsToReturn = map[common.Hash][]string{
val1: {"mockStateCID1"},
val2: {"mockStateCID2"},
}
mockStorageDagPutter.CIDsToReturn = []string{"mockStorageCID"}
- publisher := ipfs.Publisher{
+ publisher := eth.IPLDPublisher{
HeaderPutter: mockHeaderDagPutter,
TransactionPutter: mockTrxDagPutter,
ReceiptPutter: mockRctDagPutter,
StatePutter: mockStateDagPutter,
StoragePutter: mockStorageDagPutter,
}
- cidPayload, err := publisher.Publish(mocks.MockIPLDPayload)
+ payload, err := publisher.Publish(mocks.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
- Expect(cidPayload.TotalDifficulty).To(Equal(mocks.MockIPLDPayload.TotalDifficulty.String()))
- Expect(cidPayload.BlockNumber).To(Equal(mocks.MockCIDPayload.BlockNumber))
- Expect(cidPayload.BlockHash).To(Equal(mocks.MockCIDPayload.BlockHash))
+ cidPayload, ok := payload.(*eth.CIDPayload)
+ Expect(ok).To(BeTrue())
+ Expect(cidPayload.HeaderCID.TotalDifficulty).To(Equal(mocks.MockIPLDPayload.TotalDifficulty.String()))
+ Expect(cidPayload.HeaderCID.BlockNumber).To(Equal(mocks.MockCIDPayload.HeaderCID.BlockNumber))
+ Expect(cidPayload.HeaderCID.BlockHash).To(Equal(mocks.MockCIDPayload.HeaderCID.BlockHash))
Expect(cidPayload.UncleCIDs).To(Equal(mocks.MockCIDPayload.UncleCIDs))
Expect(cidPayload.HeaderCID).To(Equal(mocks.MockCIDPayload.HeaderCID))
Expect(len(cidPayload.TransactionCIDs)).To(Equal(2))
- Expect(cidPayload.TransactionCIDs[mocks.MockTransactions[0].Hash()]).To(Equal(mocks.MockCIDPayload.TransactionCIDs[mocks.MockTransactions[0].Hash()]))
- Expect(cidPayload.TransactionCIDs[mocks.MockTransactions[1].Hash()]).To(Equal(mocks.MockCIDPayload.TransactionCIDs[mocks.MockTransactions[1].Hash()]))
+ Expect(cidPayload.TransactionCIDs[0]).To(Equal(mocks.MockCIDPayload.TransactionCIDs[0]))
+ Expect(cidPayload.TransactionCIDs[1]).To(Equal(mocks.MockCIDPayload.TransactionCIDs[1]))
Expect(len(cidPayload.ReceiptCIDs)).To(Equal(2))
Expect(cidPayload.ReceiptCIDs[mocks.MockTransactions[0].Hash()]).To(Equal(mocks.MockCIDPayload.ReceiptCIDs[mocks.MockTransactions[0].Hash()]))
Expect(cidPayload.ReceiptCIDs[mocks.MockTransactions[1].Hash()]).To(Equal(mocks.MockCIDPayload.ReceiptCIDs[mocks.MockTransactions[1].Hash()]))
Expect(len(cidPayload.StateNodeCIDs)).To(Equal(2))
- Expect(cidPayload.StateNodeCIDs[mocks.ContractLeafKey]).To(Equal(mocks.MockCIDPayload.StateNodeCIDs[mocks.ContractLeafKey]))
- Expect(cidPayload.StateNodeCIDs[mocks.AnotherContractLeafKey]).To(Equal(mocks.MockCIDPayload.StateNodeCIDs[mocks.AnotherContractLeafKey]))
+ Expect(cidPayload.StateNodeCIDs[0]).To(Equal(mocks.MockCIDPayload.StateNodeCIDs[0]))
+ Expect(cidPayload.StateNodeCIDs[1]).To(Equal(mocks.MockCIDPayload.StateNodeCIDs[1]))
Expect(cidPayload.StorageNodeCIDs).To(Equal(mocks.MockCIDPayload.StorageNodeCIDs))
})
})
diff --git a/pkg/ipfs/resolver.go b/pkg/super_node/eth/resolver.go
similarity index 57%
rename from pkg/ipfs/resolver.go
rename to pkg/super_node/eth/resolver.go
index 86e6ac40..d8783e04 100644
--- a/pkg/ipfs/resolver.go
+++ b/pkg/super_node/eth/resolver.go
@@ -14,36 +14,30 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
-package ipfs
+package eth
import (
+ "fmt"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-block-format"
- "github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
)
-// IPLDResolver is the interface to resolving IPLDs
-type IPLDResolver interface {
- ResolveIPLDs(ipfsBlocks IPLDWrapper) streamer.SuperNodePayload
- ResolveHeaders(iplds []blocks.Block) [][]byte
- ResolveUncles(iplds []blocks.Block) [][]byte
- ResolveTransactions(iplds []blocks.Block) [][]byte
- ResolveReceipts(blocks []blocks.Block) [][]byte
- ResolveState(iplds map[common.Hash]blocks.Block) map[common.Hash][]byte
- ResolveStorage(iplds map[common.Hash]map[common.Hash]blocks.Block) map[common.Hash]map[common.Hash][]byte
+// IPLDResolver satisfies the IPLDResolver interface for ethereum
+type IPLDResolver struct{}
+
+// NewIPLDResolver returns a pointer to an IPLDResolver which satisfies the IPLDResolver interface
+func NewIPLDResolver() *IPLDResolver {
+ return &IPLDResolver{}
}
-// EthIPLDResolver is the underlying struct to support the IPLDResolver interface
-type EthIPLDResolver struct{}
-
-// NewIPLDResolver returns a pointer to an EthIPLDResolver which satisfies the IPLDResolver interface
-func NewIPLDResolver() *EthIPLDResolver {
- return &EthIPLDResolver{}
-}
-
-// ResolveIPLDs is the exported method for resolving all of the ETH IPLDs packaged in an IpfsBlockWrapper
-func (eir *EthIPLDResolver) ResolveIPLDs(ipfsBlocks IPLDWrapper) streamer.SuperNodePayload {
- return streamer.SuperNodePayload{
+// Resolve is the exported method for resolving all of the ETH IPLDs packaged in an IpfsBlockWrapper
+func (eir *IPLDResolver) Resolve(iplds interface{}) (interface{}, error) {
+ ipfsBlocks, ok := iplds.(*IPLDWrapper)
+ if !ok {
+ return StreamPayload{}, fmt.Errorf("eth resolver expected iplds type %T got %T", &IPLDWrapper{}, iplds)
+ }
+ return StreamPayload{
BlockNumber: ipfsBlocks.BlockNumber,
HeadersRlp: eir.ResolveHeaders(ipfsBlocks.Headers),
UnclesRlp: eir.ResolveUncles(ipfsBlocks.Uncles),
@@ -51,10 +45,10 @@ func (eir *EthIPLDResolver) ResolveIPLDs(ipfsBlocks IPLDWrapper) streamer.SuperN
ReceiptsRlp: eir.ResolveReceipts(ipfsBlocks.Receipts),
StateNodesRlp: eir.ResolveState(ipfsBlocks.StateNodes),
StorageNodesRlp: eir.ResolveStorage(ipfsBlocks.StorageNodes),
- }
+ }, nil
}
-func (eir *EthIPLDResolver) ResolveHeaders(iplds []blocks.Block) [][]byte {
+func (eir *IPLDResolver) ResolveHeaders(iplds []blocks.Block) [][]byte {
headerRlps := make([][]byte, 0, len(iplds))
for _, ipld := range iplds {
headerRlps = append(headerRlps, ipld.RawData())
@@ -62,7 +56,7 @@ func (eir *EthIPLDResolver) ResolveHeaders(iplds []blocks.Block) [][]byte {
return headerRlps
}
-func (eir *EthIPLDResolver) ResolveUncles(iplds []blocks.Block) [][]byte {
+func (eir *IPLDResolver) ResolveUncles(iplds []blocks.Block) [][]byte {
uncleRlps := make([][]byte, 0, len(iplds))
for _, ipld := range iplds {
uncleRlps = append(uncleRlps, ipld.RawData())
@@ -70,7 +64,7 @@ func (eir *EthIPLDResolver) ResolveUncles(iplds []blocks.Block) [][]byte {
return uncleRlps
}
-func (eir *EthIPLDResolver) ResolveTransactions(iplds []blocks.Block) [][]byte {
+func (eir *IPLDResolver) ResolveTransactions(iplds []blocks.Block) [][]byte {
trxs := make([][]byte, 0, len(iplds))
for _, ipld := range iplds {
trxs = append(trxs, ipld.RawData())
@@ -78,7 +72,7 @@ func (eir *EthIPLDResolver) ResolveTransactions(iplds []blocks.Block) [][]byte {
return trxs
}
-func (eir *EthIPLDResolver) ResolveReceipts(iplds []blocks.Block) [][]byte {
+func (eir *IPLDResolver) ResolveReceipts(iplds []blocks.Block) [][]byte {
rcts := make([][]byte, 0, len(iplds))
for _, ipld := range iplds {
rcts = append(rcts, ipld.RawData())
@@ -86,7 +80,7 @@ func (eir *EthIPLDResolver) ResolveReceipts(iplds []blocks.Block) [][]byte {
return rcts
}
-func (eir *EthIPLDResolver) ResolveState(iplds map[common.Hash]blocks.Block) map[common.Hash][]byte {
+func (eir *IPLDResolver) ResolveState(iplds map[common.Hash]blocks.Block) map[common.Hash][]byte {
stateNodes := make(map[common.Hash][]byte, len(iplds))
for key, ipld := range iplds {
stateNodes[key] = ipld.RawData()
@@ -94,7 +88,7 @@ func (eir *EthIPLDResolver) ResolveState(iplds map[common.Hash]blocks.Block) map
return stateNodes
}
-func (eir *EthIPLDResolver) ResolveStorage(iplds map[common.Hash]map[common.Hash]blocks.Block) map[common.Hash]map[common.Hash][]byte {
+func (eir *IPLDResolver) ResolveStorage(iplds map[common.Hash]map[common.Hash]blocks.Block) map[common.Hash]map[common.Hash][]byte {
storageNodes := make(map[common.Hash]map[common.Hash][]byte)
for stateKey, storageIPLDs := range iplds {
storageNodes[stateKey] = make(map[common.Hash][]byte)
diff --git a/pkg/ipfs/resolver_test.go b/pkg/super_node/eth/resolver_test.go
similarity index 54%
rename from pkg/ipfs/resolver_test.go
rename to pkg/super_node/eth/resolver_test.go
index d8adb6a2..eae9d300 100644
--- a/pkg/ipfs/resolver_test.go
+++ b/pkg/super_node/eth/resolver_test.go
@@ -14,39 +14,42 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
-package ipfs_test
+package eth_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
- "github.com/vulcanize/vulcanizedb/pkg/super_node"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
)
var (
- resolver ipfs.IPLDResolver
+ resolver *eth.IPLDResolver
)
var _ = Describe("Resolver", func() {
Describe("ResolveIPLDs", func() {
BeforeEach(func() {
- resolver = ipfs.NewIPLDResolver()
+ resolver = eth.NewIPLDResolver()
})
It("Resolves IPLD data to their correct geth data types and packages them to send to requesting transformers", func() {
- superNodePayload := resolver.ResolveIPLDs(mocks.MockIPLDWrapper)
- Expect(superNodePayload.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
- Expect(superNodePayload.HeadersRlp).To(Equal(mocks.MockSeeNodePayload.HeadersRlp))
- Expect(superNodePayload.UnclesRlp).To(Equal(mocks.MockSeeNodePayload.UnclesRlp))
+ payload, err := resolver.Resolve(mocks.MockIPLDWrapper)
+ Expect(err).ToNot(HaveOccurred())
+ superNodePayload, ok := payload.(eth.StreamPayload)
+ Expect(ok).To(BeTrue())
+ Expect(superNodePayload.BlockNumber.Int64()).To(Equal(mocks.MockSeedNodePayload.BlockNumber.Int64()))
+ Expect(superNodePayload.HeadersRlp).To(Equal(mocks.MockSeedNodePayload.HeadersRlp))
+ Expect(superNodePayload.UnclesRlp).To(Equal(mocks.MockSeedNodePayload.UnclesRlp))
Expect(len(superNodePayload.TransactionsRlp)).To(Equal(2))
- Expect(super_node.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
- Expect(super_node.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
+ Expect(shared.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
+ Expect(shared.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(len(superNodePayload.ReceiptsRlp)).To(Equal(2))
- Expect(super_node.ListContainsBytes(superNodePayload.ReceiptsRlp, mocks.MockReceipts.GetRlp(0))).To(BeTrue())
- Expect(super_node.ListContainsBytes(superNodePayload.ReceiptsRlp, mocks.MockReceipts.GetRlp(1))).To(BeTrue())
+ Expect(shared.ListContainsBytes(superNodePayload.ReceiptsRlp, mocks.MockReceipts.GetRlp(0))).To(BeTrue())
+ Expect(shared.ListContainsBytes(superNodePayload.ReceiptsRlp, mocks.MockReceipts.GetRlp(1))).To(BeTrue())
Expect(len(superNodePayload.StateNodesRlp)).To(Equal(2))
- Expect(superNodePayload.StorageNodesRlp).To(Equal(mocks.MockSeeNodePayload.StorageNodesRlp))
+ Expect(superNodePayload.StorageNodesRlp).To(Equal(mocks.MockSeedNodePayload.StorageNodesRlp))
})
})
})
diff --git a/pkg/super_node/retriever.go b/pkg/super_node/eth/retriever.go
similarity index 63%
rename from pkg/super_node/retriever.go
rename to pkg/super_node/eth/retriever.go
index 7173b946..3433210f 100644
--- a/pkg/super_node/retriever.go
+++ b/pkg/super_node/eth/retriever.go
@@ -14,161 +14,156 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
-package super_node
+package eth
import (
+ "fmt"
"math/big"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/config"
)
-// CIDRetriever is the interface for retrieving CIDs from the Postgres cache
-type CIDRetriever interface {
- RetrieveCIDs(streamFilters config.Subscription, blockNumber int64) (*ipfs.CIDWrapper, error)
- RetrieveLastBlockNumber() (int64, error)
- RetrieveFirstBlockNumber() (int64, error)
- RetrieveGapsInData() ([][2]uint64, error)
- RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]string, error)
- RetrieveUncleCIDs(tx *sqlx.Tx, blockNumber int64) ([]string, error)
- RetrieveTrxCIDs(tx *sqlx.Tx, txFilter config.TrxFilter, blockNumber int64) ([]string, []int64, error)
- RetrieveRctCIDs(tx *sqlx.Tx, rctFilter config.ReceiptFilter, blockNumber int64, trxIds []int64) ([]string, error)
- RetrieveStateCIDs(tx *sqlx.Tx, stateFilter config.StateFilter, blockNumber int64) ([]ipfs.StateNodeCID, error)
- Database() *postgres.DB
-}
-
-// EthCIDRetriever is the underlying struct supporting the CIDRetriever interface
-type EthCIDRetriever struct {
+// CIDRetriever satisfies the CIDRetriever interface for ethereum
+type CIDRetriever struct {
db *postgres.DB
}
-// NewCIDRetriever returns a pointer to a new EthCIDRetriever which supports the CIDRetriever interface
-func NewCIDRetriever(db *postgres.DB) *EthCIDRetriever {
- return &EthCIDRetriever{
+// NewCIDRetriever returns a pointer to a new CIDRetriever which supports the CIDRetriever interface
+func NewCIDRetriever(db *postgres.DB) *CIDRetriever {
+ return &CIDRetriever{
db: db,
}
}
// RetrieveFirstBlockNumber is used to retrieve the first block number in the db
-func (ecr *EthCIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
+func (ecr *CIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
var blockNumber int64
err := ecr.db.Get(&blockNumber, "SELECT block_number FROM header_cids ORDER BY block_number ASC LIMIT 1")
return blockNumber, err
}
// RetrieveLastBlockNumber is used to retrieve the latest block number in the db
-func (ecr *EthCIDRetriever) RetrieveLastBlockNumber() (int64, error) {
+func (ecr *CIDRetriever) RetrieveLastBlockNumber() (int64, error) {
var blockNumber int64
err := ecr.db.Get(&blockNumber, "SELECT block_number FROM header_cids ORDER BY block_number DESC LIMIT 1 ")
return blockNumber, err
}
-// RetrieveCIDs is used to retrieve all of the CIDs which conform to the passed StreamFilters
-func (ecr *EthCIDRetriever) RetrieveCIDs(streamFilters config.Subscription, blockNumber int64) (*ipfs.CIDWrapper, error) {
+// Retrieve is used to retrieve all of the CIDs which conform to the passed StreamFilters
+func (ecr *CIDRetriever) Retrieve(filter interface{}, blockNumber int64) (interface{}, bool, error) {
+ streamFilter, ok := filter.(*config.EthSubscription)
+ if !ok {
+ return nil, true, fmt.Errorf("eth retriever expected filter type %T got %T", &config.EthSubscription{}, filter)
+ }
log.Debug("retrieving cids")
tx, err := ecr.db.Beginx()
if err != nil {
- return nil, err
+ return nil, true, err
}
- cw := new(ipfs.CIDWrapper)
+ cw := new(CIDWrapper)
cw.BlockNumber = big.NewInt(blockNumber)
-
// Retrieve cached header CIDs
- if !streamFilters.HeaderFilter.Off {
+ if !streamFilter.HeaderFilter.Off {
cw.Headers, err = ecr.RetrieveHeaderCIDs(tx, blockNumber)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("header cid retrieval error")
- return nil, err
+ return nil, true, err
}
- if streamFilters.HeaderFilter.Uncles {
+ if streamFilter.HeaderFilter.Uncles {
cw.Uncles, err = ecr.RetrieveUncleCIDs(tx, blockNumber)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("uncle cid retrieval error")
- return nil, err
+ return nil, true, err
}
}
}
-
// Retrieve cached trx CIDs
- var trxIds []int64
- if !streamFilters.TrxFilter.Off {
- cw.Transactions, trxIds, err = ecr.RetrieveTrxCIDs(tx, streamFilters.TrxFilter, blockNumber)
+ if !streamFilter.TxFilter.Off {
+ cw.Transactions, err = ecr.RetrieveTrxCIDs(tx, streamFilter.TxFilter, blockNumber)
if err != nil {
- err := tx.Rollback()
- if err != nil {
+ if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("transaction cid retrieval error")
- return nil, err
+ return nil, true, err
}
}
-
+ trxIds := make([]int64, 0, len(cw.Transactions))
+ for _, tx := range cw.Transactions {
+ trxIds = append(trxIds, tx.ID)
+ }
// Retrieve cached receipt CIDs
- if !streamFilters.ReceiptFilter.Off {
- cw.Receipts, err = ecr.RetrieveRctCIDs(tx, streamFilters.ReceiptFilter, blockNumber, trxIds)
+ if !streamFilter.ReceiptFilter.Off {
+ cw.Receipts, err = ecr.RetrieveRctCIDs(tx, streamFilter.ReceiptFilter, blockNumber, trxIds)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("receipt cid retrieval error")
- return nil, err
+ return nil, true, err
}
}
-
// Retrieve cached state CIDs
- if !streamFilters.StateFilter.Off {
- cw.StateNodes, err = ecr.RetrieveStateCIDs(tx, streamFilters.StateFilter, blockNumber)
+ if !streamFilter.StateFilter.Off {
+ cw.StateNodes, err = ecr.RetrieveStateCIDs(tx, streamFilter.StateFilter, blockNumber)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("state cid retrieval error")
- return nil, err
+ return nil, true, err
}
}
-
// Retrieve cached storage CIDs
- if !streamFilters.StorageFilter.Off {
- cw.StorageNodes, err = ecr.RetrieveStorageCIDs(tx, streamFilters.StorageFilter, blockNumber)
+ if !streamFilter.StorageFilter.Off {
+ cw.StorageNodes, err = ecr.RetrieveStorageCIDs(tx, streamFilter.StorageFilter, blockNumber)
if err != nil {
if err := tx.Rollback(); err != nil {
log.Error(err)
}
log.Error("storage cid retrieval error")
- return nil, err
+ return nil, true, err
}
}
+ return cw, empty(cw), tx.Commit()
+}
- return cw, tx.Commit()
+func empty(cidWrapper *CIDWrapper) bool {
+ if len(cidWrapper.Transactions) > 0 || len(cidWrapper.Headers) > 0 || len(cidWrapper.Uncles) > 0 || len(cidWrapper.Receipts) > 0 || len(cidWrapper.StateNodes) > 0 || len(cidWrapper.StorageNodes) > 0 {
+ return false
+ }
+ return true
}
// RetrieveHeaderCIDs retrieves and returns all of the header cids at the provided blockheight
-func (ecr *EthCIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]string, error) {
+func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]HeaderModel, error) {
log.Debug("retrieving header cids for block ", blockNumber)
- headers := make([]string, 0)
- pgStr := `SELECT cid FROM header_cids
+ headers := make([]HeaderModel, 0)
+ pgStr := `SELECT * FROM header_cids
WHERE block_number = $1 AND uncle IS FALSE`
err := tx.Select(&headers, pgStr, blockNumber)
return headers, err
}
// RetrieveUncleCIDs retrieves and returns all of the uncle cids at the provided blockheight
-func (ecr *EthCIDRetriever) RetrieveUncleCIDs(tx *sqlx.Tx, blockNumber int64) ([]string, error) {
+func (ecr *CIDRetriever) RetrieveUncleCIDs(tx *sqlx.Tx, blockNumber int64) ([]HeaderModel, error) {
log.Debug("retrieving header cids for block ", blockNumber)
- headers := make([]string, 0)
- pgStr := `SELECT cid FROM header_cids
+ headers := make([]HeaderModel, 0)
+ pgStr := `SELECT * FROM header_cids
WHERE block_number = $1 AND uncle IS TRUE`
err := tx.Select(&headers, pgStr, blockNumber)
return headers, err
@@ -176,15 +171,14 @@ func (ecr *EthCIDRetriever) RetrieveUncleCIDs(tx *sqlx.Tx, blockNumber int64) ([
// RetrieveTrxCIDs retrieves and returns all of the trx cids at the provided blockheight that conform to the provided filter parameters
// also returns the ids for the returned transaction cids
-func (ecr *EthCIDRetriever) RetrieveTrxCIDs(tx *sqlx.Tx, txFilter config.TrxFilter, blockNumber int64) ([]string, []int64, error) {
+func (ecr *CIDRetriever) RetrieveTrxCIDs(tx *sqlx.Tx, txFilter config.TxFilter, blockNumber int64) ([]TxModel, error) {
log.Debug("retrieving transaction cids for block ", blockNumber)
args := make([]interface{}, 0, 3)
- type result struct {
- ID int64 `db:"id"`
- Cid string `db:"cid"`
- }
- results := make([]result, 0)
- pgStr := `SELECT transaction_cids.id, transaction_cids.cid FROM transaction_cids INNER JOIN header_cids ON (transaction_cids.header_id = header_cids.id)
+ results := make([]TxModel, 0)
+ pgStr := `SELECT transaction_cids.id, transaction_cids.header_id,
+ transaction_cids.tx_hash, transaction_cids.cid,
+ transaction_cids.dst, transaction_cids.src
+ FROM transaction_cids INNER JOIN header_cids ON (transaction_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
args = append(args, blockNumber)
if len(txFilter.Dst) > 0 {
@@ -197,23 +191,19 @@ func (ecr *EthCIDRetriever) RetrieveTrxCIDs(tx *sqlx.Tx, txFilter config.TrxFilt
}
err := tx.Select(&results, pgStr, args...)
if err != nil {
- return nil, nil, err
+ return nil, err
}
- ids := make([]int64, 0, len(results))
- cids := make([]string, 0, len(results))
- for _, res := range results {
- cids = append(cids, res.Cid)
- ids = append(ids, res.ID)
- }
- return cids, ids, nil
+ return results, nil
}
// RetrieveRctCIDs retrieves and returns all of the rct cids at the provided blockheight that conform to the provided
// filter parameters and correspond to the provided tx ids
-func (ecr *EthCIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter config.ReceiptFilter, blockNumber int64, trxIds []int64) ([]string, error) {
+func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter config.ReceiptFilter, blockNumber int64, trxIds []int64) ([]ReceiptModel, error) {
log.Debug("retrieving receipt cids for block ", blockNumber)
args := make([]interface{}, 0, 4)
- pgStr := `SELECT receipt_cids.cid FROM receipt_cids, transaction_cids, header_cids
+ pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid,
+ receipt_cids.contract, receipt_cids.topic0s
+ FROM receipt_cids, transaction_cids, header_cids
WHERE receipt_cids.tx_id = transaction_cids.id
AND transaction_cids.header_id = header_cids.id
AND header_cids.block_number = $1`
@@ -254,23 +244,29 @@ func (ecr *EthCIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter config.Receip
args = append(args, pq.Array(trxIds))
}
}
- receiptCids := make([]string, 0)
+ receiptCids := make([]ReceiptModel, 0)
err := tx.Select(&receiptCids, pgStr, args...)
+ if err != nil {
+ log.Error("receipt cid retrieval error")
+ log.Debug(pgStr)
+ }
return receiptCids, err
}
// RetrieveStateCIDs retrieves and returns all of the state node cids at the provided blockheight that conform to the provided filter parameters
-func (ecr *EthCIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter config.StateFilter, blockNumber int64) ([]ipfs.StateNodeCID, error) {
+func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter config.StateFilter, blockNumber int64) ([]StateNodeModel, error) {
log.Debug("retrieving state cids for block ", blockNumber)
args := make([]interface{}, 0, 2)
- pgStr := `SELECT state_cids.cid, state_cids.state_key, state_cids.leaf FROM state_cids INNER JOIN header_cids ON (state_cids.header_id = header_cids.id)
+ pgStr := `SELECT state_cids.id, state_cids.header_id,
+ state_cids.state_key, state_cids.leaf, state_cids.cid
+ FROM state_cids INNER JOIN header_cids ON (state_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
args = append(args, blockNumber)
addrLen := len(stateFilter.Addresses)
if addrLen > 0 {
keys := make([]string, 0, addrLen)
for _, addr := range stateFilter.Addresses {
- keys = append(keys, ipfs.HexToKey(addr).Hex())
+ keys = append(keys, HexToKey(addr).Hex())
}
pgStr += ` AND state_cids.state_key = ANY($2::VARCHAR(66)[])`
args = append(args, pq.Array(keys))
@@ -278,16 +274,17 @@ func (ecr *EthCIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter config.St
if !stateFilter.IntermediateNodes {
pgStr += ` AND state_cids.leaf = TRUE`
}
- stateNodeCIDs := make([]ipfs.StateNodeCID, 0)
+ stateNodeCIDs := make([]StateNodeModel, 0)
err := tx.Select(&stateNodeCIDs, pgStr, args...)
return stateNodeCIDs, err
}
// RetrieveStorageCIDs retrieves and returns all of the storage node cids at the provided blockheight that conform to the provided filter parameters
-func (ecr *EthCIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter config.StorageFilter, blockNumber int64) ([]ipfs.StorageNodeCID, error) {
+func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter config.StorageFilter, blockNumber int64) ([]StorageNodeWithStateKeyModel, error) {
log.Debug("retrieving storage cids for block ", blockNumber)
args := make([]interface{}, 0, 3)
- pgStr := `SELECT storage_cids.cid, state_cids.state_key, storage_cids.storage_key, storage_cids.leaf FROM storage_cids, state_cids, header_cids
+ pgStr := `SELECT storage_cids.id, storage_cids.state_id, storage_cids.storage_key,
+ storage_cids.leaf, storage_cids.cid, state_cids.state_key FROM storage_cids, state_cids, header_cids
WHERE storage_cids.state_id = state_cids.id
AND state_cids.header_id = header_cids.id
AND header_cids.block_number = $1`
@@ -296,7 +293,7 @@ func (ecr *EthCIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter confi
if addrLen > 0 {
keys := make([]string, 0, addrLen)
for _, addr := range storageFilter.Addresses {
- keys = append(keys, ipfs.HexToKey(addr).Hex())
+ keys = append(keys, HexToKey(addr).Hex())
}
pgStr += ` AND state_cids.state_key = ANY($2::VARCHAR(66)[])`
args = append(args, pq.Array(keys))
@@ -311,35 +308,36 @@ func (ecr *EthCIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter confi
if !storageFilter.IntermediateNodes {
pgStr += ` AND storage_cids.leaf = TRUE`
}
- storageNodeCIDs := make([]ipfs.StorageNodeCID, 0)
+ storageNodeCIDs := make([]StorageNodeWithStateKeyModel, 0)
err := tx.Select(&storageNodeCIDs, pgStr, args...)
return storageNodeCIDs, err
}
-type gap struct {
- Start uint64 `db:"start"`
- Stop uint64 `db:"stop"`
-}
-
// RetrieveGapsInData is used to find the the block numbers at which we are missing data in the db
-func (ecr *EthCIDRetriever) RetrieveGapsInData() ([][2]uint64, error) {
+func (ecr *CIDRetriever) RetrieveGapsInData() ([]shared.Gap, error) {
pgStr := `SELECT header_cids.block_number + 1 AS start, min(fr.block_number) - 1 AS stop FROM header_cids
LEFT JOIN header_cids r on header_cids.block_number = r.block_number - 1
LEFT JOIN header_cids fr on header_cids.block_number < fr.block_number
WHERE r.block_number is NULL and fr.block_number IS NOT NULL
GROUP BY header_cids.block_number, r.block_number`
- gaps := make([]gap, 0)
- err := ecr.db.Select(&gaps, pgStr)
+ results := make([]struct {
+ Start uint64 `db:"start"`
+ Stop uint64 `db:"stop"`
+ }, 0)
+ err := ecr.db.Select(&results, pgStr)
if err != nil {
return nil, err
}
- gapRanges := make([][2]uint64, 0)
- for _, gap := range gaps {
- gapRanges = append(gapRanges, [2]uint64{gap.Start, gap.Stop})
+ gaps := make([]shared.Gap, len(results))
+ for i, res := range results {
+ gaps[i] = shared.Gap{
+ Start: res.Start,
+ Stop: res.Stop,
+ }
}
- return gapRanges, nil
+ return gaps, nil
}
-func (ecr *EthCIDRetriever) Database() *postgres.DB {
+func (ecr *CIDRetriever) Database() *postgres.DB {
return ecr.db
}
diff --git a/pkg/super_node/retriever_test.go b/pkg/super_node/eth/retriever_test.go
similarity index 60%
rename from pkg/super_node/retriever_test.go
rename to pkg/super_node/eth/retriever_test.go
index 1d04f3ab..8912360b 100644
--- a/pkg/super_node/retriever_test.go
+++ b/pkg/super_node/eth/retriever_test.go
@@ -14,39 +14,40 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
-package super_node_test
+package eth_test
import (
"math/big"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
- "github.com/vulcanize/vulcanizedb/pkg/super_node"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/config"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ eth2 "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
)
var (
- retriever super_node.CIDRetriever
- openFilter = config.Subscription{
- StartingBlock: big.NewInt(0),
- EndingBlock: big.NewInt(1),
+ openFilter = &config.EthSubscription{
+ Start: big.NewInt(0),
+ End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{},
- TrxFilter: config.TrxFilter{},
+ TxFilter: config.TxFilter{},
ReceiptFilter: config.ReceiptFilter{},
StateFilter: config.StateFilter{},
StorageFilter: config.StorageFilter{},
}
- rctContractFilter = config.Subscription{
- StartingBlock: big.NewInt(0),
- EndingBlock: big.NewInt(1),
+ rctContractFilter = &config.EthSubscription{
+ Start: big.NewInt(0),
+ End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
- TrxFilter: config.TrxFilter{
+ TxFilter: config.TxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
@@ -59,13 +60,13 @@ var (
Off: true,
},
}
- rctTopicsFilter = config.Subscription{
- StartingBlock: big.NewInt(0),
- EndingBlock: big.NewInt(1),
+ rctTopicsFilter = &config.EthSubscription{
+ Start: big.NewInt(0),
+ End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
- TrxFilter: config.TrxFilter{
+ TxFilter: config.TxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
@@ -78,13 +79,13 @@ var (
Off: true,
},
}
- rctTopicsAndContractFilter = config.Subscription{
- StartingBlock: big.NewInt(0),
- EndingBlock: big.NewInt(1),
+ rctTopicsAndContractFilter = &config.EthSubscription{
+ Start: big.NewInt(0),
+ End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
- TrxFilter: config.TrxFilter{
+ TxFilter: config.TxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
@@ -98,13 +99,13 @@ var (
Off: true,
},
}
- rctContractsAndTopicFilter = config.Subscription{
- StartingBlock: big.NewInt(0),
- EndingBlock: big.NewInt(1),
+ rctContractsAndTopicFilter = &config.EthSubscription{
+ Start: big.NewInt(0),
+ End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
- TrxFilter: config.TrxFilter{
+ TxFilter: config.TxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
@@ -118,13 +119,13 @@ var (
Off: true,
},
}
- rctsForAllCollectedTrxs = config.Subscription{
- StartingBlock: big.NewInt(0),
- EndingBlock: big.NewInt(1),
+ rctsForAllCollectedTrxs = &config.EthSubscription{
+ Start: big.NewInt(0),
+ End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
- TrxFilter: config.TrxFilter{}, // Trx filter open so we will collect all trxs, therefore we will also collect all corresponding rcts despite rct filter
+ TxFilter: config.TxFilter{}, // Trx filter open so we will collect all trxs, therefore we will also collect all corresponding rcts despite rct filter
ReceiptFilter: config.ReceiptFilter{
MatchTxs: true,
Topic0s: []string{"0x0000000000000000000000000000000000000000000000000000000000000006"}, // Topic isn't one of the topics we have
@@ -137,13 +138,13 @@ var (
Off: true,
},
}
- rctsForSelectCollectedTrxs = config.Subscription{
- StartingBlock: big.NewInt(0),
- EndingBlock: big.NewInt(1),
+ rctsForSelectCollectedTrxs = &config.EthSubscription{
+ Start: big.NewInt(0),
+ End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
- TrxFilter: config.TrxFilter{
+ TxFilter: config.TxFilter{
Dst: []string{"0x0000000000000000000000000000000000000001"}, // We only filter for one of the trxs so we will only get the one corresponding receipt
},
ReceiptFilter: config.ReceiptFilter{
@@ -158,13 +159,13 @@ var (
Off: true,
},
}
- stateFilter = config.Subscription{
- StartingBlock: big.NewInt(0),
- EndingBlock: big.NewInt(1),
+ stateFilter = &config.EthSubscription{
+ Start: big.NewInt(0),
+ End: big.NewInt(1),
HeaderFilter: config.HeaderFilter{
Off: true,
},
- TrxFilter: config.TrxFilter{
+ TxFilter: config.TxFilter{
Off: true,
},
ReceiptFilter: config.ReceiptFilter{
@@ -181,129 +182,179 @@ var (
var _ = Describe("Retriever", func() {
var (
- db *postgres.DB
- repo super_node.CIDRepository
+ db *postgres.DB
+ repo *eth2.CIDIndexer
+ retriever *eth2.CIDRetriever
)
BeforeEach(func() {
var err error
- db, err = super_node.SetupDB()
+ db, err = eth.SetupDB()
Expect(err).ToNot(HaveOccurred())
- repo = super_node.NewCIDRepository(db)
- retriever = super_node.NewCIDRetriever(db)
+ repo = eth2.NewCIDIndexer(db)
+ retriever = eth2.NewCIDRetriever(db)
})
AfterEach(func() {
- super_node.TearDownDB(db)
+ eth.TearDownDB(db)
})
- Describe("RetrieveCIDs", func() {
+ Describe("Retrieve", func() {
BeforeEach(func() {
err := repo.Index(mocks.MockCIDPayload)
Expect(err).ToNot(HaveOccurred())
})
It("Retrieves all CIDs for the given blocknumber when provided an open filter", func() {
- cidWrapper, err := retriever.RetrieveCIDs(openFilter, 1)
+ cids, empty, err := retriever.Retrieve(openFilter, 1)
Expect(err).ToNot(HaveOccurred())
+ Expect(empty).ToNot(BeTrue())
+ cidWrapper, ok := cids.(*eth.CIDWrapper)
+ Expect(ok).To(BeTrue())
Expect(cidWrapper.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper.Headers)).To(Equal(1))
- Expect(cidWrapper.Headers).To(Equal(mocks.MockCIDWrapper.Headers))
+ expectedHeaderCIDs := mocks.MockCIDWrapper.Headers
+ expectedHeaderCIDs[0].ID = cidWrapper.Headers[0].ID
+ Expect(cidWrapper.Headers).To(Equal(expectedHeaderCIDs))
Expect(len(cidWrapper.Transactions)).To(Equal(2))
- Expect(super_node.ListContainsString(cidWrapper.Transactions, mocks.MockCIDWrapper.Transactions[0])).To(BeTrue())
- Expect(super_node.ListContainsString(cidWrapper.Transactions, mocks.MockCIDWrapper.Transactions[1])).To(BeTrue())
+ Expect(eth.TxModelsContainsCID(cidWrapper.Transactions, mocks.MockCIDWrapper.Transactions[0].CID)).To(BeTrue())
+ Expect(eth.TxModelsContainsCID(cidWrapper.Transactions, mocks.MockCIDWrapper.Transactions[1].CID)).To(BeTrue())
Expect(len(cidWrapper.Receipts)).To(Equal(2))
- Expect(super_node.ListContainsString(cidWrapper.Receipts, mocks.MockCIDWrapper.Receipts[0])).To(BeTrue())
- Expect(super_node.ListContainsString(cidWrapper.Receipts, mocks.MockCIDWrapper.Receipts[1])).To(BeTrue())
+ Expect(eth.ReceiptModelsContainsCID(cidWrapper.Receipts, mocks.MockCIDWrapper.Receipts[0].CID)).To(BeTrue())
+ Expect(eth.ReceiptModelsContainsCID(cidWrapper.Receipts, mocks.MockCIDWrapper.Receipts[1].CID)).To(BeTrue())
Expect(len(cidWrapper.StateNodes)).To(Equal(2))
for _, stateNode := range cidWrapper.StateNodes {
if stateNode.CID == "mockStateCID1" {
- Expect(stateNode.Key).To(Equal(mocks.ContractLeafKey.Hex()))
+ Expect(stateNode.StateKey).To(Equal(mocks.ContractLeafKey.Hex()))
Expect(stateNode.Leaf).To(Equal(true))
}
if stateNode.CID == "mockStateCID2" {
- Expect(stateNode.Key).To(Equal(mocks.AnotherContractLeafKey.Hex()))
+ Expect(stateNode.StateKey).To(Equal(mocks.AnotherContractLeafKey.Hex()))
Expect(stateNode.Leaf).To(Equal(true))
}
}
Expect(len(cidWrapper.StorageNodes)).To(Equal(1))
- Expect(cidWrapper.StorageNodes).To(Equal(mocks.MockCIDWrapper.StorageNodes))
+ expectedStorageNodeCIDs := mocks.MockCIDWrapper.StorageNodes
+ expectedStorageNodeCIDs[0].ID = cidWrapper.StorageNodes[0].ID
+ expectedStorageNodeCIDs[0].StateID = cidWrapper.StorageNodes[0].StateID
+ Expect(cidWrapper.StorageNodes).To(Equal(expectedStorageNodeCIDs))
})
It("Applies filters from the provided config.Subscription", func() {
- cidWrapper1, err1 := retriever.RetrieveCIDs(rctContractFilter, 1)
- Expect(err1).ToNot(HaveOccurred())
+ cids1, empty, err := retriever.Retrieve(rctContractFilter, 1)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(empty).ToNot(BeTrue())
+ cidWrapper1, ok := cids1.(*eth.CIDWrapper)
+ Expect(ok).To(BeTrue())
Expect(cidWrapper1.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper1.Headers)).To(Equal(0))
Expect(len(cidWrapper1.Transactions)).To(Equal(0))
Expect(len(cidWrapper1.StateNodes)).To(Equal(0))
Expect(len(cidWrapper1.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper1.Receipts)).To(Equal(1))
- Expect(cidWrapper1.Receipts[0]).To(Equal("mockRctCID2"))
+ expectedReceiptCID := mocks.MockCIDWrapper.Receipts[1]
+ expectedReceiptCID.ID = cidWrapper1.Receipts[0].ID
+ expectedReceiptCID.TxID = cidWrapper1.Receipts[0].TxID
+ Expect(cidWrapper1.Receipts[0]).To(Equal(expectedReceiptCID))
- cidWrapper2, err2 := retriever.RetrieveCIDs(rctTopicsFilter, 1)
- Expect(err2).ToNot(HaveOccurred())
+ cids2, empty, err := retriever.Retrieve(rctTopicsFilter, 1)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(empty).ToNot(BeTrue())
+ cidWrapper2, ok := cids2.(*eth.CIDWrapper)
+ Expect(ok).To(BeTrue())
Expect(cidWrapper2.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper2.Headers)).To(Equal(0))
Expect(len(cidWrapper2.Transactions)).To(Equal(0))
Expect(len(cidWrapper2.StateNodes)).To(Equal(0))
Expect(len(cidWrapper2.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper2.Receipts)).To(Equal(1))
- Expect(cidWrapper2.Receipts[0]).To(Equal("mockRctCID1"))
+ expectedReceiptCID = mocks.MockCIDWrapper.Receipts[0]
+ expectedReceiptCID.ID = cidWrapper2.Receipts[0].ID
+ expectedReceiptCID.TxID = cidWrapper2.Receipts[0].TxID
+ Expect(cidWrapper2.Receipts[0]).To(Equal(expectedReceiptCID))
- cidWrapper3, err3 := retriever.RetrieveCIDs(rctTopicsAndContractFilter, 1)
- Expect(err3).ToNot(HaveOccurred())
+ cids3, empty, err := retriever.Retrieve(rctTopicsAndContractFilter, 1)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(empty).ToNot(BeTrue())
+ cidWrapper3, ok := cids3.(*eth.CIDWrapper)
+ Expect(ok).To(BeTrue())
Expect(cidWrapper3.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper3.Headers)).To(Equal(0))
Expect(len(cidWrapper3.Transactions)).To(Equal(0))
Expect(len(cidWrapper3.StateNodes)).To(Equal(0))
Expect(len(cidWrapper3.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper3.Receipts)).To(Equal(1))
- Expect(cidWrapper3.Receipts[0]).To(Equal("mockRctCID1"))
+ expectedReceiptCID = mocks.MockCIDWrapper.Receipts[0]
+ expectedReceiptCID.ID = cidWrapper3.Receipts[0].ID
+ expectedReceiptCID.TxID = cidWrapper3.Receipts[0].TxID
+ Expect(cidWrapper3.Receipts[0]).To(Equal(expectedReceiptCID))
- cidWrapper4, err4 := retriever.RetrieveCIDs(rctContractsAndTopicFilter, 1)
- Expect(err4).ToNot(HaveOccurred())
+ cids4, empty, err := retriever.Retrieve(rctContractsAndTopicFilter, 1)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(empty).ToNot(BeTrue())
+ cidWrapper4, ok := cids4.(*eth.CIDWrapper)
+ Expect(ok).To(BeTrue())
Expect(cidWrapper4.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper4.Headers)).To(Equal(0))
Expect(len(cidWrapper4.Transactions)).To(Equal(0))
Expect(len(cidWrapper4.StateNodes)).To(Equal(0))
Expect(len(cidWrapper4.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper4.Receipts)).To(Equal(1))
- Expect(cidWrapper4.Receipts[0]).To(Equal("mockRctCID2"))
+ expectedReceiptCID = mocks.MockCIDWrapper.Receipts[1]
+ expectedReceiptCID.ID = cidWrapper4.Receipts[0].ID
+ expectedReceiptCID.TxID = cidWrapper4.Receipts[0].TxID
+ Expect(cidWrapper4.Receipts[0]).To(Equal(expectedReceiptCID))
- cidWrapper5, err5 := retriever.RetrieveCIDs(rctsForAllCollectedTrxs, 1)
- Expect(err5).ToNot(HaveOccurred())
+ cids5, empty, err := retriever.Retrieve(rctsForAllCollectedTrxs, 1)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(empty).ToNot(BeTrue())
+ cidWrapper5, ok := cids5.(*eth.CIDWrapper)
+ Expect(ok).To(BeTrue())
Expect(cidWrapper5.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper5.Headers)).To(Equal(0))
Expect(len(cidWrapper5.Transactions)).To(Equal(2))
- Expect(super_node.ListContainsString(cidWrapper5.Transactions, "mockTrxCID1")).To(BeTrue())
- Expect(super_node.ListContainsString(cidWrapper5.Transactions, "mockTrxCID2")).To(BeTrue())
+ Expect(eth.TxModelsContainsCID(cidWrapper5.Transactions, "mockTrxCID1")).To(BeTrue())
+ Expect(eth.TxModelsContainsCID(cidWrapper5.Transactions, "mockTrxCID2")).To(BeTrue())
Expect(len(cidWrapper5.StateNodes)).To(Equal(0))
Expect(len(cidWrapper5.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper5.Receipts)).To(Equal(2))
- Expect(super_node.ListContainsString(cidWrapper5.Receipts, "mockRctCID1")).To(BeTrue())
- Expect(super_node.ListContainsString(cidWrapper5.Receipts, "mockRctCID2")).To(BeTrue())
+ Expect(eth.ReceiptModelsContainsCID(cidWrapper5.Receipts, "mockRctCID1")).To(BeTrue())
+ Expect(eth.ReceiptModelsContainsCID(cidWrapper5.Receipts, "mockRctCID2")).To(BeTrue())
- cidWrapper6, err6 := retriever.RetrieveCIDs(rctsForSelectCollectedTrxs, 1)
- Expect(err6).ToNot(HaveOccurred())
+ cids6, empty, err := retriever.Retrieve(rctsForSelectCollectedTrxs, 1)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(empty).ToNot(BeTrue())
+ cidWrapper6, ok := cids6.(*eth.CIDWrapper)
+ Expect(ok).To(BeTrue())
Expect(cidWrapper6.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper6.Headers)).To(Equal(0))
Expect(len(cidWrapper6.Transactions)).To(Equal(1))
- Expect(cidWrapper6.Transactions[0]).To(Equal("mockTrxCID2"))
+ expectedTxCID := mocks.MockCIDWrapper.Transactions[1]
+ expectedTxCID.ID = cidWrapper6.Transactions[0].ID
+ expectedTxCID.HeaderID = cidWrapper6.Transactions[0].HeaderID
+ Expect(cidWrapper6.Transactions[0]).To(Equal(expectedTxCID))
Expect(len(cidWrapper6.StateNodes)).To(Equal(0))
Expect(len(cidWrapper6.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper6.Receipts)).To(Equal(1))
- Expect(cidWrapper6.Receipts[0]).To(Equal("mockRctCID2"))
+ expectedReceiptCID = mocks.MockCIDWrapper.Receipts[1]
+ expectedReceiptCID.ID = cidWrapper6.Receipts[0].ID
+ expectedReceiptCID.TxID = cidWrapper6.Receipts[0].TxID
+ Expect(cidWrapper6.Receipts[0]).To(Equal(expectedReceiptCID))
- cidWrapper7, err7 := retriever.RetrieveCIDs(stateFilter, 1)
- Expect(err7).ToNot(HaveOccurred())
+ cids7, empty, err := retriever.Retrieve(stateFilter, 1)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(empty).ToNot(BeTrue())
+ cidWrapper7, ok := cids7.(*eth.CIDWrapper)
+ Expect(ok).To(BeTrue())
Expect(cidWrapper7.BlockNumber).To(Equal(mocks.MockCIDWrapper.BlockNumber))
Expect(len(cidWrapper7.Headers)).To(Equal(0))
Expect(len(cidWrapper7.Transactions)).To(Equal(0))
Expect(len(cidWrapper7.Receipts)).To(Equal(0))
Expect(len(cidWrapper7.StorageNodes)).To(Equal(0))
Expect(len(cidWrapper7.StateNodes)).To(Equal(1))
- Expect(cidWrapper7.StateNodes[0]).To(Equal(ipfs.StateNodeCID{
- Leaf: true,
- Key: mocks.ContractLeafKey.Hex(),
- CID: "mockStateCID1",
+ Expect(cidWrapper7.StateNodes[0]).To(Equal(eth.StateNodeModel{
+ ID: cidWrapper7.StateNodes[0].ID,
+ HeaderID: cidWrapper7.StateNodes[0].HeaderID,
+ Leaf: true,
+ StateKey: mocks.ContractLeafKey.Hex(),
+ CID: "mockStateCID1",
}))
})
})
@@ -319,7 +370,7 @@ var _ = Describe("Retriever", func() {
It("Gets the number of the first block that has data in the database", func() {
payload := *mocks.MockCIDPayload
- payload.BlockNumber = "1010101"
+ payload.HeaderCID.BlockNumber = "1010101"
err := repo.Index(&payload)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber()
@@ -329,9 +380,9 @@ var _ = Describe("Retriever", func() {
It("Gets the number of the first block that has data in the database", func() {
payload1 := *mocks.MockCIDPayload
- payload1.BlockNumber = "1010101"
+ payload1.HeaderCID.BlockNumber = "1010101"
payload2 := payload1
- payload2.BlockNumber = "5"
+ payload2.HeaderCID.BlockNumber = "5"
err := repo.Index(&payload1)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload2)
@@ -353,7 +404,7 @@ var _ = Describe("Retriever", func() {
It("Gets the number of the latest block that has data in the database", func() {
payload := *mocks.MockCIDPayload
- payload.BlockNumber = "1010101"
+ payload.HeaderCID.BlockNumber = "1010101"
err := repo.Index(&payload)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber()
@@ -363,9 +414,9 @@ var _ = Describe("Retriever", func() {
It("Gets the number of the latest block that has data in the database", func() {
payload1 := *mocks.MockCIDPayload
- payload1.BlockNumber = "1010101"
+ payload1.HeaderCID.BlockNumber = "1010101"
payload2 := payload1
- payload2.BlockNumber = "5"
+ payload2.HeaderCID.BlockNumber = "5"
err := repo.Index(&payload1)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload2)
@@ -379,9 +430,9 @@ var _ = Describe("Retriever", func() {
Describe("RetrieveGapsInData", func() {
It("Doesn't return gaps if there are none", func() {
payload1 := *mocks.MockCIDPayload
- payload1.BlockNumber = "2"
+ payload1.HeaderCID.BlockNumber = "2"
payload2 := payload1
- payload2.BlockNumber = "3"
+ payload2.HeaderCID.BlockNumber = "3"
err := repo.Index(mocks.MockCIDPayload)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload1)
@@ -395,7 +446,7 @@ var _ = Describe("Retriever", func() {
It("Doesn't return the gap from 0 to the earliest block", func() {
payload := *mocks.MockCIDPayload
- payload.BlockNumber = "5"
+ payload.HeaderCID.BlockNumber = "5"
err := repo.Index(&payload)
Expect(err).ToNot(HaveOccurred())
gaps, err := retriever.RetrieveGapsInData()
@@ -405,9 +456,9 @@ var _ = Describe("Retriever", func() {
It("Finds gap between two entries", func() {
payload1 := *mocks.MockCIDPayload
- payload1.BlockNumber = "1010101"
+ payload1.HeaderCID.BlockNumber = "1010101"
payload2 := payload1
- payload2.BlockNumber = "5"
+ payload2.HeaderCID.BlockNumber = "5"
err := repo.Index(&payload1)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload2)
@@ -415,23 +466,23 @@ var _ = Describe("Retriever", func() {
gaps, err := retriever.RetrieveGapsInData()
Expect(err).ToNot(HaveOccurred())
Expect(len(gaps)).To(Equal(1))
- Expect(gaps[0][0]).To(Equal(uint64(6)))
- Expect(gaps[0][1]).To(Equal(uint64(1010100)))
+ Expect(gaps[0].Start).To(Equal(uint64(6)))
+ Expect(gaps[0].Stop).To(Equal(uint64(1010100)))
})
It("Finds gaps between multiple entries", func() {
payload1 := *mocks.MockCIDPayload
- payload1.BlockNumber = "1010101"
+ payload1.HeaderCID.BlockNumber = "1010101"
payload2 := payload1
- payload2.BlockNumber = "5"
+ payload2.HeaderCID.BlockNumber = "5"
payload3 := payload2
- payload3.BlockNumber = "100"
+ payload3.HeaderCID.BlockNumber = "100"
payload4 := payload3
- payload4.BlockNumber = "101"
+ payload4.HeaderCID.BlockNumber = "101"
payload5 := payload4
- payload5.BlockNumber = "102"
+ payload5.HeaderCID.BlockNumber = "102"
payload6 := payload5
- payload6.BlockNumber = "1000"
+ payload6.HeaderCID.BlockNumber = "1000"
err := repo.Index(&payload1)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload2)
@@ -447,9 +498,9 @@ var _ = Describe("Retriever", func() {
gaps, err := retriever.RetrieveGapsInData()
Expect(err).ToNot(HaveOccurred())
Expect(len(gaps)).To(Equal(3))
- Expect(super_node.ListContainsRange(gaps, [2]uint64{6, 99})).To(BeTrue())
- Expect(super_node.ListContainsRange(gaps, [2]uint64{103, 999})).To(BeTrue())
- Expect(super_node.ListContainsRange(gaps, [2]uint64{1001, 1010100})).To(BeTrue())
+ Expect(shared.ListContainsGap(gaps, shared.Gap{Start: 6, Stop: 99})).To(BeTrue())
+ Expect(shared.ListContainsGap(gaps, shared.Gap{Start: 103, Stop: 999})).To(BeTrue())
+ Expect(shared.ListContainsGap(gaps, shared.Gap{Start: 1001, Stop: 1010100})).To(BeTrue())
})
})
})
diff --git a/pkg/super_node/eth/streamer.go b/pkg/super_node/eth/streamer.go
new file mode 100644
index 00000000..0e88463c
--- /dev/null
+++ b/pkg/super_node/eth/streamer.go
@@ -0,0 +1,46 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package eth
+
+import (
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/sirupsen/logrus"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+)
+
+const (
+ PayloadChanBufferSize = 20000 // the max eth sub buffer size
+)
+
+// PayloadStreamer satisfies the PayloadStreamer interface for ethereum
+type PayloadStreamer struct {
+ Client core.RPCClient
+}
+
+// NewPayloadStreamer creates a pointer to a new PayloadStreamer which satisfies the PayloadStreamer interface
+func NewPayloadStreamer(client core.RPCClient) *PayloadStreamer {
+ return &PayloadStreamer{
+ Client: client,
+ }
+}
+
+// Stream is the main loop for subscribing to data from the Geth state diff process
+func (sds *PayloadStreamer) Stream(payloadChan chan interface{}) (*rpc.ClientSubscription, error) {
+ logrus.Info("streaming diffs from geth")
+ return sds.Client.Subscribe("statediff", payloadChan, "stream")
+}
diff --git a/pkg/super_node/eth/streamer_test.go b/pkg/super_node/eth/streamer_test.go
new file mode 100644
index 00000000..bd18c434
--- /dev/null
+++ b/pkg/super_node/eth/streamer_test.go
@@ -0,0 +1,34 @@
+// Copyright 2019 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package eth_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/fakes"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/eth"
+)
+
+var _ = Describe("StateDiff Streamer", func() {
+ It("subscribes to the geth statediff service", func() {
+ client := &fakes.MockRPCClient{}
+ streamer := eth.NewPayloadStreamer(client)
+ payloadChan := make(chan interface{})
+ _, err := streamer.Stream(payloadChan)
+ Expect(err).NotTo(HaveOccurred())
+ client.AssertSubscribeCalledWith("statediff", payloadChan, []interface{}{"stream"})
+ })
+})
diff --git a/pkg/super_node/test_helpers.go b/pkg/super_node/eth/test_helpers.go
similarity index 87%
rename from pkg/super_node/test_helpers.go
rename to pkg/super_node/eth/test_helpers.go
index 762f5f5e..00d2469d 100644
--- a/pkg/super_node/test_helpers.go
+++ b/pkg/super_node/eth/test_helpers.go
@@ -14,11 +14,9 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
-package super_node
+package eth
import (
- "bytes"
-
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/config"
@@ -57,10 +55,10 @@ func TearDownDB(db *postgres.DB) {
Expect(err).NotTo(HaveOccurred())
}
-// ListContainsString used to check if a list of strings contains a particular string
-func ListContainsString(sss []string, s string) bool {
- for _, str := range sss {
- if s == str {
+// TxModelsContainsCID used to check if a list of TxModels contains a specific cid string
+func TxModelsContainsCID(txs []TxModel, cid string) bool {
+ for _, tx := range txs {
+ if tx.CID == cid {
return true
}
}
@@ -68,9 +66,9 @@ func ListContainsString(sss []string, s string) bool {
}
// ListContainsBytes used to check if a list of byte arrays contains a particular byte array
-func ListContainsBytes(bbb [][]byte, b []byte) bool {
- for _, by := range bbb {
- if bytes.Equal(by, b) {
+func ReceiptModelsContainsCID(rcts []ReceiptModel, cid string) bool {
+ for _, rct := range rcts {
+ if rct.CID == cid {
return true
}
}
diff --git a/pkg/super_node/eth/types.go b/pkg/super_node/eth/types.go
new file mode 100644
index 00000000..95351bc2
--- /dev/null
+++ b/pkg/super_node/eth/types.go
@@ -0,0 +1,119 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package eth
+
+import (
+ "encoding/json"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ipfs/go-block-format"
+)
+
+// IPLDPayload is a custom type which packages raw ETH data for publishing to IPFS and filtering to subscribers
+// Returned by PayloadConverter
+// Passed to IPLDPublisher and ResponseFilterer
+type IPLDPayload struct {
+ TotalDifficulty *big.Int
+ Block *types.Block
+ HeaderRLP []byte
+ TrxMetaData []TxModel
+ Receipts types.Receipts
+ ReceiptMetaData []ReceiptModel
+ StateNodes []TrieNode
+ StorageNodes map[common.Hash][]TrieNode
+}
+
+// TrieNode is used to flag whether a state or storage trie node is a leaf node
+type TrieNode struct {
+ Key common.Hash
+ Value []byte
+ Leaf bool
+}
+
+// CIDPayload is a struct to hold all the CIDs and their associated meta data for indexing in Postgres
+// Returned by IPLDPublisher
+// Passed to CIDIndexer
+type CIDPayload struct {
+ HeaderCID HeaderModel
+ UncleCIDs []HeaderModel
+ TransactionCIDs []TxModel
+ ReceiptCIDs map[common.Hash]ReceiptModel
+ StateNodeCIDs []StateNodeModel
+ StorageNodeCIDs map[common.Hash][]StorageNodeModel
+}
+
+// CIDWrapper is used to direct fetching of IPLDs from IPFS
+// Returned by CIDRetriever
+// Passed to IPLDFetcher
+type CIDWrapper struct {
+ BlockNumber *big.Int
+ Headers []HeaderModel
+ Uncles []HeaderModel
+ Transactions []TxModel
+ Receipts []ReceiptModel
+ StateNodes []StateNodeModel
+ StorageNodes []StorageNodeWithStateKeyModel
+}
+
+// IPLDWrapper is used to package raw IPLD block data fetched from IPFS
+// Returned by IPLDFetcher
+// Passed to IPLDResolver
+type IPLDWrapper struct {
+ BlockNumber *big.Int
+ Headers []blocks.Block
+ Uncles []blocks.Block
+ Transactions []blocks.Block
+ Receipts []blocks.Block
+ StateNodes map[common.Hash]blocks.Block
+ StorageNodes map[common.Hash]map[common.Hash]blocks.Block
+}
+
+// StreamPayload holds the data streamed from the super node eth service to the requesting clients
+// Returned by IPLDResolver and ResponseFilterer
+// Passed to client subscriptions
+type StreamPayload struct {
+ BlockNumber *big.Int `json:"blockNumber"`
+ HeadersRlp [][]byte `json:"headersRlp"`
+ UnclesRlp [][]byte `json:"unclesRlp"`
+ TransactionsRlp [][]byte `json:"transactionsRlp"`
+ ReceiptsRlp [][]byte `json:"receiptsRlp"`
+ StateNodesRlp map[common.Hash][]byte `json:"stateNodesRlp"`
+ StorageNodesRlp map[common.Hash]map[common.Hash][]byte `json:"storageNodesRlp"`
+
+ encoded []byte
+ err error
+}
+
+func (sd *StreamPayload) ensureEncoded() {
+ if sd.encoded == nil && sd.err == nil {
+ sd.encoded, sd.err = json.Marshal(sd)
+ }
+}
+
+// Length to implement Encoder interface for StreamPayload
+func (sd *StreamPayload) Length() int {
+ sd.ensureEncoded()
+ return len(sd.encoded)
+}
+
+// Encode to implement Encoder interface for StreamPayload
+func (sd *StreamPayload) Encode() ([]byte, error) {
+ sd.ensureEncoded()
+ return sd.encoded, sd.err
+}
diff --git a/pkg/super_node/helpers.go b/pkg/super_node/helpers.go
new file mode 100644
index 00000000..b175ca28
--- /dev/null
+++ b/pkg/super_node/helpers.go
@@ -0,0 +1,37 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package super_node
+
+import log "github.com/sirupsen/logrus"
+
+func sendNonBlockingErr(sub Subscription, err error) {
+ log.Error(err)
+ select {
+ case sub.PayloadChan <- Payload{nil, err.Error()}:
+ default:
+ log.Infof("unable to send error to subscription %s", sub.ID)
+ }
+}
+
+func sendNonBlockingQuit(sub Subscription) {
+ select {
+ case sub.QuitChan <- true:
+ log.Infof("closing subscription %s", sub.ID)
+ default:
+ log.Infof("unable to close subscription %s; channel has no receiver", sub.ID)
+ }
+}
diff --git a/pkg/super_node/mocks/retriever.go b/pkg/super_node/mocks/retriever.go
deleted file mode 100644
index 57258b37..00000000
--- a/pkg/super_node/mocks/retriever.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package mocks
-
-import (
- "github.com/jmoiron/sqlx"
- "github.com/vulcanize/vulcanizedb/pkg/config"
- "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
-)
-
-// MockCIDRetriever is a mock CID retriever for use in tests
-type MockCIDRetriever struct {
- GapsToRetrieve [][2]uint64
- GapsToRetrieveErr error
- CalledTimes int
- FirstBlockNumberToReturn int64
- RetrieveFirstBlockNumberErr error
-}
-
-// RetrieveCIDs mock method
-func (*MockCIDRetriever) RetrieveCIDs(streamFilters config.Subscription, blockNumber int64) (*ipfs.CIDWrapper, error) {
- panic("implement me")
-}
-
-// RetrieveHeaderCIDs mock method
-func (*MockCIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]string, error) {
- panic("implement me")
-
-}
-
-// RetrieveUncleCIDs mock method
-func (*MockCIDRetriever) RetrieveUncleCIDs(tx *sqlx.Tx, blockNumber int64) ([]string, error) {
- panic("implement me")
-
-}
-
-// RetrieveTrxCIDs mock method
-func (*MockCIDRetriever) RetrieveTrxCIDs(tx *sqlx.Tx, txFilter config.TrxFilter, blockNumber int64) ([]string, []int64, error) {
- panic("implement me")
-
-}
-
-// RetrieveRctCIDs mock method
-func (*MockCIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter config.ReceiptFilter, blockNumber int64, trxIds []int64) ([]string, error) {
- panic("implement me")
-
-}
-
-// RetrieveStateCIDs mock method
-func (*MockCIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter config.StateFilter, blockNumber int64) ([]ipfs.StateNodeCID, error) {
- panic("implement me")
-
-}
-
-// RetrieveLastBlockNumber mock method
-func (*MockCIDRetriever) RetrieveLastBlockNumber() (int64, error) {
- panic("implement me")
-}
-
-// RetrieveFirstBlockNumber mock method
-func (mcr *MockCIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
- return mcr.FirstBlockNumberToReturn, mcr.RetrieveFirstBlockNumberErr
-}
-
-// RetrieveGapsInData mock method
-func (mcr *MockCIDRetriever) RetrieveGapsInData() ([][2]uint64, error) {
- mcr.CalledTimes++
- return mcr.GapsToRetrieve, mcr.GapsToRetrieveErr
-}
-
-// SetGapsToRetrieve mock method
-func (mcr *MockCIDRetriever) SetGapsToRetrieve(gaps [][2]uint64) {
- if mcr.GapsToRetrieve == nil {
- mcr.GapsToRetrieve = make([][2]uint64, 0)
- }
- mcr.GapsToRetrieve = append(mcr.GapsToRetrieve, gaps...)
-}
-
-func (mcr *MockCIDRetriever) Database() *postgres.DB {
- panic("implement me")
-}
diff --git a/pkg/super_node/repository.go b/pkg/super_node/repository.go
deleted file mode 100644
index b65fa3c9..00000000
--- a/pkg/super_node/repository.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package super_node
-
-import (
- "github.com/jmoiron/sqlx"
- "github.com/lib/pq"
- log "github.com/sirupsen/logrus"
-
- "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs"
-)
-
-// CIDRepository is an interface for indexing ipfs.CIDPayloads
-type CIDRepository interface {
- Index(cidPayload *ipfs.CIDPayload) error
-}
-
-// Repository is the underlying struct for the CIDRepository interface
-type Repository struct {
- db *postgres.DB
-}
-
-// NewCIDRepository creates a new pointer to a Repository which satisfies the CIDRepository interface
-func NewCIDRepository(db *postgres.DB) *Repository {
- return &Repository{
- db: db,
- }
-}
-
-// Index indexes a cidPayload in Postgres
-func (repo *Repository) Index(cidPayload *ipfs.CIDPayload) error {
- tx, err := repo.db.Beginx()
- if err != nil {
- return err
- }
- headerID, err := repo.indexHeaderCID(tx, cidPayload.HeaderCID, cidPayload.BlockNumber, cidPayload.BlockHash.Hex(), cidPayload.TotalDifficulty)
- if err != nil {
- if err := tx.Rollback(); err != nil {
- log.Error(err)
- }
- return err
- }
- for uncleHash, cid := range cidPayload.UncleCIDs {
- err := repo.indexUncleCID(tx, cid, cidPayload.BlockNumber, uncleHash.Hex(), cidPayload.TotalDifficulty)
- if err != nil {
- if err := tx.Rollback(); err != nil {
- log.Error(err)
- }
- return err
- }
- }
- if err := repo.indexTransactionAndReceiptCIDs(tx, cidPayload, headerID); err != nil {
- if err := tx.Rollback(); err != nil {
- log.Error(err)
- }
- return err
- }
- if err := repo.indexStateAndStorageCIDs(tx, cidPayload, headerID); err != nil {
- if err := tx.Rollback(); err != nil {
- log.Error(err)
- }
- return err
- }
- return tx.Commit()
-}
-
-func (repo *Repository) indexHeaderCID(tx *sqlx.Tx, cid, blockNumber, hash, td string) (int64, error) {
- var headerID int64
- err := tx.QueryRowx(`INSERT INTO public.header_cids (block_number, block_hash, cid, uncle, td) VALUES ($1, $2, $3, $4, $5)
- ON CONFLICT (block_number, block_hash) DO UPDATE SET (cid, uncle, td) = ($3, $4, $5)
- RETURNING id`,
- blockNumber, hash, cid, false, td).Scan(&headerID)
- return headerID, err
-}
-
-func (repo *Repository) indexUncleCID(tx *sqlx.Tx, cid, blockNumber, hash, td string) error {
- _, err := tx.Exec(`INSERT INTO public.header_cids (block_number, block_hash, cid, uncle, td) VALUES ($1, $2, $3, $4, $5)
- ON CONFLICT (block_number, block_hash) DO UPDATE SET (cid, uncle, td) = ($3, $4, $5)`,
- blockNumber, hash, cid, true, td)
- return err
-}
-
-func (repo *Repository) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *ipfs.CIDPayload, headerID int64) error {
- for hash, trxCidMeta := range payload.TransactionCIDs {
- var txID int64
- err := tx.QueryRowx(`INSERT INTO public.transaction_cids (header_id, tx_hash, cid, dst, src) VALUES ($1, $2, $3, $4, $5)
- ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src) = ($3, $4, $5)
- RETURNING id`,
- headerID, hash.Hex(), trxCidMeta.CID, trxCidMeta.Dst, trxCidMeta.Src).Scan(&txID)
- if err != nil {
- return err
- }
- receiptCidMeta, ok := payload.ReceiptCIDs[hash]
- if ok {
- if err := repo.indexReceiptCID(tx, receiptCidMeta, txID); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (repo *Repository) indexReceiptCID(tx *sqlx.Tx, cidMeta *ipfs.ReceiptMetaData, txID int64) error {
- _, err := tx.Exec(`INSERT INTO public.receipt_cids (tx_id, cid, contract, topic0s) VALUES ($1, $2, $3, $4)`,
- txID, cidMeta.CID, cidMeta.ContractAddress, pq.Array(cidMeta.Topic0s))
- return err
-}
-
-func (repo *Repository) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *ipfs.CIDPayload, headerID int64) error {
- for accountKey, stateCID := range payload.StateNodeCIDs {
- var stateID int64
- err := tx.QueryRowx(`INSERT INTO public.state_cids (header_id, state_key, cid, leaf) VALUES ($1, $2, $3, $4)
- ON CONFLICT (header_id, state_key) DO UPDATE SET (cid, leaf) = ($3, $4)
- RETURNING id`,
- headerID, accountKey.Hex(), stateCID.CID, stateCID.Leaf).Scan(&stateID)
- if err != nil {
- return err
- }
- for _, storageCID := range payload.StorageNodeCIDs[accountKey] {
- if err := repo.indexStorageCID(tx, storageCID, stateID); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (repo *Repository) indexStorageCID(tx *sqlx.Tx, storageCID ipfs.StorageNodeCID, stateID int64) error {
- _, err := tx.Exec(`INSERT INTO public.storage_cids (state_id, storage_key, cid, leaf) VALUES ($1, $2, $3, $4)
- ON CONFLICT (state_id, storage_key) DO UPDATE SET (cid, leaf) = ($3, $4)`,
- stateID, storageCID.Key, storageCID.CID, storageCID.Leaf)
- return err
-}
diff --git a/pkg/super_node/service.go b/pkg/super_node/service.go
index 41de906c..9c72c1a1 100644
--- a/pkg/super_node/service.go
+++ b/pkg/super_node/service.go
@@ -17,6 +17,7 @@
package super_node
import (
+ "fmt"
"sync"
"github.com/ethereum/go-ethereum/common"
@@ -26,35 +27,34 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
- "github.com/ethereum/go-ethereum/statediff"
log "github.com/sirupsen/logrus"
- "github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
- "github.com/vulcanize/vulcanizedb/pkg/config"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/config"
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/shared"
)
const (
- payloadChanBufferSize = 20000 // the max eth sub buffer size
+ PayloadChanBufferSize = 20000
)
-// NodeInterface is the top level interface for streaming, converting to IPLDs, publishing,
+// SuperNode is the top level interface for streaming, converting to IPLDs, publishing,
// and indexing all Ethereum data; screening this data; and serving it up to subscribed clients
// This service is compatible with the Ethereum service interface (node.Service)
-type NodeInterface interface {
+type SuperNode interface {
// APIs(), Protocols(), Start() and Stop()
node.Service
// Main event loop for syncAndPublish processes
- SyncAndPublish(wg *sync.WaitGroup, forwardPayloadChan chan<- ipfs.IPLDPayload, forwardQuitchan chan<- bool) error
+ SyncAndPublish(wg *sync.WaitGroup, forwardPayloadChan chan<- interface{}, forwardQuitchan chan<- bool) error
// Main event loop for handling client pub-sub
- ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan ipfs.IPLDPayload, screenAndServeQuit <-chan bool)
+ ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan interface{}, screenAndServeQuit <-chan bool)
// Method to subscribe to receive state diff processing output
- Subscribe(id rpc.ID, sub chan<- streamer.SuperNodePayload, quitChan chan<- bool, streamFilters config.Subscription)
+ Subscribe(id rpc.ID, sub chan<- Payload, quitChan chan<- bool, params SubscriptionSettings)
// Method to unsubscribe from state diff processing
Unsubscribe(id rpc.ID)
- // Method to access the Geth node info for this service
+ // Method to access the node info for this service
Node() core.Node
}
@@ -62,66 +62,96 @@ type NodeInterface interface {
type Service struct {
// Used to sync access to the Subscriptions
sync.Mutex
- // Interface for streaming statediff payloads over a geth rpc subscription
- Streamer streamer.Streamer
- // Interface for converting statediff payloads into ETH-IPLD object payloads
- Converter ipfs.PayloadConverter
- // Interface for publishing the ETH-IPLD payloads to IPFS
- Publisher ipfs.IPLDPublisher
- // Interface for indexing the CIDs of the published ETH-IPLDs in Postgres
- Repository CIDRepository
+ // Interface for streaming payloads over an rpc subscription
+ Streamer shared.PayloadStreamer
+ // Interface for converting raw payloads into IPLD object payloads
+ Converter shared.PayloadConverter
+ // Interface for publishing the IPLD payloads to IPFS
+ Publisher shared.IPLDPublisher
+ // Interface for indexing the CIDs of the published IPLDs in Postgres
+ Indexer shared.CIDIndexer
// Interface for filtering and serving data according to subscribed clients according to their specification
- Filterer ResponseFilterer
- // Interface for fetching ETH-IPLD objects from IPFS
- IPLDFetcher ipfs.IPLDFetcher
+ Filterer shared.ResponseFilterer
+ // Interface for fetching IPLD objects from IPFS
+ IPLDFetcher shared.IPLDFetcher
// Interface for searching and retrieving CIDs from Postgres index
- Retriever CIDRetriever
- // Interface for resolving ipfs blocks to their data types
- Resolver ipfs.IPLDResolver
- // Chan the processor uses to subscribe to state diff payloads from the Streamer
- PayloadChan chan statediff.Payload
+ Retriever shared.CIDRetriever
+ // Interface for resolving IPLDs to their data types
+ Resolver shared.IPLDResolver
+ // Chan the processor uses to subscribe to payloads from the Streamer
+ PayloadChan chan interface{}
// Used to signal shutdown of the service
QuitChan chan bool
// A mapping of rpc.IDs to their subscription channels, mapped to their subscription type (hash of the StreamFilters)
Subscriptions map[common.Hash]map[rpc.ID]Subscription
- // A mapping of subscription hash type to the corresponding StreamFilters
- SubscriptionTypes map[common.Hash]config.Subscription
- // Number of workers
- WorkerPoolSize int
+ // A mapping of subscription params hash to the corresponding subscription params
+ SubscriptionTypes map[common.Hash]SubscriptionSettings
// Info for the Geth node that this super node is working with
- GethNode core.Node
+ NodeInfo core.Node
+ // Number of publishAndIndex workers
+ WorkerPoolSize int
+ // chain type for this service
+ chain config.ChainType
+ // Path to ipfs data dir
+ ipfsPath string
+ // Underlying db
+ db *postgres.DB
}
// NewSuperNode creates a new super_node.Interface using an underlying super_node.Service struct
-func NewSuperNode(ipfsPath string, db *postgres.DB, rpcClient core.RPCClient, qc chan bool, workers int, node core.Node) (NodeInterface, error) {
- ipfsInitErr := ipfs.InitIPFSPlugins()
- if ipfsInitErr != nil {
- return nil, ipfsInitErr
+func NewSuperNode(settings *config.SuperNode) (SuperNode, error) {
+ if err := ipfs.InitIPFSPlugins(); err != nil {
+ return nil, err
}
- publisher, newPublisherErr := ipfs.NewIPLDPublisher(ipfsPath)
- if newPublisherErr != nil {
- return nil, newPublisherErr
+ sn := new(Service)
+ var err error
+ // If we are syncing, initialize the needed interfaces
+ if settings.Sync {
+ sn.Streamer, sn.PayloadChan, err = NewPayloadStreamer(settings.Chain, settings.WSClient)
+ if err != nil {
+ return nil, err
+ }
+ sn.Converter, err = NewPayloadConverter(settings.Chain, params.MainnetChainConfig)
+ if err != nil {
+ return nil, err
+ }
+ sn.Publisher, err = NewIPLDPublisher(settings.Chain, settings.IPFSPath)
+ if err != nil {
+ return nil, err
+ }
+ sn.Indexer, err = NewCIDIndexer(settings.Chain, settings.DB)
+ if err != nil {
+ return nil, err
+ }
+ sn.Filterer, err = NewResponseFilterer(settings.Chain)
+ if err != nil {
+ return nil, err
+ }
}
- ipldFetcher, newFetcherErr := ipfs.NewIPLDFetcher(ipfsPath)
- if newFetcherErr != nil {
- return nil, newFetcherErr
+ // If we are serving, initialize the needed interfaces
+ if settings.Serve {
+ sn.Retriever, err = NewCIDRetriever(settings.Chain, settings.DB)
+ if err != nil {
+ return nil, err
+ }
+ sn.IPLDFetcher, err = NewIPLDFetcher(settings.Chain, settings.IPFSPath)
+ if err != nil {
+ return nil, err
+ }
+ sn.Resolver, err = NewIPLDResolver(settings.Chain)
+ if err != nil {
+ return nil, err
+ }
}
- return &Service{
- Streamer: streamer.NewStateDiffStreamer(rpcClient),
- Repository: NewCIDRepository(db),
- Converter: ipfs.NewPayloadConverter(params.MainnetChainConfig),
- Publisher: publisher,
- Filterer: NewResponseFilterer(),
- IPLDFetcher: ipldFetcher,
- Retriever: NewCIDRetriever(db),
- Resolver: ipfs.NewIPLDResolver(),
- PayloadChan: make(chan statediff.Payload, payloadChanBufferSize),
- QuitChan: qc,
- Subscriptions: make(map[common.Hash]map[rpc.ID]Subscription),
- SubscriptionTypes: make(map[common.Hash]config.Subscription),
- WorkerPoolSize: workers,
- GethNode: node,
- }, nil
+ sn.QuitChan = settings.Quit
+ sn.Subscriptions = make(map[common.Hash]map[rpc.ID]Subscription)
+ sn.SubscriptionTypes = make(map[common.Hash]SubscriptionSettings)
+ sn.WorkerPoolSize = settings.Workers
+ sn.NodeInfo = settings.NodeInfo
+ sn.ipfsPath = settings.IPFSPath
+ sn.chain = settings.Chain
+ sn.db = settings.DB
+ return sn, nil
}
// Protocols exports the services p2p protocols, this service has none
@@ -131,7 +161,7 @@ func (sap *Service) Protocols() []p2p.Protocol {
// APIs returns the RPC descriptors the super node service offers
func (sap *Service) APIs() []rpc.API {
- return []rpc.API{
+ apis := []rpc.API{
{
Namespace: APIName,
Version: APIVersion,
@@ -139,20 +169,26 @@ func (sap *Service) APIs() []rpc.API {
Public: true,
},
}
+ chainAPI, err := NewPublicAPI(sap.chain, sap.db, sap.ipfsPath)
+ if err != nil {
+ log.Error(err)
+ return apis
+ }
+ return append(apis, chainAPI)
}
// SyncAndPublish is the backend processing loop which streams data from geth, converts it to iplds, publishes them to ipfs, and indexes their cids
// This continues on no matter if or how many subscribers there are, it then forwards the data to the ScreenAndServe() loop
// which filters and sends relevant data to client subscriptions, if there are any
-func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload chan<- ipfs.IPLDPayload, screenAndServeQuit chan<- bool) error {
- sub, streamErr := sap.Streamer.Stream(sap.PayloadChan)
- if streamErr != nil {
- return streamErr
+func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload chan<- interface{}, screenAndServeQuit chan<- bool) error {
+ sub, err := sap.Streamer.Stream(sap.PayloadChan)
+ if err != nil {
+ return err
}
wg.Add(1)
// Channels for forwarding data to the publishAndIndex workers
- publishAndIndexPayload := make(chan ipfs.IPLDPayload, payloadChanBufferSize)
+ publishAndIndexPayload := make(chan interface{}, PayloadChanBufferSize)
publishAndIndexQuit := make(chan bool, sap.WorkerPoolSize)
// publishAndIndex worker pool to handle publishing and indexing concurrently, while
// limiting the number of Postgres connections we can possibly open so as to prevent error
@@ -163,30 +199,30 @@ func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload cha
for {
select {
case payload := <-sap.PayloadChan:
- ipldPayload, convertErr := sap.Converter.Convert(payload)
- if convertErr != nil {
- log.Error(convertErr)
+ ipldPayload, err := sap.Converter.Convert(payload)
+ if err != nil {
+ log.Error(err)
continue
}
// If we have a ScreenAndServe process running, forward the payload to it
select {
- case screenAndServePayload <- *ipldPayload:
+ case screenAndServePayload <- ipldPayload:
default:
}
// Forward the payload to the publishAndIndex workers
select {
- case publishAndIndexPayload <- *ipldPayload:
+ case publishAndIndexPayload <- ipldPayload:
default:
}
- case subErr := <-sub.Err():
- log.Error(subErr)
+ case err := <-sub.Err():
+ log.Error(err)
case <-sap.QuitChan:
// If we have a ScreenAndServe process running, forward the quit signal to it
select {
case screenAndServeQuit <- true:
default:
}
- // Also forward a quit signal for each of the workers
+ // Also forward a quit signal for each of the publishAndIndex workers
for i := 0; i < sap.WorkerPoolSize; i++ {
select {
case publishAndIndexQuit <- true:
@@ -203,19 +239,18 @@ func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload cha
return nil
}
-func (sap *Service) publishAndIndex(id int, publishAndIndexPayload <-chan ipfs.IPLDPayload, publishAndIndexQuit <-chan bool) {
+func (sap *Service) publishAndIndex(id int, publishAndIndexPayload <-chan interface{}, publishAndIndexQuit <-chan bool) {
go func() {
for {
select {
case payload := <-publishAndIndexPayload:
- cidPayload, publishErr := sap.Publisher.Publish(&payload)
- if publishErr != nil {
- log.Errorf("worker %d error: %v", id, publishErr)
+ cidPayload, err := sap.Publisher.Publish(payload)
+ if err != nil {
+ log.Errorf("worker %d error: %v", id, err)
continue
}
- indexErr := sap.Repository.Index(cidPayload)
- if indexErr != nil {
- log.Errorf("worker %d error: %v", id, indexErr)
+ if err := sap.Indexer.Index(cidPayload); err != nil {
+ log.Errorf("worker %d error: %v", id, err)
}
case <-publishAndIndexQuit:
log.Infof("quiting publishAndIndex worker %d", id)
@@ -228,16 +263,13 @@ func (sap *Service) publishAndIndex(id int, publishAndIndexPayload <-chan ipfs.I
// ScreenAndServe is the loop used to screen data streamed from the state diffing eth node
// and send the appropriate portions of it to a requesting client subscription, according to their subscription configuration
-func (sap *Service) ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan ipfs.IPLDPayload, screenAndServeQuit <-chan bool) {
+func (sap *Service) ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan interface{}, screenAndServeQuit <-chan bool) {
wg.Add(1)
go func() {
for {
select {
case payload := <-screenAndServePayload:
- sendErr := sap.sendResponse(payload)
- if sendErr != nil {
- log.Error(sendErr)
- }
+ sap.sendResponse(payload)
case <-screenAndServeQuit:
log.Info("quiting ScreenAndServe process")
wg.Done()
@@ -248,23 +280,25 @@ func (sap *Service) ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-c
log.Info("screenAndServe goroutine successfully spun up")
}
-func (sap *Service) sendResponse(payload ipfs.IPLDPayload) error {
+func (sap *Service) sendResponse(payload interface{}) {
sap.Lock()
for ty, subs := range sap.Subscriptions {
// Retrieve the subscription parameters for this subscription type
subConfig, ok := sap.SubscriptionTypes[ty]
if !ok {
log.Errorf("subscription configuration for subscription type %s not available", ty.Hex())
+ sap.closeType(ty)
continue
}
- response, filterErr := sap.Filterer.FilterResponse(subConfig, payload)
- if filterErr != nil {
- log.Error(filterErr)
+ response, err := sap.Filterer.Filter(subConfig, payload)
+ if err != nil {
+ log.Error(err)
+ sap.closeType(ty)
continue
}
for id, sub := range subs {
select {
- case sub.PayloadChan <- response:
+ case sub.PayloadChan <- Payload{response, ""}:
log.Infof("sending super node payload to subscription %s", id)
default:
log.Infof("unable to send payload to subscription %s; channel has no receiver", id)
@@ -272,99 +306,102 @@ func (sap *Service) sendResponse(payload ipfs.IPLDPayload) error {
}
}
sap.Unlock()
- return nil
}
// Subscribe is used by the API to subscribe to the service loop
-func (sap *Service) Subscribe(id rpc.ID, sub chan<- streamer.SuperNodePayload, quitChan chan<- bool, streamFilters config.Subscription) {
+// The params must be rlp serializable and satisfy the Params() interface
+func (sap *Service) Subscribe(id rpc.ID, sub chan<- Payload, quitChan chan<- bool, params SubscriptionSettings) {
log.Info("Subscribing to the super node service")
- // Subscription type is defined as the hash of its content
- // Group subscriptions by type and screen payloads once for subs of the same type
- by, encodeErr := rlp.EncodeToBytes(streamFilters)
- if encodeErr != nil {
- log.Error(encodeErr)
- }
- subscriptionHash := crypto.Keccak256(by)
- subscriptionType := common.BytesToHash(subscriptionHash)
subscription := Subscription{
+ ID: id,
PayloadChan: sub,
QuitChan: quitChan,
}
+ if params.ChainType() != sap.chain {
+ sendNonBlockingErr(subscription, fmt.Errorf("subscription %s is for chain %s, service supports chain %s", id, params.ChainType().String(), sap.chain.String()))
+ sendNonBlockingQuit(subscription)
+ return
+ }
+ // Subscription type is defined as the hash of the subscription settings
+ by, err := rlp.EncodeToBytes(params)
+ if err != nil {
+ sendNonBlockingErr(subscription, err)
+ sendNonBlockingQuit(subscription)
+ return
+ }
+ subscriptionType := crypto.Keccak256Hash(by)
// If the subscription requests a backfill, use the Postgres index to lookup and retrieve historical data
// Otherwise we only filter new data as it is streamed in from the state diffing geth node
- if streamFilters.BackFill || streamFilters.BackFillOnly {
- sap.backFill(subscription, id, streamFilters)
+ if params.HistoricalData() || params.HistoricalDataOnly() {
+ if err := sap.backFill(subscription, id, params); err != nil {
+ sendNonBlockingErr(subscription, err)
+ sendNonBlockingQuit(subscription)
+ return
+ }
}
- if !streamFilters.BackFillOnly {
+ if !params.HistoricalDataOnly() {
+ // Add subscriber
sap.Lock()
if sap.Subscriptions[subscriptionType] == nil {
sap.Subscriptions[subscriptionType] = make(map[rpc.ID]Subscription)
}
sap.Subscriptions[subscriptionType][id] = subscription
- sap.SubscriptionTypes[subscriptionType] = streamFilters
+ sap.SubscriptionTypes[subscriptionType] = params
sap.Unlock()
}
}
-func (sap *Service) backFill(sub Subscription, id rpc.ID, con config.Subscription) {
- log.Debug("back-filling data for id", id)
+func (sap *Service) backFill(sub Subscription, id rpc.ID, params SubscriptionSettings) error {
+ log.Debug("sending historical data for subscriber", id)
// Retrieve cached CIDs relevant to this subscriber
var endingBlock int64
var startingBlock int64
- var retrieveFirstBlockErr error
- var retrieveLastBlockErr error
- startingBlock, retrieveFirstBlockErr = sap.Retriever.RetrieveFirstBlockNumber()
- if retrieveFirstBlockErr != nil {
- sub.PayloadChan <- streamer.SuperNodePayload{
- ErrMsg: "unable to set block range start; error: " + retrieveFirstBlockErr.Error(),
- }
+ var err error
+ startingBlock, err = sap.Retriever.RetrieveFirstBlockNumber()
+ if err != nil {
+ return err
}
- if startingBlock < con.StartingBlock.Int64() {
- startingBlock = con.StartingBlock.Int64()
+ if startingBlock < params.StartingBlock().Int64() {
+ startingBlock = params.StartingBlock().Int64()
}
- endingBlock, retrieveLastBlockErr = sap.Retriever.RetrieveLastBlockNumber()
- if retrieveLastBlockErr != nil {
- sub.PayloadChan <- streamer.SuperNodePayload{
- ErrMsg: "unable to set block range end; error: " + retrieveLastBlockErr.Error(),
- }
+ endingBlock, err = sap.Retriever.RetrieveLastBlockNumber()
+ if err != nil {
+ return err
}
- if endingBlock > con.EndingBlock.Int64() && con.EndingBlock.Int64() > 0 && con.EndingBlock.Int64() > startingBlock {
- endingBlock = con.EndingBlock.Int64()
+ if endingBlock > params.EndingBlock().Int64() && params.EndingBlock().Int64() > 0 && params.EndingBlock().Int64() > startingBlock {
+ endingBlock = params.EndingBlock().Int64()
}
- log.Debug("backfill starting block:", con.StartingBlock)
- log.Debug("backfill ending block:", endingBlock)
- // Backfilled payloads are sent concurrently to the streamed payloads, so the receiver needs to pay attention to
- // the blocknumbers in the payloads they receive to keep things in order
- // TODO: separate backfill into a different rpc subscription method altogether?
+ log.Debug("historical data starting block:", params.StartingBlock())
+ log.Debug("historical data ending block:", endingBlock)
go func() {
for i := startingBlock; i <= endingBlock; i++ {
- cidWrapper, retrieveCIDsErr := sap.Retriever.RetrieveCIDs(con, i)
- if retrieveCIDsErr != nil {
- sub.PayloadChan <- streamer.SuperNodePayload{
- ErrMsg: "CID retrieval error: " + retrieveCIDsErr.Error(),
- }
+ cidWrapper, empty, err := sap.Retriever.Retrieve(params, i)
+ if err != nil {
+ sendNonBlockingErr(sub, fmt.Errorf("CID Retrieval error at block %d\r%s", i, err.Error()))
continue
}
- if ipfs.EmptyCIDWrapper(*cidWrapper) {
+ if empty {
continue
}
- blocksWrapper, fetchIPLDsErr := sap.IPLDFetcher.FetchIPLDs(*cidWrapper)
- if fetchIPLDsErr != nil {
- log.Error(fetchIPLDsErr)
- sub.PayloadChan <- streamer.SuperNodePayload{
- ErrMsg: "IPLD fetching error: " + fetchIPLDsErr.Error(),
- }
+ blocksWrapper, err := sap.IPLDFetcher.Fetch(cidWrapper)
+ if err != nil {
+ sendNonBlockingErr(sub, fmt.Errorf("IPLD Fetching error at block %d\r%s", i, err.Error()))
+ continue
+ }
+ backFillIplds, err := sap.Resolver.Resolve(blocksWrapper)
+ if err != nil {
+ sendNonBlockingErr(sub, fmt.Errorf("IPLD Resolving error at block %d\r%s", i, err.Error()))
continue
}
- backFillIplds := sap.Resolver.ResolveIPLDs(*blocksWrapper)
select {
- case sub.PayloadChan <- backFillIplds:
- log.Infof("sending super node back-fill payload to subscription %s", id)
+ case sub.PayloadChan <- Payload{backFillIplds, ""}:
+ log.Infof("sending super node historical data payload to subscription %s", id)
default:
log.Infof("unable to send back-fill payload to subscription %s; channel has no receiver", id)
}
}
}()
+ return nil
}
// Unsubscribe is used to unsubscribe to the StateDiffingService loop
@@ -386,7 +423,7 @@ func (sap *Service) Unsubscribe(id rpc.ID) {
func (sap *Service) Start(*p2p.Server) error {
log.Info("Starting super node service")
wg := new(sync.WaitGroup)
- payloadChan := make(chan ipfs.IPLDPayload, payloadChanBufferSize)
+ payloadChan := make(chan interface{}, PayloadChanBufferSize)
quitChan := make(chan bool, 1)
if err := sap.SyncAndPublish(wg, payloadChan, quitChan); err != nil {
return err
@@ -398,29 +435,37 @@ func (sap *Service) Start(*p2p.Server) error {
// Stop is used to close down the service
func (sap *Service) Stop() error {
log.Info("Stopping super node service")
+ sap.Lock()
close(sap.QuitChan)
+ sap.close()
+ sap.Unlock()
return nil
}
-// Node returns the Geth node info for this service
+// Node returns the node info for this service
func (sap *Service) Node() core.Node {
- return sap.GethNode
+ return sap.NodeInfo
}
// close is used to close all listening subscriptions
+// close needs to be called with subscription access locked
func (sap *Service) close() {
- sap.Lock()
- for ty, subs := range sap.Subscriptions {
- for id, sub := range subs {
- select {
- case sub.QuitChan <- true:
- log.Infof("closing subscription %s", id)
- default:
- log.Infof("unable to close subscription %s; channel has no receiver", id)
- }
+ for subType, subs := range sap.Subscriptions {
+ for _, sub := range subs {
+ sendNonBlockingQuit(sub)
}
- delete(sap.Subscriptions, ty)
- delete(sap.SubscriptionTypes, ty)
+ delete(sap.Subscriptions, subType)
+ delete(sap.SubscriptionTypes, subType)
}
- sap.Unlock()
+}
+
+// closeType is used to close all subscriptions of given type
+// closeType needs to be called with subscription access locked
+func (sap *Service) closeType(subType common.Hash) {
+ subs := sap.Subscriptions[subType]
+ for _, sub := range subs {
+ sendNonBlockingQuit(sub)
+ }
+ delete(sap.Subscriptions, subType)
+ delete(sap.SubscriptionTypes, subType)
}
diff --git a/pkg/super_node/service_test.go b/pkg/super_node/service_test.go
index e2cb4059..54c11a6c 100644
--- a/pkg/super_node/service_test.go
+++ b/pkg/super_node/service_test.go
@@ -25,38 +25,36 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- mocks2 "github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
- "github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
- mocks3 "github.com/vulcanize/vulcanizedb/pkg/super_node/mocks"
+ mocks2 "github.com/vulcanize/vulcanizedb/pkg/super_node/eth/mocks"
)
var _ = Describe("Service", func() {
Describe("SyncAndPublish", func() {
It("Streams statediff.Payloads, converts them to IPLDPayloads, publishes IPLDPayloads, and indexes CIDPayloads", func() {
wg := new(sync.WaitGroup)
- payloadChan := make(chan statediff.Payload, 1)
+ payloadChan := make(chan interface{}, 1)
quitChan := make(chan bool, 1)
- mockCidRepo := &mocks3.CIDRepository{
+ mockCidIndexer := &mocks2.CIDIndexer{
ReturnErr: nil,
}
- mockPublisher := &mocks.IPLDPublisher{
- ReturnCIDPayload: mocks.MockCIDPayload,
+ mockPublisher := &mocks2.IPLDPublisher{
+ ReturnCIDPayload: mocks2.MockCIDPayload,
ReturnErr: nil,
}
mockStreamer := &mocks2.StateDiffStreamer{
ReturnSub: &rpc.ClientSubscription{},
StreamPayloads: []statediff.Payload{
- mocks.MockStateDiffPayload,
+ mocks2.MockStateDiffPayload,
},
ReturnErr: nil,
}
- mockConverter := &mocks.PayloadConverter{
- ReturnIPLDPayload: mocks.MockIPLDPayload,
+ mockConverter := &mocks2.PayloadConverter{
+ ReturnIPLDPayload: mocks2.MockIPLDPayload,
ReturnErr: nil,
}
processor := &super_node.Service{
- Repository: mockCidRepo,
+ Indexer: mockCidIndexer,
Publisher: mockPublisher,
Streamer: mockStreamer,
Converter: mockConverter,
@@ -69,10 +67,10 @@ var _ = Describe("Service", func() {
time.Sleep(2 * time.Second)
quitChan <- true
wg.Wait()
- Expect(mockConverter.PassedStatediffPayload).To(Equal(mocks.MockStateDiffPayload))
- Expect(len(mockCidRepo.PassedCIDPayload)).To(Equal(1))
- Expect(mockCidRepo.PassedCIDPayload[0]).To(Equal(mocks.MockCIDPayload))
- Expect(mockPublisher.PassedIPLDPayload).To(Equal(mocks.MockIPLDPayload))
+ Expect(mockConverter.PassedStatediffPayload).To(Equal(mocks2.MockStateDiffPayload))
+ Expect(len(mockCidIndexer.PassedCIDPayload)).To(Equal(1))
+ Expect(mockCidIndexer.PassedCIDPayload[0]).To(Equal(mocks2.MockCIDPayload))
+ Expect(mockPublisher.PassedIPLDPayload).To(Equal(mocks2.MockIPLDPayload))
Expect(mockStreamer.PassedPayloadChan).To(Equal(payloadChan))
})
})
diff --git a/pkg/super_node/shared/functions.go b/pkg/super_node/shared/functions.go
new file mode 100644
index 00000000..856e77d0
--- /dev/null
+++ b/pkg/super_node/shared/functions.go
@@ -0,0 +1,49 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package shared
+
+import "bytes"
+
+// ListContainsString used to check if a list of strings contains a particular string
+func ListContainsString(sss []string, s string) bool {
+ for _, str := range sss {
+ if s == str {
+ return true
+ }
+ }
+ return false
+}
+
+// ListContainsBytes used to check if a list of byte arrays contains a particular byte array
+func ListContainsBytes(bbb [][]byte, b []byte) bool {
+ for _, by := range bbb {
+ if bytes.Equal(by, b) {
+ return true
+ }
+ }
+ return false
+}
+
+// ListContainsGap used to check if a list of Gaps contains a particular Gap
+func ListContainsGap(gapList []Gap, gap Gap) bool {
+ for _, listGap := range gapList {
+ if listGap == gap {
+ return true
+ }
+ }
+ return false
+}
diff --git a/pkg/super_node/shared/intefaces.go b/pkg/super_node/shared/intefaces.go
new file mode 100644
index 00000000..a217817b
--- /dev/null
+++ b/pkg/super_node/shared/intefaces.go
@@ -0,0 +1,63 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package shared
+
+import (
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
+// ResponseFilterer applies a filter to the streamed payload and returns a subscription response packet
+type ResponseFilterer interface {
+ Filter(filter, payload interface{}) (response interface{}, err error)
+}
+
+// CIDIndexer indexes a set of cids with their associated meta data in Postgres
+type CIDIndexer interface {
+ Index(cids interface{}) error
+}
+
+// CIDRetriever retrieves cids according to a provided filter and returns a cid
+type CIDRetriever interface {
+ Retrieve(filter interface{}, blockNumber int64) (interface{}, bool, error)
+ RetrieveFirstBlockNumber() (int64, error)
+ RetrieveLastBlockNumber() (int64, error)
+ RetrieveGapsInData() ([]Gap, error)
+}
+
+type PayloadStreamer interface {
+ Stream(payloadChan chan interface{}) (*rpc.ClientSubscription, error)
+}
+
+type PayloadFetcher interface {
+ FetchAt(blockHeights []uint64) ([]interface{}, error)
+}
+
+type IPLDFetcher interface {
+ Fetch(cids interface{}) (interface{}, error)
+}
+
+type PayloadConverter interface {
+ Convert(payload interface{}) (interface{}, error)
+}
+
+type IPLDPublisher interface {
+ Publish(payload interface{}) (interface{}, error)
+}
+
+type IPLDResolver interface {
+ Resolve(iplds interface{}) (interface{}, error)
+}
diff --git a/pkg/super_node/shared/types.go b/pkg/super_node/shared/types.go
new file mode 100644
index 00000000..9bfb973d
--- /dev/null
+++ b/pkg/super_node/shared/types.go
@@ -0,0 +1,22 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package shared
+
+type Gap struct {
+ Start uint64
+ Stop uint64
+}
diff --git a/pkg/super_node/subscription.go b/pkg/super_node/subscription.go
index d7d91787..9669d194 100644
--- a/pkg/super_node/subscription.go
+++ b/pkg/super_node/subscription.go
@@ -17,11 +17,35 @@
package super_node
import (
- "github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/rpc"
+
+ "github.com/vulcanize/vulcanizedb/pkg/super_node/config"
)
// Subscription holds the information for an individual client subscription to the super node
type Subscription struct {
- PayloadChan chan<- streamer.SuperNodePayload
+ ID rpc.ID
+ PayloadChan chan<- Payload
QuitChan chan<- bool
}
+
+// Payload is the struct for a super node stream payload
+// It carries data of a type specific to the chain being supported/queried and an error message
+type Payload struct {
+ Data interface{} `json:"data"` // e.g. for Ethereum eth.StreamPayload
+ Err string `json:"err"`
+}
+
+// SubscriptionSettings is the interface every subscription filter type needs to satisfy, no matter the chain
+// Further specifics of the underlying filter type depend on the internal needs of the types
+// which satisfy the ResponseFilterer and CIDRetriever interfaces for a specific chain
+// The underlying type needs to be rlp serializable
+type SubscriptionSettings interface {
+ StartingBlock() *big.Int
+ EndingBlock() *big.Int
+ ChainType() config.ChainType
+ HistoricalData() bool
+ HistoricalDataOnly() bool
+}