backfiller refactoring; explicit errs; golint

This commit is contained in:
Ian Norden 2019-10-08 14:51:38 -05:00
parent 40c3aff597
commit 3a666df294
16 changed files with 283 additions and 334 deletions

View File

@ -16,27 +16,23 @@
package cmd package cmd
import ( import (
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"os" "os"
"path/filepath" "path/filepath"
syn "sync" syn "sync"
"time" "time"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
"github.com/spf13/viper"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/vulcanize/vulcanizedb/pkg/core" "github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/geth" "github.com/vulcanize/vulcanizedb/pkg/geth"
"github.com/vulcanize/vulcanizedb/pkg/geth/client" "github.com/vulcanize/vulcanizedb/pkg/geth/client"
vRpc "github.com/vulcanize/vulcanizedb/pkg/geth/converters/rpc" vRpc "github.com/vulcanize/vulcanizedb/pkg/geth/converters/rpc"
"github.com/vulcanize/vulcanizedb/pkg/geth/node" "github.com/vulcanize/vulcanizedb/pkg/geth/node"
"github.com/vulcanize/vulcanizedb/pkg/super_node"
"github.com/vulcanize/vulcanizedb/utils" "github.com/vulcanize/vulcanizedb/utils"
) )
@ -60,19 +56,19 @@ func init() {
} }
func syncAndPublish() { func syncAndPublish() {
superNode, err := newSuperNode() superNode, newNodeErr := newSuperNode()
if err != nil { if newNodeErr != nil {
log.Fatal(err) log.Fatal(newNodeErr)
} }
wg := &syn.WaitGroup{} wg := &syn.WaitGroup{}
err = superNode.SyncAndPublish(wg, nil, nil) syncAndPubErr := superNode.SyncAndPublish(wg, nil, nil)
if err != nil { if syncAndPubErr != nil {
log.Fatal(err) log.Fatal(syncAndPubErr)
} }
if viper.GetBool("backfill.on") && viper.GetString("backfill.ipcPath") != "" { if viper.GetBool("backfill.on") && viper.GetString("backfill.ipcPath") != "" {
backfiller := newBackFiller(superNode.GetPublisher()) backfiller, newBackFillerErr := newBackFiller()
if err != nil { if newBackFillerErr != nil {
log.Fatal(err) log.Fatal(newBackFillerErr)
} }
backfiller.FillGaps(wg, nil) backfiller.FillGaps(wg, nil)
} }
@ -80,9 +76,9 @@ func syncAndPublish() {
} }
func getBlockChainAndClient(path string) (*geth.BlockChain, core.RpcClient) { func getBlockChainAndClient(path string) (*geth.BlockChain, core.RpcClient) {
rawRpcClient, err := rpc.Dial(path) rawRpcClient, dialErr := rpc.Dial(path)
if err != nil { if dialErr != nil {
log.Fatal(err) log.Fatal(dialErr)
} }
rpcClient := client.NewRpcClient(rawRpcClient, ipc) rpcClient := client.NewRpcClient(rawRpcClient, ipc)
ethClient := ethclient.NewClient(rawRpcClient) ethClient := ethclient.NewClient(rawRpcClient)
@ -99,9 +95,9 @@ func newSuperNode() (super_node.NodeInterface, error) {
quitChan := make(chan bool) quitChan := make(chan bool)
ipfsPath = viper.GetString("client.ipfsPath") ipfsPath = viper.GetString("client.ipfsPath")
if ipfsPath == "" { if ipfsPath == "" {
home, err := os.UserHomeDir() home, homeDirErr := os.UserHomeDir()
if err != nil { if homeDirErr != nil {
log.Fatal(err) log.Fatal(homeDirErr)
} }
ipfsPath = filepath.Join(home, ".ipfs") ipfsPath = filepath.Join(home, ".ipfs")
} }
@ -112,7 +108,7 @@ func newSuperNode() (super_node.NodeInterface, error) {
return super_node.NewSuperNode(ipfsPath, &db, rpcClient, quitChan, workers, blockChain.Node()) return super_node.NewSuperNode(ipfsPath, &db, rpcClient, quitChan, workers, blockChain.Node())
} }
func newBackFiller(ipfsPublisher ipfs.IPLDPublisher) super_node.BackFillInterface { func newBackFiller() (super_node.BackFillInterface, error) {
blockChain, archivalRpcClient := getBlockChainAndClient(viper.GetString("backfill.ipcPath")) blockChain, archivalRpcClient := getBlockChainAndClient(viper.GetString("backfill.ipcPath"))
db := utils.LoadPostgres(databaseConfig, blockChain.Node()) db := utils.LoadPostgres(databaseConfig, blockChain.Node())
freq := viper.GetInt("backfill.frequency") freq := viper.GetInt("backfill.frequency")
@ -122,5 +118,5 @@ func newBackFiller(ipfsPublisher ipfs.IPLDPublisher) super_node.BackFillInterfac
} else { } else {
frequency = time.Duration(freq) frequency = time.Duration(freq)
} }
return super_node.NewBackFillService(ipfsPublisher, &db, archivalRpcClient, time.Minute*frequency) return super_node.NewBackFillService(ipfsPath, &db, archivalRpcClient, time.Minute*frequency)
} }

View File

@ -16,11 +16,8 @@
package cmd package cmd
import ( import (
"os"
"path/filepath"
syn "sync" syn "sync"
"github.com/ethereum/go-ethereum/rpc"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
@ -47,51 +44,30 @@ func init() {
} }
func syncPublishScreenAndServe() { func syncPublishScreenAndServe() {
superNode, err := newSuperNode() superNode, newNodeErr := newSuperNode()
if err != nil { if newNodeErr != nil {
log.Fatal(err) log.Fatal(newNodeErr)
} }
wg := &syn.WaitGroup{} wg := &syn.WaitGroup{}
forwardPayloadChan := make(chan ipfs.IPLDPayload, 20000) forwardPayloadChan := make(chan ipfs.IPLDPayload, 20000)
forwardQuitChan := make(chan bool, 1) forwardQuitChan := make(chan bool, 1)
err = superNode.SyncAndPublish(wg, forwardPayloadChan, forwardQuitChan) syncAndPubErr := superNode.SyncAndPublish(wg, forwardPayloadChan, forwardQuitChan)
if err != nil { if syncAndPubErr != nil {
log.Fatal(err) log.Fatal(syncAndPubErr)
} }
superNode.ScreenAndServe(forwardPayloadChan, forwardQuitChan) superNode.ScreenAndServe(wg, forwardPayloadChan, forwardQuitChan)
if viper.GetBool("backfill.on") && viper.GetString("backfill.ipcPath") != "" { if viper.GetBool("backfill.on") && viper.GetString("backfill.ipcPath") != "" {
backfiller := newBackFiller(superNode.GetPublisher()) backfiller, newBackFillerErr := newBackFiller()
if err != nil { if newBackFillerErr != nil {
log.Fatal(err) log.Fatal(newBackFillerErr)
} }
backfiller.FillGaps(wg, nil) backfiller.FillGaps(wg, nil)
} }
var ipcPath string serverErr := startServers(superNode)
ipcPath = viper.GetString("server.ipcPath") if serverErr != nil {
if ipcPath == "" { log.Fatal(serverErr)
home, err := os.UserHomeDir()
if err != nil {
log.Fatal(err)
}
ipcPath = filepath.Join(home, ".vulcanize/vulcanize.ipc")
}
_, _, err = rpc.StartIPCEndpoint(ipcPath, superNode.APIs())
if err != nil {
log.Fatal(err)
}
var wsEndpoint string
wsEndpoint = viper.GetString("server.wsEndpoint")
if wsEndpoint == "" {
wsEndpoint = "127.0.0.1:8080"
}
var exposeAll = true
var wsOrigins []string = nil
_, _, err = rpc.StartWSEndpoint(wsEndpoint, superNode.APIs(), []string{"vulcanizedb"}, wsOrigins, exposeAll)
if err != nil {
log.Fatal(err)
} }
wg.Wait() wg.Wait()
} }

View File

@ -1,35 +0,0 @@
[subscription]
path = "ws://seed0.20c.com:8080"
backfill = true
backfillOnly = false
startingBlock = 0
endingBlock = 0
[subscription.headerFilter]
off = false
finalOnly = true
[subscription.trxFilter]
off = false
src = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
]
dst = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
]
[subscription.receiptFilter]
off = false
contracts = []
topic0s = [
"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
"0x930a61a57a70a73c2a503615b87e2e54fe5b9cdeacda518270b852296ab1a377"
]
[subscription.stateFilter]
off = false
addresses = [
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"
]
intermediateNodes = false
[subscription.storageFilter]
off = true
addresses = []
storageKeys = []
intermediateNodes = false

View File

@ -45,14 +45,14 @@ func NewPayloadConverter(chainConfig *params.ChainConfig) *Converter {
func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) { func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
// Unpack block rlp to access fields // Unpack block rlp to access fields
block := new(types.Block) block := new(types.Block)
err := rlp.DecodeBytes(payload.BlockRlp, block) decodeErr := rlp.DecodeBytes(payload.BlockRlp, block)
if err != nil { if decodeErr != nil {
return nil, err return nil, decodeErr
} }
header := block.Header() header := block.Header()
headerRlp, err := rlp.EncodeToBytes(header) headerRlp, encodeErr := rlp.EncodeToBytes(header)
if err != nil { if encodeErr != nil {
return nil, err return nil, encodeErr
} }
trxLen := len(block.Transactions()) trxLen := len(block.Transactions())
convertedPayload := &IPLDPayload{ convertedPayload := &IPLDPayload{
@ -70,9 +70,9 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
transactions := block.Transactions() transactions := block.Transactions()
for _, trx := range transactions { for _, trx := range transactions {
// Extract to and from data from the transactions for indexing // Extract to and from data from the transactions for indexing
from, err := types.Sender(signer, trx) from, senderErr := types.Sender(signer, trx)
if err != nil { if senderErr != nil {
return nil, err return nil, senderErr
} }
txMeta := &TrxMetaData{ txMeta := &TrxMetaData{
Dst: handleNullAddr(trx.To()), Dst: handleNullAddr(trx.To()),
@ -84,14 +84,14 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
// Decode receipts for this block // Decode receipts for this block
receipts := make(types.Receipts, 0) receipts := make(types.Receipts, 0)
err = rlp.DecodeBytes(payload.ReceiptsRlp, &receipts) decodeErr = rlp.DecodeBytes(payload.ReceiptsRlp, &receipts)
if err != nil { if decodeErr != nil {
return nil, err return nil, decodeErr
} }
// Derive any missing fields // Derive any missing fields
err = receipts.DeriveFields(pc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions()) deriveErr := receipts.DeriveFields(pc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions())
if err != nil { if deriveErr != nil {
return nil, err return nil, deriveErr
} }
for i, receipt := range receipts { for i, receipt := range receipts {
// If the transaction for this receipt has a "to" address, the above DeriveFields() fails to assign it to the receipt's ContractAddress // If the transaction for this receipt has a "to" address, the above DeriveFields() fails to assign it to the receipt's ContractAddress
@ -118,9 +118,9 @@ func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
// Unpack state diff rlp to access fields // Unpack state diff rlp to access fields
stateDiff := new(statediff.StateDiff) stateDiff := new(statediff.StateDiff)
err = rlp.DecodeBytes(payload.StateDiffRlp, stateDiff) decodeErr = rlp.DecodeBytes(payload.StateDiffRlp, stateDiff)
if err != nil { if decodeErr != nil {
return nil, err return nil, decodeErr
} }
for _, createdAccount := range stateDiff.CreatedAccounts { for _, createdAccount := range stateDiff.CreatedAccounts {
hashKey := common.BytesToHash(createdAccount.Key) hashKey := common.BytesToHash(createdAccount.Key)

View File

@ -28,7 +28,7 @@ import (
// IPLDFetcher is an interface for fetching IPLDs // IPLDFetcher is an interface for fetching IPLDs
type IPLDFetcher interface { type IPLDFetcher interface {
FetchCIDs(cids CIDWrapper) (*IPLDWrapper, error) FetchIPLDs(cids CIDWrapper) (*IPLDWrapper, error)
} }
// EthIPLDFetcher is used to fetch ETH IPLD objects from IPFS // EthIPLDFetcher is used to fetch ETH IPLD objects from IPFS
@ -47,8 +47,8 @@ func NewIPLDFetcher(ipfsPath string) (*EthIPLDFetcher, error) {
}, nil }, nil
} }
// FetchCIDs is the exported method for fetching and returning all the cids passed in a CIDWrapper // FetchIPLDs is the exported method for fetching and returning all the IPLDS specified in the CIDWrapper
func (f *EthIPLDFetcher) FetchCIDs(cids CIDWrapper) (*IPLDWrapper, error) { func (f *EthIPLDFetcher) FetchIPLDs(cids CIDWrapper) (*IPLDWrapper, error) {
log.Debug("fetching iplds") log.Debug("fetching iplds")
blocks := &IPLDWrapper{ blocks := &IPLDWrapper{
@ -61,29 +61,29 @@ func (f *EthIPLDFetcher) FetchCIDs(cids CIDWrapper) (*IPLDWrapper, error) {
StorageNodes: make(map[common.Hash]map[common.Hash]blocks.Block), StorageNodes: make(map[common.Hash]map[common.Hash]blocks.Block),
} }
err := f.fetchHeaders(cids, blocks) headersErr := f.fetchHeaders(cids, blocks)
if err != nil { if headersErr != nil {
return nil, err return nil, headersErr
} }
err = f.fetchUncles(cids, blocks) unclesErr := f.fetchUncles(cids, blocks)
if err != nil { if unclesErr != nil {
return nil, err return nil, unclesErr
} }
err = f.fetchTrxs(cids, blocks) trxsErr := f.fetchTrxs(cids, blocks)
if err != nil { if trxsErr != nil {
return nil, err return nil, trxsErr
} }
err = f.fetchRcts(cids, blocks) rctsErr := f.fetchRcts(cids, blocks)
if err != nil { if rctsErr != nil {
return nil, err return nil, rctsErr
} }
err = f.fetchStorage(cids, blocks) storageErr := f.fetchStorage(cids, blocks)
if err != nil { if storageErr != nil {
return nil, err return nil, storageErr
} }
err = f.fetchState(cids, blocks) stateErr := f.fetchState(cids, blocks)
if err != nil { if stateErr != nil {
return nil, err return nil, stateErr
} }
return blocks, nil return blocks, nil
@ -174,13 +174,13 @@ func (f *EthIPLDFetcher) fetchState(cids CIDWrapper, blocks *IPLDWrapper) error
if stateNode.CID == "" || stateNode.Key == "" { if stateNode.CID == "" || stateNode.Key == "" {
continue continue
} }
dc, err := cid.Decode(stateNode.CID) dc, decodeErr := cid.Decode(stateNode.CID)
if err != nil { if decodeErr != nil {
return err return decodeErr
} }
block, err := f.fetch(dc) block, fetchErr := f.fetch(dc)
if err != nil { if fetchErr != nil {
return err return fetchErr
} }
blocks.StateNodes[common.HexToHash(stateNode.Key)] = block blocks.StateNodes[common.HexToHash(stateNode.Key)] = block
} }
@ -196,13 +196,13 @@ func (f *EthIPLDFetcher) fetchStorage(cids CIDWrapper, blks *IPLDWrapper) error
if storageNode.CID == "" || storageNode.Key == "" || storageNode.StateKey == "" { if storageNode.CID == "" || storageNode.Key == "" || storageNode.StateKey == "" {
continue continue
} }
dc, err := cid.Decode(storageNode.CID) dc, decodeErr := cid.Decode(storageNode.CID)
if err != nil { if decodeErr != nil {
return err return decodeErr
} }
blk, err := f.fetch(dc) blk, fetchErr := f.fetch(dc)
if err != nil { if fetchErr != nil {
return err return fetchErr
} }
if blks.StorageNodes[common.HexToHash(storageNode.StateKey)] == nil { if blks.StorageNodes[common.HexToHash(storageNode.StateKey)] == nil {
blks.StorageNodes[common.HexToHash(storageNode.StateKey)] = make(map[common.Hash]blocks.Block) blks.StorageNodes[common.HexToHash(storageNode.StateKey)] = make(map[common.Hash]blocks.Block)

View File

@ -83,7 +83,7 @@ var _ = Describe("Fetcher", func() {
It("Fetches and returns IPLDs for the CIDs provided in the CIDWrapper", func() { It("Fetches and returns IPLDs for the CIDs provided in the CIDWrapper", func() {
fetcher := new(ipfs.EthIPLDFetcher) fetcher := new(ipfs.EthIPLDFetcher)
fetcher.BlockService = mockBlockService fetcher.BlockService = mockBlockService
iplds, err := fetcher.FetchCIDs(mockCIDWrapper) iplds, err := fetcher.FetchIPLDs(mockCIDWrapper)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(iplds.BlockNumber).To(Equal(mockCIDWrapper.BlockNumber)) Expect(iplds.BlockNumber).To(Equal(mockCIDWrapper.BlockNumber))
Expect(len(iplds.Headers)).To(Equal(1)) Expect(len(iplds.Headers)).To(Equal(1))

View File

@ -23,23 +23,38 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ipfs/go-blockservice" "github.com/ipfs/go-blockservice"
"github.com/ipfs/go-ipfs/core" "github.com/ipfs/go-ipfs/core"
"github.com/ipfs/go-ipfs/plugin/loader"
"github.com/ipfs/go-ipfs/repo/fsrepo" "github.com/ipfs/go-ipfs/repo/fsrepo"
) )
// InitIPFSPlugins is used to initialized IPFS plugins before creating a new IPFS node
// This should only be called once
func InitIPFSPlugins() error {
l, err := loader.NewPluginLoader("")
if err != nil {
return err
}
err = l.Initialize()
if err != nil {
return err
}
return l.Inject()
}
// InitIPFSBlockService is used to configure and return a BlockService using an ipfs repo path (e.g. ~/.ipfs) // InitIPFSBlockService is used to configure and return a BlockService using an ipfs repo path (e.g. ~/.ipfs)
func InitIPFSBlockService(ipfsPath string) (blockservice.BlockService, error) { func InitIPFSBlockService(ipfsPath string) (blockservice.BlockService, error) {
r, err := fsrepo.Open(ipfsPath) r, openErr := fsrepo.Open(ipfsPath)
if err != nil { if openErr != nil {
return nil, err return nil, openErr
} }
ctx := context.Background() ctx := context.Background()
cfg := &core.BuildCfg{ cfg := &core.BuildCfg{
Online: false, Online: false,
Repo: r, Repo: r,
} }
ipfsNode, err := core.NewNode(ctx, cfg) ipfsNode, newNodeErr := core.NewNode(ctx, cfg)
if err != nil { if newNodeErr != nil {
return nil, err return nil, newNodeErr
} }
return ipfsNode.Blocks, nil return ipfsNode.Blocks, nil
} }

View File

@ -18,6 +18,7 @@ package mocks
import ( import (
"fmt" "fmt"
"github.com/vulcanize/vulcanizedb/pkg/ipfs" "github.com/vulcanize/vulcanizedb/pkg/ipfs"
) )

View File

@ -22,8 +22,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ipfs/go-ipfs/plugin/loader"
"github.com/vulcanize/eth-block-extractor/pkg/ipfs" "github.com/vulcanize/eth-block-extractor/pkg/ipfs"
"github.com/vulcanize/eth-block-extractor/pkg/ipfs/eth_block_header" "github.com/vulcanize/eth-block-extractor/pkg/ipfs/eth_block_header"
"github.com/vulcanize/eth-block-extractor/pkg/ipfs/eth_block_receipts" "github.com/vulcanize/eth-block-extractor/pkg/ipfs/eth_block_receipts"
@ -49,18 +47,6 @@ type Publisher struct {
// NewIPLDPublisher creates a pointer to a new Publisher which satisfies the IPLDPublisher interface // NewIPLDPublisher creates a pointer to a new Publisher which satisfies the IPLDPublisher interface
func NewIPLDPublisher(ipfsPath string) (*Publisher, error) { func NewIPLDPublisher(ipfsPath string) (*Publisher, error) {
l, err := loader.NewPluginLoader("")
if err != nil {
return nil, err
}
err = l.Initialize()
if err != nil {
return nil, err
}
err = l.Inject()
if err != nil {
return nil, err
}
node, err := ipfs.InitIPFSNode(ipfsPath) node, err := ipfs.InitIPFSNode(ipfsPath)
if err != nil { if err != nil {
return nil, err return nil, err
@ -77,47 +63,47 @@ func NewIPLDPublisher(ipfsPath string) (*Publisher, error) {
// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload // Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
func (pub *Publisher) Publish(payload *IPLDPayload) (*CIDPayload, error) { func (pub *Publisher) Publish(payload *IPLDPayload) (*CIDPayload, error) {
// Process and publish headers // Process and publish headers
headerCid, err := pub.publishHeaders(payload.HeaderRLP) headerCid, headersErr := pub.publishHeaders(payload.HeaderRLP)
if err != nil { if headersErr != nil {
return nil, err return nil, headersErr
} }
// Process and publish uncles // Process and publish uncles
uncleCids := make(map[common.Hash]string) uncleCids := make(map[common.Hash]string)
for _, uncle := range payload.BlockBody.Uncles { for _, uncle := range payload.BlockBody.Uncles {
uncleRlp, err := rlp.EncodeToBytes(uncle) uncleRlp, encodeErr := rlp.EncodeToBytes(uncle)
if err != nil { if encodeErr != nil {
return nil, err return nil, encodeErr
} }
cid, err := pub.publishHeaders(uncleRlp) cid, unclesErr := pub.publishHeaders(uncleRlp)
if err != nil { if unclesErr != nil {
return nil, err return nil, unclesErr
} }
uncleCids[uncle.Hash()] = cid uncleCids[uncle.Hash()] = cid
} }
// Process and publish transactions // Process and publish transactions
transactionCids, err := pub.publishTransactions(payload.BlockBody, payload.TrxMetaData) transactionCids, trxsErr := pub.publishTransactions(payload.BlockBody, payload.TrxMetaData)
if err != nil { if trxsErr != nil {
return nil, err return nil, trxsErr
} }
// Process and publish receipts // Process and publish receipts
receiptsCids, err := pub.publishReceipts(payload.Receipts, payload.ReceiptMetaData) receiptsCids, rctsErr := pub.publishReceipts(payload.Receipts, payload.ReceiptMetaData)
if err != nil { if rctsErr != nil {
return nil, err return nil, rctsErr
} }
// Process and publish state leafs // Process and publish state leafs
stateNodeCids, err := pub.publishStateNodes(payload.StateNodes) stateNodeCids, stateErr := pub.publishStateNodes(payload.StateNodes)
if err != nil { if stateErr != nil {
return nil, err return nil, stateErr
} }
// Process and publish storage leafs // Process and publish storage leafs
storageNodeCids, err := pub.publishStorageNodes(payload.StorageNodes) storageNodeCids, storageErr := pub.publishStorageNodes(payload.StorageNodes)
if err != nil { if storageErr != nil {
return nil, err return nil, storageErr
} }
// Package CIDs and their metadata into a single struct // Package CIDs and their metadata into a single struct

View File

@ -24,7 +24,7 @@ import (
// IPLDResolver is the interface to resolving IPLDs // IPLDResolver is the interface to resolving IPLDs
type IPLDResolver interface { type IPLDResolver interface {
ResolveIPLDs(ipfsBlocks IPLDWrapper) (streamer.SuperNodePayload, error) ResolveIPLDs(ipfsBlocks IPLDWrapper) streamer.SuperNodePayload
} }
// EthIPLDResolver is the underlying struct to support the IPLDResolver interface // EthIPLDResolver is the underlying struct to support the IPLDResolver interface
@ -36,7 +36,7 @@ func NewIPLDResolver() *EthIPLDResolver {
} }
// ResolveIPLDs is the exported method for resolving all of the ETH IPLDs packaged in an IpfsBlockWrapper // ResolveIPLDs is the exported method for resolving all of the ETH IPLDs packaged in an IpfsBlockWrapper
func (eir *EthIPLDResolver) ResolveIPLDs(ipfsBlocks IPLDWrapper) (streamer.SuperNodePayload, error) { func (eir *EthIPLDResolver) ResolveIPLDs(ipfsBlocks IPLDWrapper) streamer.SuperNodePayload {
response := &streamer.SuperNodePayload{ response := &streamer.SuperNodePayload{
BlockNumber: ipfsBlocks.BlockNumber, BlockNumber: ipfsBlocks.BlockNumber,
StateNodesRlp: make(map[common.Hash][]byte), StateNodesRlp: make(map[common.Hash][]byte),
@ -48,7 +48,7 @@ func (eir *EthIPLDResolver) ResolveIPLDs(ipfsBlocks IPLDWrapper) (streamer.Super
eir.resolveReceipts(ipfsBlocks.Receipts, response) eir.resolveReceipts(ipfsBlocks.Receipts, response)
eir.resolveState(ipfsBlocks.StateNodes, response) eir.resolveState(ipfsBlocks.StateNodes, response)
eir.resolveStorage(ipfsBlocks.StorageNodes, response) eir.resolveStorage(ipfsBlocks.StorageNodes, response)
return *response, nil return *response
} }
func (eir *EthIPLDResolver) resolveHeaders(blocks []blocks.Block, response *streamer.SuperNodePayload) { func (eir *EthIPLDResolver) resolveHeaders(blocks []blocks.Block, response *streamer.SuperNodePayload) {

View File

@ -52,15 +52,19 @@ type BackFillService struct {
} }
// NewBackFillService returns a new BackFillInterface // NewBackFillService returns a new BackFillInterface
func NewBackFillService(ipfsPublisher ipfs.IPLDPublisher, db *postgres.DB, archivalNodeRpcClient core.RpcClient, freq time.Duration) BackFillInterface { func NewBackFillService(ipfsPath string, db *postgres.DB, archivalNodeRPCClient core.RpcClient, freq time.Duration) (BackFillInterface, error) {
publisher, err := ipfs.NewIPLDPublisher(ipfsPath)
if err != nil {
return nil, err
}
return &BackFillService{ return &BackFillService{
Repository: NewCIDRepository(db), Repository: NewCIDRepository(db),
Converter: ipfs.NewPayloadConverter(params.MainnetChainConfig), Converter: ipfs.NewPayloadConverter(params.MainnetChainConfig),
Publisher: ipfsPublisher, Publisher: publisher,
Retriever: NewCIDRetriever(db), Retriever: NewCIDRetriever(db),
StateDiffFetcher: fetcher.NewStateDiffFetcher(archivalNodeRpcClient), StateDiffFetcher: fetcher.NewStateDiffFetcher(archivalNodeRPCClient),
GapCheckFrequency: freq, GapCheckFrequency: freq,
} }, nil
} }
// FillGaps periodically checks for and fills in gaps in the super node db // FillGaps periodically checks for and fills in gaps in the super node db
@ -103,6 +107,7 @@ func (bfs *BackFillService) FillGaps(wg *sync.WaitGroup, quitChan <-chan bool) {
} }
} }
}() }()
log.Info("fillGaps goroutine successfully spun up")
} }
func (bfs *BackFillService) fillGaps(gap [2]int64) { func (bfs *BackFillService) fillGaps(gap [2]int64) {

View File

@ -44,25 +44,25 @@ func NewResponseFilterer() *Filterer {
// FilterResponse is used to filter through eth data to extract and package requested data into a Payload // FilterResponse is used to filter through eth data to extract and package requested data into a Payload
func (s *Filterer) FilterResponse(streamFilters config.Subscription, payload ipfs.IPLDPayload) (streamer.SuperNodePayload, error) { func (s *Filterer) FilterResponse(streamFilters config.Subscription, payload ipfs.IPLDPayload) (streamer.SuperNodePayload, error) {
response := new(streamer.SuperNodePayload) response := new(streamer.SuperNodePayload)
err := s.filterHeaders(streamFilters, response, payload) headersErr := s.filterHeaders(streamFilters, response, payload)
if err != nil { if headersErr != nil {
return streamer.SuperNodePayload{}, err return streamer.SuperNodePayload{}, headersErr
} }
txHashes, err := s.filterTransactions(streamFilters, response, payload) txHashes, trxsErr := s.filterTransactions(streamFilters, response, payload)
if err != nil { if trxsErr != nil {
return streamer.SuperNodePayload{}, err return streamer.SuperNodePayload{}, trxsErr
} }
err = s.filerReceipts(streamFilters, response, payload, txHashes) rctsErr := s.filerReceipts(streamFilters, response, payload, txHashes)
if err != nil { if rctsErr != nil {
return streamer.SuperNodePayload{}, err return streamer.SuperNodePayload{}, rctsErr
} }
err = s.filterState(streamFilters, response, payload) stateErr := s.filterState(streamFilters, response, payload)
if err != nil { if stateErr != nil {
return streamer.SuperNodePayload{}, err return streamer.SuperNodePayload{}, stateErr
} }
err = s.filterStorage(streamFilters, response, payload) storageErr := s.filterStorage(streamFilters, response, payload)
if err != nil { if storageErr != nil {
return streamer.SuperNodePayload{}, err return streamer.SuperNodePayload{}, storageErr
} }
response.BlockNumber = payload.BlockNumber response.BlockNumber = payload.BlockNumber
return *response, nil return *response, nil
@ -170,12 +170,12 @@ func checkReceipts(rct *types.Receipt, wantedTopics, actualTopics, wantedContrac
if wantedContract == actualContract { if wantedContract == actualContract {
if len(wantedTopics) == 0 { if len(wantedTopics) == 0 {
return true return true
} else { // Or if we have contracts and topics to filter on we only keep receipts that satisfy both conditions }
for _, wantedTopic := range wantedTopics { // Or if we have contracts and topics to filter on we only keep receipts that satisfy both conditions
for _, actualTopic := range actualTopics { for _, wantedTopic := range wantedTopics {
if wantedTopic == actualTopic { for _, actualTopic := range actualTopics {
return true if wantedTopic == actualTopic {
} return true
} }
} }
} }

View File

@ -44,43 +44,43 @@ func NewCIDRepository(db *postgres.DB) *Repository {
// Index indexes a cidPayload in Postgres // Index indexes a cidPayload in Postgres
func (repo *Repository) Index(cidPayload *ipfs.CIDPayload) error { func (repo *Repository) Index(cidPayload *ipfs.CIDPayload) error {
tx, err := repo.db.Beginx() tx, beginErr := repo.db.Beginx()
if err != nil { if beginErr != nil {
return err return beginErr
} }
headerID, err := repo.indexHeaderCID(tx, cidPayload.HeaderCID, cidPayload.BlockNumber, cidPayload.BlockHash.Hex()) headerID, headerErr := repo.indexHeaderCID(tx, cidPayload.HeaderCID, cidPayload.BlockNumber, cidPayload.BlockHash.Hex())
if err != nil { if headerErr != nil {
rollbackErr := tx.Rollback() rollbackErr := tx.Rollback()
if rollbackErr != nil { if rollbackErr != nil {
log.Error(rollbackErr) log.Error(rollbackErr)
} }
return err return headerErr
} }
for uncleHash, cid := range cidPayload.UncleCIDs { for uncleHash, cid := range cidPayload.UncleCIDs {
err = repo.indexUncleCID(tx, cid, cidPayload.BlockNumber, uncleHash.Hex()) uncleErr := repo.indexUncleCID(tx, cid, cidPayload.BlockNumber, uncleHash.Hex())
if err != nil { if uncleErr != nil {
rollbackErr := tx.Rollback() rollbackErr := tx.Rollback()
if rollbackErr != nil { if rollbackErr != nil {
log.Error(rollbackErr) log.Error(rollbackErr)
} }
return err return uncleErr
} }
} }
err = repo.indexTransactionAndReceiptCIDs(tx, cidPayload, headerID) trxAndRctErr := repo.indexTransactionAndReceiptCIDs(tx, cidPayload, headerID)
if err != nil { if trxAndRctErr != nil {
rollbackErr := tx.Rollback() rollbackErr := tx.Rollback()
if rollbackErr != nil { if rollbackErr != nil {
log.Error(rollbackErr) log.Error(rollbackErr)
} }
return err return trxAndRctErr
} }
err = repo.indexStateAndStorageCIDs(tx, cidPayload, headerID) stateAndStorageErr := repo.indexStateAndStorageCIDs(tx, cidPayload, headerID)
if err != nil { if stateAndStorageErr != nil {
rollbackErr := tx.Rollback() rollbackErr := tx.Rollback()
if rollbackErr != nil { if rollbackErr != nil {
log.Error(rollbackErr) log.Error(rollbackErr)
} }
return err return stateAndStorageErr
} }
return tx.Commit() return tx.Commit()
} }
@ -104,18 +104,18 @@ func (repo *Repository) indexUncleCID(tx *sqlx.Tx, cid, blockNumber, hash string
func (repo *Repository) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *ipfs.CIDPayload, headerID int64) error { func (repo *Repository) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *ipfs.CIDPayload, headerID int64) error {
for hash, trxCidMeta := range payload.TransactionCIDs { for hash, trxCidMeta := range payload.TransactionCIDs {
var txID int64 var txID int64
err := tx.QueryRowx(`INSERT INTO public.transaction_cids (header_id, tx_hash, cid, dst, src) VALUES ($1, $2, $3, $4, $5) queryErr := tx.QueryRowx(`INSERT INTO public.transaction_cids (header_id, tx_hash, cid, dst, src) VALUES ($1, $2, $3, $4, $5)
ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src) = ($3, $4, $5) ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src) = ($3, $4, $5)
RETURNING id`, RETURNING id`,
headerID, hash.Hex(), trxCidMeta.CID, trxCidMeta.Dst, trxCidMeta.Src).Scan(&txID) headerID, hash.Hex(), trxCidMeta.CID, trxCidMeta.Dst, trxCidMeta.Src).Scan(&txID)
if err != nil { if queryErr != nil {
return err return queryErr
} }
receiptCidMeta, ok := payload.ReceiptCIDs[hash] receiptCidMeta, ok := payload.ReceiptCIDs[hash]
if ok { if ok {
err = repo.indexReceiptCID(tx, receiptCidMeta, txID) rctErr := repo.indexReceiptCID(tx, receiptCidMeta, txID)
if err != nil { if rctErr != nil {
return err return rctErr
} }
} }
} }
@ -131,17 +131,17 @@ func (repo *Repository) indexReceiptCID(tx *sqlx.Tx, cidMeta *ipfs.ReceiptMetaDa
func (repo *Repository) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *ipfs.CIDPayload, headerID int64) error { func (repo *Repository) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *ipfs.CIDPayload, headerID int64) error {
for accountKey, stateCID := range payload.StateNodeCIDs { for accountKey, stateCID := range payload.StateNodeCIDs {
var stateID int64 var stateID int64
err := tx.QueryRowx(`INSERT INTO public.state_cids (header_id, state_key, cid, leaf) VALUES ($1, $2, $3, $4) queryErr := tx.QueryRowx(`INSERT INTO public.state_cids (header_id, state_key, cid, leaf) VALUES ($1, $2, $3, $4)
ON CONFLICT (header_id, state_key) DO UPDATE SET (cid, leaf) = ($3, $4) ON CONFLICT (header_id, state_key) DO UPDATE SET (cid, leaf) = ($3, $4)
RETURNING id`, RETURNING id`,
headerID, accountKey.Hex(), stateCID.CID, stateCID.Leaf).Scan(&stateID) headerID, accountKey.Hex(), stateCID.CID, stateCID.Leaf).Scan(&stateID)
if err != nil { if queryErr != nil {
return err return queryErr
} }
for _, storageCID := range payload.StorageNodeCIDs[accountKey] { for _, storageCID := range payload.StorageNodeCIDs[accountKey] {
err = repo.indexStorageCID(tx, storageCID, stateID) storageErr := repo.indexStorageCID(tx, storageCID, stateID)
if err != nil { if storageErr != nil {
return err return storageErr
} }
} }
} }

View File

@ -48,13 +48,14 @@ func NewCIDRetriever(db *postgres.DB) *EthCIDRetriever {
} }
} }
// RetrieveFirstBlockNumber is used to retrieve the first block number in the db
func (ecr *EthCIDRetriever) RetrieveFirstBlockNumber() (int64, error) { func (ecr *EthCIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
var blockNumber int64 var blockNumber int64
err := ecr.db.Get(&blockNumber, "SELECT block_number FROM header_cids ORDER BY block_number ASC LIMIT 1") err := ecr.db.Get(&blockNumber, "SELECT block_number FROM header_cids ORDER BY block_number ASC LIMIT 1")
return blockNumber, err return blockNumber, err
} }
// GetLastBlockNumber is used to retrieve the latest block number in the cache // RetrieveLastBlockNumber is used to retrieve the latest block number in the db
func (ecr *EthCIDRetriever) RetrieveLastBlockNumber() (int64, error) { func (ecr *EthCIDRetriever) RetrieveLastBlockNumber() (int64, error) {
var blockNumber int64 var blockNumber int64
err := ecr.db.Get(&blockNumber, "SELECT block_number FROM header_cids ORDER BY block_number DESC LIMIT 1 ") err := ecr.db.Get(&blockNumber, "SELECT block_number FROM header_cids ORDER BY block_number DESC LIMIT 1 ")
@ -64,10 +65,9 @@ func (ecr *EthCIDRetriever) RetrieveLastBlockNumber() (int64, error) {
// RetrieveCIDs is used to retrieve all of the CIDs which conform to the passed StreamFilters // RetrieveCIDs is used to retrieve all of the CIDs which conform to the passed StreamFilters
func (ecr *EthCIDRetriever) RetrieveCIDs(streamFilters config.Subscription, blockNumber int64) (*ipfs.CIDWrapper, error) { func (ecr *EthCIDRetriever) RetrieveCIDs(streamFilters config.Subscription, blockNumber int64) (*ipfs.CIDWrapper, error) {
log.Debug("retrieving cids") log.Debug("retrieving cids")
var err error tx, beginErr := ecr.db.Beginx()
tx, err := ecr.db.Beginx() if beginErr != nil {
if err != nil { return nil, beginErr
return nil, err
} }
// THIS IS SUPER EXPENSIVE HAVING TO CYCLE THROUGH EACH BLOCK, NEED BETTER WAY TO FETCH CIDS // THIS IS SUPER EXPENSIVE HAVING TO CYCLE THROUGH EACH BLOCK, NEED BETTER WAY TO FETCH CIDS
// WHILE STILL MAINTAINING RELATION INFO ABOUT WHAT BLOCK THE CIDS BELONG TO // WHILE STILL MAINTAINING RELATION INFO ABOUT WHAT BLOCK THE CIDS BELONG TO
@ -76,24 +76,26 @@ func (ecr *EthCIDRetriever) RetrieveCIDs(streamFilters config.Subscription, bloc
// Retrieve cached header CIDs // Retrieve cached header CIDs
if !streamFilters.HeaderFilter.Off { if !streamFilters.HeaderFilter.Off {
cw.Headers, err = ecr.retrieveHeaderCIDs(tx, streamFilters, blockNumber) var headersErr error
if err != nil { cw.Headers, headersErr = ecr.retrieveHeaderCIDs(tx, streamFilters, blockNumber)
if headersErr != nil {
rollbackErr := tx.Rollback() rollbackErr := tx.Rollback()
if rollbackErr != nil { if rollbackErr != nil {
log.Error(rollbackErr) log.Error(rollbackErr)
} }
log.Error("header cid retrieval error") log.Error("header cid retrieval error")
return nil, err return nil, headersErr
} }
if !streamFilters.HeaderFilter.FinalOnly { if !streamFilters.HeaderFilter.FinalOnly {
cw.Uncles, err = ecr.retrieveUncleCIDs(tx, streamFilters, blockNumber) var unclesErr error
if err != nil { cw.Uncles, unclesErr = ecr.retrieveUncleCIDs(tx, streamFilters, blockNumber)
if unclesErr != nil {
rollbackErr := tx.Rollback() rollbackErr := tx.Rollback()
if rollbackErr != nil { if rollbackErr != nil {
log.Error(rollbackErr) log.Error(rollbackErr)
} }
log.Error("uncle cid retrieval error") log.Error("uncle cid retrieval error")
return nil, err return nil, unclesErr
} }
} }
} }
@ -101,53 +103,57 @@ func (ecr *EthCIDRetriever) RetrieveCIDs(streamFilters config.Subscription, bloc
// Retrieve cached trx CIDs // Retrieve cached trx CIDs
var trxIds []int64 var trxIds []int64
if !streamFilters.TrxFilter.Off { if !streamFilters.TrxFilter.Off {
cw.Transactions, trxIds, err = ecr.retrieveTrxCIDs(tx, streamFilters, blockNumber) var trxsErr error
if err != nil { cw.Transactions, trxIds, trxsErr = ecr.retrieveTrxCIDs(tx, streamFilters, blockNumber)
if trxsErr != nil {
rollbackErr := tx.Rollback() rollbackErr := tx.Rollback()
if rollbackErr != nil { if rollbackErr != nil {
log.Error(rollbackErr) log.Error(rollbackErr)
} }
log.Error("transaction cid retrieval error") log.Error("transaction cid retrieval error")
return nil, err return nil, trxsErr
} }
} }
// Retrieve cached receipt CIDs // Retrieve cached receipt CIDs
if !streamFilters.ReceiptFilter.Off { if !streamFilters.ReceiptFilter.Off {
cw.Receipts, err = ecr.retrieveRctCIDs(tx, streamFilters, blockNumber, trxIds) var rctsErr error
if err != nil { cw.Receipts, rctsErr = ecr.retrieveRctCIDs(tx, streamFilters, blockNumber, trxIds)
if rctsErr != nil {
rollbackErr := tx.Rollback() rollbackErr := tx.Rollback()
if rollbackErr != nil { if rollbackErr != nil {
log.Error(rollbackErr) log.Error(rollbackErr)
} }
log.Error("receipt cid retrieval error") log.Error("receipt cid retrieval error")
return nil, err return nil, rctsErr
} }
} }
// Retrieve cached state CIDs // Retrieve cached state CIDs
if !streamFilters.StateFilter.Off { if !streamFilters.StateFilter.Off {
cw.StateNodes, err = ecr.retrieveStateCIDs(tx, streamFilters, blockNumber) var stateErr error
if err != nil { cw.StateNodes, stateErr = ecr.retrieveStateCIDs(tx, streamFilters, blockNumber)
if stateErr != nil {
rollbackErr := tx.Rollback() rollbackErr := tx.Rollback()
if rollbackErr != nil { if rollbackErr != nil {
log.Error(rollbackErr) log.Error(rollbackErr)
} }
log.Error("state cid retrieval error") log.Error("state cid retrieval error")
return nil, err return nil, stateErr
} }
} }
// Retrieve cached storage CIDs // Retrieve cached storage CIDs
if !streamFilters.StorageFilter.Off { if !streamFilters.StorageFilter.Off {
cw.StorageNodes, err = ecr.retrieveStorageCIDs(tx, streamFilters, blockNumber) var storageErr error
if err != nil { cw.StorageNodes, storageErr = ecr.retrieveStorageCIDs(tx, streamFilters, blockNumber)
if storageErr != nil {
rollbackErr := tx.Rollback() rollbackErr := tx.Rollback()
if rollbackErr != nil { if rollbackErr != nil {
log.Error(rollbackErr) log.Error(rollbackErr)
} }
log.Error("storage cid retrieval error") log.Error("storage cid retrieval error")
return nil, err return nil, storageErr
} }
} }
@ -310,6 +316,7 @@ type gap struct {
Stop int64 `db:"stop"` Stop int64 `db:"stop"`
} }
// RetrieveGapsInData is used to find the the block numbers at which we are missing data in the db
func (ecr *EthCIDRetriever) RetrieveGapsInData() ([][2]int64, error) { func (ecr *EthCIDRetriever) RetrieveGapsInData() ([][2]int64, error) {
pgStr := `SELECT header_cids.block_number + 1 AS start, min(fr.block_number) - 1 AS stop FROM header_cids pgStr := `SELECT header_cids.block_number + 1 AS start, min(fr.block_number) - 1 AS stop FROM header_cids
LEFT JOIN header_cids r on header_cids.block_number = r.block_number - 1 LEFT JOIN header_cids r on header_cids.block_number = r.block_number - 1

View File

@ -49,16 +49,13 @@ type NodeInterface interface {
// Main event loop for syncAndPublish processes // Main event loop for syncAndPublish processes
SyncAndPublish(wg *sync.WaitGroup, forwardPayloadChan chan<- ipfs.IPLDPayload, forwardQuitchan chan<- bool) error SyncAndPublish(wg *sync.WaitGroup, forwardPayloadChan chan<- ipfs.IPLDPayload, forwardQuitchan chan<- bool) error
// Main event loop for handling client pub-sub // Main event loop for handling client pub-sub
ScreenAndServe(screenAndServePayload <-chan ipfs.IPLDPayload, screenAndServeQuit <-chan bool) ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan ipfs.IPLDPayload, screenAndServeQuit <-chan bool)
// Method to subscribe to receive state diff processing output // Method to subscribe to receive state diff processing output
Subscribe(id rpc.ID, sub chan<- streamer.SuperNodePayload, quitChan chan<- bool, streamFilters config.Subscription) Subscribe(id rpc.ID, sub chan<- streamer.SuperNodePayload, quitChan chan<- bool, streamFilters config.Subscription)
// Method to unsubscribe from state diff processing // Method to unsubscribe from state diff processing
Unsubscribe(id rpc.ID) Unsubscribe(id rpc.ID)
// Method to access the Geth node info for this service // Method to access the Geth node info for this service
Node() core.Node Node() core.Node
// Method used to retrieve the underlying IPFS publisher for this service, so that is can be used for backfilling
// This is needed because it's not possible to initialize two ipfs nodes at the same path
GetPublisher() ipfs.IPLDPublisher
} }
// Service is the underlying struct for the super node // Service is the underlying struct for the super node
@ -92,18 +89,22 @@ type Service struct {
// Number of workers // Number of workers
WorkerPoolSize int WorkerPoolSize int
// Info for the Geth node that this super node is working with // Info for the Geth node that this super node is working with
gethNode core.Node GethNode core.Node
} }
// NewSuperNode creates a new super_node.Interface using an underlying super_node.Service struct // NewSuperNode creates a new super_node.Interface using an underlying super_node.Service struct
func NewSuperNode(ipfsPath string, db *postgres.DB, rpcClient core.RpcClient, qc chan bool, workers int, node core.Node) (NodeInterface, error) { func NewSuperNode(ipfsPath string, db *postgres.DB, rpcClient core.RpcClient, qc chan bool, workers int, node core.Node) (NodeInterface, error) {
publisher, err := ipfs.NewIPLDPublisher(ipfsPath) ipfsInitErr := ipfs.InitIPFSPlugins()
if err != nil { if ipfsInitErr != nil {
return nil, err return nil, ipfsInitErr
} }
ipldFetcher, err := ipfs.NewIPLDFetcher(ipfsPath) publisher, newPublisherErr := ipfs.NewIPLDPublisher(ipfsPath)
if err != nil { if newPublisherErr != nil {
return nil, err return nil, newPublisherErr
}
ipldFetcher, newFetcherErr := ipfs.NewIPLDFetcher(ipfsPath)
if newFetcherErr != nil {
return nil, newFetcherErr
} }
return &Service{ return &Service{
Streamer: streamer.NewStateDiffStreamer(rpcClient), Streamer: streamer.NewStateDiffStreamer(rpcClient),
@ -119,7 +120,7 @@ func NewSuperNode(ipfsPath string, db *postgres.DB, rpcClient core.RpcClient, qc
Subscriptions: make(map[common.Hash]map[rpc.ID]Subscription), Subscriptions: make(map[common.Hash]map[rpc.ID]Subscription),
SubscriptionTypes: make(map[common.Hash]config.Subscription), SubscriptionTypes: make(map[common.Hash]config.Subscription),
WorkerPoolSize: workers, WorkerPoolSize: workers,
gethNode: node, GethNode: node,
}, nil }, nil
} }
@ -144,9 +145,9 @@ func (sap *Service) APIs() []rpc.API {
// This continues on no matter if or how many subscribers there are, it then forwards the data to the ScreenAndServe() loop // This continues on no matter if or how many subscribers there are, it then forwards the data to the ScreenAndServe() loop
// which filters and sends relevant data to client subscriptions, if there are any // which filters and sends relevant data to client subscriptions, if there are any
func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload chan<- ipfs.IPLDPayload, screenAndServeQuit chan<- bool) error { func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload chan<- ipfs.IPLDPayload, screenAndServeQuit chan<- bool) error {
sub, err := sap.Streamer.Stream(sap.PayloadChan) sub, streamErr := sap.Streamer.Stream(sap.PayloadChan)
if err != nil { if streamErr != nil {
return err return streamErr
} }
wg.Add(1) wg.Add(1)
@ -158,14 +159,13 @@ func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload cha
for i := 0; i < sap.WorkerPoolSize; i++ { for i := 0; i < sap.WorkerPoolSize; i++ {
sap.publishAndIndex(i, publishAndIndexPayload, publishAndIndexQuit) sap.publishAndIndex(i, publishAndIndexPayload, publishAndIndexQuit)
} }
go func() { go func() {
for { for {
select { select {
case payload := <-sap.PayloadChan: case payload := <-sap.PayloadChan:
ipldPayload, err := sap.Converter.Convert(payload) ipldPayload, convertErr := sap.Converter.Convert(payload)
if err != nil { if convertErr != nil {
log.Error(err) log.Error(convertErr)
continue continue
} }
// If we have a ScreenAndServe process running, forward the payload to it // If we have a ScreenAndServe process running, forward the payload to it
@ -178,8 +178,8 @@ func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload cha
case publishAndIndexPayload <- *ipldPayload: case publishAndIndexPayload <- *ipldPayload:
default: default:
} }
case err = <-sub.Err(): case subErr := <-sub.Err():
log.Error(err) log.Error(subErr)
case <-sap.QuitChan: case <-sap.QuitChan:
// If we have a ScreenAndServe process running, forward the quit signal to it // If we have a ScreenAndServe process running, forward the quit signal to it
select { select {
@ -199,7 +199,7 @@ func (sap *Service) SyncAndPublish(wg *sync.WaitGroup, screenAndServePayload cha
} }
} }
}() }()
log.Info("syncAndPublish goroutine successfully spun up")
return nil return nil
} }
@ -208,14 +208,14 @@ func (sap *Service) publishAndIndex(id int, publishAndIndexPayload <-chan ipfs.I
for { for {
select { select {
case payload := <-publishAndIndexPayload: case payload := <-publishAndIndexPayload:
cidPayload, err := sap.Publisher.Publish(&payload) cidPayload, publishErr := sap.Publisher.Publish(&payload)
if err != nil { if publishErr != nil {
log.Errorf("worker %d error: %v", id, err) log.Errorf("worker %d error: %v", id, publishErr)
continue continue
} }
err = sap.Repository.Index(cidPayload) indexErr := sap.Repository.Index(cidPayload)
if err != nil { if indexErr != nil {
log.Errorf("worker %d error: %v", id, err) log.Errorf("worker %d error: %v", id, indexErr)
} }
case <-publishAndIndexQuit: case <-publishAndIndexQuit:
log.Infof("quiting publishAndIndex worker %d", id) log.Infof("quiting publishAndIndex worker %d", id)
@ -223,25 +223,29 @@ func (sap *Service) publishAndIndex(id int, publishAndIndexPayload <-chan ipfs.I
} }
} }
}() }()
log.Info("publishAndIndex goroutine successfully spun up")
} }
// ScreenAndServe is the loop used to screen data streamed from the state diffing eth node // ScreenAndServe is the loop used to screen data streamed from the state diffing eth node
// and send the appropriate portions of it to a requesting client subscription, according to their subscription configuration // and send the appropriate portions of it to a requesting client subscription, according to their subscription configuration
func (sap *Service) ScreenAndServe(screenAndServePayload <-chan ipfs.IPLDPayload, screenAndServeQuit <-chan bool) { func (sap *Service) ScreenAndServe(wg *sync.WaitGroup, screenAndServePayload <-chan ipfs.IPLDPayload, screenAndServeQuit <-chan bool) {
wg.Add(1)
go func() { go func() {
for { for {
select { select {
case payload := <-screenAndServePayload: case payload := <-screenAndServePayload:
err := sap.sendResponse(payload) sendErr := sap.sendResponse(payload)
if err != nil { if sendErr != nil {
log.Error(err) log.Error(sendErr)
} }
case <-screenAndServeQuit: case <-screenAndServeQuit:
log.Info("quiting ScreenAndServe process") log.Info("quiting ScreenAndServe process")
wg.Done()
return return
} }
} }
}() }()
log.Info("screenAndServe goroutine successfully spun up")
} }
func (sap *Service) sendResponse(payload ipfs.IPLDPayload) error { func (sap *Service) sendResponse(payload ipfs.IPLDPayload) error {
@ -253,9 +257,9 @@ func (sap *Service) sendResponse(payload ipfs.IPLDPayload) error {
log.Errorf("subscription configuration for subscription type %s not available", ty.Hex()) log.Errorf("subscription configuration for subscription type %s not available", ty.Hex())
continue continue
} }
response, err := sap.Filterer.FilterResponse(subConfig, payload) response, filterErr := sap.Filterer.FilterResponse(subConfig, payload)
if err != nil { if filterErr != nil {
log.Error(err) log.Error(filterErr)
continue continue
} }
for id, sub := range subs { for id, sub := range subs {
@ -276,9 +280,9 @@ func (sap *Service) Subscribe(id rpc.ID, sub chan<- streamer.SuperNodePayload, q
log.Info("Subscribing to the super node service") log.Info("Subscribing to the super node service")
// Subscription type is defined as the hash of its content // Subscription type is defined as the hash of its content
// Group subscriptions by type and screen payloads once for subs of the same type // Group subscriptions by type and screen payloads once for subs of the same type
by, err := rlp.EncodeToBytes(streamFilters) by, encodeErr := rlp.EncodeToBytes(streamFilters)
if err != nil { if encodeErr != nil {
log.Error(err) log.Error(encodeErr)
} }
subscriptionHash := crypto.Keccak256(by) subscriptionHash := crypto.Keccak256(by)
subscriptionType := common.BytesToHash(subscriptionHash) subscriptionType := common.BytesToHash(subscriptionHash)
@ -307,20 +311,21 @@ func (sap *Service) backFill(sub Subscription, id rpc.ID, con config.Subscriptio
// Retrieve cached CIDs relevant to this subscriber // Retrieve cached CIDs relevant to this subscriber
var endingBlock int64 var endingBlock int64
var startingBlock int64 var startingBlock int64
var err error var retrieveFirstBlockErr error
startingBlock, err = sap.Retriever.RetrieveFirstBlockNumber() var retrieveLastBlockErr error
if err != nil { startingBlock, retrieveFirstBlockErr = sap.Retriever.RetrieveFirstBlockNumber()
if retrieveFirstBlockErr != nil {
sub.PayloadChan <- streamer.SuperNodePayload{ sub.PayloadChan <- streamer.SuperNodePayload{
ErrMsg: "unable to set block range start; error: " + err.Error(), ErrMsg: "unable to set block range start; error: " + retrieveFirstBlockErr.Error(),
} }
} }
if startingBlock < con.StartingBlock.Int64() { if startingBlock < con.StartingBlock.Int64() {
startingBlock = con.StartingBlock.Int64() startingBlock = con.StartingBlock.Int64()
} }
endingBlock, err = sap.Retriever.RetrieveLastBlockNumber() endingBlock, retrieveLastBlockErr = sap.Retriever.RetrieveLastBlockNumber()
if err != nil { if retrieveLastBlockErr != nil {
sub.PayloadChan <- streamer.SuperNodePayload{ sub.PayloadChan <- streamer.SuperNodePayload{
ErrMsg: "unable to set block range end; error: " + err.Error(), ErrMsg: "unable to set block range end; error: " + retrieveLastBlockErr.Error(),
} }
} }
if endingBlock > con.EndingBlock.Int64() && con.EndingBlock.Int64() > 0 && con.EndingBlock.Int64() > startingBlock { if endingBlock > con.EndingBlock.Int64() && con.EndingBlock.Int64() > 0 && con.EndingBlock.Int64() > startingBlock {
@ -333,32 +338,25 @@ func (sap *Service) backFill(sub Subscription, id rpc.ID, con config.Subscriptio
// TODO: separate backfill into a different rpc subscription method altogether? // TODO: separate backfill into a different rpc subscription method altogether?
go func() { go func() {
for i := startingBlock; i <= endingBlock; i++ { for i := startingBlock; i <= endingBlock; i++ {
cidWrapper, err := sap.Retriever.RetrieveCIDs(con, i) cidWrapper, retrieveCIDsErr := sap.Retriever.RetrieveCIDs(con, i)
if err != nil { if retrieveCIDsErr != nil {
sub.PayloadChan <- streamer.SuperNodePayload{ sub.PayloadChan <- streamer.SuperNodePayload{
ErrMsg: "CID retrieval error: " + err.Error(), ErrMsg: "CID retrieval error: " + retrieveCIDsErr.Error(),
} }
continue continue
} }
if ipfs.EmptyCIDWrapper(*cidWrapper) { if ipfs.EmptyCIDWrapper(*cidWrapper) {
continue continue
} }
blocksWrapper, err := sap.IPLDFetcher.FetchCIDs(*cidWrapper) blocksWrapper, fetchIPLDsErr := sap.IPLDFetcher.FetchIPLDs(*cidWrapper)
if err != nil { if fetchIPLDsErr != nil {
log.Error(err) log.Error(fetchIPLDsErr)
sub.PayloadChan <- streamer.SuperNodePayload{ sub.PayloadChan <- streamer.SuperNodePayload{
ErrMsg: "IPLD fetching error: " + err.Error(), ErrMsg: "IPLD fetching error: " + fetchIPLDsErr.Error(),
}
continue
}
backFillIplds, err := sap.Resolver.ResolveIPLDs(*blocksWrapper)
if err != nil {
log.Error(err)
sub.PayloadChan <- streamer.SuperNodePayload{
ErrMsg: "IPLD resolving error: " + err.Error(),
} }
continue continue
} }
backFillIplds := sap.Resolver.ResolveIPLDs(*blocksWrapper)
select { select {
case sub.PayloadChan <- backFillIplds: case sub.PayloadChan <- backFillIplds:
log.Infof("sending super node back-fill payload to subscription %s", id) log.Infof("sending super node back-fill payload to subscription %s", id)
@ -393,7 +391,7 @@ func (sap *Service) Start(*p2p.Server) error {
if err := sap.SyncAndPublish(wg, payloadChan, quitChan); err != nil { if err := sap.SyncAndPublish(wg, payloadChan, quitChan); err != nil {
return err return err
} }
sap.ScreenAndServe(payloadChan, quitChan) sap.ScreenAndServe(wg, payloadChan, quitChan)
return nil return nil
} }
@ -406,7 +404,7 @@ func (sap *Service) Stop() error {
// Node returns the Geth node info for this service // Node returns the Geth node info for this service
func (sap *Service) Node() core.Node { func (sap *Service) Node() core.Node {
return sap.gethNode return sap.GethNode
} }
// close is used to close all listening subscriptions // close is used to close all listening subscriptions
@ -426,7 +424,3 @@ func (sap *Service) close() {
} }
sap.Unlock() sap.Unlock()
} }
func (sap *Service) GetPublisher() ipfs.IPLDPublisher {
return sap.Publisher
}

View File

@ -26,6 +26,7 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres" "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
) )
// SetupDB is use to setup a db for super node tests
func SetupDB() (*postgres.DB, error) { func SetupDB() (*postgres.DB, error) {
return postgres.NewDB(config.Database{ return postgres.NewDB(config.Database{
Hostname: "localhost", Hostname: "localhost",
@ -34,6 +35,7 @@ func SetupDB() (*postgres.DB, error) {
}, core.Node{}) }, core.Node{})
} }
// TearDownDB is used to tear down the super node dbs after tests
func TearDownDB(db *postgres.DB) { func TearDownDB(db *postgres.DB) {
tx, err := db.Beginx() tx, err := db.Beginx()
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -55,6 +57,7 @@ func TearDownDB(db *postgres.DB) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
// ListContainsString used to check if a list of strings contains a particular string
func ListContainsString(sss []string, s string) bool { func ListContainsString(sss []string, s string) bool {
for _, str := range sss { for _, str := range sss {
if s == str { if s == str {
@ -64,6 +67,7 @@ func ListContainsString(sss []string, s string) bool {
return false return false
} }
// ListContainsBytes used to check if a list of byte arrays contains a particular byte array
func ListContainsBytes(bbb [][]byte, b []byte) bool { func ListContainsBytes(bbb [][]byte, b []byte) bool {
for _, by := range bbb { for _, by := range bbb {
if bytes.Equal(by, b) { if bytes.Equal(by, b) {