Merge pull request #4 from vulcanize/backfill_head_to_tail

Reference IPLD data by multihash-key foreign key
Ian Norden 2020-08-05 00:01:51 -05:00 committed by GitHub
commit 96ac218218
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
102 changed files with 1246 additions and 1428 deletions
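In short: every `*_cids` index row now carries an `mh_key` column with a foreign key into `public.blocks (key)`, the multihash-keyed blockstore table, so the IPLD object behind an indexed CID can be retrieved with an ordinary SQL join instead of a separate blockstore lookup. A minimal sketch of the resulting read path (assuming the blockstore table exposes the conventional `key`/`data` pair; the connection string is hypothetical):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // Postgres driver
)

func main() {
	// hypothetical DSN, for illustration only
	db, err := sql.Open("postgres", "postgres://vdbm@localhost:5432/vulcanize_public?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Fetch a header's CID and its raw IPLD block in one join through mh_key
	var cid string
	var data []byte
	err = db.QueryRow(`
		SELECT header_cids.cid, blocks.data
		FROM eth.header_cids
		JOIN public.blocks ON header_cids.mh_key = blocks.key
		WHERE header_cids.block_number = $1`, 1000000).Scan(&cid, &data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("cid: %s, %d raw bytes\n", cid, len(data))
}
```

The `ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED` clauses that recur throughout the migrations below tie each index row to the lifetime of its underlying block and let writers order their inserts freely within a transaction.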

View File

@@ -192,7 +192,7 @@ This set of parameters needs to be set no matter the chain type.
 path = "~/.ipfs" # $IPFS_PATH
 mode = "postgres" # $IPFS_MODE
-[superNode]
+[watcher]
 chain = "bitcoin" # $SUPERNODE_CHAIN
 server = true # $SUPERNODE_SERVER
 ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH

View File

@@ -30,7 +30,7 @@ import (
 var resyncCmd = &cobra.Command{
 	Use:   "resync",
 	Short: "Resync historical data",
-	Long:  `Use this command to fill in sections of missing data in the super node`,
+	Long:  `Use this command to fill in sections of missing data in the ipfs-blockchain-watcher database`,
 	Run: func(cmd *cobra.Command, args []string) {
 		subCommand = cmd.CalledAs()
 		logWithCommand = *log.WithField("SubCommand", subCommand)
@@ -40,8 +40,8 @@ var resyncCmd = &cobra.Command{
 func rsyncCmdCommand() {
 	logWithCommand.Infof("running vdb version: %s", v.VersionWithMeta)
-	logWithCommand.Debug("loading super node configuration variables")
-	rConfig, err := resync.NewReSyncConfig()
+	logWithCommand.Debug("loading resync configuration variables")
+	rConfig, err := resync.NewConfig()
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}

View File

@@ -20,42 +20,31 @@ import (
 	"fmt"
 	"os"
 	"strings"
-	"time"
 
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
-
-	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/config"
 )
 
 var (
 	cfgFile        string
-	databaseConfig config.Database
-	ipc            string
 	subCommand     string
 	logWithCommand log.Entry
 )
 
-const (
-	pollingInterval  = 7 * time.Second
-	validationWindow = 15
-)
-
 var rootCmd = &cobra.Command{
-	Use:              "vulcanizedb",
+	Use:              "ipfs-blockchain-watcher",
 	PersistentPreRun: initFuncs,
 }
 
 func Execute() {
-	log.Info("----- Starting vDB -----")
+	log.Info("----- Starting IPFS blockchain watcher -----")
 	if err := rootCmd.Execute(); err != nil {
 		log.Fatal(err)
 	}
 }
 
 func initFuncs(cmd *cobra.Command, args []string) {
-	setViperConfigs()
 	logfile := viper.GetString("logfile")
 	if logfile != "" {
 		file, err := os.OpenFile(logfile,
@@ -75,18 +64,6 @@ func initFuncs(cmd *cobra.Command, args []string) {
 	}
 }
 
-func setViperConfigs() {
-	ipc = viper.GetString("client.ipcpath")
-	databaseConfig = config.Database{
-		Name:     viper.GetString("database.name"),
-		Hostname: viper.GetString("database.hostname"),
-		Port:     viper.GetInt("database.port"),
-		User:     viper.GetString("database.user"),
-		Password: viper.GetString("database.password"),
-	}
-	viper.Set("database.config", databaseConfig)
-}
-
 func logLevel() error {
 	lvl, err := log.ParseLevel(viper.GetString("log.level"))
 	if err != nil {
@@ -102,7 +79,6 @@ func logLevel() error {
 func init() {
 	cobra.OnInitialize(initConfig)
-	// When searching for env variables, replace dots in config keys with underscores
 	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
 	viper.AutomaticEnv()
@@ -122,7 +98,6 @@ func init() {
 	viper.BindPFlag("database.hostname", rootCmd.PersistentFlags().Lookup("database-hostname"))
 	viper.BindPFlag("database.user", rootCmd.PersistentFlags().Lookup("database-user"))
 	viper.BindPFlag("database.password", rootCmd.PersistentFlags().Lookup("database-password"))
-	viper.BindPFlag("client.ipcPath", rootCmd.PersistentFlags().Lookup("client-ipcPath"))
 	viper.BindPFlag("log.level", rootCmd.PersistentFlags().Lookup("log-level"))
 }

View File

@@ -29,18 +29,16 @@ import (
 	"github.com/spf13/viper"
 
 	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/client"
-	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/core"
 	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
-	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/streamer"
-	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/watcher"
+	w "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch"
 )
 
 // streamEthSubscriptionCmd represents the streamEthSubscription command
 var streamEthSubscriptionCmd = &cobra.Command{
 	Use:   "streamEthSubscription",
-	Short: "This command is used to subscribe to the super node eth stream with the provided filters",
-	Long: `This command is for demo and testing purposes and is used to subscribe to the super node with the provided subscription configuration parameters.
-It does not do anything with the data streamed from the super node other than unpack it and print it out for demonstration purposes.`,
+	Short: "This command is used to subscribe to the eth ipfs watcher data stream with the provided filters",
+	Long: `This command is for demo and testing purposes and is used to subscribe to the watcher with the provided subscription configuration parameters.
+It does not do anything with the data streamed from the watcher other than unpack it and print it out for demonstration purposes.`,
 	Run: func(cmd *cobra.Command, args []string) {
 		subCommand = cmd.CalledAs()
 		logWithCommand = *log.WithField("SubCommand", subCommand)
@@ -60,18 +58,21 @@ func streamEthSubscription() {
 	}
 	// Create a new rpc client and a subscription streamer with that client
-	rpcClient := getRPCClient()
-	str := streamer.NewSuperNodeStreamer(rpcClient)
+	rpcClient, err := getRPCClient()
+	if err != nil {
+		logWithCommand.Fatal(err)
+	}
+	subClient := client.NewClient(rpcClient)
 
 	// Buffered channel for reading subscription payloads
-	payloadChan := make(chan watcher.SubscriptionPayload, 20000)
+	payloadChan := make(chan w.SubscriptionPayload, 20000)
 
-	// Subscribe to the super node service with the given config/filter parameters
+	// Subscribe to the watcher service with the given config/filter parameters
 	rlpParams, err := rlp.EncodeToBytes(ethSubConfig)
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
-	sub, err := str.Stream(payloadChan, rlpParams)
+	sub, err := subClient.Stream(payloadChan, rlpParams)
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
@@ -167,14 +168,10 @@ func streamEthSubscription() {
 	}
 }
 
-func getRPCClient() core.RPCClient {
-	vulcPath := viper.GetString("superNode.ethSubscription.wsPath")
+func getRPCClient() (*rpc.Client, error) {
+	vulcPath := viper.GetString("watcher.ethSubscription.wsPath")
 	if vulcPath == "" {
 		vulcPath = "ws://127.0.0.1:8080" // default to and try the default ws url if no path is provided
 	}
-	rawRPCClient, err := rpc.Dial(vulcPath)
-	if err != nil {
-		logWithCommand.Fatal(err)
-	}
-	return client.NewRPCClient(rawRPCClient, vulcPath)
+	return rpc.Dial(vulcPath)
 }
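The reworked demo client above can be driven end to end roughly as follows; a sketch assuming the watcher is serving WS on the default `ws://127.0.0.1:8080` and that the subscription returned by `Stream` follows go-ethereum's `ClientSubscription` (`Unsubscribe`) interface:

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/rpc"

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/client"
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
	w "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch"
)

func main() {
	// default ws endpoint, mirroring getRPCClient above
	rpcClient, err := rpc.Dial("ws://127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}
	subClient := client.NewClient(rpcClient)

	// chain-specific filter params, RLP-encoded as the Stream API expects
	ethSubConfig, err := eth.NewEthSubscriptionConfig()
	if err != nil {
		log.Fatal(err)
	}
	rlpParams, err := rlp.EncodeToBytes(ethSubConfig)
	if err != nil {
		log.Fatal(err)
	}

	payloadChan := make(chan w.SubscriptionPayload, 20000)
	sub, err := subClient.Stream(payloadChan, rlpParams)
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()

	// unpack-and-print loop, as in the demo command
	for payload := range payloadChan {
		log.Printf("received payload: %+v", payload)
	}
}
```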

View File

@@ -18,22 +18,23 @@ package cmd
 import (
 	"os"
 	"os/signal"
-	"sync"
+	s "sync"
 
 	"github.com/ethereum/go-ethereum/rpc"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 
+	h "github.com/vulcanize/ipfs-blockchain-watcher/pkg/historical"
 	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs"
 	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
-	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/watcher"
+	w "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch"
 	v "github.com/vulcanize/ipfs-blockchain-watcher/version"
 )
 
-// superNodeCmd represents the superNode command
-var superNodeCmd = &cobra.Command{
-	Use:   "superNode",
+// watchCmd represents the watch command
+var watchCmd = &cobra.Command{
+	Use:   "watch",
 	Short: "sync chain data into PG-IPFS",
 	Long: `This command configures a VulcanizeDB ipfs-blockchain-watcher.
@@ -49,146 +50,155 @@ and fill in gaps in the data
 	Run: func(cmd *cobra.Command, args []string) {
 		subCommand = cmd.CalledAs()
 		logWithCommand = *log.WithField("SubCommand", subCommand)
-		superNode()
+		watch()
 	},
 }
 
-func superNode() {
+func watch() {
 	logWithCommand.Infof("running vdb version: %s", v.VersionWithMeta)
-	logWithCommand.Debug("loading super node configuration variables")
-	superNodeConfig, err := watcher.NewSuperNodeConfig()
+	var forwardPayloadChan chan shared.ConvertedData
+	wg := new(s.WaitGroup)
+	logWithCommand.Debug("loading watcher configuration variables")
+	watcherConfig, err := w.NewConfig()
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
-	logWithCommand.Infof("super node config: %+v", superNodeConfig)
-	if superNodeConfig.IPFSMode == shared.LocalInterface {
+	logWithCommand.Infof("watcher config: %+v", watcherConfig)
+	if watcherConfig.IPFSMode == shared.LocalInterface {
 		if err := ipfs.InitIPFSPlugins(); err != nil {
 			logWithCommand.Fatal(err)
 		}
 	}
-	wg := &sync.WaitGroup{}
-	logWithCommand.Debug("initializing new super node service")
-	superNode, err := watcher.NewSuperNode(superNodeConfig)
+	logWithCommand.Debug("initializing new watcher service")
+	watcher, err := w.NewWatcher(watcherConfig)
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
-	var forwardPayloadChan chan shared.ConvertedData
-	if superNodeConfig.Serve {
-		logWithCommand.Info("starting up super node servers")
-		forwardPayloadChan = make(chan shared.ConvertedData, watcher.PayloadChanBufferSize)
-		superNode.Serve(wg, forwardPayloadChan)
-		if err := startServers(superNode, superNodeConfig); err != nil {
+	if watcherConfig.Serve {
+		logWithCommand.Info("starting up watcher servers")
+		forwardPayloadChan = make(chan shared.ConvertedData, w.PayloadChanBufferSize)
+		watcher.Serve(wg, forwardPayloadChan)
+		if err := startServers(watcher, watcherConfig); err != nil {
			logWithCommand.Fatal(err)
 		}
 	}
-	if superNodeConfig.Sync {
-		logWithCommand.Info("starting up super node sync process")
-		if err := superNode.Sync(wg, forwardPayloadChan); err != nil {
+
+	if watcherConfig.Sync {
+		logWithCommand.Info("starting up watcher sync process")
+		if err := watcher.Sync(wg, forwardPayloadChan); err != nil {
 			logWithCommand.Fatal(err)
 		}
 	}
-	var backFiller watcher.BackFillInterface
-	if superNodeConfig.BackFill {
-		logWithCommand.Debug("initializing new super node backfill service")
-		backFiller, err = watcher.NewBackFillService(superNodeConfig, forwardPayloadChan)
+
+	var backFiller h.BackFillInterface
+	if watcherConfig.Historical {
+		historicalConfig, err := h.NewConfig()
 		if err != nil {
 			logWithCommand.Fatal(err)
 		}
-		logWithCommand.Info("starting up super node backfill process")
+		logWithCommand.Debug("initializing new historical backfill service")
+		backFiller, err = h.NewBackFillService(historicalConfig, forwardPayloadChan)
+		if err != nil {
+			logWithCommand.Fatal(err)
+		}
+		logWithCommand.Info("starting up watcher backfill process")
 		backFiller.BackFill(wg)
 	}
 	shutdown := make(chan os.Signal)
 	signal.Notify(shutdown, os.Interrupt)
 	<-shutdown
-	if superNodeConfig.BackFill {
+	if watcherConfig.Historical {
 		backFiller.Stop()
 	}
-	superNode.Stop()
+	watcher.Stop()
 	wg.Wait()
 }
 
-func startServers(superNode watcher.SuperNode, settings *watcher.Config) error {
+func startServers(watcher w.Watcher, settings *w.Config) error {
 	logWithCommand.Debug("starting up IPC server")
-	_, _, err := rpc.StartIPCEndpoint(settings.IPCEndpoint, superNode.APIs())
+	_, _, err := rpc.StartIPCEndpoint(settings.IPCEndpoint, watcher.APIs())
 	if err != nil {
 		return err
 	}
 	logWithCommand.Debug("starting up WS server")
-	_, _, err = rpc.StartWSEndpoint(settings.WSEndpoint, superNode.APIs(), []string{"vdb"}, nil, true)
+	_, _, err = rpc.StartWSEndpoint(settings.WSEndpoint, watcher.APIs(), []string{"vdb"}, nil, true)
 	if err != nil {
 		return err
 	}
 	logWithCommand.Debug("starting up HTTP server")
-	_, _, err = rpc.StartHTTPEndpoint(settings.HTTPEndpoint, superNode.APIs(), []string{settings.Chain.API()}, nil, nil, rpc.HTTPTimeouts{})
+	_, _, err = rpc.StartHTTPEndpoint(settings.HTTPEndpoint, watcher.APIs(), []string{settings.Chain.API()}, nil, nil, rpc.HTTPTimeouts{})
 	return err
 }
 
 func init() {
-	rootCmd.AddCommand(superNodeCmd)
+	rootCmd.AddCommand(watchCmd)
 
 	// flags for all config variables
-	superNodeCmd.PersistentFlags().String("ipfs-path", "", "ipfs repository path")
-	superNodeCmd.PersistentFlags().String("supernode-chain", "", "which chain to support, options are currently Ethereum or Bitcoin.")
-	superNodeCmd.PersistentFlags().Bool("supernode-server", false, "turn vdb server on or off")
-	superNodeCmd.PersistentFlags().String("supernode-ws-path", "", "vdb server ws path")
-	superNodeCmd.PersistentFlags().String("supernode-http-path", "", "vdb server http path")
-	superNodeCmd.PersistentFlags().String("supernode-ipc-path", "", "vdb server ipc path")
-	superNodeCmd.PersistentFlags().Bool("supernode-sync", false, "turn vdb sync on or off")
-	superNodeCmd.PersistentFlags().Int("supernode-workers", 0, "how many worker goroutines to publish and index data")
-	superNodeCmd.PersistentFlags().Bool("supernode-back-fill", false, "turn vdb backfill on or off")
-	superNodeCmd.PersistentFlags().Int("supernode-frequency", 0, "how often (in seconds) the backfill process checks for gaps")
-	superNodeCmd.PersistentFlags().Int("supernode-batch-size", 0, "data fetching batch size")
-	superNodeCmd.PersistentFlags().Int("supernode-batch-number", 0, "how many goroutines to fetch data concurrently")
-	superNodeCmd.PersistentFlags().Int("supernode-validation-level", 0, "backfill will resync any data below this level")
-	superNodeCmd.PersistentFlags().Int("supernode-timeout", 0, "timeout used for backfill http requests")
-	superNodeCmd.PersistentFlags().String("btc-ws-path", "", "ws url for bitcoin node")
-	superNodeCmd.PersistentFlags().String("btc-http-path", "", "http url for bitcoin node")
-	superNodeCmd.PersistentFlags().String("btc-password", "", "password for btc node")
-	superNodeCmd.PersistentFlags().String("btc-username", "", "username for btc node")
-	superNodeCmd.PersistentFlags().String("btc-node-id", "", "btc node id")
-	superNodeCmd.PersistentFlags().String("btc-client-name", "", "btc client name")
-	superNodeCmd.PersistentFlags().String("btc-genesis-block", "", "btc genesis block hash")
-	superNodeCmd.PersistentFlags().String("btc-network-id", "", "btc network id")
-	superNodeCmd.PersistentFlags().String("eth-ws-path", "", "ws url for ethereum node")
-	superNodeCmd.PersistentFlags().String("eth-http-path", "", "http url for ethereum node")
-	superNodeCmd.PersistentFlags().String("eth-node-id", "", "eth node id")
-	superNodeCmd.PersistentFlags().String("eth-client-name", "", "eth client name")
-	superNodeCmd.PersistentFlags().String("eth-genesis-block", "", "eth genesis block hash")
-	superNodeCmd.PersistentFlags().String("eth-network-id", "", "eth network id")
+	watchCmd.PersistentFlags().String("ipfs-path", "", "ipfs repository path")
+	watchCmd.PersistentFlags().String("watcher-chain", "", "which chain to support, options are currently Ethereum or Bitcoin.")
+	watchCmd.PersistentFlags().Bool("watcher-server", false, "turn vdb server on or off")
+	watchCmd.PersistentFlags().String("watcher-ws-path", "", "vdb server ws path")
+	watchCmd.PersistentFlags().String("watcher-http-path", "", "vdb server http path")
+	watchCmd.PersistentFlags().String("watcher-ipc-path", "", "vdb server ipc path")
+	watchCmd.PersistentFlags().Bool("watcher-sync", false, "turn vdb sync on or off")
+	watchCmd.PersistentFlags().Int("watcher-workers", 0, "how many worker goroutines to publish and index data")
+	watchCmd.PersistentFlags().Bool("watcher-back-fill", false, "turn vdb backfill on or off")
+	watchCmd.PersistentFlags().Int("watcher-frequency", 0, "how often (in seconds) the backfill process checks for gaps")
+	watchCmd.PersistentFlags().Int("watcher-batch-size", 0, "data fetching batch size")
+	watchCmd.PersistentFlags().Int("watcher-batch-number", 0, "how many goroutines to fetch data concurrently")
+	watchCmd.PersistentFlags().Int("watcher-validation-level", 0, "backfill will resync any data below this level")
+	watchCmd.PersistentFlags().Int("watcher-timeout", 0, "timeout used for backfill http requests")
+	watchCmd.PersistentFlags().String("btc-ws-path", "", "ws url for bitcoin node")
+	watchCmd.PersistentFlags().String("btc-http-path", "", "http url for bitcoin node")
+	watchCmd.PersistentFlags().String("btc-password", "", "password for btc node")
+	watchCmd.PersistentFlags().String("btc-username", "", "username for btc node")
+	watchCmd.PersistentFlags().String("btc-node-id", "", "btc node id")
+	watchCmd.PersistentFlags().String("btc-client-name", "", "btc client name")
+	watchCmd.PersistentFlags().String("btc-genesis-block", "", "btc genesis block hash")
+	watchCmd.PersistentFlags().String("btc-network-id", "", "btc network id")
+	watchCmd.PersistentFlags().String("eth-ws-path", "", "ws url for ethereum node")
+	watchCmd.PersistentFlags().String("eth-http-path", "", "http url for ethereum node")
+	watchCmd.PersistentFlags().String("eth-node-id", "", "eth node id")
+	watchCmd.PersistentFlags().String("eth-client-name", "", "eth client name")
+	watchCmd.PersistentFlags().String("eth-genesis-block", "", "eth genesis block hash")
+	watchCmd.PersistentFlags().String("eth-network-id", "", "eth network id")
 
 	// and their bindings
-	viper.BindPFlag("ipfs.path", superNodeCmd.PersistentFlags().Lookup("ipfs-path"))
-	viper.BindPFlag("superNode.chain", superNodeCmd.PersistentFlags().Lookup("supernode-chain"))
-	viper.BindPFlag("superNode.server", superNodeCmd.PersistentFlags().Lookup("supernode-server"))
-	viper.BindPFlag("superNode.wsPath", superNodeCmd.PersistentFlags().Lookup("supernode-ws-path"))
-	viper.BindPFlag("superNode.httpPath", superNodeCmd.PersistentFlags().Lookup("supernode-http-path"))
-	viper.BindPFlag("superNode.ipcPath", superNodeCmd.PersistentFlags().Lookup("supernode-ipc-path"))
-	viper.BindPFlag("superNode.sync", superNodeCmd.PersistentFlags().Lookup("supernode-sync"))
-	viper.BindPFlag("superNode.workers", superNodeCmd.PersistentFlags().Lookup("supernode-workers"))
-	viper.BindPFlag("superNode.backFill", superNodeCmd.PersistentFlags().Lookup("supernode-back-fill"))
-	viper.BindPFlag("superNode.frequency", superNodeCmd.PersistentFlags().Lookup("supernode-frequency"))
-	viper.BindPFlag("superNode.batchSize", superNodeCmd.PersistentFlags().Lookup("supernode-batch-size"))
-	viper.BindPFlag("superNode.batchNumber", superNodeCmd.PersistentFlags().Lookup("supernode-batch-number"))
-	viper.BindPFlag("superNode.validationLevel", superNodeCmd.PersistentFlags().Lookup("supernode-validation-level"))
-	viper.BindPFlag("superNode.timeout", superNodeCmd.PersistentFlags().Lookup("supernode-timeout"))
-	viper.BindPFlag("bitcoin.wsPath", superNodeCmd.PersistentFlags().Lookup("btc-ws-path"))
-	viper.BindPFlag("bitcoin.httpPath", superNodeCmd.PersistentFlags().Lookup("btc-http-path"))
-	viper.BindPFlag("bitcoin.pass", superNodeCmd.PersistentFlags().Lookup("btc-password"))
-	viper.BindPFlag("bitcoin.user", superNodeCmd.PersistentFlags().Lookup("btc-username"))
-	viper.BindPFlag("bitcoin.nodeID", superNodeCmd.PersistentFlags().Lookup("btc-node-id"))
-	viper.BindPFlag("bitcoin.clientName", superNodeCmd.PersistentFlags().Lookup("btc-client-name"))
-	viper.BindPFlag("bitcoin.genesisBlock", superNodeCmd.PersistentFlags().Lookup("btc-genesis-block"))
-	viper.BindPFlag("bitcoin.networkID", superNodeCmd.PersistentFlags().Lookup("btc-network-id"))
-	viper.BindPFlag("ethereum.wsPath", superNodeCmd.PersistentFlags().Lookup("eth-ws-path"))
-	viper.BindPFlag("ethereum.httpPath", superNodeCmd.PersistentFlags().Lookup("eth-http-path"))
-	viper.BindPFlag("ethereum.nodeID", superNodeCmd.PersistentFlags().Lookup("eth-node-id"))
-	viper.BindPFlag("ethereum.clientName", superNodeCmd.PersistentFlags().Lookup("eth-client-name"))
-	viper.BindPFlag("ethereum.genesisBlock", superNodeCmd.PersistentFlags().Lookup("eth-genesis-block"))
-	viper.BindPFlag("ethereum.networkID", superNodeCmd.PersistentFlags().Lookup("eth-network-id"))
+	viper.BindPFlag("ipfs.path", watchCmd.PersistentFlags().Lookup("ipfs-path"))
+	viper.BindPFlag("watcher.chain", watchCmd.PersistentFlags().Lookup("watcher-chain"))
+	viper.BindPFlag("watcher.server", watchCmd.PersistentFlags().Lookup("watcher-server"))
+	viper.BindPFlag("watcher.wsPath", watchCmd.PersistentFlags().Lookup("watcher-ws-path"))
+	viper.BindPFlag("watcher.httpPath", watchCmd.PersistentFlags().Lookup("watcher-http-path"))
+	viper.BindPFlag("watcher.ipcPath", watchCmd.PersistentFlags().Lookup("watcher-ipc-path"))
+	viper.BindPFlag("watcher.sync", watchCmd.PersistentFlags().Lookup("watcher-sync"))
+	viper.BindPFlag("watcher.workers", watchCmd.PersistentFlags().Lookup("watcher-workers"))
+	viper.BindPFlag("watcher.backFill", watchCmd.PersistentFlags().Lookup("watcher-back-fill"))
+	viper.BindPFlag("watcher.frequency", watchCmd.PersistentFlags().Lookup("watcher-frequency"))
+	viper.BindPFlag("watcher.batchSize", watchCmd.PersistentFlags().Lookup("watcher-batch-size"))
+	viper.BindPFlag("watcher.batchNumber", watchCmd.PersistentFlags().Lookup("watcher-batch-number"))
+	viper.BindPFlag("watcher.validationLevel", watchCmd.PersistentFlags().Lookup("watcher-validation-level"))
+	viper.BindPFlag("watcher.timeout", watchCmd.PersistentFlags().Lookup("watcher-timeout"))
+	viper.BindPFlag("bitcoin.wsPath", watchCmd.PersistentFlags().Lookup("btc-ws-path"))
+	viper.BindPFlag("bitcoin.httpPath", watchCmd.PersistentFlags().Lookup("btc-http-path"))
+	viper.BindPFlag("bitcoin.pass", watchCmd.PersistentFlags().Lookup("btc-password"))
+	viper.BindPFlag("bitcoin.user", watchCmd.PersistentFlags().Lookup("btc-username"))
+	viper.BindPFlag("bitcoin.nodeID", watchCmd.PersistentFlags().Lookup("btc-node-id"))
+	viper.BindPFlag("bitcoin.clientName", watchCmd.PersistentFlags().Lookup("btc-client-name"))
+	viper.BindPFlag("bitcoin.genesisBlock", watchCmd.PersistentFlags().Lookup("btc-genesis-block"))
+	viper.BindPFlag("bitcoin.networkID", watchCmd.PersistentFlags().Lookup("btc-network-id"))
+	viper.BindPFlag("ethereum.wsPath", watchCmd.PersistentFlags().Lookup("eth-ws-path"))
+	viper.BindPFlag("ethereum.httpPath", watchCmd.PersistentFlags().Lookup("eth-http-path"))
+	viper.BindPFlag("ethereum.nodeID", watchCmd.PersistentFlags().Lookup("eth-node-id"))
+	viper.BindPFlag("ethereum.clientName", watchCmd.PersistentFlags().Lookup("eth-client-name"))
+	viper.BindPFlag("ethereum.genesisBlock", watchCmd.PersistentFlags().Lookup("eth-genesis-block"))
+	viper.BindPFlag("ethereum.networkID", watchCmd.PersistentFlags().Lookup("eth-network-id"))
 }

View File

@@ -5,9 +5,17 @@ CREATE TABLE eth.header_cids (
   block_hash VARCHAR(66) NOT NULL,
   parent_hash VARCHAR(66) NOT NULL,
   cid TEXT NOT NULL,
+  mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
   td NUMERIC NOT NULL,
   node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
   reward NUMERIC NOT NULL,
+  state_root VARCHAR(66) NOT NULL,
+  tx_root VARCHAR(66) NOT NULL,
+  receipt_root VARCHAR(66) NOT NULL,
+  uncle_root VARCHAR(66) NOT NULL,
+  bloom BYTEA NOT NULL,
+  timestamp NUMERIC NOT NULL,
+  times_validated INTEGER NOT NULL DEFAULT 1,
   UNIQUE (block_number, block_hash)
 );
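Because `mh_key` is declared `DEFERRABLE INITIALLY DEFERRED`, an indexer may write the `eth.header_cids` row and the `public.blocks` row it references in either order inside a single transaction; the constraint is only checked at commit. A minimal sketch of that write path (the helper and its placeholder values are hypothetical, not the repository's own indexing code; a `public.nodes` row with id 1 and a `key`/`data` blockstore layout are assumed):

```go
package indexer

import "database/sql"

// indexHeader writes the index row and the raw IPLD block in one transaction.
func indexHeader(db *sql.DB, num int64, hash, parent, cid, mhKey string, headerRLP []byte) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	// The index row can precede the block it points at: the mh_key FK is
	// DEFERRABLE INITIALLY DEFERRED, so it is only checked at COMMIT.
	if _, err := tx.Exec(`INSERT INTO eth.header_cids
			(block_number, block_hash, parent_hash, cid, mh_key, td, node_id, reward,
			 state_root, tx_root, receipt_root, uncle_root, bloom, timestamp)
			VALUES ($1, $2, $3, $4, $5, 0, 1, 0, '', '', '', '', ''::bytea, 0)`,
		num, hash, parent, cid, mhKey); err != nil {
		tx.Rollback()
		return err
	}
	if _, err := tx.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`,
		mhKey, headerRLP); err != nil {
		tx.Rollback()
		return err
	}
	return tx.Commit() // the deferred mh_key constraint is validated here
}
```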

View File

@@ -5,6 +5,7 @@ CREATE TABLE eth.uncle_cids (
   block_hash VARCHAR(66) NOT NULL,
   parent_hash VARCHAR(66) NOT NULL,
   cid TEXT NOT NULL,
+  mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
   reward NUMERIC NOT NULL,
   UNIQUE (header_id, block_hash)
 );

View File

@@ -5,6 +5,7 @@ CREATE TABLE eth.transaction_cids (
   tx_hash VARCHAR(66) NOT NULL,
   index INTEGER NOT NULL,
   cid TEXT NOT NULL,
+  mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
   dst VARCHAR(66) NOT NULL,
   src VARCHAR(66) NOT NULL,
   UNIQUE (header_id, tx_hash)

View File

@@ -3,11 +3,15 @@ CREATE TABLE eth.receipt_cids (
   id SERIAL PRIMARY KEY,
   tx_id INTEGER NOT NULL REFERENCES eth.transaction_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
   cid TEXT NOT NULL,
+  mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
   contract VARCHAR(66),
+  contract_hash VARCHAR(66),
   topic0s VARCHAR(66)[],
   topic1s VARCHAR(66)[],
   topic2s VARCHAR(66)[],
-  topic3s VARCHAR(66)[]
+  topic3s VARCHAR(66)[],
+  log_contracts VARCHAR(66)[],
+  UNIQUE (tx_id)
 );
 
 -- +goose Down

View File

@@ -1,12 +0,0 @@
--- +goose Up
-CREATE TABLE eth.state_cids (
-  id SERIAL PRIMARY KEY,
-  header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
-  state_key VARCHAR(66) NOT NULL,
-  leaf BOOLEAN NOT NULL,
-  cid TEXT NOT NULL,
-  UNIQUE (header_id, state_key)
-);
-
--- +goose Down
-DROP TABLE eth.state_cids;

View File

@@ -0,0 +1,15 @@
+-- +goose Up
+CREATE TABLE eth.state_cids (
+  id SERIAL PRIMARY KEY,
+  header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
+  state_leaf_key VARCHAR(66),
+  cid TEXT NOT NULL,
+  mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
+  state_path BYTEA,
+  node_type INTEGER,
+  diff BOOLEAN NOT NULL DEFAULT FALSE,
+  UNIQUE (header_id, state_path, diff)
+);
+
+-- +goose Down
+DROP TABLE eth.state_cids;
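The uniqueness key moving from `(header_id, state_key)` to `(header_id, state_path, diff)` means a state node is now identified by its position in the trie for a given header, with `diff` distinguishing diffed nodes from regular trie nodes, so writes can be made idempotent per path. A sketch of such an upsert (hypothetical helper, not the repository's own indexing code; note `ON CONFLICT` will not match rows where `state_path` is NULL):

```go
package indexer

import "database/sql"

// upsertStateCID makes the state-node write idempotent per (header, path, diff).
func upsertStateCID(tx *sql.Tx, headerID int64, leafKey, cid, mhKey string,
	path []byte, nodeType int, diff bool) error {
	_, err := tx.Exec(`INSERT INTO eth.state_cids
			(header_id, state_leaf_key, cid, mh_key, state_path, node_type, diff)
			VALUES ($1, $2, $3, $4, $5, $6, $7)
			ON CONFLICT (header_id, state_path, diff) DO UPDATE SET
				state_leaf_key = EXCLUDED.state_leaf_key,
				cid            = EXCLUDED.cid,
				mh_key         = EXCLUDED.mh_key,
				node_type      = EXCLUDED.node_type`,
		headerID, leafKey, cid, mhKey, path, nodeType, diff)
	return err
}
```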

View File

@@ -1,12 +0,0 @@
--- +goose Up
-CREATE TABLE eth.storage_cids (
-  id SERIAL PRIMARY KEY,
-  state_id INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
-  storage_key VARCHAR(66) NOT NULL,
-  leaf BOOLEAN NOT NULL,
-  cid TEXT NOT NULL,
-  UNIQUE (state_id, storage_key)
-);
-
--- +goose Down
-DROP TABLE eth.storage_cids;

View File

@@ -0,0 +1,15 @@
+-- +goose Up
+CREATE TABLE eth.storage_cids (
+  id SERIAL PRIMARY KEY,
+  state_id INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
+  storage_leaf_key VARCHAR(66),
+  cid TEXT NOT NULL,
+  mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
+  storage_path BYTEA,
+  node_type INTEGER NOT NULL,
+  diff BOOLEAN NOT NULL DEFAULT FALSE,
+  UNIQUE (state_id, storage_path, diff)
+);
+
+-- +goose Down
+DROP TABLE eth.storage_cids;

View File

@@ -1,15 +0,0 @@
--- +goose Up
-CREATE TABLE btc.header_cids (
-  id SERIAL PRIMARY KEY,
-  block_number BIGINT NOT NULL,
-  block_hash VARCHAR(66) NOT NULL,
-  parent_hash VARCHAR(66) NOT NULL,
-  cid TEXT NOT NULL,
-  timestamp NUMERIC NOT NULL,
-  bits BIGINT NOT NULL,
-  node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
-  UNIQUE (block_number, block_hash)
-);
-
--- +goose Down
-DROP TABLE btc.header_cids;

View File

@@ -0,0 +1,17 @@
+-- +goose Up
+CREATE TABLE btc.header_cids (
+  id SERIAL PRIMARY KEY,
+  block_number BIGINT NOT NULL,
+  block_hash VARCHAR(66) NOT NULL,
+  parent_hash VARCHAR(66) NOT NULL,
+  cid TEXT NOT NULL,
+  mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
+  timestamp NUMERIC NOT NULL,
+  bits BIGINT NOT NULL,
+  node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
+  times_validated INTEGER NOT NULL DEFAULT 1,
+  UNIQUE (block_number, block_hash)
+);
+
+-- +goose Down
+DROP TABLE btc.header_cids;

View File

@@ -5,6 +5,7 @@ CREATE TABLE btc.transaction_cids (
   index INTEGER NOT NULL,
   tx_hash VARCHAR(66) NOT NULL UNIQUE,
   cid TEXT NOT NULL,
+  mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
   segwit BOOL NOT NULL,
   witness_hash VARCHAR(66)
 );

View File

@@ -1,9 +0,0 @@
--- +goose Up
-CREATE TABLE eth.queue_data (
-  id SERIAL PRIMARY KEY,
-  data BYTEA NOT NULL,
-  height BIGINT UNIQUE NOT NULL
-);
-
--- +goose Down
-DROP TABLE eth.queue_data;

View File

@@ -1,9 +0,0 @@
--- +goose Up
-CREATE TABLE btc.queue_data (
-  id SERIAL PRIMARY KEY,
-  data BYTEA NOT NULL,
-  height BIGINT UNIQUE NOT NULL
-);
-
--- +goose Down
-DROP TABLE btc.queue_data;

View File

@@ -2,10 +2,8 @@
 COMMENT ON TABLE public.nodes IS E'@name NodeInfo';
 COMMENT ON TABLE btc.header_cids IS E'@name BtcHeaderCids';
 COMMENT ON TABLE btc.transaction_cids IS E'@name BtcTransactionCids';
-COMMENT ON TABLE btc.queue_data IS E'@name BtcQueueData';
 COMMENT ON TABLE eth.transaction_cids IS E'@name EthTransactionCids';
 COMMENT ON TABLE eth.header_cids IS E'@name EthHeaderCids';
-COMMENT ON TABLE eth.queue_data IS E'@name EthQueueData';
 COMMENT ON COLUMN public.nodes.node_id IS E'@name ChainNodeID';
 COMMENT ON COLUMN eth.header_cids.node_id IS E'@name EthNodeID';
 COMMENT ON COLUMN btc.header_cids.node_id IS E'@name BtcNodeID';

View File

@@ -1,37 +0,0 @@
--- +goose Up
-ALTER TABLE eth.state_cids
-  ADD COLUMN state_path BYTEA;
-
-ALTER TABLE eth.state_cids
-  DROP COLUMN leaf;
-
-ALTER TABLE eth.state_cids
-  ADD COLUMN node_type INTEGER;
-
-ALTER TABLE eth.state_cids
-  ALTER COLUMN state_key DROP NOT NULL;
-
-ALTER TABLE eth.state_cids
-  DROP CONSTRAINT state_cids_header_id_state_key_key;
-
-ALTER TABLE eth.state_cids
-  ADD CONSTRAINT state_cids_header_id_state_path_key UNIQUE (header_id, state_path);
-
--- +goose Down
-ALTER TABLE eth.state_cids
-  ADD CONSTRAINT state_cids_header_id_state_key_key UNIQUE (header_id, state_key);
-
-ALTER TABLE eth.state_cids
-  DROP CONSTRAINT state_cids_header_id_state_path_key;
-
-ALTER TABLE eth.state_cids
-  ALTER COLUMN state_key SET NOT NULL;
-
-ALTER TABLE eth.state_cids
-  DROP COLUMN node_type;
-
-ALTER TABLE eth.state_cids
-  ADD COLUMN leaf BOOLEAN NOT NULL;
-
-ALTER TABLE eth.state_cids
-  DROP COLUMN state_path;

View File

@@ -1,37 +0,0 @@
--- +goose Up
-ALTER TABLE eth.storage_cids
-  ADD COLUMN storage_path BYTEA;
-
-ALTER TABLE eth.storage_cids
-  DROP COLUMN leaf;
-
-ALTER TABLE eth.storage_cids
-  ADD COLUMN node_type INTEGER;
-
-ALTER TABLE eth.storage_cids
-  ALTER COLUMN storage_key DROP NOT NULL;
-
-ALTER TABLE eth.storage_cids
-  DROP CONSTRAINT storage_cids_state_id_storage_key_key;
-
-ALTER TABLE eth.storage_cids
-  ADD CONSTRAINT storage_cids_state_id_storage_path_key UNIQUE (state_id, storage_path);
-
--- +goose Down
-ALTER TABLE eth.storage_cids
-  DROP CONSTRAINT storage_cids_state_id_storage_path_key;
-
-ALTER TABLE eth.storage_cids
-  ADD CONSTRAINT storage_cids_state_id_storage_key_key UNIQUE (state_id, storage_key);
-
-ALTER TABLE eth.storage_cids
-  ALTER COLUMN storage_key SET NOT NULL;
-
-ALTER TABLE eth.storage_cids
-  DROP COLUMN node_type;
-
-ALTER TABLE eth.storage_cids
-  ADD COLUMN leaf BOOLEAN NOT NULL;
-
-ALTER TABLE eth.storage_cids
-  DROP COLUMN storage_path;

View File

@@ -1,37 +0,0 @@
--- +goose Up
-ALTER TABLE eth.header_cids
-  ADD COLUMN state_root VARCHAR(66);
-
-ALTER TABLE eth.header_cids
-  ADD COLUMN tx_root VARCHAR(66);
-
-ALTER TABLE eth.header_cids
-  ADD COLUMN receipt_root VARCHAR(66);
-
-ALTER TABLE eth.header_cids
-  ADD COLUMN uncle_root VARCHAR(66);
-
-ALTER TABLE eth.header_cids
-  ADD COLUMN bloom BYTEA;
-
-ALTER TABLE eth.header_cids
-  ADD COLUMN timestamp NUMERIC;
-
--- +goose Down
-ALTER TABLE eth.header_cids
-  DROP COLUMN timestamp;
-
-ALTER TABLE eth.header_cids
-  DROP COLUMN bloom;
-
-ALTER TABLE eth.header_cids
-  DROP COLUMN uncle_root;
-
-ALTER TABLE eth.header_cids
-  DROP COLUMN receipt_root;
-
-ALTER TABLE eth.header_cids
-  DROP COLUMN tx_root;
-
-ALTER TABLE eth.header_cids
-  DROP COLUMN state_root;

View File

@@ -1,13 +0,0 @@
--- +goose Up
-ALTER TABLE eth.state_cids
-  RENAME COLUMN state_key TO state_leaf_key;
-
-ALTER TABLE eth.storage_cids
-  RENAME COLUMN storage_key TO storage_leaf_key;
-
--- +goose Down
-ALTER TABLE eth.storage_cids
-  RENAME COLUMN storage_leaf_key TO storage_key;
-
-ALTER TABLE eth.state_cids
-  RENAME COLUMN state_leaf_key TO state_key;

View File

@@ -1,22 +0,0 @@
--- +goose Up
-ALTER TABLE eth.receipt_cids
-  ADD COLUMN log_contracts VARCHAR(66)[];
-
-ALTER TABLE eth.receipt_cids
-  ADD COLUMN contract_hash VARCHAR(66);
-
-WITH uniques AS (SELECT DISTINCT ON (tx_id) * FROM eth.receipt_cids)
-DELETE FROM eth.receipt_cids WHERE receipt_cids.id NOT IN (SELECT id FROM uniques);
-
-ALTER TABLE eth.receipt_cids
-  ADD CONSTRAINT receipt_cids_tx_id_key UNIQUE (tx_id);
-
--- +goose Down
-ALTER TABLE eth.receipt_cids
-  DROP CONSTRAINT receipt_cids_tx_id_key;
-
-ALTER TABLE eth.receipt_cids
-  DROP COLUMN contract_hash;
-
-ALTER TABLE eth.receipt_cids
-  DROP COLUMN log_contracts;

View File

@@ -1,13 +0,0 @@
--- +goose Up
-ALTER TABLE eth.header_cids
-  ADD COLUMN times_validated INTEGER NOT NULL DEFAULT 1;
-
-ALTER TABLE btc.header_cids
-  ADD COLUMN times_validated INTEGER NOT NULL DEFAULT 1;
-
--- +goose Down
-ALTER TABLE btc.header_cids
-  DROP COLUMN times_validated;
-
-ALTER TABLE eth.header_cids
-  DROP COLUMN times_validated;

View File

@@ -44,6 +44,7 @@ CREATE TABLE btc.header_cids (
     block_hash character varying(66) NOT NULL,
     parent_hash character varying(66) NOT NULL,
     cid text NOT NULL,
+    mh_key text NOT NULL,
     "timestamp" numeric NOT NULL,
     bits bigint NOT NULL,
     node_id integer NOT NULL,
@@ -85,44 +86,6 @@ CREATE SEQUENCE btc.header_cids_id_seq
 ALTER SEQUENCE btc.header_cids_id_seq OWNED BY btc.header_cids.id;
 
---
--- Name: queue_data; Type: TABLE; Schema: btc; Owner: -
---
-
-CREATE TABLE btc.queue_data (
-    id integer NOT NULL,
-    data bytea NOT NULL,
-    height bigint NOT NULL
-);
-
---
--- Name: TABLE queue_data; Type: COMMENT; Schema: btc; Owner: -
---
-
-COMMENT ON TABLE btc.queue_data IS '@name BtcQueueData';
-
---
--- Name: queue_data_id_seq; Type: SEQUENCE; Schema: btc; Owner: -
---
-
-CREATE SEQUENCE btc.queue_data_id_seq
-    AS integer
-    START WITH 1
-    INCREMENT BY 1
-    NO MINVALUE
-    NO MAXVALUE
-    CACHE 1;
-
---
--- Name: queue_data_id_seq; Type: SEQUENCE OWNED BY; Schema: btc; Owner: -
---
-
-ALTER SEQUENCE btc.queue_data_id_seq OWNED BY btc.queue_data.id;
-
 --
 -- Name: transaction_cids; Type: TABLE; Schema: btc; Owner: -
 --
@@ -133,6 +96,7 @@ CREATE TABLE btc.transaction_cids (
     index integer NOT NULL,
     tx_hash character varying(66) NOT NULL,
     cid text NOT NULL,
+    mh_key text NOT NULL,
     segwit boolean NOT NULL,
     witness_hash character varying(66)
 );
@@ -246,15 +210,16 @@ CREATE TABLE eth.header_cids (
     block_hash character varying(66) NOT NULL,
     parent_hash character varying(66) NOT NULL,
     cid text NOT NULL,
+    mh_key text NOT NULL,
     td numeric NOT NULL,
     node_id integer NOT NULL,
     reward numeric NOT NULL,
-    state_root character varying(66),
-    tx_root character varying(66),
-    receipt_root character varying(66),
-    uncle_root character varying(66),
-    bloom bytea,
-    "timestamp" numeric,
+    state_root character varying(66) NOT NULL,
+    tx_root character varying(66) NOT NULL,
+    receipt_root character varying(66) NOT NULL,
+    uncle_root character varying(66) NOT NULL,
+    bloom bytea NOT NULL,
+    "timestamp" numeric NOT NULL,
     times_validated integer DEFAULT 1 NOT NULL
 );
@@ -293,44 +258,6 @@ CREATE SEQUENCE eth.header_cids_id_seq
 ALTER SEQUENCE eth.header_cids_id_seq OWNED BY eth.header_cids.id;
 
---
--- Name: queue_data; Type: TABLE; Schema: eth; Owner: -
---
-
-CREATE TABLE eth.queue_data (
-    id integer NOT NULL,
-    data bytea NOT NULL,
-    height bigint NOT NULL
-);
-
---
--- Name: TABLE queue_data; Type: COMMENT; Schema: eth; Owner: -
---
-
-COMMENT ON TABLE eth.queue_data IS '@name EthQueueData';
-
---
--- Name: queue_data_id_seq; Type: SEQUENCE; Schema: eth; Owner: -
---
-
-CREATE SEQUENCE eth.queue_data_id_seq
-    AS integer
-    START WITH 1
-    INCREMENT BY 1
-    NO MINVALUE
-    NO MAXVALUE
-    CACHE 1;
-
---
--- Name: queue_data_id_seq; Type: SEQUENCE OWNED BY; Schema: eth; Owner: -
---
-
-ALTER SEQUENCE eth.queue_data_id_seq OWNED BY eth.queue_data.id;
-
 --
 -- Name: receipt_cids; Type: TABLE; Schema: eth; Owner: -
 --
@@ -339,13 +266,14 @@ CREATE TABLE eth.receipt_cids (
     id integer NOT NULL,
     tx_id integer NOT NULL,
     cid text NOT NULL,
+    mh_key text NOT NULL,
     contract character varying(66),
+    contract_hash character varying(66),
     topic0s character varying(66)[],
     topic1s character varying(66)[],
     topic2s character varying(66)[],
     topic3s character varying(66)[],
-    log_contracts character varying(66)[],
-    contract_hash character varying(66)
+    log_contracts character varying(66)[]
 );
@@ -412,8 +340,10 @@ CREATE TABLE eth.state_cids (
     header_id integer NOT NULL,
     state_leaf_key character varying(66),
     cid text NOT NULL,
+    mh_key text NOT NULL,
     state_path bytea,
-    node_type integer
+    node_type integer,
+    diff boolean DEFAULT false NOT NULL
 );
@@ -446,8 +376,10 @@ CREATE TABLE eth.storage_cids (
     state_id integer NOT NULL,
     storage_leaf_key character varying(66),
     cid text NOT NULL,
+    mh_key text NOT NULL,
     storage_path bytea,
-    node_type integer
+    node_type integer NOT NULL,
+    diff boolean DEFAULT false NOT NULL
 );
@@ -481,6 +413,7 @@ CREATE TABLE eth.transaction_cids (
     tx_hash character varying(66) NOT NULL,
     index integer NOT NULL,
     cid text NOT NULL,
+    mh_key text NOT NULL,
     dst character varying(66) NOT NULL,
     src character varying(66) NOT NULL
 );
@@ -523,6 +456,7 @@ CREATE TABLE eth.uncle_cids (
     block_hash character varying(66) NOT NULL,
     parent_hash character varying(66) NOT NULL,
     cid text NOT NULL,
+    mh_key text NOT NULL,
     reward numeric NOT NULL
 );
@@ -643,13 +577,6 @@ ALTER SEQUENCE public.nodes_id_seq OWNED BY public.nodes.id;
 ALTER TABLE ONLY btc.header_cids ALTER COLUMN id SET DEFAULT nextval('btc.header_cids_id_seq'::regclass);
 
---
--- Name: queue_data id; Type: DEFAULT; Schema: btc; Owner: -
---
-
-ALTER TABLE ONLY btc.queue_data ALTER COLUMN id SET DEFAULT nextval('btc.queue_data_id_seq'::regclass);
-
 --
 -- Name: transaction_cids id; Type: DEFAULT; Schema: btc; Owner: -
 --
@@ -678,13 +605,6 @@ ALTER TABLE ONLY btc.tx_outputs ALTER COLUMN id SET DEFAULT nextval('btc.tx_outp
 ALTER TABLE ONLY eth.header_cids ALTER COLUMN id SET DEFAULT nextval('eth.header_cids_id_seq'::regclass);
 
---
--- Name: queue_data id; Type: DEFAULT; Schema: eth; Owner: -
---
-
-ALTER TABLE ONLY eth.queue_data ALTER COLUMN id SET DEFAULT nextval('eth.queue_data_id_seq'::regclass);
-
 --
 -- Name: receipt_cids id; Type: DEFAULT; Schema: eth; Owner: -
 --
@@ -757,22 +677,6 @@ ALTER TABLE ONLY btc.header_cids
     ADD CONSTRAINT header_cids_pkey PRIMARY KEY (id);
 
---
--- Name: queue_data queue_data_height_key; Type: CONSTRAINT; Schema: btc; Owner: -
---
-
-ALTER TABLE ONLY btc.queue_data
-    ADD CONSTRAINT queue_data_height_key UNIQUE (height);
-
---
--- Name: queue_data queue_data_pkey; Type: CONSTRAINT; Schema: btc; Owner: -
---
-
-ALTER TABLE ONLY btc.queue_data
-    ADD CONSTRAINT queue_data_pkey PRIMARY KEY (id);
-
 --
 -- Name: transaction_cids transaction_cids_pkey; Type: CONSTRAINT; Schema: btc; Owner: -
 --
@@ -837,22 +741,6 @@ ALTER TABLE ONLY eth.header_cids
     ADD CONSTRAINT header_cids_pkey PRIMARY KEY (id);
 
---
--- Name: queue_data queue_data_height_key; Type: CONSTRAINT; Schema: eth; Owner: -
---
-
-ALTER TABLE ONLY eth.queue_data
-    ADD CONSTRAINT queue_data_height_key UNIQUE (height);
-
---
--- Name: queue_data queue_data_pkey; Type: CONSTRAINT; Schema: eth; Owner: -
---
-
-ALTER TABLE ONLY eth.queue_data
-    ADD CONSTRAINT queue_data_pkey PRIMARY KEY (id);
-
 --
 -- Name: receipt_cids receipt_cids_pkey; Type: CONSTRAINT; Schema: eth; Owner: -
 --
@@ -886,11 +774,11 @@ ALTER TABLE ONLY eth.state_accounts
 --
--- Name: state_cids state_cids_header_id_state_path_key; Type: CONSTRAINT; Schema: eth; Owner: -
+-- Name: state_cids state_cids_header_id_state_path_diff_key; Type: CONSTRAINT; Schema: eth; Owner: -
 --
 
 ALTER TABLE ONLY eth.state_cids
-    ADD CONSTRAINT state_cids_header_id_state_path_key UNIQUE (header_id, state_path);
+    ADD CONSTRAINT state_cids_header_id_state_path_diff_key UNIQUE (header_id, state_path, diff);
 
 --
@@ -910,11 +798,11 @@ ALTER TABLE ONLY eth.storage_cids
 --
--- Name: storage_cids storage_cids_state_id_storage_path_key; Type: CONSTRAINT; Schema: eth; Owner: -
+-- Name: storage_cids storage_cids_state_id_storage_path_diff_key; Type: CONSTRAINT; Schema: eth; Owner: -
 --
 
 ALTER TABLE ONLY eth.storage_cids
-    ADD CONSTRAINT storage_cids_state_id_storage_path_key UNIQUE (state_id, storage_path);
+    ADD CONSTRAINT storage_cids_state_id_storage_path_diff_key UNIQUE (state_id, storage_path, diff);
 
 --
@@ -981,6 +869,14 @@ ALTER TABLE ONLY public.nodes
     ADD CONSTRAINT nodes_pkey PRIMARY KEY (id);
 
+--
+-- Name: header_cids header_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: btc; Owner: -
+--
+
+ALTER TABLE ONLY btc.header_cids
+    ADD CONSTRAINT header_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
+
+
 --
 -- Name: header_cids header_cids_node_id_fkey; Type: FK CONSTRAINT; Schema: btc; Owner: -
 --
@@ -997,6 +893,14 @@ ALTER TABLE ONLY btc.transaction_cids
     ADD CONSTRAINT transaction_cids_header_id_fkey FOREIGN KEY (header_id) REFERENCES btc.header_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
 
+--
+-- Name: transaction_cids transaction_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: btc; Owner: -
+--
+
+ALTER TABLE ONLY btc.transaction_cids
+    ADD CONSTRAINT transaction_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
+
+
 --
 -- Name: tx_inputs tx_inputs_tx_id_fkey; Type: FK CONSTRAINT; Schema: btc; Owner: -
 --
@@ -1013,6 +917,14 @@ ALTER TABLE ONLY btc.tx_outputs
     ADD CONSTRAINT tx_outputs_tx_id_fkey FOREIGN KEY (tx_id) REFERENCES btc.transaction_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
 
+--
+-- Name: header_cids header_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.header_cids
+    ADD CONSTRAINT header_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
+
+
 --
 -- Name: header_cids header_cids_node_id_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
 --
@@ -1021,6 +933,14 @@ ALTER TABLE ONLY eth.header_cids
     ADD CONSTRAINT header_cids_node_id_fkey FOREIGN KEY (node_id) REFERENCES public.nodes(id) ON DELETE CASCADE;
 
+--
+-- Name: receipt_cids receipt_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.receipt_cids
+    ADD CONSTRAINT receipt_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
+
+
 --
 -- Name: receipt_cids receipt_cids_tx_id_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
 --
@@ -1045,6 +965,22 @@ ALTER TABLE ONLY eth.state_cids
     ADD CONSTRAINT state_cids_header_id_fkey FOREIGN KEY (header_id) REFERENCES eth.header_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
 
+--
+-- Name: state_cids state_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.state_cids
+    ADD CONSTRAINT state_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
+
+
+--
+-- Name: storage_cids storage_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.storage_cids
+    ADD CONSTRAINT storage_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
+
+
 --
 -- Name: storage_cids storage_cids_state_id_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
 --
@@ -1061,6 +997,14 @@ ALTER TABLE ONLY eth.transaction_cids
     ADD CONSTRAINT transaction_cids_header_id_fkey FOREIGN KEY (header_id) REFERENCES eth.header_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
 
+--
+-- Name: transaction_cids transaction_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.transaction_cids
+    ADD CONSTRAINT transaction_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
+
+
 --
 -- Name: uncle_cids uncle_cids_header_id_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
 --
@@ -1069,6 +1013,14 @@ ALTER TABLE ONLY eth.uncle_cids
     ADD CONSTRAINT uncle_cids_header_id_fkey FOREIGN KEY (header_id) REFERENCES eth.header_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
 
+--
+-- Name: uncle_cids uncle_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: -
+--
+
+ALTER TABLE ONLY eth.uncle_cids
+    ADD CONSTRAINT uncle_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
+
+
 --
 -- PostgreSQL database dump complete
 --
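One consequence of every `mh_key` foreign key above being `ON DELETE CASCADE`: removing a raw block from `public.blocks` automatically removes each header, uncle, transaction, receipt, state, and storage row that indexes it. A minimal illustration (hypothetical key and connection string):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // Postgres driver
)

func main() {
	// hypothetical DSN, for illustration only
	db, err := sql.Open("postgres", "postgres://vdbm@localhost:5432/vulcanize_public?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Dropping the raw block cascades through every mh_key foreign key,
	// deleting the *_cids rows that reference it.
	res, err := db.Exec(`DELETE FROM public.blocks WHERE key = $1`, "example-mh-key")
	if err != nil {
		log.Fatal(err)
	}
	n, _ := res.RowsAffected()
	log.Printf("deleted %d block row(s); dependent *_cids rows cascaded", n)
}
```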

View File

@ -1,5 +1,5 @@
#!/bin/sh
-# Runs the db migrations and starts the super node services
+# Runs the db migrations and starts the watcher services
 
 # Exit if the variable tests fail
 set -e

View File

@@ -26,9 +26,9 @@ services:
       dockerfile: ./dockerfiles/super_node/Dockerfile
       args:
         USER: "vdbm"
-        CONFIG_FILE: ./environments/superNodeBTC.toml
+        CONFIG_FILE: ./environments/watcherBTC.toml
     environment:
-      VDB_COMMAND: "superNode"
+      VDB_COMMAND: "watcher"
       DATABASE_NAME: "vulcanize_public"
       DATABASE_HOSTNAME: "db"
       DATABASE_PORT: 5432
@@ -49,9 +49,9 @@ services:
       dockerfile: ./dockerfiles/super_node/Dockerfile
       args:
         USER: "vdbm"
-        CONFIG_FILE: ./environments/superNodeETH.toml
+        CONFIG_FILE: ./environments/watcherETH.toml
     environment:
-      VDB_COMMAND: "superNode"
+      VDB_COMMAND: "watcher"
       DATABASE_NAME: "vulcanize_public"
       DATABASE_HOSTNAME: "db"
       DATABASE_PORT: 5432

View File

@ -1,5 +1,5 @@
#!/bin/sh #!/bin/sh
# Runs the db migrations and starts the super node services # Runs the db migrations and starts the watcher services
# Exit if the variable tests fail # Exit if the variable tests fail
set -e set -e
@ -14,7 +14,7 @@ set +x
#test $DATABASE_PASSWORD #test $DATABASE_PASSWORD
#test $IPFS_INIT #test $IPFS_INIT
#test $IPFS_PATH #test $IPFS_PATH
VDB_COMMAND=${VDB_COMMAND:-superNode} VDB_COMMAND=${VDB_COMMAND:-watch}
set +e set +e
# Construct the connection string for postgres # Construct the connection string for postgres

View File

@ -1,5 +1,5 @@
#!/bin/sh #!/bin/sh
# Runs the db migrations and starts the super node services # Runs the db migrations and starts the watcher services
# Exit if the variable tests fail # Exit if the variable tests fail
set -e set -e

View File

@ -22,9 +22,9 @@ All of their data can then be queried with standard [GraphQL](https://graphql.or
### RPC Subscription Interface ### RPC Subscription Interface
A direct, real-time subscription to the data being processed by ipfs-blockchain-watcher can be established over WS or IPC through the [Stream](../pkg/watcher/api.go#L53) RPC method. A direct, real-time subscription to the data being processed by ipfs-blockchain-watcher can be established over WS or IPC through the [Stream](../pkg/watch/api.go#L53) RPC method.
This method is not chain-specific and each chain-type supports it, it is accessed under the "vdb" namespace rather than a chain-specific namespace. An interface for This method is not chain-specific and each chain-type supports it, it is accessed under the "vdb" namespace rather than a chain-specific namespace. An interface for
subscribing to this endpoint is provided [here](../pkg/streamer/super_node_streamer.go). subscribing to this endpoint is provided [here](../pkg/client/client.go).
When subscribing to this endpoint, the subscriber provides a set of RLP-encoded subscription parameters. These parameters will be chain-specific, and are used When subscribing to this endpoint, the subscriber provides a set of RLP-encoded subscription parameters. These parameters will be chain-specific, and are used
by ipfs-blockchain-watcher to filter and return a requested subset of chain data to the subscriber. (e.g. [BTC](../pkg/btc/subscription_config.go), [ETH](../../pkg/eth/subscription_config.go)). by ipfs-blockchain-watcher to filter and return a requested subset of chain data to the subscriber. (e.g. [BTC](../pkg/btc/subscription_config.go), [ETH](../../pkg/eth/subscription_config.go)).
@ -43,12 +43,12 @@ An example of how to subscribe to a real-time Ethereum data feed from ipfs-block
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/client" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/client"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/streamer" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/streamer"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/watcher" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch"
) )
config, _ := eth.NewEthSubscriptionConfig() config, _ := eth.NewEthSubscriptionConfig()
rlpConfig, _ := rlp.EncodeToBytes(config) rlpConfig, _ := rlp.EncodeToBytes(config)
vulcPath := viper.GetString("superNode.ethSubscription.path") vulcPath := viper.GetString("watcher.ethSubscription.path")
rawRPCClient, _ := rpc.Dial(vulcPath) rawRPCClient, _ := rpc.Dial(vulcPath)
rpcClient := client.NewRPCClient(rawRPCClient, vulcPath) rpcClient := client.NewRPCClient(rawRPCClient, vulcPath)
stream := streamer.NewSuperNodeStreamer(rpcClient) stream := streamer.NewSuperNodeStreamer(rpcClient)
@ -67,32 +67,32 @@ An example of how to subscribe to a real-time Ethereum data feed from ipfs-block
The .toml file being used to fill the Ethereum subscription config would look something like this: The .toml file being used to fill the Ethereum subscription config would look something like this:
```toml ```toml
[superNode] [watcher]
[superNode.ethSubscription] [watcher.ethSubscription]
historicalData = false historicalData = false
historicalDataOnly = false historicalDataOnly = false
startingBlock = 0 startingBlock = 0
endingBlock = 0 endingBlock = 0
wsPath = "ws://127.0.0.1:8080" wsPath = "ws://127.0.0.1:8080"
[superNode.ethSubscription.headerFilter] [watcher.ethSubscription.headerFilter]
off = false off = false
uncles = false uncles = false
[superNode.ethSubscription.txFilter] [watcher.ethSubscription.txFilter]
off = false off = false
src = [] src = []
dst = [] dst = []
[superNode.ethSubscription.receiptFilter] [watcher.ethSubscription.receiptFilter]
off = false off = false
contracts = [] contracts = []
topic0s = [] topic0s = []
topic1s = [] topic1s = []
topic2s = [] topic2s = []
topic3s = [] topic3s = []
[superNode.ethSubscription.stateFilter] [watcher.ethSubscription.stateFilter]
off = false off = false
addresses = [] addresses = []
intermediateNodes = false intermediateNodes = false
[superNode.ethSubscription.storageFilter] [watcher.ethSubscription.storageFilter]
off = true off = true
addresses = [] addresses = []
storageKeys = [] storageKeys = []
@ -131,9 +131,9 @@ in `src` and `dst`, respectively.
- Setting `off` to true tells ipfs-blockchain-watcher to not send any receipts to the subscriber - Setting `off` to true tells ipfs-blockchain-watcher to not send any receipts to the subscriber
- `topic0s` is a string array which can be filled with event topics we want to filter for; - `topic0s` is a string array which can be filled with event topics we want to filter for;
if it has any topics, then ipfs-blockchain-watcher will only send receipts that contain logs with that topic0. if it has any topics, then ipfs-blockchain-watcher will only send receipts that contain logs with that topic0.
- `contracts` is a string array which can be filled with contract addresses we want to filter for; if it contains any contract addresses, the super node will - `contracts` is a string array which can be filled with contract addresses we want to filter for; if it contains any contract addresses, the watcher will
only send receipts that correspond to one of those contracts. only send receipts that correspond to one of those contracts.
- `matchTrxs` is a bool; when set to true, any receipts that correspond to filtered-for transactions will be sent by the super node, regardless of whether the receipt satisfies the `topics` or `contracts` filters. - `matchTrxs` is a bool; when set to true, any receipts that correspond to filtered-for transactions will be sent by the watcher, regardless of whether the receipt satisfies the `topics` or `contracts` filters.
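For illustration, the same receipt filter can also be populated programmatically before `eth.NewEthSubscriptionConfig()` reads it; a minimal sketch using viper's in-memory overrides (the topic hash and contract address below are placeholders, not values from this repo):

```go
import "github.com/spf13/viper"

// Hypothetical filter values; substitute real topic0 hashes and contract addresses.
viper.Set("watcher.ethSubscription.receiptFilter.off", false)
viper.Set("watcher.ethSubscription.receiptFilter.topic0s", []string{
	"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", // ERC20 Transfer event signature
})
viper.Set("watcher.ethSubscription.receiptFilter.contracts", []string{
	"0x0000000000000000000000000000000000000000", // placeholder contract address
})
```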
`ethSubscription.stateFilter` has three sub-options: `off`, `addresses`, and `intermediateNodes`. `ethSubscription.stateFilter` has three sub-options: `off`, `addresses`, and `intermediateNodes`.
@ -170,7 +170,7 @@ An example of how to subscribe to a real-time Bitcoin data feed from ipfs-blockc
config, _ := btc.NewBtcSubscriptionConfig() config, _ := btc.NewBtcSubscriptionConfig()
rlpConfig, _ := rlp.EncodeToBytes(config) rlpConfig, _ := rlp.EncodeToBytes(config)
vulcPath := viper.GetString("superNode.btcSubscription.path") vulcPath := viper.GetString("watcher.btcSubscription.path")
rawRPCClient, _ := rpc.Dial(vulcPath) rawRPCClient, _ := rpc.Dial(vulcPath)
rpcClient := client.NewRPCClient(rawRPCClient, vulcPath) rpcClient := client.NewRPCClient(rawRPCClient, vulcPath)
stream := streamer.NewSuperNodeStreamer(rpcClient) stream := streamer.NewSuperNodeStreamer(rpcClient)
@ -189,16 +189,16 @@ An example of how to subscribe to a real-time Bitcoin data feed from ipfs-blockc
The .toml file being used to fill the Bitcoin subscription config would look something like this: The .toml file being used to fill the Bitcoin subscription config would look something like this:
```toml ```toml
[superNode] [watcher]
[superNode.btcSubscription] [watcher.btcSubscription]
historicalData = false historicalData = false
historicalDataOnly = false historicalDataOnly = false
startingBlock = 0 startingBlock = 0
endingBlock = 0 endingBlock = 0
wsPath = "ws://127.0.0.1:8080" wsPath = "ws://127.0.0.1:8080"
[superNode.btcSubscription.headerFilter] [watcher.btcSubscription.headerFilter]
off = false off = false
[superNode.btcSubscription.txFilter] [watcher.btcSubscription.txFilter]
off = false off = false
segwit = false segwit = false
witnessHashes = [] witnessHashes = []
@ -8,17 +8,17 @@
1. [IPFS Considerations](#ipfs-considerations) 1. [IPFS Considerations](#ipfs-considerations)
## Processes ## Processes
ipfs-blockchain-watcher is a [service](../pkg/super_node/service.go#L61) comprised of the following interfaces: ipfs-blockchain-watcher is a [service](../pkg/watch/service.go#L61) comprised of the following interfaces:
* [Payload Fetcher](../pkg/super_node/shared/interfaces.go#L29): Fetches raw chain data from a half-duplex endpoint (HTTP/IPC), used for historical data fetching. ([BTC](../../pkg/super_node/btc/payload_fetcher.go), [ETH](../../pkg/super_node/eth/payload_fetcher.go)). * [Payload Fetcher](../pkg/shared/interfaces.go#L29): Fetches raw chain data from a half-duplex endpoint (HTTP/IPC), used for historical data fetching. ([BTC](../../pkg/btc/payload_fetcher.go), [ETH](../../pkg/eth/payload_fetcher.go)).
* [Payload Streamer](../pkg/super_node/shared/interfaces.go#L24): Streams raw chain data from a full-duplex endpoint (WebSocket/IPC), used for syncing data at the head of the chain in real-time. ([BTC](../../pkg/super_node/btc/http_streamer.go), [ETH](../../pkg/super_node/eth/streamer.go)). * [Payload Streamer](../pkg/shared/interfaces.go#L24): Streams raw chain data from a full-duplex endpoint (WebSocket/IPC), used for syncing data at the head of the chain in real-time. ([BTC](../../pkg/btc/http_streamer.go), [ETH](../../pkg/eth/streamer.go)).
* [Payload Converter](../pkg/super_node/shared/interfaces.go#L34): Converts raw chain data to an intermediary form prepared for IPFS publishing. ([BTC](../../pkg/super_node/btc/converter.go), [ETH](../../pkg/super_node/eth/converter.go)). * [Payload Converter](../pkg/shared/interfaces.go#L34): Converts raw chain data to an intermediary form prepared for IPFS publishing. ([BTC](../../pkg/btc/converter.go), [ETH](../../pkg/eth/converter.go)).
* [IPLD Publisher](../pkg/super_node/shared/interfaces.go#L39): Publishes the converted data to IPFS, returning their CIDs and associated metadata for indexing. ([BTC](../../pkg/super_node/btc/publisher.go), [ETH](../../pkg/super_node/eth/publisher.go)). * [IPLD Publisher](../pkg/shared/interfaces.go#L39): Publishes the converted data to IPFS, returning their CIDs and associated metadata for indexing. ([BTC](../../pkg/btc/publisher.go), [ETH](../../pkg/eth/publisher.go)).
* [CID Indexer](../pkg/super_node/shared/interfaces.go#L44): Indexes CIDs in Postgres with their associated metadata. This metadata is chain-specific and selected based on utility. ([BTC](../../pkg/super_node/btc/indexer.go), [ETH](../../pkg/super_node/eth/indexer.go)). * [CID Indexer](../pkg/shared/interfaces.go#L44): Indexes CIDs in Postgres with their associated metadata. This metadata is chain-specific and selected based on utility. ([BTC](../../pkg/btc/indexer.go), [ETH](../../pkg/eth/indexer.go)).
* [CID Retriever](../pkg/super_node/shared/interfaces.go#L54): Retrieves CIDs from Postgres by searching against their associated metadata; it is used to look up data to serve API requests/subscriptions. ([BTC](../../pkg/super_node/btc/retriever.go), [ETH](../../pkg/super_node/eth/retriever.go)). * [CID Retriever](../pkg/shared/interfaces.go#L54): Retrieves CIDs from Postgres by searching against their associated metadata; it is used to look up data to serve API requests/subscriptions. ([BTC](../../pkg/btc/retriever.go), [ETH](../../pkg/eth/retriever.go)).
* [IPLD Fetcher](../pkg/super_node/shared/interfaces.go#L62): Fetches the IPLDs needed to service API requests/subscriptions from IPFS using retrieved CIDs; can route through an IPFS block-exchange to search for objects that are not directly available. ([BTC](../../pkg/super_node/btc/ipld_fetcher.go), [ETH](../../pkg/super_node/eth/ipld_fetcher.go)). * [IPLD Fetcher](../pkg/shared/interfaces.go#L62): Fetches the IPLDs needed to service API requests/subscriptions from IPFS using retrieved CIDs; can route through an IPFS block-exchange to search for objects that are not directly available. ([BTC](../../pkg/btc/ipld_fetcher.go), [ETH](../../pkg/eth/ipld_fetcher.go)).
* [Response Filterer](../pkg/super_node/shared/interfaces.go#L49): Filters converted data payloads served to API subscriptions; filters according to the subscriber-provided parameters. ([BTC](../../pkg/super_node/btc/filterer.go), [ETH](../../pkg/super_node/eth/filterer.go)). * [Response Filterer](../pkg/shared/interfaces.go#L49): Filters converted data payloads served to API subscriptions; filters according to the subscriber-provided parameters. ([BTC](../../pkg/btc/filterer.go), [ETH](../../pkg/eth/filterer.go)).
* [API](https://github.com/ethereum/go-ethereum/blob/master/rpc/types.go#L31): Exposes RPC methods for clients to interface with the data. Chain-specific APIs should aim to recapitulate as much of the native API as possible. ([VDB](../../pkg/super_node/api.go), [ETH](../../pkg/super_node/eth/api.go)). * [API](https://github.com/ethereum/go-ethereum/blob/master/rpc/types.go#L31): Exposes RPC methods for clients to interface with the data. Chain-specific APIs should aim to recapitulate as much of the native API as possible. ([VDB](../../pkg/api.go), [ETH](../../pkg/eth/api.go)).
Appropriating the service for a new chain is done by creating underlying types to satisfy these interfaces for Appropriating the service for a new chain is done by creating underlying types to satisfy these interfaces for
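To make that concrete, here is a minimal sketch of what one such type might look like for a hypothetical new chain; the package, type, and method names are illustrative only, since the authoritative interface definitions live in pkg/shared/interfaces.go:

```go
package newchain // hypothetical package for a newly supported chain

// RawPayload stands in for the chain's wire format; the real code would use
// the shared payload aliases from pkg/shared.
type RawPayload []byte

// Converter plays the Payload Converter role from the list above: it turns
// raw chain data into an intermediary form ready for IPFS publishing.
type Converter struct{}

// Convert decodes the chain-specific format and repackages it for the IPLD Publisher.
func (c *Converter) Convert(payload RawPayload) (interface{}, error) {
	// chain-specific decoding would happen here
	return struct{ Raw RawPayload }{Raw: payload}, nil
}
```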
@ -56,7 +56,7 @@ This set of parameters needs to be set no matter the chain type.
path = "~/.ipfs" # $IPFS_PATH path = "~/.ipfs" # $IPFS_PATH
mode = "direct" # $IPFS_MODE mode = "direct" # $IPFS_MODE
[superNode] [watcher]
chain = "bitcoin" # $SUPERNODE_CHAIN chain = "bitcoin" # $SUPERNODE_CHAIN
server = true # $SUPERNODE_SERVER server = true # $SUPERNODE_SERVER
ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH
@ -20,7 +20,7 @@
clearOldCache = false # $RESYNC_CLEAR_OLD_CACHE clearOldCache = false # $RESYNC_CLEAR_OLD_CACHE
resetValidation = true # $RESYNC_RESET_VALIDATION resetValidation = true # $RESYNC_RESET_VALIDATION
[superNode] [watcher]
chain = "bitcoin" # $SUPERNODE_CHAIN chain = "bitcoin" # $SUPERNODE_CHAIN
server = true # $SUPERNODE_SERVER server = true # $SUPERNODE_SERVER
ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH
@ -21,7 +21,7 @@
clearOldCache = true # $RESYNC_CLEAR_OLD_CACHE clearOldCache = true # $RESYNC_CLEAR_OLD_CACHE
resetValidation = true # $RESYNC_RESET_VALIDATION resetValidation = true # $RESYNC_RESET_VALIDATION
[superNode] [watcher]
chain = "ethereum" # $SUPERNODE_CHAIN chain = "ethereum" # $SUPERNODE_CHAIN
server = true # $SUPERNODE_SERVER server = true # $SUPERNODE_SERVER
ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH
@ -1,29 +1,29 @@
[superNode] [watcher]
[superNode.ethSubscription] [watcher.ethSubscription]
historicalData = false historicalData = false
historicalDataOnly = false historicalDataOnly = false
startingBlock = 0 startingBlock = 0
endingBlock = 0 endingBlock = 0
wsPath = "ws://127.0.0.1:8080" wsPath = "ws://127.0.0.1:8080"
[superNode.ethSubscription.headerFilter] [watcher.ethSubscription.headerFilter]
off = false off = false
uncles = false uncles = false
[superNode.ethSubscription.txFilter] [watcher.ethSubscription.txFilter]
off = false off = false
src = [] src = []
dst = [] dst = []
[superNode.ethSubscription.receiptFilter] [watcher.ethSubscription.receiptFilter]
off = false off = false
contracts = [] contracts = []
topic0s = [] topic0s = []
topic1s = [] topic1s = []
topic2s = [] topic2s = []
topic3s = [] topic3s = []
[superNode.ethSubscription.stateFilter] [watcher.ethSubscription.stateFilter]
off = false off = false
addresses = [] addresses = []
intermediateNodes = false intermediateNodes = false
[superNode.ethSubscription.storageFilter] [watcher.ethSubscription.storageFilter]
off = true off = true
addresses = [] addresses = []
storageKeys = [] storageKeys = []
@ -25,9 +25,9 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
func TestBTCSuperNode(t *testing.T) { func TestBTCWatcher(t *testing.T) {
RegisterFailHandler(Fail) RegisterFailHandler(Fail)
RunSpecs(t, "Super Node BTC Suite Test") RunSpecs(t, "BTC IPFS Watcher Suite Test")
} }
var _ = BeforeSuite(func() { var _ = BeforeSuite(func() {
@ -130,7 +130,7 @@ func (bcr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID
results := make([]TxModel, 0) results := make([]TxModel, 0)
id := 1 id := 1
pgStr := fmt.Sprintf(`SELECT transaction_cids.id, transaction_cids.header_id, pgStr := fmt.Sprintf(`SELECT transaction_cids.id, transaction_cids.header_id,
transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.mh_key,
transaction_cids.segwit, transaction_cids.witness_hash, transaction_cids.index transaction_cids.segwit, transaction_cids.witness_hash, transaction_cids.index
FROM btc.transaction_cids, btc.header_cids, btc.tx_inputs, btc.tx_outputs FROM btc.transaction_cids, btc.header_cids, btc.tx_inputs, btc.tx_outputs
WHERE transaction_cids.header_id = header_cids.id WHERE transaction_cids.header_id = header_cids.id
@ -169,7 +169,7 @@ func (bcr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID
// RetrieveGapsInData is used to find the block numbers at which we are missing data in the db // RetrieveGapsInData is used to find the block numbers at which we are missing data in the db
func (bcr *CIDRetriever) RetrieveGapsInData(validationLevel int) ([]shared.Gap, error) { func (bcr *CIDRetriever) RetrieveGapsInData(validationLevel int) ([]shared.Gap, error) {
log.Info("searching for gaps in the btc super node database") log.Info("searching for gaps in the btc ipfs watcher database")
startingBlock, err := bcr.RetrieveFirstBlockNumber() startingBlock, err := bcr.RetrieveFirstBlockNumber()
if err != nil { if err != nil {
return nil, fmt.Errorf("btc CIDRetriever RetrieveFirstBlockNumber error: %v", err) return nil, fmt.Errorf("btc CIDRetriever RetrieveFirstBlockNumber error: %v", err)
@ -160,7 +160,7 @@ func (c *Cleaner) cleanFull(tx *sqlx.Tx, rng [2]uint64) error {
func (c *Cleaner) cleanTransactionIPLDs(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanTransactionIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A pgStr := `DELETE FROM public.blocks A
USING btc.transaction_cids B, btc.header_cids C USING btc.transaction_cids B, btc.header_cids C
WHERE A.key = B.cid WHERE A.key = B.mh_key
AND B.header_id = C.id AND B.header_id = C.id
AND C.block_number BETWEEN $1 AND $2` AND C.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1]) _, err := tx.Exec(pgStr, rng[0], rng[1])
@ -179,7 +179,7 @@ func (c *Cleaner) cleanTransactionMetaData(tx *sqlx.Tx, rng [2]uint64) error {
func (c *Cleaner) cleanHeaderIPLDs(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanHeaderIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A pgStr := `DELETE FROM public.blocks A
USING btc.header_cids B USING btc.header_cids B
WHERE A.key = B.cid WHERE A.key = B.mh_key
AND B.block_number BETWEEN $1 AND $2` AND B.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1]) _, err := tx.Exec(pgStr, rng[0], rng[1])
return err return err
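The two DELETE statements above capture the core change in this PR: rows in public.blocks are now joined through the new mh_key column rather than the cid column. The read path mirrors this; a minimal sketch of a lookup by multihash key, assuming a sqlx transaction (the real helper is shared.FetchIPLDByMhKey, so treat this body as an approximation):

```go
package shared // sketch

import "github.com/jmoiron/sqlx"

// fetchIPLDByMhKey retrieves raw IPLD block data by its multihash-derived key,
// the same key the DELETEs above match against public.blocks.key.
func fetchIPLDByMhKey(tx *sqlx.Tx, mhKey string) ([]byte, error) {
	var data []byte
	err := tx.Get(&data, `SELECT data FROM public.blocks WHERE key = $1`, mhKey)
	return data, err
}
```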
@ -33,25 +33,30 @@ var (
// header variables // header variables
blockHash1 = crypto.Keccak256Hash([]byte{00, 02}) blockHash1 = crypto.Keccak256Hash([]byte{00, 02})
blocKNumber1 = big.NewInt(0) blocKNumber1 = big.NewInt(0)
headerCid1 = "mockHeader1CID" headerCid1 = shared.TestCID([]byte("mockHeader1CID"))
headerMhKey1 = shared.MultihashKeyFromCID(headerCid1)
parentHash = crypto.Keccak256Hash([]byte{00, 01}) parentHash = crypto.Keccak256Hash([]byte{00, 01})
headerModel1 = btc.HeaderModel{ headerModel1 = btc.HeaderModel{
BlockHash: blockHash1.String(), BlockHash: blockHash1.String(),
BlockNumber: blocKNumber1.String(), BlockNumber: blocKNumber1.String(),
ParentHash: parentHash.String(), ParentHash: parentHash.String(),
CID: headerCid1, CID: headerCid1.String(),
MhKey: headerMhKey1,
} }
// tx variables // tx variables
tx1CID = "mockTx1CID" tx1CID = shared.TestCID([]byte("mockTx1CID"))
tx2CID = "mockTx2CID" tx1MhKey = shared.MultihashKeyFromCID(tx1CID)
tx2CID = shared.TestCID([]byte("mockTx2CID"))
tx2MhKey = shared.MultihashKeyFromCID(tx2CID)
tx1Hash = crypto.Keccak256Hash([]byte{01, 01}) tx1Hash = crypto.Keccak256Hash([]byte{01, 01})
tx2Hash = crypto.Keccak256Hash([]byte{01, 02}) tx2Hash = crypto.Keccak256Hash([]byte{01, 02})
opHash = crypto.Keccak256Hash([]byte{02, 01}) opHash = crypto.Keccak256Hash([]byte{02, 01})
txModels1 = []btc.TxModelWithInsAndOuts{ txModels1 = []btc.TxModelWithInsAndOuts{
{ {
Index: 0, Index: 0,
CID: tx1CID, CID: tx1CID.String(),
MhKey: tx1MhKey,
TxHash: tx1Hash.String(), TxHash: tx1Hash.String(),
SegWit: true, SegWit: true,
TxInputs: []btc.TxInput{ TxInputs: []btc.TxInput{
@ -75,7 +80,8 @@ var (
}, },
{ {
Index: 1, Index: 1,
CID: tx2CID, CID: tx2CID.String(),
MhKey: tx2MhKey,
TxHash: tx2Hash.String(), TxHash: tx2Hash.String(),
SegWit: true, SegWit: true,
}, },
@ -89,21 +95,25 @@ var (
// header variables // header variables
blockHash2 = crypto.Keccak256Hash([]byte{00, 03}) blockHash2 = crypto.Keccak256Hash([]byte{00, 03})
blocKNumber2 = big.NewInt(1) blocKNumber2 = big.NewInt(1)
headerCid2 = "mockHeaderCID2" headerCid2 = shared.TestCID([]byte("mockHeaderCID2"))
headerMhKey2 = shared.MultihashKeyFromCID(headerCid2)
headerModel2 = btc.HeaderModel{ headerModel2 = btc.HeaderModel{
BlockNumber: blocKNumber2.String(), BlockNumber: blocKNumber2.String(),
BlockHash: blockHash2.String(), BlockHash: blockHash2.String(),
ParentHash: blockHash1.String(), ParentHash: blockHash1.String(),
CID: headerCid2, CID: headerCid2.String(),
MhKey: headerMhKey2,
} }
// tx variables // tx variables
tx3CID = "mockTx3CID" tx3CID = shared.TestCID([]byte("mockTx3CID"))
tx3MhKey = shared.MultihashKeyFromCID(tx3CID)
tx3Hash = crypto.Keccak256Hash([]byte{01, 03}) tx3Hash = crypto.Keccak256Hash([]byte{01, 03})
txModels2 = []btc.TxModelWithInsAndOuts{ txModels2 = []btc.TxModelWithInsAndOuts{
{ {
Index: 0, Index: 0,
CID: tx3CID, CID: tx3CID.String(),
MhKey: tx3MhKey,
TxHash: tx3Hash.String(), TxHash: tx3Hash.String(),
SegWit: true, SegWit: true,
}, },
@ -112,13 +122,13 @@ var (
HeaderCID: headerModel2, HeaderCID: headerModel2,
TransactionCIDs: txModels2, TransactionCIDs: txModels2,
} }
rngs = [][2]uint64{{0, 1}} rngs = [][2]uint64{{0, 1}}
cids = []string{ mhKeys = []string{
headerCid1, headerMhKey1,
headerCid2, headerMhKey2,
tx1CID, tx1MhKey,
tx2CID, tx2MhKey,
tx3CID, tx3MhKey,
} }
mockData = []byte{'\x01'} mockData = []byte{'\x01'}
) )
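The mocks above now derive real CIDs (and their multihash keys) instead of using bare strings. A sketch of how a deterministic test CID can be produced, assuming go-cid and go-multihash; the prefix parameters here are an assumption, not necessarily the repo's exact choice in shared.TestCID:

```go
package shared // sketch

import (
	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

// testCID deterministically builds a CID from arbitrary bytes for tests.
func testCID(b []byte) cid.Cid {
	pref := cid.Prefix{Version: 1, Codec: cid.Raw, MhType: mh.KECCAK_256, MhLength: -1}
	c, _ := pref.Sum(b) // error ignored for the sketch
	return c
}
```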
@ -139,16 +149,15 @@ var _ = Describe("Cleaner", func() {
Describe("Clean", func() { Describe("Clean", func() {
BeforeEach(func() { BeforeEach(func() {
for _, key := range mhKeys {
_, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, key, mockData)
Expect(err).ToNot(HaveOccurred())
}
err := repo.Index(mockCIDPayload1) err := repo.Index(mockCIDPayload1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(mockCIDPayload2) err = repo.Index(mockCIDPayload2)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
for _, cid := range cids {
_, err = db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, cid, mockData)
Expect(err).ToNot(HaveOccurred())
}
tx, err := db.Beginx() tx, err := db.Beginx()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
var startingIPFSBlocksCount int var startingIPFSBlocksCount int
@ -286,6 +295,11 @@ var _ = Describe("Cleaner", func() {
Describe("ResetValidation", func() { Describe("ResetValidation", func() {
BeforeEach(func() { BeforeEach(func() {
for _, key := range mhKeys {
_, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, key, mockData)
Expect(err).ToNot(HaveOccurred())
}
err := repo.Index(mockCIDPayload1) err := repo.Index(mockCIDPayload1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(mockCIDPayload2) err = repo.Index(mockCIDPayload2)
@ -74,11 +74,11 @@ func (in *CIDIndexer) Index(cids shared.CIDsForIndexing) error {
func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel) (int64, error) { func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel) (int64, error) {
var headerID int64 var headerID int64
err := tx.QueryRowx(`INSERT INTO btc.header_cids (block_number, block_hash, parent_hash, cid, timestamp, bits, node_id, times_validated) err := tx.QueryRowx(`INSERT INTO btc.header_cids (block_number, block_hash, parent_hash, cid, timestamp, bits, node_id, mh_key, times_validated)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, timestamp, bits, node_id, times_validated) = ($3, $4, $5, $6, $7, btc.header_cids.times_validated + 1) ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, timestamp, bits, node_id, mh_key, times_validated) = ($3, $4, $5, $6, $7, $8, btc.header_cids.times_validated + 1)
RETURNING id`, RETURNING id`,
header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.Timestamp, header.Bits, in.db.NodeID, 1).Scan(&headerID) header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.Timestamp, header.Bits, in.db.NodeID, header.MhKey, 1).Scan(&headerID)
return headerID, err return headerID, err
} }
@ -107,11 +107,11 @@ func (in *CIDIndexer) indexTransactionCIDs(tx *sqlx.Tx, transactions []TxModelWi
func (in *CIDIndexer) indexTransactionCID(tx *sqlx.Tx, transaction TxModelWithInsAndOuts, headerID int64) (int64, error) { func (in *CIDIndexer) indexTransactionCID(tx *sqlx.Tx, transaction TxModelWithInsAndOuts, headerID int64) (int64, error) {
var txID int64 var txID int64
err := tx.QueryRowx(`INSERT INTO btc.transaction_cids (header_id, tx_hash, index, cid, segwit, witness_hash) err := tx.QueryRowx(`INSERT INTO btc.transaction_cids (header_id, tx_hash, index, cid, segwit, witness_hash, mh_key)
VALUES ($1, $2, $3, $4, $5, $6) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (tx_hash) DO UPDATE SET (header_id, index, cid, segwit, witness_hash) = ($1, $3, $4, $5, $6) ON CONFLICT (tx_hash) DO UPDATE SET (header_id, index, cid, segwit, witness_hash, mh_key) = ($1, $3, $4, $5, $6, $7)
RETURNING id`, RETURNING id`,
headerID, transaction.TxHash, transaction.Index, transaction.CID, transaction.SegWit, transaction.WitnessHash).Scan(&txID) headerID, transaction.TxHash, transaction.Index, transaction.CID, transaction.SegWit, transaction.WitnessHash, transaction.MhKey).Scan(&txID)
return txID, err return txID, err
} }
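Both INSERTs now persist a mh_key alongside the cid, giving the foreign key into public.blocks a key that lines up with what the IPFS blockstore writes. A sketch of how such a key can be derived from a CID, assuming the standard go-ipfs helper packages (the repo's shared.MultihashKeyFromCID may differ in detail):

```go
package shared // sketch

import (
	"github.com/ipfs/go-cid"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	dshelp "github.com/ipfs/go-ipfs-ds-help"
)

// multihashKeyFromCID derives the datastore key for a CID: the blockstore
// prefix plus the datastore-encoded multihash, matching public.blocks.key.
func multihashKeyFromCID(c cid.Cid) string {
	dbKey := dshelp.MultihashToDsKey(c.Hash())
	return blockstore.BlockPrefix.String() + dbKey.String()
}
```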
@ -28,14 +28,20 @@ import (
var _ = Describe("Indexer", func() { var _ = Describe("Indexer", func() {
var ( var (
db *postgres.DB db *postgres.DB
err error err error
repo *btc.CIDIndexer repo *btc.CIDIndexer
mockData = []byte{1, 2, 3}
) )
BeforeEach(func() { BeforeEach(func() {
db, err = shared.SetupDB() db, err = shared.SetupDB()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
repo = btc.NewCIDIndexer(db) repo = btc.NewCIDIndexer(db)
// need entries in public.blocks with the mh_keys or the FK constraint will fail
shared.PublishMockIPLD(db, mocks.MockHeaderMhKey, mockData)
shared.PublishMockIPLD(db, mocks.MockTrxMhKey1, mockData)
shared.PublishMockIPLD(db, mocks.MockTrxMhKey2, mockData)
shared.PublishMockIPLD(db, mocks.MockTrxMhKey3, mockData)
}) })
AfterEach(func() { AfterEach(func() {
btc.TearDownDB(db) btc.TearDownDB(db)
@ -43,6 +49,7 @@ var _ = Describe("Indexer", func() {
Describe("Index", func() { Describe("Index", func() {
It("Indexes CIDs and related metadata into vulcanizedb", func() { It("Indexes CIDs and related metadata into vulcanizedb", func() {
err = repo.Index(&mocks.MockCIDPayload) err = repo.Index(&mocks.MockCIDPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
pgStr := `SELECT * FROM btc.header_cids pgStr := `SELECT * FROM btc.header_cids
@ -72,13 +79,13 @@ var _ = Describe("Indexer", func() {
Expect(tx.WitnessHash).To(Equal("")) Expect(tx.WitnessHash).To(Equal(""))
switch tx.Index { switch tx.Index {
case 0: case 0:
Expect(tx.CID).To(Equal("mockTrxCID1")) Expect(tx.CID).To(Equal(mocks.MockTrxCID1.String()))
Expect(tx.TxHash).To(Equal(mocks.MockBlock.Transactions[0].TxHash().String())) Expect(tx.TxHash).To(Equal(mocks.MockBlock.Transactions[0].TxHash().String()))
case 1: case 1:
Expect(tx.CID).To(Equal("mockTrxCID2")) Expect(tx.CID).To(Equal(mocks.MockTrxCID2.String()))
Expect(tx.TxHash).To(Equal(mocks.MockBlock.Transactions[1].TxHash().String())) Expect(tx.TxHash).To(Equal(mocks.MockBlock.Transactions[1].TxHash().String()))
case 2: case 2:
Expect(tx.CID).To(Equal("mockTrxCID3")) Expect(tx.CID).To(Equal(mocks.MockTrxCID3.String()))
Expect(tx.TxHash).To(Equal(mocks.MockBlock.Transactions[2].TxHash().String())) Expect(tx.TxHash).To(Equal(mocks.MockBlock.Transactions[2].TxHash().String()))
} }
} }
@ -79,7 +79,7 @@ func (f *IPLDPGFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error)
// FetchHeader fetches the header IPLD // FetchHeader fetches the header IPLD
func (f *IPLDPGFetcher) FetchHeader(tx *sqlx.Tx, c HeaderModel) (ipfs.BlockModel, error) { func (f *IPLDPGFetcher) FetchHeader(tx *sqlx.Tx, c HeaderModel) (ipfs.BlockModel, error) {
log.Debug("fetching header ipld") log.Debug("fetching header ipld")
headerBytes, err := shared.FetchIPLD(tx, c.CID) headerBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil { if err != nil {
return ipfs.BlockModel{}, err return ipfs.BlockModel{}, err
} }
@ -94,7 +94,7 @@ func (f *IPLDPGFetcher) FetchTrxs(tx *sqlx.Tx, cids []TxModel) ([]ipfs.BlockMode
log.Debug("fetching transaction iplds") log.Debug("fetching transaction iplds")
trxIPLDs := make([]ipfs.BlockModel, len(cids)) trxIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids { for i, c := range cids {
trxBytes, err := shared.FetchIPLD(tx, c.CID) trxBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -25,11 +25,19 @@ import (
"github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil" "github.com/btcsuite/btcutil"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
) )
var ( var (
MockHeaderCID = shared.TestCID([]byte("MockHeaderCID"))
MockTrxCID1 = shared.TestCID([]byte("MockTrxCID1"))
MockTrxCID2 = shared.TestCID([]byte("MockTrxCID2"))
MockTrxCID3 = shared.TestCID([]byte("MockTrxCID3"))
MockHeaderMhKey = shared.MultihashKeyFromCID(MockHeaderCID)
MockTrxMhKey1 = shared.MultihashKeyFromCID(MockTrxCID1)
MockTrxMhKey2 = shared.MultihashKeyFromCID(MockTrxCID2)
MockTrxMhKey3 = shared.MultihashKeyFromCID(MockTrxCID3)
MockBlockHeight int64 = 1337 MockBlockHeight int64 = 1337
MockBlock = wire.MsgBlock{ MockBlock = wire.MsgBlock{
Header: wire.BlockHeader{ Header: wire.BlockHeader{
@ -479,7 +487,8 @@ var (
} }
MockTxsMetaDataPostPublish = []btc.TxModelWithInsAndOuts{ MockTxsMetaDataPostPublish = []btc.TxModelWithInsAndOuts{
{ {
CID: "mockTrxCID1", CID: MockTrxCID1.String(),
MhKey: MockTrxMhKey1,
TxHash: MockBlock.Transactions[0].TxHash().String(), TxHash: MockBlock.Transactions[0].TxHash().String(),
Index: 0, Index: 0,
SegWit: MockBlock.Transactions[0].HasWitness(), SegWit: MockBlock.Transactions[0].HasWitness(),
@ -517,7 +526,8 @@ var (
}, },
}, },
{ {
CID: "mockTrxCID2", CID: MockTrxCID2.String(),
MhKey: MockTrxMhKey2,
TxHash: MockBlock.Transactions[1].TxHash().String(), TxHash: MockBlock.Transactions[1].TxHash().String(),
Index: 1, Index: 1,
SegWit: MockBlock.Transactions[1].HasWitness(), SegWit: MockBlock.Transactions[1].HasWitness(),
@ -594,7 +604,8 @@ var (
}, },
}, },
{ {
CID: "mockTrxCID3", CID: MockTrxCID3.String(),
MhKey: MockTrxMhKey3,
TxHash: MockBlock.Transactions[2].TxHash().String(), TxHash: MockBlock.Transactions[2].TxHash().String(),
Index: 2, Index: 2,
SegWit: MockBlock.Transactions[2].HasWitness(), SegWit: MockBlock.Transactions[2].HasWitness(),
@ -671,7 +682,8 @@ var (
}, },
} }
MockHeaderMetaData = btc.HeaderModel{ MockHeaderMetaData = btc.HeaderModel{
CID: "mockHeaderCID", CID: MockHeaderCID.String(),
MhKey: MockHeaderMhKey,
ParentHash: MockBlock.Header.PrevBlock.String(), ParentHash: MockBlock.Header.PrevBlock.String(),
BlockNumber: strconv.Itoa(int(MockBlockHeight)), BlockNumber: strconv.Itoa(int(MockBlockHeight)),
BlockHash: MockBlock.Header.BlockHash().String(), BlockHash: MockBlock.Header.BlockHash().String(),
@ -686,53 +698,6 @@ var (
HeaderCID: MockHeaderMetaData, HeaderCID: MockHeaderMetaData,
TransactionCIDs: MockTxsMetaDataPostPublish, TransactionCIDs: MockTxsMetaDataPostPublish,
} }
DummyCIDPayloadForFKReference = btc.CIDPayload{
HeaderCID: btc.HeaderModel{
CID: "dummyHeader",
ParentHash: "",
BlockHash: "",
BlockNumber: "1336",
Bits: 1,
Timestamp: 1000000000,
},
TransactionCIDs: []btc.TxModelWithInsAndOuts{
{
TxHash: "87a157f3fd88ac7907c05fc55e271dc4acdc5605d187d646604ca8c0e9382e03",
CID: "dummyTx1",
Index: 0,
TxOutputs: []btc.TxOutput{
{
Index: 0,
RequiredSigs: 0,
Value: 0,
PkScript: []byte{},
ScriptClass: 0,
},
},
},
{
TxHash: "cf4e2978d0611ce46592e02d7e7daf8627a316ab69759a9f3df109a7f2bf3ec3",
CID: "dummyTx2",
Index: 1,
TxOutputs: []btc.TxOutput{
{
Index: 0,
RequiredSigs: 0,
Value: 0,
PkScript: []byte{},
ScriptClass: 0,
},
{
Index: 1,
RequiredSigs: 0,
Value: 0,
PkScript: []byte{},
ScriptClass: 0,
},
},
},
},
}
) )
func stringSliceFromAddresses(addrs []btcutil.Address) []string { func stringSliceFromAddresses(addrs []btcutil.Address) []string {
@ -25,6 +25,7 @@ type HeaderModel struct {
BlockHash string `db:"block_hash"` BlockHash string `db:"block_hash"`
ParentHash string `db:"parent_hash"` ParentHash string `db:"parent_hash"`
CID string `db:"cid"` CID string `db:"cid"`
MhKey string `db:"mh_key"`
Timestamp int64 `db:"timestamp"` Timestamp int64 `db:"timestamp"`
Bits uint32 `db:"bits"` Bits uint32 `db:"bits"`
NodeID int64 `db:"node_id"` NodeID int64 `db:"node_id"`
@ -38,6 +39,7 @@ type TxModel struct {
Index int64 `db:"index"` Index int64 `db:"index"`
TxHash string `db:"tx_hash"` TxHash string `db:"tx_hash"`
CID string `db:"cid"` CID string `db:"cid"`
MhKey string `db:"mh_key"`
SegWit bool `db:"segwit"` SegWit bool `db:"segwit"`
WitnessHash string `db:"witness_hash"` WitnessHash string `db:"witness_hash"`
} }
@ -49,6 +51,7 @@ type TxModelWithInsAndOuts struct {
Index int64 `db:"index"` Index int64 `db:"index"`
TxHash string `db:"tx_hash"` TxHash string `db:"tx_hash"`
CID string `db:"cid"` CID string `db:"cid"`
MhKey string `db:"mh_key"`
SegWit bool `db:"segwit"` SegWit bool `db:"segwit"`
WitnessHash string `db:"witness_hash"` WitnessHash string `db:"witness_hash"`
TxInputs []TxInput TxInputs []TxInput
@ -80,6 +80,7 @@ func (pub *IPLDPublisherAndIndexer) Publish(payload shared.ConvertedData) (share
} }
header := HeaderModel{ header := HeaderModel{
CID: headerNode.Cid().String(), CID: headerNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
ParentHash: ipldPayload.Header.PrevBlock.String(), ParentHash: ipldPayload.Header.PrevBlock.String(),
BlockNumber: strconv.Itoa(int(ipldPayload.BlockPayload.BlockHeight)), BlockNumber: strconv.Itoa(int(ipldPayload.BlockPayload.BlockHeight)),
BlockHash: ipldPayload.Header.BlockHash().String(), BlockHash: ipldPayload.Header.BlockHash().String(),
@ -98,6 +99,7 @@ func (pub *IPLDPublisherAndIndexer) Publish(payload shared.ConvertedData) (share
} }
txModel := ipldPayload.TxMetaData[i] txModel := ipldPayload.TxMetaData[i]
txModel.CID = txNode.Cid().String() txModel.CID = txNode.Cid().String()
txModel.MhKey = shared.MultihashKeyFromCID(txNode.Cid())
txID, err := pub.indexer.indexTransactionCID(tx, txModel, headerID) txID, err := pub.indexer.indexTransactionCID(tx, txModel, headerID)
if err != nil { if err != nil {
return nil, err return nil, err
@ -62,8 +62,10 @@ func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForI
if err != nil { if err != nil {
return nil, err return nil, err
} }
mhKey, _ := shared.MultihashKeyFromCIDString(headerCid)
header := HeaderModel{ header := HeaderModel{
CID: headerCid, CID: headerCid,
MhKey: mhKey,
ParentHash: ipldPayload.Header.PrevBlock.String(), ParentHash: ipldPayload.Header.PrevBlock.String(),
BlockNumber: strconv.Itoa(int(ipldPayload.BlockPayload.BlockHeight)), BlockNumber: strconv.Itoa(int(ipldPayload.BlockPayload.BlockHeight)),
BlockHash: ipldPayload.Header.BlockHash().String(), BlockHash: ipldPayload.Header.BlockHash().String(),
@ -97,8 +99,10 @@ func (pub *IPLDPublisher) publishTransactions(transactions []*ipld.BtcTx, txTrie
if err != nil { if err != nil {
return nil, err return nil, err
} }
mhKey, _ := shared.MultihashKeyFromCIDString(cid)
txCids[i] = TxModelWithInsAndOuts{ txCids[i] = TxModelWithInsAndOuts{
CID: cid, CID: cid,
MhKey: mhKey,
Index: trxMeta[i].Index, Index: trxMeta[i].Index,
TxHash: trxMeta[i].TxHash, TxHash: trxMeta[i].TxHash,
SegWit: trxMeta[i].SegWit, SegWit: trxMeta[i].SegWit,
@ -57,12 +57,12 @@ var _ = Describe("Publisher", func() {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
tx3Bytes := by.Bytes() tx3Bytes := by.Bytes()
mockHeaderDagPutter.CIDsToReturn = map[common.Hash]string{ mockHeaderDagPutter.CIDsToReturn = map[common.Hash]string{
common.BytesToHash(headerBytes): "mockHeaderCID", common.BytesToHash(headerBytes): mocks.MockHeaderCID.String(),
} }
mockTrxDagPutter.CIDsToReturn = map[common.Hash]string{ mockTrxDagPutter.CIDsToReturn = map[common.Hash]string{
common.BytesToHash(tx1Bytes): "mockTrxCID1", common.BytesToHash(tx1Bytes): mocks.MockTrxCID1.String(),
common.BytesToHash(tx2Bytes): "mockTrxCID2", common.BytesToHash(tx2Bytes): mocks.MockTrxCID2.String(),
common.BytesToHash(tx3Bytes): "mockTrxCID3", common.BytesToHash(tx3Bytes): mocks.MockTrxCID3.String(),
} }
publisher := btc.IPLDPublisher{ publisher := btc.IPLDPublisher{
HeaderPutter: mockHeaderDagPutter, HeaderPutter: mockHeaderDagPutter,
@ -25,7 +25,7 @@ import (
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
) )
// SubscriptionSettings config is used by a subscriber to specify what bitcoin data to stream from the super node // SubscriptionSettings config is used by a subscriber to specify what bitcoin data to stream from the watcher
type SubscriptionSettings struct { type SubscriptionSettings struct {
BackFill bool BackFill bool
BackFillOnly bool BackFillOnly bool
@ -55,36 +55,36 @@ type TxFilter struct {
func NewBtcSubscriptionConfig() (*SubscriptionSettings, error) { func NewBtcSubscriptionConfig() (*SubscriptionSettings, error) {
sc := new(SubscriptionSettings) sc := new(SubscriptionSettings)
// Below default to false, which means we do not backfill by default // Below default to false, which means we do not backfill by default
sc.BackFill = viper.GetBool("superNode.btcSubscription.historicalData") sc.BackFill = viper.GetBool("watcher.btcSubscription.historicalData")
sc.BackFillOnly = viper.GetBool("superNode.btcSubscription.historicalDataOnly") sc.BackFillOnly = viper.GetBool("watcher.btcSubscription.historicalDataOnly")
// Below default to 0 // Below default to 0
// 0 start means we start at the beginning and 0 end means we continue indefinitely // 0 start means we start at the beginning and 0 end means we continue indefinitely
sc.Start = big.NewInt(viper.GetInt64("superNode.btcSubscription.startingBlock")) sc.Start = big.NewInt(viper.GetInt64("watcher.btcSubscription.startingBlock"))
sc.End = big.NewInt(viper.GetInt64("superNode.btcSubscription.endingBlock")) sc.End = big.NewInt(viper.GetInt64("watcher.btcSubscription.endingBlock"))
// Below default to false, which means we get all headers by default // Below default to false, which means we get all headers by default
sc.HeaderFilter = HeaderFilter{ sc.HeaderFilter = HeaderFilter{
Off: viper.GetBool("superNode.btcSubscription.headerFilter.off"), Off: viper.GetBool("watcher.btcSubscription.headerFilter.off"),
} }
// Below defaults to false and two slices of length 0 // Below defaults to false and two slices of length 0
// Which means we get all transactions by default // Which means we get all transactions by default
pksc := viper.Get("superNode.btcSubscription.txFilter.pkScriptClass") pksc := viper.Get("watcher.btcSubscription.txFilter.pkScriptClass")
pkScriptClasses, ok := pksc.([]uint8) pkScriptClasses, ok := pksc.([]uint8)
if !ok { if !ok {
return nil, errors.New("superNode.btcSubscription.txFilter.pkScriptClass needs to be an array of uint8s") return nil, errors.New("watcher.btcSubscription.txFilter.pkScriptClass needs to be an array of uint8s")
} }
is := viper.Get("superNode.btcSubscription.txFilter.indexes") is := viper.Get("watcher.btcSubscription.txFilter.indexes")
indexes, ok := is.([]int64) indexes, ok := is.([]int64)
if !ok { if !ok {
return nil, errors.New("superNode.btcSubscription.txFilter.indexes needs to be an array of int64s") return nil, errors.New("watcher.btcSubscription.txFilter.indexes needs to be an array of int64s")
} }
sc.TxFilter = TxFilter{ sc.TxFilter = TxFilter{
Off: viper.GetBool("superNode.btcSubscription.txFilter.off"), Off: viper.GetBool("watcher.btcSubscription.txFilter.off"),
Segwit: viper.GetBool("superNode.btcSubscription.txFilter.segwit"), Segwit: viper.GetBool("watcher.btcSubscription.txFilter.segwit"),
WitnessHashes: viper.GetStringSlice("superNode.btcSubscription.txFilter.witnessHashes"), WitnessHashes: viper.GetStringSlice("watcher.btcSubscription.txFilter.witnessHashes"),
PkScriptClasses: pkScriptClasses, PkScriptClasses: pkScriptClasses,
Indexes: indexes, Indexes: indexes,
MultiSig: viper.GetBool("superNode.btcSubscription.txFilter.multiSig"), MultiSig: viper.GetBool("watcher.btcSubscription.txFilter.multiSig"),
Addresses: viper.GetStringSlice("superNode.btcSubscription.txFilter.addresses"), Addresses: viper.GetStringSlice("watcher.btcSubscription.txFilter.addresses"),
} }
return sc, nil return sc, nil
} }
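Since every option above is read through viper, wiring the TOML shown earlier into this constructor is only a few lines; a hypothetical example (the config path is a placeholder):

```go
viper.SetConfigFile("environments/btcSubscription.toml") // placeholder path
if err := viper.ReadInConfig(); err != nil {
	log.Fatal(err)
}
settings, err := NewBtcSubscriptionConfig()
if err != nil {
	log.Fatal(err)
}
// settings can now be RLP-encoded and passed to the Stream subscription
```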
@ -22,7 +22,7 @@ import (
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
) )
// TearDownDB is used to tear down the super node dbs after tests // TearDownDB is used to tear down the watcher dbs after tests
func TearDownDB(db *postgres.DB) { func TearDownDB(db *postgres.DB) {
tx, err := db.Beginx() tx, err := db.Beginx()
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License // You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>. // along with this program. If not, see <http://www.gnu.org/licenses/>.
package watcher package builders
import ( import (
"fmt" "fmt"
pkg/client/client.go (new file, 43 lines)
@ -0,0 +1,43 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// Client is used by watchers to stream chain IPLD data from a vulcanizedb ipfs-blockchain-watcher server
package client
import (
"context"
"github.com/ethereum/go-ethereum/rpc"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch"
)
// Client is used to subscribe to the ipfs-blockchain-watcher ipld data stream
type Client struct {
c *rpc.Client
}
// NewClient creates a new Client
func NewClient(c *rpc.Client) *Client {
return &Client{
c: c,
}
}
// Stream is the main loop for subscribing to iplds from an ipfs-blockchain-watcher server
func (c *Client) Stream(payloadChan chan watch.SubscriptionPayload, rlpParams []byte) (*rpc.ClientSubscription, error) {
return c.c.Subscribe(context.Background(), "vdb", payloadChan, "stream", rlpParams)
}
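A usage sketch for the new client (endpoint, channel buffer, and rlpParams are placeholders; error handling elided for brevity):

```go
rawClient, _ := rpc.Dial("ws://127.0.0.1:8080") // placeholder endpoint
watchClient := client.NewClient(rawClient)
payloadChan := make(chan watch.SubscriptionPayload, 100)
sub, _ := watchClient.Stream(payloadChan, rlpParams) // rlpParams: RLP-encoded subscription settings
defer sub.Unsubscribe()
for payload := range payloadChan {
	_ = payload // handle streamed IPLD payloads here
}
```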
@ -1,93 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"context"
"errors"
"reflect"
"github.com/ethereum/go-ethereum/rpc"
)
// RPCClient is a wrapper around the geth RPC client
type RPCClient struct {
client *rpc.Client
ipcPath string
}
// BatchElem is a struct to hold the elements of a BatchCall
type BatchElem struct {
Method string
Args []interface{}
Result interface{}
Error error
}
// NewRPCClient creates a new RpcClient
func NewRPCClient(client *rpc.Client, ipcPath string) RPCClient {
return RPCClient{
client: client,
ipcPath: ipcPath,
}
}
// CallContext makes an rpc method call with the provided context and arguments
func (client RPCClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error {
//If an empty interface (or other nil object) is passed to CallContext, when the JSONRPC message is created the params will
//be interpreted as [null]. This seems to work fine for most of the ethereum clients (which presumably ignore a null parameter).
//Ganache however does not ignore it, and throws an 'Incorrect number of arguments' error.
if args == nil {
return client.client.CallContext(ctx, result, method)
}
return client.client.CallContext(ctx, result, method, args...)
}
func (client RPCClient) IpcPath() string {
return client.ipcPath
}
func (client RPCClient) SupportedModules() (map[string]string, error) {
return client.client.SupportedModules()
}
func (client RPCClient) BatchCall(batch []BatchElem) error {
var rpcBatch []rpc.BatchElem
for _, batchElem := range batch {
var newBatchElem = rpc.BatchElem{
Result: batchElem.Result,
Method: batchElem.Method,
Args: batchElem.Args,
Error: batchElem.Error,
}
rpcBatch = append(rpcBatch, newBatchElem)
}
return client.client.BatchCall(rpcBatch)
}
// Subscribe subscribes to an rpc "namespace_subscribe" subscription with the given channel
// The first argument needs to be the method we wish to invoke
func (client RPCClient) Subscribe(namespace string, payloadChan interface{}, args ...interface{}) (*rpc.ClientSubscription, error) {
chanVal := reflect.ValueOf(payloadChan)
if chanVal.Kind() != reflect.Chan || chanVal.Type().ChanDir()&reflect.SendDir == 0 {
return nil, errors.New("second argument to Subscribe must be a writable channel")
}
if chanVal.IsNil() {
return nil, errors.New("channel given to Subscribe must not be nil")
}
return client.client.Subscribe(context.Background(), namespace, payloadChan, args...)
}
@ -29,17 +29,12 @@ var vulcanizeConfig = []byte(`
name = "dbname" name = "dbname"
hostname = "localhost" hostname = "localhost"
port = 5432 port = 5432
[client]
ipcPath = "IPCPATH/geth.ipc"
`) `)
var _ = Describe("Loading the config", func() { var _ = Describe("Loading the config", func() {
It("reads the private config using the environment", func() { It("reads the private config using the environment", func() {
viper.SetConfigName("config") viper.SetConfigName("config")
viper.AddConfigPath("$GOPATH/src/github.com/vulcanize/ipfs-blockchain-watcher/environments/") viper.AddConfigPath("$GOPATH/src/github.com/vulcanize/ipfs-blockchain-watcher/environments/")
Expect(viper.Get("client.ipcpath")).To(BeNil())
testConfig := viper.New() testConfig := viper.New()
testConfig.SetConfigType("toml") testConfig.SetConfigType("toml")
@ -48,7 +43,6 @@ var _ = Describe("Loading the config", func() {
Expect(testConfig.Get("database.hostname")).To(Equal("localhost")) Expect(testConfig.Get("database.hostname")).To(Equal("localhost"))
Expect(testConfig.Get("database.name")).To(Equal("dbname")) Expect(testConfig.Get("database.name")).To(Equal("dbname"))
Expect(testConfig.Get("database.port")).To(Equal(int64(5432))) Expect(testConfig.Get("database.port")).To(Equal(int64(5432)))
Expect(testConfig.Get("client.ipcpath")).To(Equal("IPCPATH/geth.ipc"))
}) })
}) })
@ -1,36 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"context"
"math/big"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
type EthClient interface {
BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error)
CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error)
FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error)
HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
TransactionSender(ctx context.Context, tx *types.Transaction, block common.Hash, index uint) (common.Address, error)
TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error)
BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error)
}
@ -29,10 +29,10 @@ import (
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
) )
// APIName is the namespace for the super node's eth api // APIName is the namespace for the watcher's eth api
const APIName = "eth" const APIName = "eth"
// APIVersion is the version of the super node's eth api // APIVersion is the version of the watcher's eth api
const APIVersion = "0.0.1" const APIVersion = "0.0.1"
type PublicEthAPI struct { type PublicEthAPI struct {
@ -181,7 +181,7 @@ func (pea *PublicEthAPI) GetBlockByHash(ctx context.Context, hash common.Hash, f
} }
// GetTransactionByHash returns the transaction for the given hash // GetTransactionByHash returns the transaction for the given hash
// SuperNode cannot currently handle pending/tx_pool txs // eth ipfs-blockchain-watcher cannot currently handle pending/tx_pool txs
func (pea *PublicEthAPI) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) { func (pea *PublicEthAPI) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) {
// Try to return an already finalized transaction // Try to return an already finalized transaction
tx, blockHash, blockNumber, index, err := pea.B.GetTransaction(ctx, hash) tx, blockHash, blockNumber, index, err := pea.B.GetTransaction(ctx, hash)
@ -124,7 +124,7 @@ var _ = Describe("API", func() {
}) })
Describe("GetTransactionByHash", func() { Describe("GetTransactionByHash", func() {
It("Retrieves the head block number", func() { It("Retrieves a transaction by hash", func() {
hash := mocks.MockTransactions[0].Hash() hash := mocks.MockTransactions[0].Hash()
tx, err := api.GetTransactionByHash(context.Background(), hash) tx, err := api.GetTransactionByHash(context.Background(), hash)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -158,7 +158,7 @@ func (b *Backend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log
} }
// BlockByNumber returns the requested canonical block. // BlockByNumber returns the requested canonical block.
// Since the SuperNode can contain forked blocks, it is recommended to fetch BlockByHash as // Since the ipfs-blockchain-watcher database can contain forked blocks, it is recommended to fetch BlockByHash as
// fetching by number can return non-deterministic results (returns the first block found at that height) // fetching by number can return non-deterministic results (returns the first block found at that height)
func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber) (*types.Block, error) { func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber) (*types.Block, error) {
var err error var err error
@ -326,12 +326,12 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
// GetTransaction retrieves a tx by hash // GetTransaction retrieves a tx by hash
// It also returns the blockhash, blocknumber, and tx index associated with the transaction // It also returns the blockhash, blocknumber, and tx index associated with the transaction
func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
pgStr := `SELECT transaction_cids.cid, transaction_cids.index, header_cids.block_hash, header_cids.block_number pgStr := `SELECT transaction_cids.mh_key, transaction_cids.index, header_cids.block_hash, header_cids.block_number
FROM eth.transaction_cids, eth.header_cids FROM eth.transaction_cids, eth.header_cids
WHERE transaction_cids.header_id = header_cids.id WHERE transaction_cids.header_id = header_cids.id
AND transaction_cids.tx_hash = $1` AND transaction_cids.tx_hash = $1`
var txCIDWithHeaderInfo struct { var txCIDWithHeaderInfo struct {
CID string `db:"cid"` MhKey string `db:"mh_key"`
Index int64 `db:"index"` Index int64 `db:"index"`
BlockHash string `db:"block_hash"` BlockHash string `db:"block_hash"`
BlockNumber int64 `db:"block_number"` BlockNumber int64 `db:"block_number"`
@ -356,7 +356,7 @@ func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*type
} }
}() }()
txIPLD, err := b.Fetcher.FetchTrxs(tx, []TxModel{{CID: txCIDWithHeaderInfo.CID}}) txIPLD, err := b.Fetcher.FetchTrxs(tx, []TxModel{{MhKey: txCIDWithHeaderInfo.MhKey}})
if err != nil { if err != nil {
return nil, common.Hash{}, 0, 0, err return nil, common.Hash{}, 0, 0, err
} }
@ -186,7 +186,7 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID
results := make([]TxModel, 0) results := make([]TxModel, 0)
id := 1 id := 1
pgStr := fmt.Sprintf(`SELECT transaction_cids.id, transaction_cids.header_id, pgStr := fmt.Sprintf(`SELECT transaction_cids.id, transaction_cids.header_id,
transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.mh_key,
transaction_cids.dst, transaction_cids.src, transaction_cids.index transaction_cids.dst, transaction_cids.src, transaction_cids.index
FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id) FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
WHERE header_cids.id = $%d`, id) WHERE header_cids.id = $%d`, id)
@ -210,8 +210,8 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID
func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID int64, trxIds []int64) ([]ReceiptModel, error) { func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID int64, trxIds []int64) ([]ReceiptModel, error) {
log.Debug("retrieving receipt cids for header id ", headerID) log.Debug("retrieving receipt cids for header id ", headerID)
args := make([]interface{}, 0, 4) args := make([]interface{}, 0, 4)
pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.contract, pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key,
receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s, receipt_cids.contract, receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s,
receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.id WHERE receipt_cids.tx_id = transaction_cids.id
@ -290,8 +290,8 @@ func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter Receip
func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, trxIds []int64) ([]ReceiptModel, error) { func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, trxIds []int64) ([]ReceiptModel, error) {
log.Debug("retrieving receipt cids for block ", blockNumber) log.Debug("retrieving receipt cids for block ", blockNumber)
args := make([]interface{}, 0, 5) args := make([]interface{}, 0, 5)
pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.contract, pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key,
receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s, receipt_cids.contract, receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s,
receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.id WHERE receipt_cids.tx_id = transaction_cids.id
@ -387,7 +387,7 @@ func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter,
log.Debug("retrieving state cids for header id ", headerID) log.Debug("retrieving state cids for header id ", headerID)
args := make([]interface{}, 0, 2) args := make([]interface{}, 0, 2)
pgStr := `SELECT state_cids.id, state_cids.header_id, pgStr := `SELECT state_cids.id, state_cids.header_id,
state_cids.state_leaf_key, state_cids.node_type, state_cids.cid, state_cids.state_path state_cids.state_leaf_key, state_cids.node_type, state_cids.cid, state_cids.mh_key, state_cids.state_path
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id) FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
WHERE header_cids.id = $1` WHERE header_cids.id = $1`
args = append(args, headerID) args = append(args, headerID)
@ -411,8 +411,8 @@ func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter,
func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID int64) ([]StorageNodeWithStateKeyModel, error) { func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID int64) ([]StorageNodeWithStateKeyModel, error) {
log.Debug("retrieving storage cids for header id ", headerID) log.Debug("retrieving storage cids for header id ", headerID)
args := make([]interface{}, 0, 3) args := make([]interface{}, 0, 3)
pgStr := `SELECT storage_cids.id, storage_cids.state_id, storage_cids.storage_leaf_key, pgStr := `SELECT storage_cids.id, storage_cids.state_id, storage_cids.storage_leaf_key, storage_cids.node_type,
storage_cids.node_type, storage_cids.cid, storage_cids.storage_path, state_cids.state_leaf_key storage_cids.cid, storage_cids.mh_key, storage_cids.storage_path, state_cids.state_leaf_key
FROM eth.storage_cids, eth.state_cids, eth.header_cids FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE storage_cids.state_id = state_cids.id WHERE storage_cids.state_id = state_cids.id
AND state_cids.header_id = header_cids.id AND state_cids.header_id = header_cids.id
@ -443,7 +443,7 @@ func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageF
// RetrieveGapsInData is used to find the block numbers at which we are missing data in the db // RetrieveGapsInData is used to find the block numbers at which we are missing data in the db
// it finds the union of heights where no data exists and where the times_validated is lower than the validation level // it finds the union of heights where no data exists and where the times_validated is lower than the validation level
func (ecr *CIDRetriever) RetrieveGapsInData(validationLevel int) ([]shared.Gap, error) { func (ecr *CIDRetriever) RetrieveGapsInData(validationLevel int) ([]shared.Gap, error) {
log.Info("searching for gaps in the eth super node database") log.Info("searching for gaps in the eth ipfs watcher database")
startingBlock, err := ecr.RetrieveFirstBlockNumber() startingBlock, err := ecr.RetrieveFirstBlockNumber()
if err != nil { if err != nil {
return nil, fmt.Errorf("eth CIDRetriever RetrieveFirstBlockNumber error: %v", err) return nil, fmt.Errorf("eth CIDRetriever RetrieveFirstBlockNumber error: %v", err)
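The body of RetrieveGapsInData is truncated in this hunk. For orientation, a sketch of the two halves its doc comment describes, written as illustrative SQL in Go constants (this sketch's own wording, not the query from this file):

    // illustrative only: gaps between consecutive indexed heights
    const gapSketch = `
        SELECT block_number + 1 AS gap_start, next_bn - 1 AS gap_stop
        FROM (
            SELECT block_number,
                   LEAD(block_number) OVER (ORDER BY block_number) AS next_bn
            FROM (SELECT DISTINCT block_number FROM eth.header_cids) d
        ) h
        WHERE next_bn - block_number > 1`

    // unioned with heights validated fewer times than the requested level
    const underValidatedSketch = `
        SELECT DISTINCT block_number FROM eth.header_cids
        WHERE times_validated < $1`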
@ -607,8 +607,8 @@ func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID int64) (
// RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs // RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs
func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txIDs []int64) ([]ReceiptModel, error) { func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txIDs []int64) ([]ReceiptModel, error) {
log.Debugf("retrieving receipt cids for tx ids %v", txIDs) log.Debugf("retrieving receipt cids for tx ids %v", txIDs)
pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.contract, pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key,
receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s, receipt_cids.contract, receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s,
receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts
FROM eth.receipt_cids, eth.transaction_cids FROM eth.receipt_cids, eth.transaction_cids
WHERE tx_id = ANY($1::INTEGER[]) WHERE tx_id = ANY($1::INTEGER[])
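A plausible call-site continuation for the query above, assuming lib/pq for the array binding (pq.StringArray already appears in the model definitions later in this diff):

    receipts := make([]ReceiptModel, 0, len(txIDs))
    // pq.Array adapts []int64 to the ANY($1::INTEGER[]) parameter above
    if err := tx.Select(&receipts, pgStr, pq.Array(txIDs)); err != nil {
        return nil, err
    }
    return receipts, nil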
View File
@ -19,6 +19,8 @@ package eth_test
import ( import (
"math/big" "math/big"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
@ -209,14 +211,14 @@ var (
var _ = Describe("Retriever", func() { var _ = Describe("Retriever", func() {
var ( var (
db *postgres.DB db *postgres.DB
repo *eth2.CIDIndexer repo *eth2.IPLDPublisherAndIndexer
retriever *eth2.CIDRetriever retriever *eth2.CIDRetriever
) )
BeforeEach(func() { BeforeEach(func() {
var err error var err error
db, err = shared.SetupDB() db, err = shared.SetupDB()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
repo = eth2.NewCIDIndexer(db) repo = eth2.NewIPLDPublisherAndIndexer(db)
retriever = eth2.NewCIDRetriever(db) retriever = eth2.NewCIDRetriever(db)
}) })
AfterEach(func() { AfterEach(func() {
@ -225,7 +227,7 @@ var _ = Describe("Retriever", func() {
Describe("Retrieve", func() { Describe("Retrieve", func() {
BeforeEach(func() { BeforeEach(func() {
err := repo.Index(mocks.MockCIDPayload) _, err := repo.Publish(mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
}) })
It("Retrieves all CIDs for the given blocknumber when provided an open filter", func() { It("Retrieves all CIDs for the given blocknumber when provided an open filter", func() {
@ -395,6 +397,7 @@ var _ = Describe("Retriever", func() {
NodeType: 2, NodeType: 2,
StateKey: common.BytesToHash(mocks.AccountLeafKey).Hex(), StateKey: common.BytesToHash(mocks.AccountLeafKey).Hex(),
CID: mocks.State2CID.String(), CID: mocks.State2CID.String(),
MhKey: mocks.State2MhKey,
Path: []byte{'\x0c'}, Path: []byte{'\x0c'},
})) }))
@ -405,8 +408,12 @@ var _ = Describe("Retriever", func() {
}) })
Describe("RetrieveFirstBlockNumber", func() { Describe("RetrieveFirstBlockNumber", func() {
It("Throws an error if there are no blocks in the database", func() {
_, err := retriever.RetrieveFirstBlockNumber()
Expect(err).To(HaveOccurred())
})
It("Gets the number of the first block that has data in the database", func() { It("Gets the number of the first block that has data in the database", func() {
err := repo.Index(mocks.MockCIDPayload) _, err := repo.Publish(mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber() num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -414,9 +421,9 @@ var _ = Describe("Retriever", func() {
}) })
It("Gets the number of the first block that has data in the database", func() { It("Gets the number of the first block that has data in the database", func() {
payload := *mocks.MockCIDPayload payload := mocks.MockConvertedPayload
payload.HeaderCID.BlockNumber = "1010101" payload.Block = newMockBlock(1010101)
err := repo.Index(&payload) _, err := repo.Publish(payload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber() num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -424,13 +431,13 @@ var _ = Describe("Retriever", func() {
}) })
It("Gets the number of the first block that has data in the database", func() { It("Gets the number of the first block that has data in the database", func() {
payload1 := *mocks.MockCIDPayload payload1 := mocks.MockConvertedPayload
payload1.HeaderCID.BlockNumber = "1010101" payload1.Block = newMockBlock(1010101)
payload2 := payload1 payload2 := payload1
payload2.HeaderCID.BlockNumber = "5" payload2.Block = newMockBlock(5)
err := repo.Index(&payload1) _, err := repo.Publish(payload1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload2) _, err = repo.Publish(payload2)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber() num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -439,8 +446,12 @@ var _ = Describe("Retriever", func() {
}) })
Describe("RetrieveLastBlockNumber", func() { Describe("RetrieveLastBlockNumber", func() {
It("Throws an error if there are no blocks in the database", func() {
_, err := retriever.RetrieveLastBlockNumber()
Expect(err).To(HaveOccurred())
})
It("Gets the number of the latest block that has data in the database", func() { It("Gets the number of the latest block that has data in the database", func() {
err := repo.Index(mocks.MockCIDPayload) _, err := repo.Publish(mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber() num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -448,9 +459,9 @@ var _ = Describe("Retriever", func() {
}) })
It("Gets the number of the latest block that has data in the database", func() { It("Gets the number of the latest block that has data in the database", func() {
payload := *mocks.MockCIDPayload payload := mocks.MockConvertedPayload
payload.HeaderCID.BlockNumber = "1010101" payload.Block = newMockBlock(1010101)
err := repo.Index(&payload) _, err := repo.Publish(payload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber() num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -458,13 +469,13 @@ var _ = Describe("Retriever", func() {
}) })
It("Gets the number of the latest block that has data in the database", func() { It("Gets the number of the latest block that has data in the database", func() {
payload1 := *mocks.MockCIDPayload payload1 := mocks.MockConvertedPayload
payload1.HeaderCID.BlockNumber = "1010101" payload1.Block = newMockBlock(1010101)
payload2 := payload1 payload2 := payload1
payload2.HeaderCID.BlockNumber = "5" payload2.Block = newMockBlock(5)
err := repo.Index(&payload1) _, err := repo.Publish(payload1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload2) _, err = repo.Publish(payload2)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber() num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -474,21 +485,20 @@ var _ = Describe("Retriever", func() {
Describe("RetrieveGapsInData", func() { Describe("RetrieveGapsInData", func() {
It("Doesn't return gaps if there are none", func() { It("Doesn't return gaps if there are none", func() {
payload0 := *mocks.MockCIDPayload payload0 := mocks.MockConvertedPayload
payload0.HeaderCID.BlockNumber = "0" payload0.Block = newMockBlock(0)
payload1 := *mocks.MockCIDPayload payload1 := mocks.MockConvertedPayload
payload1.HeaderCID.BlockNumber = "1"
payload2 := payload1 payload2 := payload1
payload2.HeaderCID.BlockNumber = "2" payload2.Block = newMockBlock(2)
payload3 := payload2 payload3 := payload2
payload3.HeaderCID.BlockNumber = "3" payload3.Block = newMockBlock(3)
err := repo.Index(&payload0) _, err := repo.Publish(payload0)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload1) _, err = repo.Publish(payload1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload2) _, err = repo.Publish(payload2)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload3) _, err = repo.Publish(payload3)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
gaps, err := retriever.RetrieveGapsInData(1) gaps, err := retriever.RetrieveGapsInData(1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -496,9 +506,9 @@ var _ = Describe("Retriever", func() {
}) })
It("Returns the gap from 0 to the earliest block", func() { It("Returns the gap from 0 to the earliest block", func() {
payload := *mocks.MockCIDPayload payload := mocks.MockConvertedPayload
payload.HeaderCID.BlockNumber = "5" payload.Block = newMockBlock(5)
err := repo.Index(&payload) _, err := repo.Publish(payload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
gaps, err := retriever.RetrieveGapsInData(1) gaps, err := retriever.RetrieveGapsInData(1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -508,17 +518,16 @@ var _ = Describe("Retriever", func() {
}) })
It("Can handle single block gaps", func() { It("Can handle single block gaps", func() {
payload0 := *mocks.MockCIDPayload payload0 := mocks.MockConvertedPayload
payload0.HeaderCID.BlockNumber = "0" payload0.Block = newMockBlock(0)
payload1 := *mocks.MockCIDPayload payload1 := mocks.MockConvertedPayload
payload1.HeaderCID.BlockNumber = "1"
payload3 := payload1 payload3 := payload1
payload3.HeaderCID.BlockNumber = "3" payload3.Block = newMockBlock(3)
err := repo.Index(&payload0) _, err := repo.Publish(payload0)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload1) _, err = repo.Publish(payload1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload3) _, err = repo.Publish(payload3)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
gaps, err := retriever.RetrieveGapsInData(1) gaps, err := retriever.RetrieveGapsInData(1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -528,13 +537,13 @@ var _ = Describe("Retriever", func() {
}) })
It("Finds gap between two entries", func() { It("Finds gap between two entries", func() {
payload1 := *mocks.MockCIDPayload payload1 := mocks.MockConvertedPayload
payload1.HeaderCID.BlockNumber = "1010101" payload1.Block = newMockBlock(1010101)
payload2 := payload1 payload2 := payload1
payload2.HeaderCID.BlockNumber = "0" payload2.Block = newMockBlock(0)
err := repo.Index(&payload1) _, err := repo.Publish(payload1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload2) _, err = repo.Publish(payload2)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
gaps, err := retriever.RetrieveGapsInData(1) gaps, err := retriever.RetrieveGapsInData(1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -544,49 +553,50 @@ var _ = Describe("Retriever", func() {
}) })
It("Finds gaps between multiple entries", func() { It("Finds gaps between multiple entries", func() {
payload := *mocks.MockCIDPayload payload1 := mocks.MockConvertedPayload
payload.HeaderCID.BlockNumber = "1010101" payload1.Block = newMockBlock(1010101)
payload1 := payload payload2 := mocks.MockConvertedPayload
payload1.HeaderCID.BlockNumber = "1" payload2.Block = newMockBlock(1)
payload2 := payload1 payload3 := mocks.MockConvertedPayload
payload2.HeaderCID.BlockNumber = "5" payload3.Block = newMockBlock(5)
payload3 := payload2 payload4 := mocks.MockConvertedPayload
payload3.HeaderCID.BlockNumber = "100" payload4.Block = newMockBlock(100)
payload4 := payload3 payload5 := mocks.MockConvertedPayload
payload4.HeaderCID.BlockNumber = "101" payload5.Block = newMockBlock(101)
payload5 := payload4 payload6 := mocks.MockConvertedPayload
payload5.HeaderCID.BlockNumber = "102" payload6.Block = newMockBlock(102)
payload6 := payload4 payload7 := mocks.MockConvertedPayload
payload6.HeaderCID.BlockNumber = "103" payload7.Block = newMockBlock(103)
payload7 := payload4 payload8 := mocks.MockConvertedPayload
payload7.HeaderCID.BlockNumber = "104" payload8.Block = newMockBlock(104)
payload8 := payload4 payload9 := mocks.MockConvertedPayload
payload8.HeaderCID.BlockNumber = "105" payload9.Block = newMockBlock(105)
payload9 := payload4 payload10 := mocks.MockConvertedPayload
payload9.HeaderCID.BlockNumber = "106" payload10.Block = newMockBlock(106)
payload10 := payload5 payload11 := mocks.MockConvertedPayload
payload10.HeaderCID.BlockNumber = "1000" payload11.Block = newMockBlock(1000)
err := repo.Index(&payload)
_, err := repo.Publish(payload1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload1) _, err = repo.Publish(payload2)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload2) _, err = repo.Publish(payload3)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload3) _, err = repo.Publish(payload4)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload4) _, err = repo.Publish(payload5)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload5) _, err = repo.Publish(payload6)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload6) _, err = repo.Publish(payload7)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload7) _, err = repo.Publish(payload8)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload8) _, err = repo.Publish(payload9)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload9) _, err = repo.Publish(payload10)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload10) _, err = repo.Publish(payload11)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
gaps, err := retriever.RetrieveGapsInData(1) gaps, err := retriever.RetrieveGapsInData(1)
@ -600,61 +610,63 @@ var _ = Describe("Retriever", func() {
}) })
It("Finds validation level gaps", func() { It("Finds validation level gaps", func() {
payload := *mocks.MockCIDPayload
payload.HeaderCID.BlockNumber = "1010101" payload1 := mocks.MockConvertedPayload
payload1 := payload payload1.Block = newMockBlock(1010101)
payload1.HeaderCID.BlockNumber = "1" payload2 := mocks.MockConvertedPayload
payload2 := payload1 payload2.Block = newMockBlock(1)
payload2.HeaderCID.BlockNumber = "5" payload3 := mocks.MockConvertedPayload
payload3 := payload2 payload3.Block = newMockBlock(5)
payload3.HeaderCID.BlockNumber = "100" payload4 := mocks.MockConvertedPayload
payload4 := payload3 payload4.Block = newMockBlock(100)
payload4.HeaderCID.BlockNumber = "101" payload5 := mocks.MockConvertedPayload
payload5 := payload4 payload5.Block = newMockBlock(101)
payload5.HeaderCID.BlockNumber = "102" payload6 := mocks.MockConvertedPayload
payload6 := payload4 payload6.Block = newMockBlock(102)
payload6.HeaderCID.BlockNumber = "103" payload7 := mocks.MockConvertedPayload
payload7 := payload4 payload7.Block = newMockBlock(103)
payload7.HeaderCID.BlockNumber = "104" payload8 := mocks.MockConvertedPayload
payload8 := payload4 payload8.Block = newMockBlock(104)
payload8.HeaderCID.BlockNumber = "105" payload9 := mocks.MockConvertedPayload
payload9 := payload4 payload9.Block = newMockBlock(105)
payload9.HeaderCID.BlockNumber = "106" payload10 := mocks.MockConvertedPayload
payload10 := payload4 payload10.Block = newMockBlock(106)
payload10.HeaderCID.BlockNumber = "107" payload11 := mocks.MockConvertedPayload
payload11 := payload4 payload11.Block = newMockBlock(107)
payload11.HeaderCID.BlockNumber = "108" payload12 := mocks.MockConvertedPayload
payload12 := payload4 payload12.Block = newMockBlock(108)
payload12.HeaderCID.BlockNumber = "109" payload13 := mocks.MockConvertedPayload
payload13 := payload5 payload13.Block = newMockBlock(109)
payload13.HeaderCID.BlockNumber = "1000" payload14 := mocks.MockConvertedPayload
err := repo.Index(&payload) payload14.Block = newMockBlock(1000)
_, err := repo.Publish(payload1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload1) _, err = repo.Publish(payload2)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload2) _, err = repo.Publish(payload3)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload3) _, err = repo.Publish(payload4)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload4) _, err = repo.Publish(payload5)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload5) _, err = repo.Publish(payload6)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload6) _, err = repo.Publish(payload7)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload7) _, err = repo.Publish(payload8)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload8) _, err = repo.Publish(payload9)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload9) _, err = repo.Publish(payload10)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload10) _, err = repo.Publish(payload11)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload11) _, err = repo.Publish(payload12)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload12) _, err = repo.Publish(payload13)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(&payload13) _, err = repo.Publish(payload14)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
cleaner := eth.NewCleaner(db) cleaner := eth.NewCleaner(db)
@ -675,3 +687,9 @@ var _ = Describe("Retriever", func() {
}) })
}) })
}) })
func newMockBlock(blockNumber uint64) *types.Block {
	// copy the shared mock header and give the copy its own Number so the
	// mutation does not leak into mocks.MockHeader across test cases
	header := mocks.MockHeader
	header.Number = new(big.Int).SetUint64(blockNumber)
	return types.NewBlock(&header, mocks.MockTransactions, nil, mocks.MockReceipts)
}
View File
@ -243,7 +243,7 @@ func (c *Cleaner) cleanFull(tx *sqlx.Tx, rng [2]uint64) error {
func (c *Cleaner) cleanStorageIPLDs(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanStorageIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A pgStr := `DELETE FROM public.blocks A
USING eth.storage_cids B, eth.state_cids C, eth.header_cids D USING eth.storage_cids B, eth.state_cids C, eth.header_cids D
WHERE A.key = B.cid WHERE A.key = B.mh_key
AND B.state_id = C.id AND B.state_id = C.id
AND C.header_id = D.id AND C.header_id = D.id
AND D.block_number BETWEEN $1 AND $2` AND D.block_number BETWEEN $1 AND $2`
@ -264,7 +264,7 @@ func (c *Cleaner) cleanStorageMetaData(tx *sqlx.Tx, rng [2]uint64) error {
func (c *Cleaner) cleanStateIPLDs(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanStateIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A pgStr := `DELETE FROM public.blocks A
USING eth.state_cids B, eth.header_cids C USING eth.state_cids B, eth.header_cids C
WHERE A.key = B.cid WHERE A.key = B.mh_key
AND B.header_id = C.id AND B.header_id = C.id
AND C.block_number BETWEEN $1 AND $2` AND C.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1]) _, err := tx.Exec(pgStr, rng[0], rng[1])
@ -283,7 +283,7 @@ func (c *Cleaner) cleanStateMetaData(tx *sqlx.Tx, rng [2]uint64) error {
func (c *Cleaner) cleanReceiptIPLDs(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanReceiptIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A pgStr := `DELETE FROM public.blocks A
USING eth.receipt_cids B, eth.transaction_cids C, eth.header_cids D USING eth.receipt_cids B, eth.transaction_cids C, eth.header_cids D
WHERE A.key = B.cid WHERE A.key = B.mh_key
AND B.tx_id = C.id AND B.tx_id = C.id
AND C.header_id = D.id AND C.header_id = D.id
AND D.block_number BETWEEN $1 AND $2` AND D.block_number BETWEEN $1 AND $2`
@ -304,7 +304,7 @@ func (c *Cleaner) cleanReceiptMetaData(tx *sqlx.Tx, rng [2]uint64) error {
func (c *Cleaner) cleanTransactionIPLDs(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanTransactionIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A pgStr := `DELETE FROM public.blocks A
USING eth.transaction_cids B, eth.header_cids C USING eth.transaction_cids B, eth.header_cids C
WHERE A.key = B.cid WHERE A.key = B.mh_key
AND B.header_id = C.id AND B.header_id = C.id
AND C.block_number BETWEEN $1 AND $2` AND C.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1]) _, err := tx.Exec(pgStr, rng[0], rng[1])
@ -323,7 +323,7 @@ func (c *Cleaner) cleanTransactionMetaData(tx *sqlx.Tx, rng [2]uint64) error {
func (c *Cleaner) cleanUncleIPLDs(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanUncleIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A pgStr := `DELETE FROM public.blocks A
USING eth.uncle_cids B, eth.header_cids C USING eth.uncle_cids B, eth.header_cids C
WHERE A.key = B.cid WHERE A.key = B.mh_key
AND B.header_id = C.id AND B.header_id = C.id
AND C.block_number BETWEEN $1 AND $2` AND C.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1]) _, err := tx.Exec(pgStr, rng[0], rng[1])
@ -342,7 +342,7 @@ func (c *Cleaner) cleanUncleMetaData(tx *sqlx.Tx, rng [2]uint64) error {
func (c *Cleaner) cleanHeaderIPLDs(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanHeaderIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A pgStr := `DELETE FROM public.blocks A
USING eth.header_cids B USING eth.header_cids B
WHERE A.key = B.cid WHERE A.key = B.mh_key
AND B.block_number BETWEEN $1 AND $2` AND B.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1]) _, err := tx.Exec(pgStr, rng[0], rng[1])
return err return err
View File
@ -34,47 +34,55 @@ var (
// header variables // header variables
blockHash1 = crypto.Keccak256Hash([]byte{00, 02}) blockHash1 = crypto.Keccak256Hash([]byte{00, 02})
blocKNumber1 = big.NewInt(0) blocKNumber1 = big.NewInt(0)
headerCID1 = "mockHeader1CID" headerCID1 = shared.TestCID([]byte("mockHeader1CID"))
headerMhKey1 = shared.MultihashKeyFromCID(headerCID1)
parentHash = crypto.Keccak256Hash([]byte{00, 01}) parentHash = crypto.Keccak256Hash([]byte{00, 01})
totalDifficulty = "50000000000000000000" totalDifficulty = "50000000000000000000"
reward = "5000000000000000000" reward = "5000000000000000000"
headerModel = eth.HeaderModel{ headerModel = eth.HeaderModel{
BlockHash: blockHash1.String(), BlockHash: blockHash1.String(),
BlockNumber: blocKNumber1.String(), BlockNumber: blocKNumber1.String(),
CID: headerCID1, CID: headerCID1.String(),
MhKey: headerMhKey1,
ParentHash: parentHash.String(), ParentHash: parentHash.String(),
TotalDifficulty: totalDifficulty, TotalDifficulty: totalDifficulty,
Reward: reward, Reward: reward,
} }
// tx variables // tx variables
tx1CID = "mockTx1CID" tx1CID = shared.TestCID([]byte("mockTx1CID"))
tx2CID = "mockTx2CID" tx1MhKey = shared.MultihashKeyFromCID(tx1CID)
tx2CID = shared.TestCID([]byte("mockTx2CID"))
tx2MhKey = shared.MultihashKeyFromCID(tx2CID)
tx1Hash = crypto.Keccak256Hash([]byte{01, 01}) tx1Hash = crypto.Keccak256Hash([]byte{01, 01})
tx2Hash = crypto.Keccak256Hash([]byte{01, 02}) tx2Hash = crypto.Keccak256Hash([]byte{01, 02})
txSrc = common.HexToAddress("0x010a") txSrc = common.HexToAddress("0x010a")
txDst = common.HexToAddress("0x020a") txDst = common.HexToAddress("0x020a")
txModels1 = []eth.TxModel{ txModels1 = []eth.TxModel{
{ {
CID: tx1CID, CID: tx1CID.String(),
MhKey: tx1MhKey,
TxHash: tx1Hash.String(), TxHash: tx1Hash.String(),
Index: 0, Index: 0,
}, },
{ {
CID: tx2CID, CID: tx2CID.String(),
MhKey: tx2MhKey,
TxHash: tx2Hash.String(), TxHash: tx2Hash.String(),
Index: 1, Index: 1,
}, },
} }
// uncle variables // uncle variables
uncleCID = "mockUncle1CID" uncleCID = shared.TestCID([]byte("mockUncle1CID"))
uncleMhKey = shared.MultihashKeyFromCID(uncleCID)
uncleHash = crypto.Keccak256Hash([]byte{02, 02}) uncleHash = crypto.Keccak256Hash([]byte{02, 02})
uncleParentHash = crypto.Keccak256Hash([]byte{02, 01}) uncleParentHash = crypto.Keccak256Hash([]byte{02, 01})
uncleReward = "1000000000000000000" uncleReward = "1000000000000000000"
uncleModels1 = []eth.UncleModel{ uncleModels1 = []eth.UncleModel{
{ {
CID: uncleCID, CID: uncleCID.String(),
MhKey: uncleMhKey,
Reward: uncleReward, Reward: uncleReward,
BlockHash: uncleHash.String(), BlockHash: uncleHash.String(),
ParentHash: uncleParentHash.String(), ParentHash: uncleParentHash.String(),
@ -82,37 +90,45 @@ var (
} }
// receipt variables // receipt variables
rct1CID = "mockRct1CID" rct1CID = shared.TestCID([]byte("mockRct1CID"))
rct2CID = "mockRct2CID" rct1MhKey = shared.MultihashKeyFromCID(rct1CID)
rct2CID = shared.TestCID([]byte("mockRct2CID"))
rct2MhKey = shared.MultihashKeyFromCID(rct2CID)
rct1Contract = common.Address{} rct1Contract = common.Address{}
rct2Contract = common.HexToAddress("0x010c") rct2Contract = common.HexToAddress("0x010c")
receiptModels1 = map[common.Hash]eth.ReceiptModel{ receiptModels1 = map[common.Hash]eth.ReceiptModel{
tx1Hash: { tx1Hash: {
CID: rct1CID, CID: rct1CID.String(),
MhKey: rct1MhKey,
ContractHash: crypto.Keccak256Hash(rct1Contract.Bytes()).String(), ContractHash: crypto.Keccak256Hash(rct1Contract.Bytes()).String(),
}, },
tx2Hash: { tx2Hash: {
CID: rct2CID, CID: rct2CID.String(),
MhKey: rct2MhKey,
ContractHash: crypto.Keccak256Hash(rct2Contract.Bytes()).String(), ContractHash: crypto.Keccak256Hash(rct2Contract.Bytes()).String(),
}, },
} }
// state variables // state variables
state1CID1 = "mockState1CID1" state1CID1 = shared.TestCID([]byte("mockState1CID1"))
state1MhKey1 = shared.MultihashKeyFromCID(state1CID1)
state1Path = []byte{'\x01'} state1Path = []byte{'\x01'}
state1Key = crypto.Keccak256Hash(txSrc.Bytes()) state1Key = crypto.Keccak256Hash(txSrc.Bytes())
state2CID1 = "mockState2CID1" state2CID1 = shared.TestCID([]byte("mockState2CID1"))
state2MhKey1 = shared.MultihashKeyFromCID(state2CID1)
state2Path = []byte{'\x02'} state2Path = []byte{'\x02'}
state2Key = crypto.Keccak256Hash(txDst.Bytes()) state2Key = crypto.Keccak256Hash(txDst.Bytes())
stateModels1 = []eth.StateNodeModel{ stateModels1 = []eth.StateNodeModel{
{ {
CID: state1CID1, CID: state1CID1.String(),
MhKey: state1MhKey1,
Path: state1Path, Path: state1Path,
NodeType: 2, NodeType: 2,
StateKey: state1Key.String(), StateKey: state1Key.String(),
}, },
{ {
CID: state2CID1, CID: state2CID1.String(),
MhKey: state2MhKey1,
Path: state2Path, Path: state2Path,
NodeType: 2, NodeType: 2,
StateKey: state2Key.String(), StateKey: state2Key.String(),
@ -120,13 +136,15 @@ var (
} }
// storage variables // storage variables
storageCID = "mockStorageCID1" storageCID = shared.TestCID([]byte("mockStorageCID1"))
storageMhKey = shared.MultihashKeyFromCID(storageCID)
storagePath = []byte{'\x01'} storagePath = []byte{'\x01'}
storageKey = crypto.Keccak256Hash(common.Hex2Bytes("0x0000000000000000000000000000000000000000000000000000000000000000")) storageKey = crypto.Keccak256Hash(common.Hex2Bytes("0x0000000000000000000000000000000000000000000000000000000000000000"))
storageModels1 = map[string][]eth.StorageNodeModel{ storageModels1 = map[string][]eth.StorageNodeModel{
common.Bytes2Hex(state1Path): { common.Bytes2Hex(state1Path): {
{ {
CID: storageCID, CID: storageCID.String(),
MhKey: storageMhKey,
StorageKey: storageKey.String(), StorageKey: storageKey.String(),
Path: storagePath, Path: storagePath,
NodeType: 2, NodeType: 2,
@ -146,39 +164,47 @@ var (
// header variables // header variables
blockHash2 = crypto.Keccak256Hash([]byte{00, 03}) blockHash2 = crypto.Keccak256Hash([]byte{00, 03})
blocKNumber2 = big.NewInt(1) blocKNumber2 = big.NewInt(1)
headerCID2 = "mockHeaderCID2" headerCID2 = shared.TestCID([]byte("mockHeaderCID2"))
headerMhKey2 = shared.MultihashKeyFromCID(headerCID2)
headerModel2 = eth.HeaderModel{ headerModel2 = eth.HeaderModel{
BlockHash: blockHash2.String(), BlockHash: blockHash2.String(),
BlockNumber: blocKNumber2.String(), BlockNumber: blocKNumber2.String(),
CID: headerCID2, CID: headerCID2.String(),
MhKey: headerMhKey2,
ParentHash: blockHash1.String(), ParentHash: blockHash1.String(),
TotalDifficulty: totalDifficulty, TotalDifficulty: totalDifficulty,
Reward: reward, Reward: reward,
} }
// tx variables // tx variables
tx3CID = "mockTx3CID" tx3CID = shared.TestCID([]byte("mockTx3CID"))
tx3MhKey = shared.MultihashKeyFromCID(tx3CID)
tx3Hash = crypto.Keccak256Hash([]byte{01, 03}) tx3Hash = crypto.Keccak256Hash([]byte{01, 03})
txModels2 = []eth.TxModel{ txModels2 = []eth.TxModel{
{ {
CID: tx3CID, CID: tx3CID.String(),
MhKey: tx3MhKey,
TxHash: tx3Hash.String(), TxHash: tx3Hash.String(),
Index: 0, Index: 0,
}, },
} }
// receipt variables // receipt variables
rct3CID = "mockRct3CID" rct3CID = shared.TestCID([]byte("mockRct3CID"))
rct3MhKey = shared.MultihashKeyFromCID(rct3CID)
receiptModels2 = map[common.Hash]eth.ReceiptModel{ receiptModels2 = map[common.Hash]eth.ReceiptModel{
tx3Hash: { tx3Hash: {
CID: rct3CID, CID: rct3CID.String(),
MhKey: rct3MhKey,
ContractHash: crypto.Keccak256Hash(rct1Contract.Bytes()).String(), ContractHash: crypto.Keccak256Hash(rct1Contract.Bytes()).String(),
}, },
} }
// state variables // state variables
state1CID2 = "mockState1CID2" state1CID2 = shared.TestCID([]byte("mockState1CID2"))
state1MhKey2 = shared.MultihashKeyFromCID(state1CID2)
stateModels2 = []eth.StateNodeModel{ stateModels2 = []eth.StateNodeModel{
{ {
CID: state1CID2, CID: state1CID2.String(),
MhKey: state1MhKey2,
Path: state1Path, Path: state1Path,
NodeType: 2, NodeType: 2,
StateKey: state1Key.String(), StateKey: state1Key.String(),
@ -190,21 +216,21 @@ var (
ReceiptCIDs: receiptModels2, ReceiptCIDs: receiptModels2,
StateNodeCIDs: stateModels2, StateNodeCIDs: stateModels2,
} }
rngs = [][2]uint64{{0, 1}} rngs = [][2]uint64{{0, 1}}
cids = []string{ mhKeys = []string{
headerCID1, headerMhKey1,
headerCID2, headerMhKey2,
uncleCID, uncleMhKey,
tx1CID, tx1MhKey,
tx2CID, tx2MhKey,
tx3CID, tx3MhKey,
rct1CID, rct1MhKey,
rct2CID, rct2MhKey,
rct3CID, rct3MhKey,
state1CID1, state1MhKey1,
state2CID1, state2MhKey1,
state1CID2, state1MhKey2,
storageCID, storageMhKey,
} }
mockData = []byte{'\x01'} mockData = []byte{'\x01'}
) )
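shared.TestCID converts what used to be placeholder strings into real CIDs so that MultihashKeyFromCID has an actual multihash to key on; its body is not shown in this diff. A plausible sketch (the codec and hash choices here are this sketch's assumptions):

    // sketch: wrap arbitrary test bytes in a CIDv1 for fixture data
    func testCID(b []byte) cid.Cid {
        pref := cid.Prefix{
            Version:  1,
            Codec:    cid.Raw,
            MhType:   multihash.KECCAK_256,
            MhLength: -1,
        }
        c, _ := pref.Sum(b) // error ignored in test-fixture context
        return c
    }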
@ -224,16 +250,16 @@ var _ = Describe("Cleaner", func() {
}) })
Describe("Clean", func() { Describe("Clean", func() {
BeforeEach(func() { BeforeEach(func() {
for _, key := range mhKeys {
_, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, key, mockData)
Expect(err).ToNot(HaveOccurred())
}
err := repo.Index(mockCIDPayload1) err := repo.Index(mockCIDPayload1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(mockCIDPayload2) err = repo.Index(mockCIDPayload2)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
for _, cid := range cids {
_, err = db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, cid, mockData)
Expect(err).ToNot(HaveOccurred())
}
tx, err := db.Beginx() tx, err := db.Beginx()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -613,6 +639,11 @@ var _ = Describe("Cleaner", func() {
Describe("ResetValidation", func() { Describe("ResetValidation", func() {
BeforeEach(func() { BeforeEach(func() {
for _, key := range mhKeys {
_, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, key, mockData)
Expect(err).ToNot(HaveOccurred())
}
err := repo.Index(mockCIDPayload1) err := repo.Index(mockCIDPayload1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = repo.Index(mockCIDPayload2) err = repo.Index(mockCIDPayload2)
View File
@ -72,8 +72,8 @@ func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.Convert
return nil, err return nil, err
} }
txMeta := TxModel{ txMeta := TxModel{
Dst: shared.HandleNullAddrPointer(trx.To()), Dst: shared.HandleZeroAddrPointer(trx.To()),
Src: shared.HandleNullAddr(from), Src: shared.HandleZeroAddr(from),
TxHash: trx.Hash().String(), TxHash: trx.Hash().String(),
Index: int64(i), Index: int64(i),
} }
@ -106,7 +106,7 @@ func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.Convert
logContracts = append(logContracts, addr) logContracts = append(logContracts, addr)
} }
// This is the contract address if this receipt is for a contract creation tx // This is the contract address if this receipt is for a contract creation tx
contract := shared.HandleNullAddr(receipt.ContractAddress) contract := shared.HandleZeroAddr(receipt.ContractAddress)
var contractHash string var contractHash string
if contract != "" { if contract != "" {
contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String() contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
View File
@ -25,9 +25,9 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
func TestETHSuperNode(t *testing.T) { func TestETHWatcher(t *testing.T) {
RegisterFailHandler(Fail) RegisterFailHandler(Fail)
RunSpecs(t, "Super Node ETH Suite Test") RunSpecs(t, "ETH IPFS Watcher Suite Test")
} }
var _ = BeforeSuite(func() { var _ = BeforeSuite(func() {
View File
@ -90,29 +90,29 @@ func (in *CIDIndexer) Index(cids shared.CIDsForIndexing) error {
func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel) (int64, error) { func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel) (int64, error) {
var headerID int64 var headerID int64
err := tx.QueryRowx(`INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, times_validated) err := tx.QueryRowx(`INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, times_validated) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, eth.header_cids.times_validated + 1) ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1)
RETURNING id`, RETURNING id`,
header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, in.db.NodeID, header.Reward, header.StateRoot, header.TxRoot, header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, in.db.NodeID, header.Reward, header.StateRoot, header.TxRoot,
header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, 1).Scan(&headerID) header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, header.MhKey, 1).Scan(&headerID)
return headerID, err return headerID, err
} }
func (in *CIDIndexer) indexUncleCID(tx *sqlx.Tx, uncle UncleModel, headerID int64) error { func (in *CIDIndexer) indexUncleCID(tx *sqlx.Tx, uncle UncleModel, headerID int64) error {
_, err := tx.Exec(`INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward) VALUES ($1, $2, $3, $4, $5) _, err := tx.Exec(`INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6)
ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward) = ($3, $4, $5)`, ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward, mh_key) = ($3, $4, $5, $6)`,
uncle.BlockHash, headerID, uncle.ParentHash, uncle.CID, uncle.Reward) uncle.BlockHash, headerID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey)
return err return err
} }
func (in *CIDIndexer) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error { func (in *CIDIndexer) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error {
for _, trxCidMeta := range payload.TransactionCIDs { for _, trxCidMeta := range payload.TransactionCIDs {
var txID int64 var txID int64
err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index) VALUES ($1, $2, $3, $4, $5, $6) err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index) = ($3, $4, $5, $6) ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key) = ($3, $4, $5, $6, $7)
RETURNING id`, RETURNING id`,
headerID, trxCidMeta.TxHash, trxCidMeta.CID, trxCidMeta.Dst, trxCidMeta.Src, trxCidMeta.Index).Scan(&txID) headerID, trxCidMeta.TxHash, trxCidMeta.CID, trxCidMeta.Dst, trxCidMeta.Src, trxCidMeta.Index, trxCidMeta.MhKey).Scan(&txID)
if err != nil { if err != nil {
return err return err
} }
@ -128,17 +128,17 @@ func (in *CIDIndexer) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *CIDPa
func (in *CIDIndexer) indexTransactionCID(tx *sqlx.Tx, transaction TxModel, headerID int64) (int64, error) { func (in *CIDIndexer) indexTransactionCID(tx *sqlx.Tx, transaction TxModel, headerID int64) (int64, error) {
var txID int64 var txID int64
err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index) VALUES ($1, $2, $3, $4, $5, $6) err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index) = ($3, $4, $5, $6) ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key) = ($3, $4, $5, $6, $7)
RETURNING id`, RETURNING id`,
headerID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index).Scan(&txID) headerID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, transaction.MhKey).Scan(&txID)
return txID, err return txID, err
} }
func (in *CIDIndexer) indexReceiptCID(tx *sqlx.Tx, cidMeta ReceiptModel, txID int64) error { func (in *CIDIndexer) indexReceiptCID(tx *sqlx.Tx, cidMeta ReceiptModel, txID int64) error {
_, err := tx.Exec(`INSERT INTO eth.receipt_cids (tx_id, cid, contract, contract_hash, topic0s, topic1s, topic2s, topic3s, log_contracts) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) _, err := tx.Exec(`INSERT INTO eth.receipt_cids (tx_id, cid, contract, contract_hash, topic0s, topic1s, topic2s, topic3s, log_contracts, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
ON CONFLICT (tx_id) DO UPDATE SET (cid, contract, contract_hash, topic0s, topic1s, topic2s, topic3s, log_contracts) = ($2, $3, $4, $5, $6, $7, $8, $9)`, ON CONFLICT (tx_id) DO UPDATE SET (cid, contract, contract_hash, topic0s, topic1s, topic2s, topic3s, log_contracts, mh_key) = ($2, $3, $4, $5, $6, $7, $8, $9, $10)`,
txID, cidMeta.CID, cidMeta.Contract, cidMeta.ContractHash, cidMeta.Topic0s, cidMeta.Topic1s, cidMeta.Topic2s, cidMeta.Topic3s, cidMeta.LogContracts) txID, cidMeta.CID, cidMeta.Contract, cidMeta.ContractHash, cidMeta.Topic0s, cidMeta.Topic1s, cidMeta.Topic2s, cidMeta.Topic3s, cidMeta.LogContracts, cidMeta.MhKey)
return err return err
} }
@ -149,10 +149,10 @@ func (in *CIDIndexer) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *CIDPayload,
if stateCID.StateKey != nullHash.String() { if stateCID.StateKey != nullHash.String() {
stateKey = stateCID.StateKey stateKey = stateCID.StateKey
} }
err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type) VALUES ($1, $2, $3, $4, $5) err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type) = ($2, $3, $5) ON CONFLICT (header_id, state_path, diff) DO UPDATE SET (state_leaf_key, cid, node_type, mh_key) = ($2, $3, $5, $7)
RETURNING id`, RETURNING id`,
headerID, stateKey, stateCID.CID, stateCID.Path, stateCID.NodeType).Scan(&stateID) headerID, stateKey, stateCID.CID, stateCID.Path, stateCID.NodeType, true, stateCID.MhKey).Scan(&stateID)
if err != nil { if err != nil {
return err return err
} }
@ -180,10 +180,10 @@ func (in *CIDIndexer) indexStateCID(tx *sqlx.Tx, stateNode StateNodeModel, heade
if stateNode.StateKey != nullHash.String() { if stateNode.StateKey != nullHash.String() {
stateKey = stateNode.StateKey stateKey = stateNode.StateKey
} }
err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type) VALUES ($1, $2, $3, $4, $5) err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type) = ($2, $3, $5) ON CONFLICT (header_id, state_path, diff) DO UPDATE SET (state_leaf_key, cid, node_type, mh_key) = ($2, $3, $5, $7)
RETURNING id`, RETURNING id`,
headerID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType).Scan(&stateID) headerID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey).Scan(&stateID)
return stateID, err return stateID, err
} }
@ -199,8 +199,8 @@ func (in *CIDIndexer) indexStorageCID(tx *sqlx.Tx, storageCID StorageNodeModel,
if storageCID.StorageKey != nullHash.String() { if storageCID.StorageKey != nullHash.String() {
storageKey = storageCID.StorageKey storageKey = storageCID.StorageKey
} }
_, err := tx.Exec(`INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type) VALUES ($1, $2, $3, $4, $5) _, err := tx.Exec(`INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type) = ($2, $3, $5)`, ON CONFLICT (state_id, storage_path, diff) DO UPDATE SET (storage_leaf_key, cid, node_type, mh_key) = ($2, $3, $5, $7)`,
stateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType) stateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, true, storageCID.MhKey)
return err return err
} }
View File
@ -37,6 +37,17 @@ var _ = Describe("Indexer", func() {
db, err = shared.SetupDB() db, err = shared.SetupDB()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
repo = eth.NewCIDIndexer(db) repo = eth.NewCIDIndexer(db)
// entries for these mh_keys must exist in public.blocks, or the FK constraint will fail
shared.PublishMockIPLD(db, mocks.HeaderMhKey, mockData)
shared.PublishMockIPLD(db, mocks.Trx1MhKey, mockData)
shared.PublishMockIPLD(db, mocks.Trx2MhKey, mockData)
shared.PublishMockIPLD(db, mocks.Trx3MhKey, mockData)
shared.PublishMockIPLD(db, mocks.Rct1MhKey, mockData)
shared.PublishMockIPLD(db, mocks.Rct2MhKey, mockData)
shared.PublishMockIPLD(db, mocks.Rct3MhKey, mockData)
shared.PublishMockIPLD(db, mocks.State1MhKey, mockData)
shared.PublishMockIPLD(db, mocks.State2MhKey, mockData)
shared.PublishMockIPLD(db, mocks.StorageMhKey, mockData)
}) })
AfterEach(func() { AfterEach(func() {
eth.TearDownDB(db) eth.TearDownDB(db)
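The PublishMockIPLD calls above are needed because, with this PR, every *_cids.mh_key column references public.blocks. A hypothetical sketch of the constraint shape implied by that comment (the actual migration DDL is not part of this diff; one such constraint per *_cids table):

    const mhKeyFkSketch = `
    ALTER TABLE eth.header_cids
        ADD CONSTRAINT header_cids_mh_key_fkey
        FOREIGN KEY (mh_key) REFERENCES public.blocks (key)`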
View File
@ -102,7 +102,7 @@ func (f *IPLDPGFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error)
// FetchHeader fetches the header IPLD // FetchHeader fetches the header IPLD
func (f *IPLDPGFetcher) FetchHeader(tx *sqlx.Tx, c HeaderModel) (ipfs.BlockModel, error) { func (f *IPLDPGFetcher) FetchHeader(tx *sqlx.Tx, c HeaderModel) (ipfs.BlockModel, error) {
log.Debug("fetching header ipld") log.Debug("fetching header ipld")
headerBytes, err := shared.FetchIPLD(tx, c.CID) headerBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil { if err != nil {
return ipfs.BlockModel{}, err return ipfs.BlockModel{}, err
} }
@ -117,7 +117,7 @@ func (f *IPLDPGFetcher) FetchUncles(tx *sqlx.Tx, cids []UncleModel) ([]ipfs.Bloc
log.Debug("fetching uncle iplds") log.Debug("fetching uncle iplds")
uncleIPLDs := make([]ipfs.BlockModel, len(cids)) uncleIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids { for i, c := range cids {
uncleBytes, err := shared.FetchIPLD(tx, c.CID) uncleBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -134,7 +134,7 @@ func (f *IPLDPGFetcher) FetchTrxs(tx *sqlx.Tx, cids []TxModel) ([]ipfs.BlockMode
log.Debug("fetching transaction iplds") log.Debug("fetching transaction iplds")
trxIPLDs := make([]ipfs.BlockModel, len(cids)) trxIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids { for i, c := range cids {
txBytes, err := shared.FetchIPLD(tx, c.CID) txBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -151,7 +151,7 @@ func (f *IPLDPGFetcher) FetchRcts(tx *sqlx.Tx, cids []ReceiptModel) ([]ipfs.Bloc
log.Debug("fetching receipt iplds") log.Debug("fetching receipt iplds")
rctIPLDs := make([]ipfs.BlockModel, len(cids)) rctIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids { for i, c := range cids {
rctBytes, err := shared.FetchIPLD(tx, c.CID) rctBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -171,7 +171,7 @@ func (f *IPLDPGFetcher) FetchState(tx *sqlx.Tx, cids []StateNodeModel) ([]StateN
if stateNode.CID == "" { if stateNode.CID == "" {
continue continue
} }
stateBytes, err := shared.FetchIPLD(tx, stateNode.CID) stateBytes, err := shared.FetchIPLDByMhKey(tx, stateNode.MhKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -196,7 +196,7 @@ func (f *IPLDPGFetcher) FetchStorage(tx *sqlx.Tx, cids []StorageNodeWithStateKey
if storageNode.CID == "" || storageNode.StateKey == "" { if storageNode.CID == "" || storageNode.StateKey == "" {
continue continue
} }
storageBytes, err := shared.FetchIPLD(tx, storageNode.CID) storageBytes, err := shared.FetchIPLDByMhKey(tx, storageNode.MhKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
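shared.FetchIPLDByMhKey replaces shared.FetchIPLD in each fetch method above, but its body is not in this diff. A minimal sketch, assuming it reads the raw block out of public.blocks inside the caller's transaction:

    // sketch: fetch raw IPLD bytes by blockstore key within the open tx
    func fetchIPLDByMhKey(tx *sqlx.Tx, mhKey string) ([]byte, error) {
        pgStr := `SELECT data FROM public.blocks WHERE key = $1`
        var block []byte
        return block, tx.Get(&block, pgStr, mhKey)
    }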
View File
@ -22,9 +22,7 @@ import (
"errors" "errors"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff" "github.com/ethereum/go-ethereum/statediff"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/client"
) )
// BackFillerClient is a mock client for use in backfiller tests // BackFillerClient is a mock client for use in backfiller tests
@ -46,7 +44,7 @@ func (mc *BackFillerClient) SetReturnDiffAt(height uint64, diffPayload statediff
} }
// BatchCall mockClient method to simulate batch call to geth // BatchCall mockClient method to simulate batch call to geth
func (mc *BackFillerClient) BatchCall(batch []client.BatchElem) error { func (mc *BackFillerClient) BatchCall(batch []rpc.BatchElem) error {
if mc.MappedStateDiffAt == nil { if mc.MappedStateDiffAt == nil {
return errors.New("mockclient needs to be initialized with statediff payloads and errors") return errors.New("mockclient needs to be initialized with statediff payloads and errors")
} }

View File
"crypto/rand" "crypto/rand"
"math/big" "math/big"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
@ -76,18 +78,29 @@ var (
Data: []byte{}, Data: []byte{},
} }
HeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256) HeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256)
HeaderMhKey = shared.MultihashKeyFromCID(HeaderCID)
Trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(0), multihash.KECCAK_256) Trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(0), multihash.KECCAK_256)
Trx1MhKey = shared.MultihashKeyFromCID(Trx1CID)
Trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(1), multihash.KECCAK_256) Trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(1), multihash.KECCAK_256)
Trx2MhKey = shared.MultihashKeyFromCID(Trx2CID)
Trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(2), multihash.KECCAK_256) Trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(2), multihash.KECCAK_256)
Trx3MhKey = shared.MultihashKeyFromCID(Trx3CID)
Rct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(0), multihash.KECCAK_256) Rct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(0), multihash.KECCAK_256)
Rct1MhKey = shared.MultihashKeyFromCID(Rct1CID)
Rct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(1), multihash.KECCAK_256) Rct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(1), multihash.KECCAK_256)
Rct2MhKey = shared.MultihashKeyFromCID(Rct2CID)
Rct3CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(2), multihash.KECCAK_256) Rct3CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(2), multihash.KECCAK_256)
Rct3MhKey = shared.MultihashKeyFromCID(Rct3CID)
State1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, ContractLeafNode, multihash.KECCAK_256) State1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, ContractLeafNode, multihash.KECCAK_256)
State1MhKey = shared.MultihashKeyFromCID(State1CID)
State2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, AccountLeafNode, multihash.KECCAK_256) State2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, AccountLeafNode, multihash.KECCAK_256)
State2MhKey = shared.MultihashKeyFromCID(State2CID)
StorageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageLeafNode, multihash.KECCAK_256) StorageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageLeafNode, multihash.KECCAK_256)
StorageMhKey = shared.MultihashKeyFromCID(StorageCID)
MockTrxMeta = []eth.TxModel{ MockTrxMeta = []eth.TxModel{
{ {
CID: "", // This is empty until we go to publish to ipfs CID: "", // This is empty until we go to publish to ipfs
MhKey: "",
Src: SenderAddr.Hex(), Src: SenderAddr.Hex(),
Dst: Address.String(), Dst: Address.String(),
Index: 0, Index: 0,
@ -95,6 +108,7 @@ var (
}, },
{ {
CID: "", CID: "",
MhKey: "",
Src: SenderAddr.Hex(), Src: SenderAddr.Hex(),
Dst: AnotherAddress.String(), Dst: AnotherAddress.String(),
Index: 1, Index: 1,
@ -102,6 +116,7 @@ var (
}, },
{ {
CID: "", CID: "",
MhKey: "",
Src: SenderAddr.Hex(), Src: SenderAddr.Hex(),
Dst: "", Dst: "",
Index: 2, Index: 2,
@ -111,6 +126,7 @@ var (
MockTrxMetaPostPublsh = []eth.TxModel{ MockTrxMetaPostPublsh = []eth.TxModel{
{ {
CID: Trx1CID.String(), // Populated during publish to ipfs CID: Trx1CID.String(), // Populated during publish to ipfs
MhKey: Trx1MhKey,
Src: SenderAddr.Hex(), Src: SenderAddr.Hex(),
Dst: Address.String(), Dst: Address.String(),
Index: 0, Index: 0,
@ -118,6 +134,7 @@ var (
}, },
{ {
CID: Trx2CID.String(), CID: Trx2CID.String(),
MhKey: Trx2MhKey,
Src: SenderAddr.Hex(), Src: SenderAddr.Hex(),
Dst: AnotherAddress.String(), Dst: AnotherAddress.String(),
Index: 1, Index: 1,
@ -125,6 +142,7 @@ var (
}, },
{ {
CID: Trx3CID.String(), CID: Trx3CID.String(),
MhKey: Trx3MhKey,
Src: SenderAddr.Hex(), Src: SenderAddr.Hex(),
Dst: "", Dst: "",
Index: 2, Index: 2,
@ -133,7 +151,8 @@ var (
} }
MockRctMeta = []eth.ReceiptModel{ MockRctMeta = []eth.ReceiptModel{
{ {
CID: "", CID: "",
MhKey: "",
Topic0s: []string{ Topic0s: []string{
mockTopic11.String(), mockTopic11.String(),
}, },
@ -147,7 +166,8 @@ var (
}, },
}, },
{ {
CID: "", CID: "",
MhKey: "",
Topic0s: []string{ Topic0s: []string{
mockTopic21.String(), mockTopic21.String(),
}, },
@ -162,6 +182,7 @@ var (
}, },
{ {
CID: "", CID: "",
MhKey: "",
Contract: ContractAddress.String(), Contract: ContractAddress.String(),
ContractHash: ContractHash, ContractHash: ContractHash,
LogContracts: []string{}, LogContracts: []string{},
@ -169,7 +190,8 @@ var (
} }
MockRctMetaPostPublish = []eth.ReceiptModel{ MockRctMetaPostPublish = []eth.ReceiptModel{
{ {
CID: Rct1CID.String(), CID: Rct1CID.String(),
MhKey: Rct1MhKey,
Topic0s: []string{ Topic0s: []string{
mockTopic11.String(), mockTopic11.String(),
}, },
@ -183,7 +205,8 @@ var (
}, },
}, },
{ {
CID: Rct2CID.String(), CID: Rct2CID.String(),
MhKey: Rct2MhKey,
Topic0s: []string{ Topic0s: []string{
mockTopic21.String(), mockTopic21.String(),
}, },
@ -198,6 +221,7 @@ var (
}, },
{ {
CID: Rct3CID.String(), CID: Rct3CID.String(),
MhKey: Rct3MhKey,
Contract: ContractAddress.String(), Contract: ContractAddress.String(),
ContractHash: ContractHash, ContractHash: ContractHash,
LogContracts: []string{}, LogContracts: []string{},
@ -296,12 +320,14 @@ var (
MockStateMetaPostPublish = []eth.StateNodeModel{ MockStateMetaPostPublish = []eth.StateNodeModel{
{ {
CID: State1CID.String(), CID: State1CID.String(),
MhKey: State1MhKey,
Path: []byte{'\x06'}, Path: []byte{'\x06'},
NodeType: 2, NodeType: 2,
StateKey: common.BytesToHash(ContractLeafKey).Hex(), StateKey: common.BytesToHash(ContractLeafKey).Hex(),
}, },
{ {
CID: State2CID.String(), CID: State2CID.String(),
MhKey: State2MhKey,
Path: []byte{'\x0c'}, Path: []byte{'\x0c'},
NodeType: 2, NodeType: 2,
StateKey: common.BytesToHash(AccountLeafKey).Hex(), StateKey: common.BytesToHash(AccountLeafKey).Hex(),
@ -341,6 +367,7 @@ var (
BlockHash: MockBlock.Hash().String(), BlockHash: MockBlock.Hash().String(),
BlockNumber: MockBlock.Number().String(), BlockNumber: MockBlock.Number().String(),
CID: HeaderCID.String(), CID: HeaderCID.String(),
MhKey: HeaderMhKey,
ParentHash: MockBlock.ParentHash().String(), ParentHash: MockBlock.ParentHash().String(),
TotalDifficulty: MockBlock.Difficulty().String(), TotalDifficulty: MockBlock.Difficulty().String(),
Reward: "5000000000000000000", Reward: "5000000000000000000",
@ -363,6 +390,7 @@ var (
contractPath: { contractPath: {
{ {
CID: StorageCID.String(), CID: StorageCID.String(),
MhKey: StorageMhKey,
Path: []byte{}, Path: []byte{},
StorageKey: common.BytesToHash(StorageLeafKey).Hex(), StorageKey: common.BytesToHash(StorageLeafKey).Hex(),
NodeType: 2, NodeType: 2,
@ -392,6 +420,7 @@ var (
BlockHash: MockBlock.Hash().String(), BlockHash: MockBlock.Hash().String(),
ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000", ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
CID: HeaderCID.String(), CID: HeaderCID.String(),
MhKey: HeaderMhKey,
TotalDifficulty: MockBlock.Difficulty().String(), TotalDifficulty: MockBlock.Difficulty().String(),
Reward: "5000000000000000000", Reward: "5000000000000000000",
StateRoot: MockBlock.Root().String(), StateRoot: MockBlock.Root().String(),
@ -410,6 +439,7 @@ var (
{ {
Path: []byte{}, Path: []byte{},
CID: StorageCID.String(), CID: StorageCID.String(),
MhKey: StorageMhKey,
NodeType: 2, NodeType: 2,
StateKey: common.BytesToHash(ContractLeafKey).Hex(), StateKey: common.BytesToHash(ContractLeafKey).Hex(),
StorageKey: common.BytesToHash(StorageLeafKey).Hex(), StorageKey: common.BytesToHash(StorageLeafKey).Hex(),

View File

@ -25,6 +25,7 @@ type HeaderModel struct {
BlockHash string `db:"block_hash"` BlockHash string `db:"block_hash"`
ParentHash string `db:"parent_hash"` ParentHash string `db:"parent_hash"`
CID string `db:"cid"` CID string `db:"cid"`
MhKey string `db:"mh_key"`
TotalDifficulty string `db:"td"` TotalDifficulty string `db:"td"`
NodeID int64 `db:"node_id"` NodeID int64 `db:"node_id"`
Reward string `db:"reward"` Reward string `db:"reward"`
@ -44,6 +45,7 @@ type UncleModel struct {
BlockHash string `db:"block_hash"` BlockHash string `db:"block_hash"`
ParentHash string `db:"parent_hash"` ParentHash string `db:"parent_hash"`
CID string `db:"cid"` CID string `db:"cid"`
MhKey string `db:"mh_key"`
Reward string `db:"reward"` Reward string `db:"reward"`
} }
@ -54,6 +56,7 @@ type TxModel struct {
Index int64 `db:"index"` Index int64 `db:"index"`
TxHash string `db:"tx_hash"` TxHash string `db:"tx_hash"`
CID string `db:"cid"` CID string `db:"cid"`
MhKey string `db:"mh_key"`
Dst string `db:"dst"` Dst string `db:"dst"`
Src string `db:"src"` Src string `db:"src"`
} }
@ -63,6 +66,7 @@ type ReceiptModel struct {
ID int64 `db:"id"` ID int64 `db:"id"`
TxID int64 `db:"tx_id"` TxID int64 `db:"tx_id"`
CID string `db:"cid"` CID string `db:"cid"`
MhKey string `db:"mh_key"`
Contract string `db:"contract"` Contract string `db:"contract"`
ContractHash string `db:"contract_hash"` ContractHash string `db:"contract_hash"`
LogContracts pq.StringArray `db:"log_contracts"` LogContracts pq.StringArray `db:"log_contracts"`
@ -80,6 +84,8 @@ type StateNodeModel struct {
StateKey string `db:"state_leaf_key"` StateKey string `db:"state_leaf_key"`
NodeType int `db:"node_type"` NodeType int `db:"node_type"`
CID string `db:"cid"` CID string `db:"cid"`
MhKey string `db:"mh_key"`
Diff bool `db:"diff"`
} }
// StorageNodeModel is the db model for eth.storage_cids // StorageNodeModel is the db model for eth.storage_cids
@ -90,6 +96,8 @@ type StorageNodeModel struct {
StorageKey string `db:"storage_leaf_key"` StorageKey string `db:"storage_leaf_key"`
NodeType int `db:"node_type"` NodeType int `db:"node_type"`
CID string `db:"cid"` CID string `db:"cid"`
MhKey string `db:"mh_key"`
Diff bool `db:"diff"`
} }
// StorageNodeWithStateKeyModel is a db model for eth.storage_cids + eth.state_cids.state_key // StorageNodeWithStateKeyModel is a db model for eth.storage_cids + eth.state_cids.state_key
@ -101,6 +109,8 @@ type StorageNodeWithStateKeyModel struct {
StorageKey string `db:"storage_leaf_key"` StorageKey string `db:"storage_leaf_key"`
NodeType int `db:"node_type"` NodeType int `db:"node_type"`
CID string `db:"cid"` CID string `db:"cid"`
MhKey string `db:"mh_key"`
Diff bool `db:"diff"`
} }
// StateAccountModel is a db model for an eth state account (decoded value of state leaf node) // StateAccountModel is a db model for an eth state account (decoded value of state leaf node)

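The practical effect of the new mh_key columns is that every *_cids row now carries the blockstore key of its raw IPLD block, so consumers can join straight to public.blocks. A minimal sketch of such a join (the eth.header_cids table name and the query itself are illustrative assumptions, not part of this diff):

// Illustrative only: a hypothetical consumer-side query enabled by the new
// mh_key columns, joining a CID table to public.blocks on the multihash key.
package example

import "github.com/jmoiron/sqlx"

// fetchHeaderIPLD assumes the eth.header_cids table backing HeaderModel above.
func fetchHeaderIPLD(tx *sqlx.Tx, blockHash string) ([]byte, error) {
	pgStr := `SELECT blocks.data FROM eth.header_cids
		INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
		WHERE header_cids.block_hash = $1`
	var data []byte
	return data, tx.Get(&data, pgStr, blockHash)
}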
View File

@ -92,6 +92,7 @@ func (pub *IPLDPublisherAndIndexer) Publish(payload shared.ConvertedData) (share
reward := CalcEthBlockReward(ipldPayload.Block.Header(), ipldPayload.Block.Uncles(), ipldPayload.Block.Transactions(), ipldPayload.Receipts) reward := CalcEthBlockReward(ipldPayload.Block.Header(), ipldPayload.Block.Uncles(), ipldPayload.Block.Transactions(), ipldPayload.Receipts)
header := HeaderModel{ header := HeaderModel{
CID: headerNode.Cid().String(), CID: headerNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
ParentHash: ipldPayload.Block.ParentHash().String(), ParentHash: ipldPayload.Block.ParentHash().String(),
BlockNumber: ipldPayload.Block.Number().String(), BlockNumber: ipldPayload.Block.Number().String(),
BlockHash: ipldPayload.Block.Hash().String(), BlockHash: ipldPayload.Block.Hash().String(),
@ -117,6 +118,7 @@ func (pub *IPLDPublisherAndIndexer) Publish(payload shared.ConvertedData) (share
uncleReward := CalcUncleMinerReward(ipldPayload.Block.Number().Int64(), uncleNode.Number.Int64()) uncleReward := CalcUncleMinerReward(ipldPayload.Block.Number().Int64(), uncleNode.Number.Int64())
uncle := UncleModel{ uncle := UncleModel{
CID: uncleNode.Cid().String(), CID: uncleNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()),
ParentHash: uncleNode.ParentHash.String(), ParentHash: uncleNode.ParentHash.String(),
BlockHash: uncleNode.Hash().String(), BlockHash: uncleNode.Hash().String(),
Reward: uncleReward.String(), Reward: uncleReward.String(),
@ -137,12 +139,14 @@ func (pub *IPLDPublisherAndIndexer) Publish(payload shared.ConvertedData) (share
} }
txModel := ipldPayload.TxMetaData[i] txModel := ipldPayload.TxMetaData[i]
txModel.CID = txNode.Cid().String() txModel.CID = txNode.Cid().String()
txModel.MhKey = shared.MultihashKeyFromCID(txNode.Cid())
txID, err := pub.indexer.indexTransactionCID(tx, txModel, headerID) txID, err := pub.indexer.indexTransactionCID(tx, txModel, headerID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
rctModel := ipldPayload.ReceiptMetaData[i] rctModel := ipldPayload.ReceiptMetaData[i]
rctModel.CID = rctNode.Cid().String() rctModel.CID = rctNode.Cid().String()
rctModel.MhKey = shared.MultihashKeyFromCID(rctNode.Cid())
if err := pub.indexer.indexReceiptCID(tx, rctModel, txID); err != nil { if err := pub.indexer.indexReceiptCID(tx, rctModel, txID); err != nil {
return nil, err return nil, err
} }
@ -162,10 +166,12 @@ func (pub *IPLDPublisherAndIndexer) publishAndIndexStateAndStorage(tx *sqlx.Tx,
if err != nil { if err != nil {
return err return err
} }
mhKey, _ := shared.MultihashKeyFromCIDString(stateCIDStr)
stateModel := StateNodeModel{ stateModel := StateNodeModel{
Path: stateNode.Path, Path: stateNode.Path,
StateKey: stateNode.LeafKey.String(), StateKey: stateNode.LeafKey.String(),
CID: stateCIDStr, CID: stateCIDStr,
MhKey: mhKey,
NodeType: ResolveFromNodeType(stateNode.Type), NodeType: ResolveFromNodeType(stateNode.Type),
} }
stateID, err := pub.indexer.indexStateCID(tx, stateModel, headerID) stateID, err := pub.indexer.indexStateCID(tx, stateModel, headerID)
@ -199,10 +205,12 @@ func (pub *IPLDPublisherAndIndexer) publishAndIndexStateAndStorage(tx *sqlx.Tx,
if err != nil { if err != nil {
return err return err
} }
mhKey, _ := shared.MultihashKeyFromCIDString(storageCIDStr)
storageModel := StorageNodeModel{ storageModel := StorageNodeModel{
Path: storageNode.Path, Path: storageNode.Path,
StorageKey: storageNode.LeafKey.Hex(), StorageKey: storageNode.LeafKey.Hex(),
CID: storageCIDStr, CID: storageCIDStr,
MhKey: mhKey,
NodeType: ResolveFromNodeType(storageNode.Type), NodeType: ResolveFromNodeType(storageNode.Type),
} }
if err := pub.indexer.indexStorageCID(tx, storageModel, stateID); err != nil { if err := pub.indexer.indexStorageCID(tx, storageModel, stateID); err != nil {

View File

@ -79,6 +79,7 @@ func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForI
reward := CalcEthBlockReward(ipldPayload.Block.Header(), ipldPayload.Block.Uncles(), ipldPayload.Block.Transactions(), ipldPayload.Receipts) reward := CalcEthBlockReward(ipldPayload.Block.Header(), ipldPayload.Block.Uncles(), ipldPayload.Block.Transactions(), ipldPayload.Receipts)
header := HeaderModel{ header := HeaderModel{
CID: headerCid, CID: headerCid,
MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
ParentHash: ipldPayload.Block.ParentHash().String(), ParentHash: ipldPayload.Block.ParentHash().String(),
BlockNumber: ipldPayload.Block.Number().String(), BlockNumber: ipldPayload.Block.Number().String(),
BlockHash: ipldPayload.Block.Hash().String(), BlockHash: ipldPayload.Block.Hash().String(),
@ -102,6 +103,7 @@ func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForI
uncleReward := CalcUncleMinerReward(ipldPayload.Block.Number().Int64(), uncle.Number.Int64()) uncleReward := CalcUncleMinerReward(ipldPayload.Block.Number().Int64(), uncle.Number.Int64())
uncleCids[i] = UncleModel{ uncleCids[i] = UncleModel{
CID: uncleCid, CID: uncleCid,
MhKey: shared.MultihashKeyFromCID(uncle.Cid()),
ParentHash: uncle.ParentHash.String(), ParentHash: uncle.ParentHash.String(),
BlockHash: uncle.Hash().String(), BlockHash: uncle.Hash().String(),
Reward: uncleReward.String(), Reward: uncleReward.String(),
@ -162,6 +164,7 @@ func (pub *IPLDPublisher) publishTransactions(transactions []*ipld.EthTx, txTrie
} }
trxCids[i] = TxModel{ trxCids[i] = TxModel{
CID: cid, CID: cid,
MhKey: shared.MultihashKeyFromCID(tx.Cid()),
Index: trxMeta[i].Index, Index: trxMeta[i].Index,
TxHash: trxMeta[i].TxHash, TxHash: trxMeta[i].TxHash,
Src: trxMeta[i].Src, Src: trxMeta[i].Src,
@ -186,6 +189,7 @@ func (pub *IPLDPublisher) publishReceipts(receipts []*ipld.EthReceipt, receiptTr
} }
rctCids[rct.TxHash] = ReceiptModel{ rctCids[rct.TxHash] = ReceiptModel{
CID: cid, CID: cid,
MhKey: shared.MultihashKeyFromCID(rct.Cid()),
Contract: receiptMeta[i].Contract, Contract: receiptMeta[i].Contract,
ContractHash: receiptMeta[i].ContractHash, ContractHash: receiptMeta[i].ContractHash,
Topic0s: receiptMeta[i].Topic0s, Topic0s: receiptMeta[i].Topic0s,
@ -220,6 +224,7 @@ func (pub *IPLDPublisher) publishStateNodes(stateNodes []TrieNode) ([]StateNodeM
Path: stateNode.Path, Path: stateNode.Path,
StateKey: stateNode.LeafKey.String(), StateKey: stateNode.LeafKey.String(),
CID: cid, CID: cid,
MhKey: shared.MultihashKeyFromCID(node.Cid()),
NodeType: ResolveFromNodeType(stateNode.Type), NodeType: ResolveFromNodeType(stateNode.Type),
}) })
// If we have a leaf, decode the account to extract additional metadata for indexing // If we have a leaf, decode the account to extract additional metadata for indexing
@ -266,6 +271,7 @@ func (pub *IPLDPublisher) publishStorageNodes(storageNodes map[string][]TrieNode
Path: storageNode.Path, Path: storageNode.Path,
StorageKey: storageNode.LeafKey.Hex(), StorageKey: storageNode.LeafKey.Hex(),
CID: cid, CID: cid,
MhKey: shared.MultihashKeyFromCID(node.Cid()),
NodeType: ResolveFromNodeType(storageNode.Type), NodeType: ResolveFromNodeType(storageNode.Type),
}) })
} }

View File

@ -24,7 +24,7 @@ import (
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
) )
// SubscriptionSettings config is used by a subscriber to specify what eth data to stream from the super node // SubscriptionSettings config is used by a subscriber to specify what eth data to stream from the watcher
type SubscriptionSettings struct { type SubscriptionSettings struct {
BackFill bool BackFill bool
BackFillOnly bool BackFillOnly bool
@ -78,50 +78,50 @@ type StorageFilter struct {
func NewEthSubscriptionConfig() (*SubscriptionSettings, error) { func NewEthSubscriptionConfig() (*SubscriptionSettings, error) {
sc := new(SubscriptionSettings) sc := new(SubscriptionSettings)
// Below default to false, which means we do not backfill by default // Below default to false, which means we do not backfill by default
sc.BackFill = viper.GetBool("superNode.ethSubscription.historicalData") sc.BackFill = viper.GetBool("watcher.ethSubscription.historicalData")
sc.BackFillOnly = viper.GetBool("superNode.ethSubscription.historicalDataOnly") sc.BackFillOnly = viper.GetBool("watcher.ethSubscription.historicalDataOnly")
// Below default to 0 // Below default to 0
// 0 start means we start at the beginning and 0 end means we continue indefinitely // 0 start means we start at the beginning and 0 end means we continue indefinitely
sc.Start = big.NewInt(viper.GetInt64("superNode.ethSubscription.startingBlock")) sc.Start = big.NewInt(viper.GetInt64("watcher.ethSubscription.startingBlock"))
sc.End = big.NewInt(viper.GetInt64("superNode.ethSubscription.endingBlock")) sc.End = big.NewInt(viper.GetInt64("watcher.ethSubscription.endingBlock"))
// Below default to false, which means we get all headers and no uncles by default // Below default to false, which means we get all headers and no uncles by default
sc.HeaderFilter = HeaderFilter{ sc.HeaderFilter = HeaderFilter{
Off: viper.GetBool("superNode.ethSubscription.headerFilter.off"), Off: viper.GetBool("watcher.ethSubscription.headerFilter.off"),
Uncles: viper.GetBool("superNode.ethSubscription.headerFilter.uncles"), Uncles: viper.GetBool("watcher.ethSubscription.headerFilter.uncles"),
} }
// Below defaults to false and two slices of length 0 // Below defaults to false and two slices of length 0
// Which means we get all transactions by default // Which means we get all transactions by default
sc.TxFilter = TxFilter{ sc.TxFilter = TxFilter{
Off: viper.GetBool("superNode.ethSubscription.txFilter.off"), Off: viper.GetBool("watcher.ethSubscription.txFilter.off"),
Src: viper.GetStringSlice("superNode.ethSubscription.txFilter.src"), Src: viper.GetStringSlice("watcher.ethSubscription.txFilter.src"),
Dst: viper.GetStringSlice("superNode.ethSubscription.txFilter.dst"), Dst: viper.GetStringSlice("watcher.ethSubscription.txFilter.dst"),
} }
// By default all of the topic slices will be empty => match on any/all topics // By default all of the topic slices will be empty => match on any/all topics
topics := make([][]string, 4) topics := make([][]string, 4)
topics[0] = viper.GetStringSlice("superNode.ethSubscription.receiptFilter.topic0s") topics[0] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic0s")
topics[1] = viper.GetStringSlice("superNode.ethSubscription.receiptFilter.topic1s") topics[1] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic1s")
topics[2] = viper.GetStringSlice("superNode.ethSubscription.receiptFilter.topic2s") topics[2] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic2s")
topics[3] = viper.GetStringSlice("superNode.ethSubscription.receiptFilter.topic3s") topics[3] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic3s")
sc.ReceiptFilter = ReceiptFilter{ sc.ReceiptFilter = ReceiptFilter{
Off: viper.GetBool("superNode.ethSubscription.receiptFilter.off"), Off: viper.GetBool("watcher.ethSubscription.receiptFilter.off"),
MatchTxs: viper.GetBool("superNode.ethSubscription.receiptFilter.matchTxs"), MatchTxs: viper.GetBool("watcher.ethSubscription.receiptFilter.matchTxs"),
LogAddresses: viper.GetStringSlice("superNode.ethSubscription.receiptFilter.contracts"), LogAddresses: viper.GetStringSlice("watcher.ethSubscription.receiptFilter.contracts"),
Topics: topics, Topics: topics,
} }
// Below defaults to two false, and a slice of length 0 // Below defaults to two false, and a slice of length 0
// Which means we get all state leafs by default, but no intermediate nodes // Which means we get all state leafs by default, but no intermediate nodes
sc.StateFilter = StateFilter{ sc.StateFilter = StateFilter{
Off: viper.GetBool("superNode.ethSubscription.stateFilter.off"), Off: viper.GetBool("watcher.ethSubscription.stateFilter.off"),
IntermediateNodes: viper.GetBool("superNode.ethSubscription.stateFilter.intermediateNodes"), IntermediateNodes: viper.GetBool("watcher.ethSubscription.stateFilter.intermediateNodes"),
Addresses: viper.GetStringSlice("superNode.ethSubscription.stateFilter.addresses"), Addresses: viper.GetStringSlice("watcher.ethSubscription.stateFilter.addresses"),
} }
// Below defaults to two false, and two slices of length 0 // Below defaults to two false, and two slices of length 0
// Which means we get all storage leafs by default, but no intermediate nodes // Which means we get all storage leafs by default, but no intermediate nodes
sc.StorageFilter = StorageFilter{ sc.StorageFilter = StorageFilter{
Off: viper.GetBool("superNode.ethSubscription.storageFilter.off"), Off: viper.GetBool("watcher.ethSubscription.storageFilter.off"),
IntermediateNodes: viper.GetBool("superNode.ethSubscription.storageFilter.intermediateNodes"), IntermediateNodes: viper.GetBool("watcher.ethSubscription.storageFilter.intermediateNodes"),
Addresses: viper.GetStringSlice("superNode.ethSubscription.storageFilter.addresses"), Addresses: viper.GetStringSlice("watcher.ethSubscription.storageFilter.addresses"),
StorageKeys: viper.GetStringSlice("superNode.ethSubscription.storageFilter.storageKeys"), StorageKeys: viper.GetStringSlice("watcher.ethSubscription.storageFilter.storageKeys"),
} }
return sc, nil return sc, nil
} }

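Because every subscription key moved from the superNode namespace to watcher, existing subscriber configs must be updated to the new prefix. A minimal sketch with hypothetical values, setting the keys programmatically rather than through a .toml file:

package main

import (
	"fmt"

	"github.com/spf13/viper"

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
)

func main() {
	// Hypothetical example values; normally these live under [watcher.ethSubscription] in the .toml
	viper.Set("watcher.ethSubscription.historicalData", true)
	viper.Set("watcher.ethSubscription.startingBlock", 1000000)
	viper.Set("watcher.ethSubscription.endingBlock", 0) // 0 => stream indefinitely
	viper.Set("watcher.ethSubscription.receiptFilter.topic0s", []string{
		// ERC-20 Transfer event signature hash
		"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
	})
	sc, err := eth.NewEthSubscriptionConfig()
	if err != nil {
		panic(err)
	}
	fmt.Printf("subscribing from block %s to %s\n", sc.Start, sc.End)
}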
View File

@ -22,7 +22,7 @@ import (
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
) )
// TearDownDB is used to tear down the super node dbs after tests // TearDownDB is used to tear down the watcher dbs after tests
func TearDownDB(db *postgres.DB) { func TearDownDB(db *postgres.DB) {
tx, err := db.Beginx() tx, err := db.Beginx()
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())


pkg/historical/config.go Normal file
View File

@ -0,0 +1,149 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package historical
import (
"fmt"
"time"
"github.com/spf13/viper"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/config"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/node"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
"github.com/vulcanize/ipfs-blockchain-watcher/utils"
)
// Env variables
const (
SUPERNODE_CHAIN = "SUPERNODE_CHAIN"
SUPERNODE_FREQUENCY = "SUPERNODE_FREQUENCY"
SUPERNODE_BATCH_SIZE = "SUPERNODE_BATCH_SIZE"
SUPERNODE_BATCH_NUMBER = "SUPERNODE_BATCH_NUMBER"
SUPERNODE_VALIDATION_LEVEL = "SUPERNODE_VALIDATION_LEVEL"
BACKFILL_MAX_IDLE_CONNECTIONS = "BACKFILL_MAX_IDLE_CONNECTIONS"
BACKFILL_MAX_OPEN_CONNECTIONS = "BACKFILL_MAX_OPEN_CONNECTIONS"
BACKFILL_MAX_CONN_LIFETIME = "BACKFILL_MAX_CONN_LIFETIME"
)
// Config struct
type Config struct {
Chain shared.ChainType
IPFSPath string
IPFSMode shared.IPFSMode
DBConfig config.Database
DB *postgres.DB
HTTPClient interface{}
Frequency time.Duration
BatchSize uint64
BatchNumber uint64
ValidationLevel int
Timeout time.Duration // HTTP connection timeout in seconds
NodeInfo node.Node
}
// NewConfig is used to initialize a historical config from a .toml file
func NewConfig() (*Config, error) {
c := new(Config)
var err error
viper.BindEnv("superNode.chain", SUPERNODE_CHAIN)
chain := viper.GetString("superNode.chain")
c.Chain, err = shared.NewChainType(chain)
if err != nil {
return nil, err
}
c.IPFSMode, err = shared.GetIPFSMode()
if err != nil {
return nil, err
}
if c.IPFSMode == shared.LocalInterface || c.IPFSMode == shared.RemoteClient {
c.IPFSPath, err = shared.GetIPFSPath()
if err != nil {
return nil, err
}
}
c.DBConfig.Init()
if err := c.init(); err != nil {
return nil, err
}
return c, nil
}
func (c *Config) init() error {
var err error
viper.BindEnv("ethereum.httpPath", shared.ETH_HTTP_PATH)
viper.BindEnv("bitcoin.httpPath", shared.BTC_HTTP_PATH)
viper.BindEnv("superNode.frequency", SUPERNODE_FREQUENCY)
viper.BindEnv("superNode.batchSize", SUPERNODE_BATCH_SIZE)
viper.BindEnv("superNode.batchNumber", SUPERNODE_BATCH_NUMBER)
viper.BindEnv("superNode.validationLevel", SUPERNODE_VALIDATION_LEVEL)
viper.BindEnv("superNode.timeout", shared.HTTP_TIMEOUT)
timeout := viper.GetInt("superNode.timeout")
if timeout < 15 {
timeout = 15
}
c.Timeout = time.Second * time.Duration(timeout)
switch c.Chain {
case shared.Ethereum:
ethHTTP := viper.GetString("ethereum.httpPath")
c.NodeInfo, c.HTTPClient, err = shared.GetEthNodeAndClient(fmt.Sprintf("http://%s", ethHTTP))
if err != nil {
return err
}
case shared.Bitcoin:
btcHTTP := viper.GetString("bitcoin.httpPath")
c.NodeInfo, c.HTTPClient = shared.GetBtcNodeAndClient(btcHTTP)
}
freq := viper.GetInt("superNode.frequency")
var frequency time.Duration
if freq <= 0 {
frequency = time.Second * 30
} else {
frequency = time.Second * time.Duration(freq)
}
c.Frequency = frequency
c.BatchSize = uint64(viper.GetInt64("superNode.batchSize"))
c.BatchNumber = uint64(viper.GetInt64("superNode.batchNumber"))
c.ValidationLevel = viper.GetInt("superNode.validationLevel")
dbConn := overrideDBConnConfig(c.DBConfig)
db := utils.LoadPostgres(dbConn, c.NodeInfo)
c.DB = &db
return nil
}
func overrideDBConnConfig(con config.Database) config.Database {
viper.BindEnv("database.backFill.maxIdle", BACKFILL_MAX_IDLE_CONNECTIONS)
viper.BindEnv("database.backFill.maxOpen", BACKFILL_MAX_OPEN_CONNECTIONS)
viper.BindEnv("database.backFill.maxLifetime", BACKFILL_MAX_CONN_LIFETIME)
con.MaxIdle = viper.GetInt("database.backFill.maxIdle")
con.MaxOpen = viper.GetInt("database.backFill.maxOpen")
con.MaxLifetime = viper.GetInt("database.backFill.maxLifetime")
return con
}

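A minimal usage sketch for the new historical package (assuming the usual .toml/env configuration is in place), wiring the config into the backfill service that a later file in this diff defines:

package main

import (
	"sync"

	log "github.com/sirupsen/logrus"

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/historical"
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)

func main() {
	cfg, err := historical.NewConfig()
	if err != nil {
		log.Fatal(err)
	}
	// nil relay channel: this sketch does not forward backfilled payloads to a server
	var relay chan shared.ConvertedData
	backfiller, err := historical.NewBackFillService(cfg, relay)
	if err != nil {
		log.Fatal(err)
	}
	wg := new(sync.WaitGroup)
	backfiller.BackFill(wg) // spins up the periodic gap-check loop
	wg.Wait()
}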
View File

@ -14,20 +14,22 @@
// You should have received a copy of the GNU Affero General Public License // You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>. // along with this program. If not, see <http://www.gnu.org/licenses/>.
package core package historical_test
import ( import (
"context" "io/ioutil"
"testing"
"github.com/ethereum/go-ethereum/rpc" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/client" "github.com/sirupsen/logrus"
) )
type RPCClient interface { func TestIPFSWatcher(t *testing.T) {
CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error RegisterFailHandler(Fail)
BatchCall(batch []client.BatchElem) error RunSpecs(t, "IPFS Watcher Historical Suite Test")
IpcPath() string
SupportedModules() (map[string]string, error)
Subscribe(namespace string, payloadChan interface{}, args ...interface{}) (*rpc.ClientSubscription, error)
} }
var _ = BeforeSuite(func() {
logrus.SetOutput(ioutil.Discard)
})

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License // You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>. // along with this program. If not, see <http://www.gnu.org/licenses/>.
package watcher package historical
import ( import (
"sync" "sync"
@ -22,23 +22,19 @@ import (
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/builders"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
"github.com/vulcanize/ipfs-blockchain-watcher/utils" "github.com/vulcanize/ipfs-blockchain-watcher/utils"
) )
const ( // BackFillInterface for filling in gaps in the ipfs-blockchain-watcher db
DefaultMaxBatchSize uint64 = 100
DefaultMaxBatchNumber int64 = 50
)
// BackFillInterface for filling in gaps in the super node
type BackFillInterface interface { type BackFillInterface interface {
// Method for the super node to periodically check for and fill in gaps in its data using an archival node // Method for the watcher to periodically check for and fill in gaps in its data using an archival node
BackFill(wg *sync.WaitGroup) BackFill(wg *sync.WaitGroup)
Stop() error Stop() error
} }
// BackFillService for filling in gaps in the super node // BackFillService for filling in gaps in the watcher
type BackFillService struct { type BackFillService struct {
// Interface for converting payloads into IPLD object payloads // Interface for converting payloads into IPLD object payloads
Converter shared.PayloadConverter Converter shared.PayloadConverter
@ -68,33 +64,33 @@ type BackFillService struct {
// NewBackFillService returns a new BackFillInterface // NewBackFillService returns a new BackFillInterface
func NewBackFillService(settings *Config, screenAndServeChan chan shared.ConvertedData) (BackFillInterface, error) { func NewBackFillService(settings *Config, screenAndServeChan chan shared.ConvertedData) (BackFillInterface, error) {
publisher, err := NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.BackFillDBConn, settings.IPFSMode) publisher, err := builders.NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.DB, settings.IPFSMode)
if err != nil { if err != nil {
return nil, err return nil, err
} }
indexer, err := NewCIDIndexer(settings.Chain, settings.BackFillDBConn, settings.IPFSMode) indexer, err := builders.NewCIDIndexer(settings.Chain, settings.DB, settings.IPFSMode)
if err != nil { if err != nil {
return nil, err return nil, err
} }
converter, err := NewPayloadConverter(settings.Chain) converter, err := builders.NewPayloadConverter(settings.Chain)
if err != nil { if err != nil {
return nil, err return nil, err
} }
retriever, err := NewCIDRetriever(settings.Chain, settings.BackFillDBConn) retriever, err := builders.NewCIDRetriever(settings.Chain, settings.DB)
if err != nil { if err != nil {
return nil, err return nil, err
} }
fetcher, err := NewPaylaodFetcher(settings.Chain, settings.HTTPClient, settings.Timeout) fetcher, err := builders.NewPaylaodFetcher(settings.Chain, settings.HTTPClient, settings.Timeout)
if err != nil { if err != nil {
return nil, err return nil, err
} }
batchSize := settings.BatchSize batchSize := settings.BatchSize
if batchSize == 0 { if batchSize == 0 {
batchSize = DefaultMaxBatchSize batchSize = shared.DefaultMaxBatchSize
} }
batchNumber := int64(settings.BatchNumber) batchNumber := int64(settings.BatchNumber)
if batchNumber == 0 { if batchNumber == 0 {
batchNumber = DefaultMaxBatchNumber batchNumber = shared.DefaultMaxBatchNumber
} }
return &BackFillService{ return &BackFillService{
Indexer: indexer, Indexer: indexer,
@ -112,7 +108,7 @@ func NewBackFillService(settings *Config, screenAndServeChan chan shared.Convert
}, nil }, nil
} }
// BackFill periodically checks for and fills in gaps in the super node db // BackFill periodically checks for and fills in gaps in the watcher db
func (bfs *BackFillService) BackFill(wg *sync.WaitGroup) { func (bfs *BackFillService) BackFill(wg *sync.WaitGroup) {
ticker := time.NewTicker(bfs.GapCheckFrequency) ticker := time.NewTicker(bfs.GapCheckFrequency)
go func() { go func() {
@ -126,7 +122,7 @@ func (bfs *BackFillService) BackFill(wg *sync.WaitGroup) {
case <-ticker.C: case <-ticker.C:
gaps, err := bfs.Retriever.RetrieveGapsInData(bfs.validationLevel) gaps, err := bfs.Retriever.RetrieveGapsInData(bfs.validationLevel)
if err != nil { if err != nil {
log.Errorf("%s super node db backFill RetrieveGapsInData error: %v", bfs.chain.String(), err) log.Errorf("%s watcher db backFill RetrieveGapsInData error: %v", bfs.chain.String(), err)
continue continue
} }
// spin up worker goroutines for this search pass // spin up worker goroutines for this search pass
@ -140,7 +136,7 @@ func (bfs *BackFillService) BackFill(wg *sync.WaitGroup) {
log.Infof("backFilling %s data from %d to %d", bfs.chain.String(), gap.Start, gap.Stop) log.Infof("backFilling %s data from %d to %d", bfs.chain.String(), gap.Start, gap.Stop)
blockRangeBins, err := utils.GetBlockHeightBins(gap.Start, gap.Stop, bfs.BatchSize) blockRangeBins, err := utils.GetBlockHeightBins(gap.Start, gap.Stop, bfs.BatchSize)
if err != nil { if err != nil {
log.Errorf("%s super node db backFill GetBlockHeightBins error: %v", bfs.chain.String(), err) log.Errorf("%s watcher db backFill GetBlockHeightBins error: %v", bfs.chain.String(), err)
continue continue
} }
for _, heights := range blockRangeBins { for _, heights := range blockRangeBins {

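For clarity on the binning step above, here is a hypothetical re-implementation of what a helper like utils.GetBlockHeightBins does; the actual utils implementation is not part of this diff:

package main

import (
	"errors"
	"fmt"
)

// getBlockHeightBins splits the inclusive [start, stop] gap into consecutive
// bins of at most batchSize heights, so each bin can be handed to its own
// backFill worker goroutine.
func getBlockHeightBins(start, stop, batchSize uint64) ([][]uint64, error) {
	if stop < start {
		return nil, fmt.Errorf("stop height %d is below start height %d", stop, start)
	}
	if batchSize == 0 {
		return nil, errors.New("batchSize must be greater than zero")
	}
	bins := make([][]uint64, 0)
	for bottom := start; bottom <= stop; bottom += batchSize {
		top := bottom + batchSize - 1
		if top > stop {
			top = stop
		}
		bin := make([]uint64, 0, top-bottom+1)
		for h := bottom; h <= top; h++ {
			bin = append(bin, h)
		}
		bins = append(bins, bin)
		if top == stop { // avoid uint64 overflow when stop is near the max height
			break
		}
	}
	return bins, nil
}

func main() {
	bins, _ := getBlockHeightBins(0, 250, 100)
	fmt.Println(len(bins)) // 3 bins: [0..99], [100..199], [200..250]
}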
View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License // You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>. // along with this program. If not, see <http://www.gnu.org/licenses/>.
package watcher_test package historical_test
import ( import (
"sync" "sync"
@ -25,14 +25,14 @@ import (
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/historical"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
mocks2 "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared/mocks" mocks2 "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared/mocks"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/watcher"
) )
var _ = Describe("BackFiller", func() { var _ = Describe("BackFiller", func() {
Describe("FillGaps", func() { Describe("FillGaps", func() {
It("Periodically checks for and fills in gaps in the super node's data", func() { It("Periodically checks for and fills in gaps in the watcher's data", func() {
mockCidRepo := &mocks.CIDIndexer{ mockCidRepo := &mocks.CIDIndexer{
ReturnErr: nil, ReturnErr: nil,
} }
@ -59,15 +59,15 @@ var _ = Describe("BackFiller", func() {
}, },
} }
quitChan := make(chan bool, 1) quitChan := make(chan bool, 1)
backfiller := &watcher.BackFillService{ backfiller := &historical.BackFillService{
Indexer: mockCidRepo, Indexer: mockCidRepo,
Publisher: mockPublisher, Publisher: mockPublisher,
Converter: mockConverter, Converter: mockConverter,
Fetcher: mockFetcher, Fetcher: mockFetcher,
Retriever: mockRetriever, Retriever: mockRetriever,
GapCheckFrequency: time.Second * 2, GapCheckFrequency: time.Second * 2,
BatchSize: watcher.DefaultMaxBatchSize, BatchSize: shared.DefaultMaxBatchSize,
BatchNumber: watcher.DefaultMaxBatchNumber, BatchNumber: shared.DefaultMaxBatchNumber,
QuitChan: quitChan, QuitChan: quitChan,
} }
wg := &sync.WaitGroup{} wg := &sync.WaitGroup{}
@ -114,15 +114,15 @@ var _ = Describe("BackFiller", func() {
}, },
} }
quitChan := make(chan bool, 1) quitChan := make(chan bool, 1)
backfiller := &watcher.BackFillService{ backfiller := &historical.BackFillService{
Indexer: mockCidRepo, Indexer: mockCidRepo,
Publisher: mockPublisher, Publisher: mockPublisher,
Converter: mockConverter, Converter: mockConverter,
Fetcher: mockFetcher, Fetcher: mockFetcher,
Retriever: mockRetriever, Retriever: mockRetriever,
GapCheckFrequency: time.Second * 2, GapCheckFrequency: time.Second * 2,
BatchSize: watcher.DefaultMaxBatchSize, BatchSize: shared.DefaultMaxBatchSize,
BatchNumber: watcher.DefaultMaxBatchNumber, BatchNumber: shared.DefaultMaxBatchNumber,
QuitChan: quitChan, QuitChan: quitChan,
} }
wg := &sync.WaitGroup{} wg := &sync.WaitGroup{}
@ -168,15 +168,15 @@ var _ = Describe("BackFiller", func() {
}, },
} }
quitChan := make(chan bool, 1) quitChan := make(chan bool, 1)
backfiller := &watcher.BackFillService{ backfiller := &historical.BackFillService{
Indexer: mockCidRepo, Indexer: mockCidRepo,
Publisher: mockPublisher, Publisher: mockPublisher,
Converter: mockConverter, Converter: mockConverter,
Fetcher: mockFetcher, Fetcher: mockFetcher,
Retriever: mockRetriever, Retriever: mockRetriever,
GapCheckFrequency: time.Second * 2, GapCheckFrequency: time.Second * 2,
BatchSize: watcher.DefaultMaxBatchSize, BatchSize: shared.DefaultMaxBatchSize,
BatchNumber: watcher.DefaultMaxBatchNumber, BatchNumber: shared.DefaultMaxBatchNumber,
QuitChan: quitChan, QuitChan: quitChan,
} }
wg := &sync.WaitGroup{} wg := &sync.WaitGroup{}

View File

@ -14,24 +14,7 @@
// You should have received a copy of the GNU Affero General Public License // You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>. // along with this program. If not, see <http://www.gnu.org/licenses/>.
package core package node
import (
"fmt"
)
type NodeType int
const (
GETH NodeType = iota
PARITY
INFURA
GANACHE
)
const (
KOVAN_NETWORK_ID = 42
)
type Node struct { type Node struct {
GenesisBlock string GenesisBlock string
@ -39,19 +22,3 @@ type Node struct {
ID string ID string
ClientName string ClientName string
} }
type ParityNodeInfo struct {
Track string
ParityVersion `json:"version"`
Hash string
}
func (pn ParityNodeInfo) String() string {
return fmt.Sprintf("Parity/v%d.%d.%d/", pn.Major, pn.Minor, pn.Patch)
}
type ParityVersion struct {
Major int
Minor int
Patch int
}

View File

@ -22,16 +22,16 @@ import (
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
_ "github.com/lib/pq" //postgres driver _ "github.com/lib/pq" //postgres driver
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/config" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/config"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node"
) )
type DB struct { type DB struct {
*sqlx.DB *sqlx.DB
Node core.Node Node node.Node
NodeID int64 NodeID int64
} }
func NewDB(databaseConfig config.Database, node core.Node) (*DB, error) { func NewDB(databaseConfig config.Database, node node.Node) (*DB, error) {
connectString := config.DbConnectionString(databaseConfig) connectString := config.DbConnectionString(databaseConfig)
db, connectErr := sqlx.Connect("postgres", connectString) db, connectErr := sqlx.Connect("postgres", connectString)
if connectErr != nil { if connectErr != nil {
@ -55,7 +55,7 @@ func NewDB(databaseConfig config.Database, node core.Node) (*DB, error) {
return &pg, nil return &pg, nil
} }
func (db *DB) CreateNode(node *core.Node) error { func (db *DB) CreateNode(node *node.Node) error {
var nodeID int64 var nodeID int64
err := db.QueryRow( err := db.QueryRow(
`INSERT INTO nodes (genesis_block, network_id, node_id, client_name) `INSERT INTO nodes (genesis_block, network_id, node_id, client_name)

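With core.Node replaced by node.Node, constructing a DB handle now looks like this minimal sketch (the connection parameters are hypothetical):

package main

import (
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/config"
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/node"
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
)

func main() {
	// Hypothetical connection parameters; in practice these come from the .toml config
	dbConfig := config.Database{Hostname: "localhost", Port: 5432, Name: "vulcanize_public"}
	info := node.Node{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"}
	db, err := postgres.NewDB(dbConfig, info)
	if err != nil {
		panic(err)
	}
	defer db.Close()
}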
View File

@ -28,7 +28,7 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/config" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/config"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/test_config" "github.com/vulcanize/ipfs-blockchain-watcher/test_config"
) )
@ -84,7 +84,7 @@ var _ = Describe("Postgres DB", func() {
It("throws error when can't connect to the database", func() { It("throws error when can't connect to the database", func() {
invalidDatabase := config.Database{} invalidDatabase := config.Database{}
node := core.Node{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"} node := node.Node{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"}
_, err := postgres.NewDB(invalidDatabase, node) _, err := postgres.NewDB(invalidDatabase, node)
@ -94,7 +94,7 @@ var _ = Describe("Postgres DB", func() {
It("throws error when can't create node", func() { It("throws error when can't create node", func() {
badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100)) badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100))
node := core.Node{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"} node := node.Node{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"}
_, err := postgres.NewDB(test_config.DBConfig, node) _, err := postgres.NewDB(test_config.DBConfig, node)

View File

@ -23,7 +23,7 @@ import (
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/config" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/config"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
"github.com/vulcanize/ipfs-blockchain-watcher/utils" "github.com/vulcanize/ipfs-blockchain-watcher/utils"
@ -55,15 +55,15 @@ type Config struct {
IPFSMode shared.IPFSMode IPFSMode shared.IPFSMode
HTTPClient interface{} // Note this client is expected to support the retrieval of the specified data type(s) HTTPClient interface{} // Note this client is expected to support the retrieval of the specified data type(s)
NodeInfo core.Node // Info for the associated node NodeInfo node.Node // Info for the associated node
Ranges [][2]uint64 // The block height ranges to resync Ranges [][2]uint64 // The block height ranges to resync
BatchSize uint64 // BatchSize for the resync http calls (client has to support batch sizing) BatchSize uint64 // BatchSize for the resync http calls (client has to support batch sizing)
Timeout time.Duration // HTTP connection timeout in seconds Timeout time.Duration // HTTP connection timeout in seconds
BatchNumber uint64 BatchNumber uint64
} }
// NewReSyncConfig fills and returns a resync config from toml parameters // NewConfig fills and returns a resync config from toml parameters
func NewReSyncConfig() (*Config, error) { func NewConfig() (*Config, error) {
c := new(Config) c := new(Config)
var err error var err error

View File

@ -21,8 +21,8 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/builders"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/watcher"
"github.com/vulcanize/ipfs-blockchain-watcher/utils" "github.com/vulcanize/ipfs-blockchain-watcher/utils"
) )
@ -63,37 +63,37 @@ type Service struct {
// NewResyncService creates and returns a resync service from the provided settings // NewResyncService creates and returns a resync service from the provided settings
func NewResyncService(settings *Config) (Resync, error) { func NewResyncService(settings *Config) (Resync, error) {
publisher, err := watcher.NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.DB, settings.IPFSMode) publisher, err := builders.NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.DB, settings.IPFSMode)
if err != nil { if err != nil {
return nil, err return nil, err
} }
indexer, err := watcher.NewCIDIndexer(settings.Chain, settings.DB, settings.IPFSMode) indexer, err := builders.NewCIDIndexer(settings.Chain, settings.DB, settings.IPFSMode)
if err != nil { if err != nil {
return nil, err return nil, err
} }
converter, err := watcher.NewPayloadConverter(settings.Chain) converter, err := builders.NewPayloadConverter(settings.Chain)
if err != nil { if err != nil {
return nil, err return nil, err
} }
retriever, err := watcher.NewCIDRetriever(settings.Chain, settings.DB) retriever, err := builders.NewCIDRetriever(settings.Chain, settings.DB)
if err != nil { if err != nil {
return nil, err return nil, err
} }
fetcher, err := watcher.NewPaylaodFetcher(settings.Chain, settings.HTTPClient, settings.Timeout) fetcher, err := builders.NewPaylaodFetcher(settings.Chain, settings.HTTPClient, settings.Timeout)
if err != nil { if err != nil {
return nil, err return nil, err
} }
cleaner, err := watcher.NewCleaner(settings.Chain, settings.DB) cleaner, err := builders.NewCleaner(settings.Chain, settings.DB)
if err != nil { if err != nil {
return nil, err return nil, err
} }
batchSize := settings.BatchSize batchSize := settings.BatchSize
if batchSize == 0 { if batchSize == 0 {
batchSize = watcher.DefaultMaxBatchSize batchSize = shared.DefaultMaxBatchSize
} }
batchNumber := int64(settings.BatchNumber) batchNumber := int64(settings.BatchNumber)
if batchNumber == 0 { if batchNumber == 0 {
batchNumber = watcher.DefaultMaxBatchNumber batchNumber = shared.DefaultMaxBatchNumber
} }
return &Service{ return &Service{
Indexer: indexer, Indexer: indexer,

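A minimal usage sketch tying the renamed resync.NewConfig to the service constructor above; the Resync() entry point is an assumption for illustration, as the method itself is not shown in this diff:

package main

import (
	log "github.com/sirupsen/logrus"

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/resync"
)

func main() {
	cfg, err := resync.NewConfig()
	if err != nil {
		log.Fatal(err)
	}
	rService, err := resync.NewResyncService(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// Resync() as the run method is an assumption in this sketch
	if err := rService.Resync(); err != nil {
		log.Fatal(err)
	}
}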
View File

@ -14,8 +14,9 @@
// You should have received a copy of the GNU Affero General Public License // You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>. // along with this program. If not, see <http://www.gnu.org/licenses/>.
package config package shared
type Client struct { const (
IPCPath string DefaultMaxBatchSize uint64 = 100
} DefaultMaxBatchNumber int64 = 50
)

View File

@ -24,7 +24,7 @@ import (
"github.com/btcsuite/btcd/rpcclient" "github.com/btcsuite/btcd/rpcclient"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node"
) )
// Env variables // Env variables
@ -51,7 +51,7 @@ const (
) )
// GetEthNodeAndClient returns eth node info and client from path url // GetEthNodeAndClient returns eth node info and client from path url
func GetEthNodeAndClient(path string) (core.Node, *rpc.Client, error) { func GetEthNodeAndClient(path string) (node.Node, *rpc.Client, error) {
viper.BindEnv("ethereum.nodeID", ETH_NODE_ID) viper.BindEnv("ethereum.nodeID", ETH_NODE_ID)
viper.BindEnv("ethereum.clientName", ETH_CLIENT_NAME) viper.BindEnv("ethereum.clientName", ETH_CLIENT_NAME)
viper.BindEnv("ethereum.genesisBlock", ETH_GENESIS_BLOCK) viper.BindEnv("ethereum.genesisBlock", ETH_GENESIS_BLOCK)
@ -59,9 +59,9 @@ func GetEthNodeAndClient(path string) (core.Node, *rpc.Client, error) {
rpcClient, err := rpc.Dial(path) rpcClient, err := rpc.Dial(path)
if err != nil { if err != nil {
return core.Node{}, nil, err return node.Node{}, nil, err
} }
return core.Node{ return node.Node{
ID: viper.GetString("ethereum.nodeID"), ID: viper.GetString("ethereum.nodeID"),
ClientName: viper.GetString("ethereum.clientName"), ClientName: viper.GetString("ethereum.clientName"),
GenesisBlock: viper.GetString("ethereum.genesisBlock"), GenesisBlock: viper.GetString("ethereum.genesisBlock"),
@ -94,7 +94,7 @@ func GetIPFSMode() (IPFSMode, error) {
} }
// GetBtcNodeAndClient returns btc node info from path url // GetBtcNodeAndClient returns btc node info from path url
func GetBtcNodeAndClient(path string) (core.Node, *rpcclient.ConnConfig) { func GetBtcNodeAndClient(path string) (node.Node, *rpcclient.ConnConfig) {
viper.BindEnv("bitcoin.nodeID", BTC_NODE_ID) viper.BindEnv("bitcoin.nodeID", BTC_NODE_ID)
viper.BindEnv("bitcoin.clientName", BTC_CLIENT_NAME) viper.BindEnv("bitcoin.clientName", BTC_CLIENT_NAME)
viper.BindEnv("bitcoin.genesisBlock", BTC_GENESIS_BLOCK) viper.BindEnv("bitcoin.genesisBlock", BTC_GENESIS_BLOCK)
@ -103,7 +103,7 @@ func GetBtcNodeAndClient(path string) (core.Node, *rpcclient.ConnConfig) {
viper.BindEnv("bitcoin.user", BTC_NODE_USER) viper.BindEnv("bitcoin.user", BTC_NODE_USER)
// For bitcoin we load in node info from the config because there is no RPC endpoint to retrieve this from the node // For bitcoin we load in node info from the config because there is no RPC endpoint to retrieve this from the node
return core.Node{ return node.Node{
ID: viper.GetString("bitcoin.nodeID"), ID: viper.GetString("bitcoin.nodeID"),
ClientName: viper.GetString("bitcoin.clientName"), ClientName: viper.GetString("bitcoin.clientName"),
GenesisBlock: viper.GetString("bitcoin.genesisBlock"), GenesisBlock: viper.GetString("bitcoin.genesisBlock"),

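A minimal sketch of the relocated node-info helpers in use (the endpoint address is hypothetical):

package main

import (
	"fmt"

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)

func main() {
	// Hypothetical local geth endpoint; node identity fields are filled from
	// the viper/env bindings inside the helper
	info, rpcClient, err := shared.GetEthNodeAndClient("http://127.0.0.1:8545")
	if err != nil {
		panic(err)
	}
	defer rpcClient.Close()
	fmt.Printf("connected to %s (network %s)\n", info.ClientName, info.NetworkID)
}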
View File

@ -17,62 +17,26 @@
package shared package shared
import ( import (
"bytes"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
"github.com/ipfs/go-cid"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-ipfs-blockstore" "github.com/ipfs/go-ipfs-blockstore"
"github.com/ipfs/go-ipfs-ds-help" "github.com/ipfs/go-ipfs-ds-help"
node "github.com/ipfs/go-ipld-format" node "github.com/ipfs/go-ipld-format"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs"
) )
// ListContainsString used to check if a list of strings contains a particular string // HandleZeroAddrPointer will return an empty string for a nil address pointer
func ListContainsString(sss []string, s string) bool { func HandleZeroAddrPointer(to *common.Address) string {
for _, str := range sss {
if s == str {
return true
}
}
return false
}
// IPLDsContainBytes used to check if a list of strings contains a particular string
func IPLDsContainBytes(iplds []ipfs.BlockModel, b []byte) bool {
for _, ipld := range iplds {
if bytes.Equal(ipld.Data, b) {
return true
}
}
return false
}
// ListContainsGap used to check if a list of Gaps contains a particular Gap
func ListContainsGap(gapList []Gap, gap Gap) bool {
for _, listGap := range gapList {
if listGap == gap {
return true
}
}
return false
}
// HandleNullAddrPointer will return an empty string for a nil address pointer
func HandleNullAddrPointer(to *common.Address) string {
if to == nil { if to == nil {
return "" return ""
} }
return to.Hex() return to.Hex()
} }
// HandleNullAddr will return an empty string for a a null address // HandleZeroAddr will return an empty string for a 0 value address
func HandleNullAddr(to common.Address) string { func HandleZeroAddr(to common.Address) string {
if to.Hex() == "0x0000000000000000000000000000000000000000" { if to.Hex() == "0x0000000000000000000000000000000000000000" {
return "" return ""
} }
@ -95,7 +59,7 @@ func PublishIPLD(tx *sqlx.Tx, i node.Node) error {
return err return err
} }
// FetchIPLD is used to retrieve an ipld from Postgres blockstore with the provided tx // FetchIPLD is used to retrieve an ipld from Postgres blockstore with the provided tx and cid string
func FetchIPLD(tx *sqlx.Tx, cid string) ([]byte, error) { func FetchIPLD(tx *sqlx.Tx, cid string) ([]byte, error) {
mhKey, err := MultihashKeyFromCIDString(cid) mhKey, err := MultihashKeyFromCIDString(cid)
if err != nil { if err != nil {
@ -106,6 +70,19 @@ func FetchIPLD(tx *sqlx.Tx, cid string) ([]byte, error) {
return block, tx.Get(&block, pgStr, mhKey) return block, tx.Get(&block, pgStr, mhKey)
} }
// FetchIPLDByMhKey is used to retrieve an ipld from Postgres blockstore with the provided tx and mhkey string
func FetchIPLDByMhKey(tx *sqlx.Tx, mhKey string) ([]byte, error) {
pgStr := `SELECT data FROM public.blocks WHERE key = $1`
var block []byte
return block, tx.Get(&block, pgStr, mhKey)
}
// MultihashKeyFromCID converts a cid into a blockstore-prefixed multihash db key string
func MultihashKeyFromCID(c cid.Cid) string {
dbKey := dshelp.MultihashToDsKey(c.Hash())
return blockstore.BlockPrefix.String() + dbKey.String()
}
// MultihashKeyFromCIDString converts a cid string into a blockstore-prefixed multihash db key string // MultihashKeyFromCIDString converts a cid string into a blockstore-prefixed multihash db key string
func MultihashKeyFromCIDString(c string) (string, error) { func MultihashKeyFromCIDString(c string) (string, error) {
dc, err := cid.Decode(c) dc, err := cid.Decode(c)

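To make the key scheme concrete, here is a small standalone sketch of the derivation FetchIPLDByMhKey depends on: the mh_key stored in the CID tables is the same /blocks-prefixed multihash key the Postgres blockstore uses in public.blocks.

package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	dshelp "github.com/ipfs/go-ipfs-ds-help"
	"github.com/multiformats/go-multihash"
)

func main() {
	// Build a throwaway CIDv1 over some bytes (mirroring the TestCID helper in the next file)
	pref := cid.Prefix{Version: 1, Codec: cid.Raw, MhType: multihash.KECCAK_256, MhLength: -1}
	c, err := pref.Sum([]byte("example ipld block"))
	if err != nil {
		panic(err)
	}
	// Derive the blockstore-prefixed multihash key, exactly as MultihashKeyFromCID does
	mhKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String()
	fmt.Println(mhKey) // e.g. /blocks/<base32-encoded multihash>
}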
View File

@ -17,16 +17,70 @@
package shared package shared
import ( import (
"bytes"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/config" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/config"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/node"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
) )
// SetupDB is use to setup a db for super node tests // SetupDB is used to set up a db for watcher tests
func SetupDB() (*postgres.DB, error) { func SetupDB() (*postgres.DB, error) {
return postgres.NewDB(config.Database{ return postgres.NewDB(config.Database{
Hostname: "localhost", Hostname: "localhost",
Name: "vulcanize_testing", Name: "vulcanize_testing",
Port: 5432, Port: 5432,
}, core.Node{}) }, node.Node{})
}
// ListContainsString used to check if a list of strings contains a particular string
func ListContainsString(sss []string, s string) bool {
for _, str := range sss {
if s == str {
return true
}
}
return false
}
// IPLDsContainBytes used to check if a list of IPLD blocks contains a particular byte slice
func IPLDsContainBytes(iplds []ipfs.BlockModel, b []byte) bool {
for _, ipld := range iplds {
if bytes.Equal(ipld.Data, b) {
return true
}
}
return false
}
// ListContainsGap used to check if a list of Gaps contains a particular Gap
func ListContainsGap(gapList []Gap, gap Gap) bool {
for _, listGap := range gapList {
if listGap == gap {
return true
}
}
return false
}
// TestCID creates a basic CID for testing purposes
func TestCID(b []byte) cid.Cid {
pref := cid.Prefix{
Version: 1,
Codec: cid.Raw,
MhType: multihash.KECCAK_256,
MhLength: -1,
}
c, _ := pref.Sum(b)
return c
}
// PublishMockIPLD writes a mhkey-data pair to the public.blocks table so that test data can FK reference the mhkey
func PublishMockIPLD(db *postgres.DB, mhKey string, mockData []byte) error {
_, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`, mhKey, mockData)
return err
} }

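A minimal sketch of how these helpers combine in a test (it assumes the local vulcanize_testing database that SetupDB points at):

package shared_test

import (
	"testing"

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)

// TestSeedMockIPLD sketches the intended flow: derive a deterministic CID and
// mh_key for some mock bytes, then seed public.blocks so that mock *_cids rows
// can FK-reference the key.
func TestSeedMockIPLD(t *testing.T) {
	db, err := shared.SetupDB()
	if err != nil {
		t.Fatal(err)
	}
	mockData := []byte("mock header rlp")
	mhKey := shared.MultihashKeyFromCID(shared.TestCID(mockData))
	if err := shared.PublishMockIPLD(db, mhKey, mockData); err != nil {
		t.Fatal(err)
	}
}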
View File

@ -1,42 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// Streamer is used by watchers to stream eth data from a vulcanizedb super node
package streamer
import (
"github.com/ethereum/go-ethereum/rpc"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/core"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/watcher"
)
// SuperNodeStreamer is the underlying struct for the shared.SuperNodeStreamer interface
type SuperNodeStreamer struct {
Client core.RPCClient
}
// NewSuperNodeStreamer creates a pointer to a new SuperNodeStreamer which satisfies the ISuperNodeStreamer interface
func NewSuperNodeStreamer(client core.RPCClient) *SuperNodeStreamer {
return &SuperNodeStreamer{
Client: client,
}
}
// Stream is the main loop for subscribing to data from a vulcanizedb super node
func (sds *SuperNodeStreamer) Stream(payloadChan chan watcher.SubscriptionPayload, rlpParams []byte) (*rpc.ClientSubscription, error) {
return sds.Client.Subscribe("vdb", payloadChan, "stream", rlpParams)
}

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License // You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>. // along with this program. If not, see <http://www.gnu.org/licenses/>.
package watcher package watch
import ( import (
"context" "context"
@ -25,8 +25,8 @@ import (
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/core"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/node"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
v "github.com/vulcanize/ipfs-blockchain-watcher/version" v "github.com/vulcanize/ipfs-blockchain-watcher/version"
) )
@ -37,22 +37,22 @@ const APIName = "vdb"
// APIVersion is the version of the state diffing service API // APIVersion is the version of the state diffing service API
const APIVersion = "0.0.1" const APIVersion = "0.0.1"
// PublicSuperNodeAPI is the public api for the super node // PublicWatcherAPI is the public api for the watcher
type PublicSuperNodeAPI struct { type PublicWatcherAPI struct {
sn SuperNode w Watcher
} }
// NewPublicSuperNodeAPI creates a new PublicSuperNodeAPI with the provided underlying SyncPublishScreenAndServe process // NewPublicWatcherAPI creates a new PublicWatcherAPI with the provided underlying Watcher process
func NewPublicSuperNodeAPI(superNodeInterface SuperNode) *PublicSuperNodeAPI { func NewPublicWatcherAPI(w Watcher) *PublicWatcherAPI {
return &PublicSuperNodeAPI{ return &PublicWatcherAPI{
sn: superNodeInterface, w: w,
} }
} }
// Stream is the public method to setup a subscription that fires off super node payloads as they are processed // Stream is the public method to setup a subscription that fires off IPLD payloads as they are processed
func (api *PublicSuperNodeAPI) Stream(ctx context.Context, rlpParams []byte) (*rpc.Subscription, error) { func (api *PublicWatcherAPI) Stream(ctx context.Context, rlpParams []byte) (*rpc.Subscription, error) {
var params shared.SubscriptionSettings var params shared.SubscriptionSettings
switch api.sn.Chain() { switch api.w.Chain() {
case shared.Ethereum: case shared.Ethereum:
var ethParams eth.SubscriptionSettings var ethParams eth.SubscriptionSettings
if err := rlp.DecodeBytes(rlpParams, &ethParams); err != nil { if err := rlp.DecodeBytes(rlpParams, &ethParams); err != nil {
@ -81,22 +81,22 @@ func (api *PublicSuperNodeAPI) Stream(ctx context.Context, rlpParams []byte) (*r
// subscribe to events from the SyncPublishScreenAndServe service // subscribe to events from the SyncPublishScreenAndServe service
payloadChannel := make(chan SubscriptionPayload, PayloadChanBufferSize) payloadChannel := make(chan SubscriptionPayload, PayloadChanBufferSize)
quitChan := make(chan bool, 1) quitChan := make(chan bool, 1)
go api.sn.Subscribe(rpcSub.ID, payloadChannel, quitChan, params) go api.w.Subscribe(rpcSub.ID, payloadChannel, quitChan, params)
// loop and await payloads and relay them to the subscriber using notifier // loop and await payloads and relay them to the subscriber using notifier
for { for {
select { select {
case packet := <-payloadChannel: case packet := <-payloadChannel:
if err := notifier.Notify(rpcSub.ID, packet); err != nil { if err := notifier.Notify(rpcSub.ID, packet); err != nil {
log.Error("Failed to send super node packet", "err", err) log.Error("Failed to send watcher data packet", "err", err)
api.sn.Unsubscribe(rpcSub.ID) api.w.Unsubscribe(rpcSub.ID)
return return
} }
case <-rpcSub.Err(): case <-rpcSub.Err():
api.sn.Unsubscribe(rpcSub.ID) api.w.Unsubscribe(rpcSub.ID)
return return
case <-quitChan: case <-quitChan:
// don't need to unsubscribe to super node, the service does so before sending the quit signal this way // don't need to unsubscribe from the watcher, the service does so before sending the quit signal this way
return return
} }
} }
@ -105,21 +105,21 @@ func (api *PublicSuperNodeAPI) Stream(ctx context.Context, rlpParams []byte) (*r
return rpcSub, nil return rpcSub, nil
} }
// Node is a public rpc method to allow transformers to fetch the node info for the super node // Node is a public rpc method to allow transformers to fetch the node info for the watcher
// NOTE: this is the node info for the node that the super node is syncing from, not the node info for the super node itself // NOTE: this is the node info for the node that the watcher is syncing from, not the node info for the watcher itself
func (api *PublicSuperNodeAPI) Node() *core.Node { func (api *PublicWatcherAPI) Node() *node.Node {
return api.sn.Node() return api.w.Node()
} }
// Chain returns the chain type that this super node instance supports // Chain returns the chain type that this watcher instance supports
func (api *PublicSuperNodeAPI) Chain() shared.ChainType { func (api *PublicWatcherAPI) Chain() shared.ChainType {
return api.sn.Chain() return api.w.Chain()
} }
// Struct for holding super node metadata // Struct for holding watcher metadata
type InfoAPI struct{} type InfoAPI struct{}
// NewPublicSuperNodeAPI creates a new PublicSuperNodeAPI with the provided underlying SyncPublishScreenAndServe process // NewInfoAPI creates a new InfoAPI
func NewInfoAPI() *InfoAPI { func NewInfoAPI() *InfoAPI {
return &InfoAPI{} return &InfoAPI{}
} }
@@ -131,7 +131,7 @@ func (iapi *InfoAPI) Modules() map[string]string {
} }
} }
// NodeInfo gathers and returns a collection of metadata for the super node // NodeInfo gathers and returns a collection of metadata for the watcher
func (iapi *InfoAPI) NodeInfo() *p2p.NodeInfo { func (iapi *InfoAPI) NodeInfo() *p2p.NodeInfo {
return &p2p.NodeInfo{ return &p2p.NodeInfo{
// TODO: formalize this // TODO: formalize this
@@ -140,7 +140,7 @@ func (iapi *InfoAPI) NodeInfo() *p2p.NodeInfo {
} }
} }
// Version returns the version of the super node // Version returns the version of the watcher
func (iapi *InfoAPI) Version() string { func (iapi *InfoAPI) Version() string {
return v.VersionWithMeta return v.VersionWithMeta
} }
View File

@@ -14,18 +14,17 @@
// You should have received a copy of the GNU Affero General Public License // You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>. // along with this program. If not, see <http://www.gnu.org/licenses/>.
package watcher package watch
import ( import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"time"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/config" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/config"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
"github.com/vulcanize/ipfs-blockchain-watcher/utils" "github.com/vulcanize/ipfs-blockchain-watcher/utils"
@@ -33,27 +32,19 @@ import (
// Env variables // Env variables
const ( const (
SUPERNODE_CHAIN = "SUPERNODE_CHAIN" SUPERNODE_CHAIN = "SUPERNODE_CHAIN"
SUPERNODE_SYNC = "SUPERNODE_SYNC" SUPERNODE_SYNC = "SUPERNODE_SYNC"
SUPERNODE_WORKERS = "SUPERNODE_WORKERS" SUPERNODE_WORKERS = "SUPERNODE_WORKERS"
SUPERNODE_SERVER = "SUPERNODE_SERVER" SUPERNODE_SERVER = "SUPERNODE_SERVER"
SUPERNODE_WS_PATH = "SUPERNODE_WS_PATH" SUPERNODE_WS_PATH = "SUPERNODE_WS_PATH"
SUPERNODE_IPC_PATH = "SUPERNODE_IPC_PATH" SUPERNODE_IPC_PATH = "SUPERNODE_IPC_PATH"
SUPERNODE_HTTP_PATH = "SUPERNODE_HTTP_PATH" SUPERNODE_HTTP_PATH = "SUPERNODE_HTTP_PATH"
SUPERNODE_BACKFILL = "SUPERNODE_BACKFILL" SUPERNODE_BACKFILL = "SUPERNODE_BACKFILL"
SUPERNODE_FREQUENCY = "SUPERNODE_FREQUENCY"
SUPERNODE_BATCH_SIZE = "SUPERNODE_BATCH_SIZE"
SUPERNODE_BATCH_NUMBER = "SUPERNODE_BATCH_NUMBER"
SUPERNODE_VALIDATION_LEVEL = "SUPERNODE_VALIDATION_LEVEL"
SYNC_MAX_IDLE_CONNECTIONS = "SYNC_MAX_IDLE_CONNECTIONS" SYNC_MAX_IDLE_CONNECTIONS = "SYNC_MAX_IDLE_CONNECTIONS"
SYNC_MAX_OPEN_CONNECTIONS = "SYNC_MAX_OPEN_CONNECTIONS" SYNC_MAX_OPEN_CONNECTIONS = "SYNC_MAX_OPEN_CONNECTIONS"
SYNC_MAX_CONN_LIFETIME = "SYNC_MAX_CONN_LIFETIME" SYNC_MAX_CONN_LIFETIME = "SYNC_MAX_CONN_LIFETIME"
BACKFILL_MAX_IDLE_CONNECTIONS = "BACKFILL_MAX_IDLE_CONNECTIONS"
BACKFILL_MAX_OPEN_CONNECTIONS = "BACKFILL_MAX_OPEN_CONNECTIONS"
BACKFILL_MAX_CONN_LIFETIME = "BACKFILL_MAX_CONN_LIFETIME"
SERVER_MAX_IDLE_CONNECTIONS = "SERVER_MAX_IDLE_CONNECTIONS" SERVER_MAX_IDLE_CONNECTIONS = "SERVER_MAX_IDLE_CONNECTIONS"
SERVER_MAX_OPEN_CONNECTIONS = "SERVER_MAX_OPEN_CONNECTIONS" SERVER_MAX_OPEN_CONNECTIONS = "SERVER_MAX_OPEN_CONNECTIONS"
SERVER_MAX_CONN_LIFETIME = "SERVER_MAX_CONN_LIFETIME" SERVER_MAX_CONN_LIFETIME = "SERVER_MAX_CONN_LIFETIME"
@@ -61,7 +52,6 @@ const (
// Config struct // Config struct
type Config struct { type Config struct {
// Ubiquitous fields
Chain shared.ChainType Chain shared.ChainType
IPFSPath string IPFSPath string
IPFSMode shared.IPFSMode IPFSMode shared.IPFSMode
@@ -77,21 +67,14 @@ type Config struct {
SyncDBConn *postgres.DB SyncDBConn *postgres.DB
Workers int Workers int
WSClient interface{} WSClient interface{}
NodeInfo core.Node NodeInfo node.Node
// Backfiller params // Historical switch
BackFill bool Historical bool
BackFillDBConn *postgres.DB
HTTPClient interface{}
Frequency time.Duration
BatchSize uint64
BatchNumber uint64
ValidationLevel int
Timeout time.Duration // HTTP connection timeout in seconds
} }
// NewSuperNodeConfig is used to initialize a SuperNode config from a .toml file // NewConfig is used to initialize a watcher config from a .toml file
// Separate chain supernode instances need to be run with separate ipfs paths in order to avoid lock contention on the ipfs repository lockfile // Separate chain watcher instances need to be run with separate ipfs paths in order to avoid lock contention on the ipfs repository lockfile
func NewSuperNodeConfig() (*Config, error) { func NewConfig() (*Config, error) {
c := new(Config) c := new(Config)
var err error var err error
@@ -106,6 +89,7 @@ func NewSuperNodeConfig() (*Config, error) {
viper.BindEnv("superNode.httpPath", SUPERNODE_HTTP_PATH) viper.BindEnv("superNode.httpPath", SUPERNODE_HTTP_PATH)
viper.BindEnv("superNode.backFill", SUPERNODE_BACKFILL) viper.BindEnv("superNode.backFill", SUPERNODE_BACKFILL)
c.Historical = viper.GetBool("superNode.backFill")
chain := viper.GetString("superNode.chain") chain := viper.GetString("superNode.chain")
c.Chain, err = shared.NewChainType(chain) c.Chain, err = shared.NewChainType(chain)
if err != nil { if err != nil {
@@ -174,70 +158,14 @@ func NewSuperNodeConfig() (*Config, error) {
c.ServeDBConn = &serveDB c.ServeDBConn = &serveDB
} }
c.BackFill = viper.GetBool("superNode.backFill")
if c.BackFill {
if err := c.BackFillFields(); err != nil {
return nil, err
}
}
return c, nil return c, nil
} }
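A quick sketch of exercising NewConfig purely from the environment, using the env var bindings above; the values are illustrative, and a real run would also need the database, ipfs, and node settings from the .toml file.

package main

import (
	"fmt"
	"os"

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch"
)

func main() {
	os.Setenv("SUPERNODE_CHAIN", "ethereum")
	os.Setenv("SUPERNODE_BACKFILL", "true") // flips Config.Historical via the binding above
	cfg, err := watch.NewConfig()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("chain=%s historical=%t\n", cfg.Chain.String(), cfg.Historical)
}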
// BackFillFields is used to fill in the BackFill fields of the config
func (c *Config) BackFillFields() error {
var err error
viper.BindEnv("ethereum.httpPath", shared.ETH_HTTP_PATH)
viper.BindEnv("bitcoin.httpPath", shared.BTC_HTTP_PATH)
viper.BindEnv("superNode.frequency", SUPERNODE_FREQUENCY)
viper.BindEnv("superNode.batchSize", SUPERNODE_BATCH_SIZE)
viper.BindEnv("superNode.batchNumber", SUPERNODE_BATCH_NUMBER)
viper.BindEnv("superNode.validationLevel", SUPERNODE_VALIDATION_LEVEL)
viper.BindEnv("superNode.timeout", shared.HTTP_TIMEOUT)
timeout := viper.GetInt("superNode.timeout")
if timeout < 15 {
timeout = 15
}
c.Timeout = time.Second * time.Duration(timeout)
switch c.Chain {
case shared.Ethereum:
ethHTTP := viper.GetString("ethereum.httpPath")
c.NodeInfo, c.HTTPClient, err = shared.GetEthNodeAndClient(fmt.Sprintf("http://%s", ethHTTP))
if err != nil {
return err
}
case shared.Bitcoin:
btcHTTP := viper.GetString("bitcoin.httpPath")
c.NodeInfo, c.HTTPClient = shared.GetBtcNodeAndClient(btcHTTP)
}
freq := viper.GetInt("superNode.frequency")
var frequency time.Duration
if freq <= 0 {
frequency = time.Second * 30
} else {
frequency = time.Second * time.Duration(freq)
}
c.Frequency = frequency
c.BatchSize = uint64(viper.GetInt64("superNode.batchSize"))
c.BatchNumber = uint64(viper.GetInt64("superNode.batchNumber"))
c.ValidationLevel = viper.GetInt("superNode.validationLevel")
backFillDBConn := overrideDBConnConfig(c.DBConfig, BackFill)
backFillDB := utils.LoadPostgres(backFillDBConn, c.NodeInfo)
c.BackFillDBConn = &backFillDB
return nil
}
type mode string type mode string
var ( var (
Sync mode = "sync" Sync mode = "sync"
BackFill mode = "backFill" Serve mode = "serve"
Serve mode = "serve"
) )
func overrideDBConnConfig(con config.Database, m mode) config.Database { func overrideDBConnConfig(con config.Database, m mode) config.Database {
@@ -249,13 +177,6 @@ func overrideDBConnConfig(con config.Database, m mode) config.Database {
con.MaxIdle = viper.GetInt("database.sync.maxIdle") con.MaxIdle = viper.GetInt("database.sync.maxIdle")
con.MaxOpen = viper.GetInt("database.sync.maxOpen") con.MaxOpen = viper.GetInt("database.sync.maxOpen")
con.MaxLifetime = viper.GetInt("database.sync.maxLifetime") con.MaxLifetime = viper.GetInt("database.sync.maxLifetime")
case BackFill:
viper.BindEnv("database.backFill.maxIdle", BACKFILL_MAX_IDLE_CONNECTIONS)
viper.BindEnv("database.backFill.maxOpen", BACKFILL_MAX_OPEN_CONNECTIONS)
viper.BindEnv("database.backFill.maxLifetime", BACKFILL_MAX_CONN_LIFETIME)
con.MaxIdle = viper.GetInt("database.backFill.maxIdle")
con.MaxOpen = viper.GetInt("database.backFill.maxOpen")
con.MaxLifetime = viper.GetInt("database.backFill.maxLifetime")
case Serve: case Serve:
viper.BindEnv("database.server.maxIdle", SERVER_MAX_IDLE_CONNECTIONS) viper.BindEnv("database.server.maxIdle", SERVER_MAX_IDLE_CONNECTIONS)
viper.BindEnv("database.server.maxOpen", SERVER_MAX_OPEN_CONNECTIONS) viper.BindEnv("database.server.maxOpen", SERVER_MAX_OPEN_CONNECTIONS)
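The override pattern in overrideDBConnConfig, reduced to its essentials with stand-in names (a local struct plays the role of config.Database here): each mode binds mode-scoped viper keys to its own env vars and then unconditionally replaces the shared base values.

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

// dbConfig stands in for the package's config.Database
type dbConfig struct {
	MaxIdle, MaxOpen, MaxLifetime int
}

// overrideForMode mirrors the Sync/Serve cases above; note that viper.GetInt
// returns 0 for unset keys, so an unset override zeroes the field rather
// than inheriting the base value
func overrideForMode(base dbConfig, mode, envPrefix string) dbConfig {
	viper.BindEnv("database."+mode+".maxIdle", envPrefix+"_MAX_IDLE_CONNECTIONS")
	viper.BindEnv("database."+mode+".maxOpen", envPrefix+"_MAX_OPEN_CONNECTIONS")
	viper.BindEnv("database."+mode+".maxLifetime", envPrefix+"_MAX_CONN_LIFETIME")
	base.MaxIdle = viper.GetInt("database." + mode + ".maxIdle")
	base.MaxOpen = viper.GetInt("database." + mode + ".maxOpen")
	base.MaxLifetime = viper.GetInt("database." + mode + ".maxLifetime")
	return base
}

func main() {
	base := dbConfig{MaxIdle: 5, MaxOpen: 10, MaxLifetime: 60}
	fmt.Println(overrideForMode(base, "server", "SERVER"))
}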
View File

@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License // You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>. // along with this program. If not, see <http://www.gnu.org/licenses/>.
package watcher package watch
import log "github.com/sirupsen/logrus" import log "github.com/sirupsen/logrus"
View File

@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License // You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>. // along with this program. If not, see <http://www.gnu.org/licenses/>.
package watcher package watch
import ( import (
"fmt" "fmt"
@@ -22,13 +22,14 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/node" ethnode "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/builders"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/node"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
) )
@@ -37,12 +38,12 @@ const (
PayloadChanBufferSize = 2000 PayloadChanBufferSize = 2000
) )
// SuperNode is the top level interface for streaming, converting to IPLDs, publishing, // Watcher is the top level interface for streaming, converting to IPLDs, publishing,
// and indexing all chain data; screening this data; and serving it up to subscribed clients // and indexing all chain data; screening this data; and serving it up to subscribed clients
// This service is compatible with the Ethereum service interface (node.Service) // This service is compatible with the Ethereum service interface (node.Service)
type SuperNode interface { type Watcher interface {
// APIs(), Protocols(), Start() and Stop() // APIs(), Protocols(), Start() and Stop()
node.Service ethnode.Service
// Data processing event loop // Data processing event loop
Sync(wg *sync.WaitGroup, forwardPayloadChan chan<- shared.ConvertedData) error Sync(wg *sync.WaitGroup, forwardPayloadChan chan<- shared.ConvertedData) error
// Pub-Sub handling event loop // Pub-Sub handling event loop
@@ -52,12 +53,12 @@ type SuperNode interface {
// Method to unsubscribe from the service // Method to unsubscribe from the service
Unsubscribe(id rpc.ID) Unsubscribe(id rpc.ID)
// Method to access the node info for the service // Method to access the node info for the service
Node() *core.Node Node() *node.Node
// Method to access chain type // Method to access chain type
Chain() shared.ChainType Chain() shared.ChainType
} }
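Since Service below is the intended implementation of this interface, a compile-time assertion (not part of this diff, shown as a sketch one could add inside the package) keeps the two from drifting apart:

// enforce at compile time that *Service satisfies Watcher
var _ Watcher = (*Service)(nil)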
// Service is the underlying struct for the super node // Service is the underlying struct for the watcher
type Service struct { type Service struct {
// Used to sync access to the Subscriptions // Used to sync access to the Subscriptions
sync.Mutex sync.Mutex
@@ -83,8 +84,8 @@ type Service struct {
Subscriptions map[common.Hash]map[rpc.ID]Subscription Subscriptions map[common.Hash]map[rpc.ID]Subscription
// A mapping of subscription params hash to the corresponding subscription params // A mapping of subscription params hash to the corresponding subscription params
SubscriptionTypes map[common.Hash]shared.SubscriptionSettings SubscriptionTypes map[common.Hash]shared.SubscriptionSettings
// Info for the Geth node that this super node is working with // Info for the Geth node that this watcher is working with
NodeInfo *core.Node NodeInfo *node.Node
// Number of publishAndIndex workers // Number of publishAndIndex workers
WorkerPoolSize int WorkerPoolSize int
// chain type for this service // chain type for this service
@@ -97,40 +98,40 @@ type Service struct {
serveWg *sync.WaitGroup serveWg *sync.WaitGroup
} }
// NewSuperNode creates a new super_node.Interface using an underlying super_node.Service struct // NewWatcher creates a new Watcher using an underlying Service struct
func NewSuperNode(settings *Config) (SuperNode, error) { func NewWatcher(settings *Config) (Watcher, error) {
sn := new(Service) sn := new(Service)
var err error var err error
// If we are syncing, initialize the needed interfaces // If we are syncing, initialize the needed interfaces
if settings.Sync { if settings.Sync {
sn.Streamer, sn.PayloadChan, err = NewPayloadStreamer(settings.Chain, settings.WSClient) sn.Streamer, sn.PayloadChan, err = builders.NewPayloadStreamer(settings.Chain, settings.WSClient)
if err != nil { if err != nil {
return nil, err return nil, err
} }
sn.Converter, err = NewPayloadConverter(settings.Chain) sn.Converter, err = builders.NewPayloadConverter(settings.Chain)
if err != nil { if err != nil {
return nil, err return nil, err
} }
sn.Publisher, err = NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.SyncDBConn, settings.IPFSMode) sn.Publisher, err = builders.NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.SyncDBConn, settings.IPFSMode)
if err != nil { if err != nil {
return nil, err return nil, err
} }
sn.Indexer, err = NewCIDIndexer(settings.Chain, settings.SyncDBConn, settings.IPFSMode) sn.Indexer, err = builders.NewCIDIndexer(settings.Chain, settings.SyncDBConn, settings.IPFSMode)
if err != nil { if err != nil {
return nil, err return nil, err
} }
sn.Filterer, err = NewResponseFilterer(settings.Chain) sn.Filterer, err = builders.NewResponseFilterer(settings.Chain)
if err != nil { if err != nil {
return nil, err return nil, err
} }
} }
// If we are serving, initialize the needed interfaces // If we are serving, initialize the needed interfaces
if settings.Serve { if settings.Serve {
sn.Retriever, err = NewCIDRetriever(settings.Chain, settings.ServeDBConn) sn.Retriever, err = builders.NewCIDRetriever(settings.Chain, settings.ServeDBConn)
if err != nil { if err != nil {
return nil, err return nil, err
} }
sn.IPLDFetcher, err = NewIPLDFetcher(settings.Chain, settings.IPFSPath, settings.ServeDBConn, settings.IPFSMode) sn.IPLDFetcher, err = builders.NewIPLDFetcher(settings.Chain, settings.IPFSPath, settings.ServeDBConn, settings.IPFSMode)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -151,14 +152,14 @@ func (sap *Service) Protocols() []p2p.Protocol {
return []p2p.Protocol{} return []p2p.Protocol{}
} }
// APIs returns the RPC descriptors the super node service offers // APIs returns the RPC descriptors the watcher service offers
func (sap *Service) APIs() []rpc.API { func (sap *Service) APIs() []rpc.API {
infoAPI := NewInfoAPI() infoAPI := NewInfoAPI()
apis := []rpc.API{ apis := []rpc.API{
{ {
Namespace: APIName, Namespace: APIName,
Version: APIVersion, Version: APIVersion,
Service: NewPublicSuperNodeAPI(sap), Service: NewPublicWatcherAPI(sap),
Public: true, Public: true,
}, },
{ {
@@ -180,7 +181,7 @@ func (sap *Service) APIs() []rpc.API {
Public: true, Public: true,
}, },
} }
chainAPI, err := NewPublicAPI(sap.chain, sap.db, sap.ipfsPath) chainAPI, err := builders.NewPublicAPI(sap.chain, sap.db, sap.ipfsPath)
if err != nil { if err != nil {
log.Error(err) log.Error(err)
return apis return apis
@@ -211,7 +212,7 @@ func (sap *Service) Sync(wg *sync.WaitGroup, screenAndServePayload chan<- shared
case payload := <-sap.PayloadChan: case payload := <-sap.PayloadChan:
ipldPayload, err := sap.Converter.Convert(payload) ipldPayload, err := sap.Converter.Convert(payload)
if err != nil { if err != nil {
log.Errorf("super node conversion error for chain %s: %v", sap.chain.String(), err) log.Errorf("watcher conversion error for chain %s: %v", sap.chain.String(), err)
continue continue
} }
log.Infof("%s data streamed at head height %d", sap.chain.String(), ipldPayload.Height()) log.Infof("%s data streamed at head height %d", sap.chain.String(), ipldPayload.Height())
@@ -229,7 +230,7 @@ func (sap *Service) Sync(wg *sync.WaitGroup, screenAndServePayload chan<- shared
publishAndIndexPayload <- ipldPayload publishAndIndexPayload <- ipldPayload
} }
case err := <-sub.Err(): case err := <-sub.Err():
log.Errorf("super node subscription error for chain %s: %v", sap.chain.String(), err) log.Errorf("watcher subscription error for chain %s: %v", sap.chain.String(), err)
case <-sap.QuitChan: case <-sap.QuitChan:
log.Infof("quitting %s Sync process", sap.chain.String()) log.Infof("quitting %s Sync process", sap.chain.String())
return return
@@ -248,18 +249,18 @@ func (sap *Service) publishAndIndex(wg *sync.WaitGroup, id int, publishAndIndexP
for { for {
select { select {
case payload := <-publishAndIndexPayload: case payload := <-publishAndIndexPayload:
log.Debugf("%s super node publishAndIndex worker %d publishing data streamed at head height %d", sap.chain.String(), id, payload.Height()) log.Debugf("%s watcher publishAndIndex worker %d publishing data streamed at head height %d", sap.chain.String(), id, payload.Height())
cidPayload, err := sap.Publisher.Publish(payload) cidPayload, err := sap.Publisher.Publish(payload)
if err != nil { if err != nil {
log.Errorf("%s super node publishAndIndex worker %d publishing error: %v", sap.chain.String(), id, err) log.Errorf("%s watcher publishAndIndex worker %d publishing error: %v", sap.chain.String(), id, err)
continue continue
} }
log.Debugf("%s super node publishAndIndex worker %d indexing data streamed at head height %d", sap.chain.String(), id, payload.Height()) log.Debugf("%s watcher publishAndIndex worker %d indexing data streamed at head height %d", sap.chain.String(), id, payload.Height())
if err := sap.Indexer.Index(cidPayload); err != nil { if err := sap.Indexer.Index(cidPayload); err != nil {
log.Errorf("%s super node publishAndIndex worker %d indexing error: %v", sap.chain.String(), id, err) log.Errorf("%s watcher publishAndIndex worker %d indexing error: %v", sap.chain.String(), id, err)
} }
case <-sap.QuitChan: case <-sap.QuitChan:
log.Infof("%s super node publishAndIndex worker %d shutting down", sap.chain.String(), id) log.Infof("%s watcher publishAndIndex worker %d shutting down", sap.chain.String(), id)
return return
} }
} }
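The fan-out here is the standard Go worker-pool pattern: WorkerPoolSize goroutines drain one shared channel and exit when the quit channel closes. A self-contained sketch with illustrative names, not the watcher's own:

package main

import (
	"fmt"
	"sync"
)

func worker(wg *sync.WaitGroup, id int, payloads <-chan int, quit <-chan struct{}) {
	defer wg.Done()
	for {
		select {
		case p := <-payloads:
			fmt.Printf("worker %d handled payload %d\n", id, p)
		case <-quit:
			fmt.Printf("worker %d shutting down\n", id)
			return
		}
	}
}

func main() {
	payloads := make(chan int, 10)
	quit := make(chan struct{})
	var wg sync.WaitGroup
	for id := 1; id <= 4; id++ { // analogue of WorkerPoolSize
		wg.Add(1)
		go worker(&wg, id, payloads, quit)
	}
	for i := 0; i < 8; i++ {
		payloads <- i
	}
	// like closing QuitChan above; payloads still buffered may go unprocessed
	close(quit)
	wg.Wait()
}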
@@ -298,7 +299,7 @@ func (sap *Service) filterAndServe(payload shared.ConvertedData) {
// Retrieve the subscription parameters for this subscription type // Retrieve the subscription parameters for this subscription type
subConfig, ok := sap.SubscriptionTypes[ty] subConfig, ok := sap.SubscriptionTypes[ty]
if !ok { if !ok {
log.Errorf("super node %s subscription configuration for subscription type %s not available", sap.chain.String(), ty.Hex()) log.Errorf("watcher %s subscription configuration for subscription type %s not available", sap.chain.String(), ty.Hex())
sap.closeType(ty) sap.closeType(ty)
continue continue
} }
@@ -310,19 +311,19 @@ func (sap *Service) filterAndServe(payload shared.ConvertedData) {
} }
response, err := sap.Filterer.Filter(subConfig, payload) response, err := sap.Filterer.Filter(subConfig, payload)
if err != nil { if err != nil {
log.Errorf("super node filtering error for chain %s: %v", sap.chain.String(), err) log.Errorf("watcher filtering error for chain %s: %v", sap.chain.String(), err)
sap.closeType(ty) sap.closeType(ty)
continue continue
} }
responseRLP, err := rlp.EncodeToBytes(response) responseRLP, err := rlp.EncodeToBytes(response)
if err != nil { if err != nil {
log.Errorf("super node rlp encoding error for chain %s: %v", sap.chain.String(), err) log.Errorf("watcher rlp encoding error for chain %s: %v", sap.chain.String(), err)
continue continue
} }
for id, sub := range subs { for id, sub := range subs {
select { select {
case sub.PayloadChan <- SubscriptionPayload{Data: responseRLP, Err: "", Flag: EmptyFlag, Height: response.Height()}: case sub.PayloadChan <- SubscriptionPayload{Data: responseRLP, Err: "", Flag: EmptyFlag, Height: response.Height()}:
log.Debugf("sending super node %s payload to subscription %s", sap.chain.String(), id) log.Debugf("sending watcher %s payload to subscription %s", sap.chain.String(), id)
default: default:
log.Infof("unable to send %s payload to subscription %s; channel has no receiver", sap.chain.String(), id) log.Infof("unable to send %s payload to subscription %s; channel has no receiver", sap.chain.String(), id)
} }
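The select with a default branch above is Go's non-blocking send: a subscriber whose buffer is full drops the payload instead of stalling the serve loop for everyone else. The same idiom in isolation:

package main

import "fmt"

// trySend mirrors the select/default used above: send if the buffer has
// room, otherwise report a drop instead of blocking
func trySend(ch chan int, v int) bool {
	select {
	case ch <- v:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan int, 1)
	fmt.Println(trySend(ch, 1)) // true: buffered
	fmt.Println(trySend(ch, 2)) // false: buffer full, dropped
}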
@@ -368,7 +369,7 @@ func (sap *Service) Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitCha
// Otherwise we only filter new data as it is streamed in from the state diffing geth node // Otherwise we only filter new data as it is streamed in from the state diffing geth node
if params.HistoricalData() || params.HistoricalDataOnly() { if params.HistoricalData() || params.HistoricalDataOnly() {
if err := sap.sendHistoricalData(subscription, id, params); err != nil { if err := sap.sendHistoricalData(subscription, id, params); err != nil {
sendNonBlockingErr(subscription, fmt.Errorf("%s super node subscriber backfill error: %v", sap.chain.String(), err)) sendNonBlockingErr(subscription, fmt.Errorf("%s watcher subscriber backfill error: %v", sap.chain.String(), err))
sendNonBlockingQuit(subscription) sendNonBlockingQuit(subscription)
return return
} }
@@ -404,13 +405,13 @@ func (sap *Service) sendHistoricalData(sub Subscription, id rpc.ID, params share
for i := startingBlock; i <= endingBlock; i++ { for i := startingBlock; i <= endingBlock; i++ {
select { select {
case <-sap.QuitChan: case <-sap.QuitChan:
log.Infof("%s super node historical data feed to subscription %s closed", sap.chain.String(), id) log.Infof("%s watcher historical data feed to subscription %s closed", sap.chain.String(), id)
return return
default: default:
} }
cidWrappers, empty, err := sap.Retriever.Retrieve(params, i) cidWrappers, empty, err := sap.Retriever.Retrieve(params, i)
if err != nil { if err != nil {
sendNonBlockingErr(sub, fmt.Errorf(" %s super node CID Retrieval error at block %d\r%s", sap.chain.String(), i, err.Error())) sendNonBlockingErr(sub, fmt.Errorf(" %s watcher CID Retrieval error at block %d\r%s", sap.chain.String(), i, err.Error()))
continue continue
} }
if empty { if empty {
@@ -419,7 +420,7 @@ func (sap *Service) sendHistoricalData(sub Subscription, id rpc.ID, params share
for _, cids := range cidWrappers { for _, cids := range cidWrappers {
response, err := sap.IPLDFetcher.Fetch(cids) response, err := sap.IPLDFetcher.Fetch(cids)
if err != nil { if err != nil {
sendNonBlockingErr(sub, fmt.Errorf("%s super node IPLD Fetching error at block %d\r%s", sap.chain.String(), i, err.Error())) sendNonBlockingErr(sub, fmt.Errorf("%s watcher IPLD Fetching error at block %d\r%s", sap.chain.String(), i, err.Error()))
continue continue
} }
responseRLP, err := rlp.EncodeToBytes(response) responseRLP, err := rlp.EncodeToBytes(response)
@@ -429,7 +430,7 @@ func (sap *Service) sendHistoricalData(sub Subscription, id rpc.ID, params share
} }
select { select {
case sub.PayloadChan <- SubscriptionPayload{Data: responseRLP, Err: "", Flag: EmptyFlag, Height: response.Height()}: case sub.PayloadChan <- SubscriptionPayload{Data: responseRLP, Err: "", Flag: EmptyFlag, Height: response.Height()}:
log.Debugf("sending super node historical data payload to %s subscription %s", sap.chain.String(), id) log.Debugf("sending watcher historical data payload to %s subscription %s", sap.chain.String(), id)
default: default:
log.Infof("unable to send backFill payload to %s subscription %s; channel has no receiver", sap.chain.String(), id) log.Infof("unable to send backFill payload to %s subscription %s; channel has no receiver", sap.chain.String(), id)
} }
@@ -448,7 +449,7 @@ func (sap *Service) sendHistoricalData(sub Subscription, id rpc.ID, params share
// Unsubscribe is used by the API to remotely unsubscribe from the StateDiffingService loop // Unsubscribe is used by the API to remotely unsubscribe from the StateDiffingService loop
func (sap *Service) Unsubscribe(id rpc.ID) { func (sap *Service) Unsubscribe(id rpc.ID) {
log.Infof("Unsubscribing %s from the %s super node service", id, sap.chain.String()) log.Infof("Unsubscribing %s from the %s watcher service", id, sap.chain.String())
sap.Lock() sap.Lock()
for ty := range sap.Subscriptions { for ty := range sap.Subscriptions {
delete(sap.Subscriptions[ty], id) delete(sap.Subscriptions[ty], id)
@@ -464,7 +465,7 @@ func (sap *Service) Unsubscribe(id rpc.ID) {
// Start is used to begin the service // Start is used to begin the service
// This is mostly just to satisfy the node.Service interface // This is mostly just to satisfy the node.Service interface
func (sap *Service) Start(*p2p.Server) error { func (sap *Service) Start(*p2p.Server) error {
log.Infof("Starting %s super node service", sap.chain.String()) log.Infof("Starting %s watcher service", sap.chain.String())
wg := new(sync.WaitGroup) wg := new(sync.WaitGroup)
payloadChan := make(chan shared.ConvertedData, PayloadChanBufferSize) payloadChan := make(chan shared.ConvertedData, PayloadChanBufferSize)
if err := sap.Sync(wg, payloadChan); err != nil { if err := sap.Sync(wg, payloadChan); err != nil {
@@ -477,7 +478,7 @@ func (sap *Service) Start(*p2p.Server) error {
// Stop is used to close down the service // Stop is used to close down the service
// This is mostly just to satisfy the node.Service interface // This is mostly just to satisfy the node.Service interface
func (sap *Service) Stop() error { func (sap *Service) Stop() error {
log.Infof("Stopping %s super node service", sap.chain.String()) log.Infof("Stopping %s watcher service", sap.chain.String())
sap.Lock() sap.Lock()
close(sap.QuitChan) close(sap.QuitChan)
sap.close() sap.close()
@@ -486,7 +487,7 @@ func (sap *Service) Stop() error {
} }
// Node returns the node info for this service // Node returns the node info for this service
func (sap *Service) Node() *core.Node { func (sap *Service) Node() *node.Node {
return sap.NodeInfo return sap.NodeInfo
} }
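Putting the constructors from this file together, a minimal wiring sketch; the signal handling and the nil *p2p.Server argument are illustrative choices (Start takes the server only to satisfy node.Service).

package main

import (
	"os"
	"os/signal"

	log "github.com/sirupsen/logrus"

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch"
)

func main() {
	cfg, err := watch.NewConfig()
	if err != nil {
		log.Fatal(err)
	}
	w, err := watch.NewWatcher(cfg)
	if err != nil {
		log.Fatal(err)
	}
	if err := w.Start(nil); err != nil {
		log.Fatal(err)
	}
	// block until interrupted, then shut the service down cleanly
	shutdown := make(chan os.Signal, 1)
	signal.Notify(shutdown, os.Interrupt)
	<-shutdown
	if err := w.Stop(); err != nil {
		log.Error(err)
	}
}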
View File

@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License // You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>. // along with this program. If not, see <http://www.gnu.org/licenses/>.
package watcher_test package watch_test
import ( import (
"sync" "sync"
@@ -27,7 +27,7 @@ import (
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
mocks2 "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared/mocks" mocks2 "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared/mocks"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/watcher" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch"
) )
var _ = Describe("Service", func() { var _ = Describe("Service", func() {
@@ -54,7 +54,7 @@ var _ = Describe("Service", func() {
ReturnIPLDPayload: mocks.MockConvertedPayload, ReturnIPLDPayload: mocks.MockConvertedPayload,
ReturnErr: nil, ReturnErr: nil,
} }
processor := &watcher.Service{ processor := &watch.Service{
Indexer: mockCidIndexer, Indexer: mockCidIndexer,
Publisher: mockPublisher, Publisher: mockPublisher,
Streamer: mockStreamer, Streamer: mockStreamer,
View File

@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License // You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>. // along with this program. If not, see <http://www.gnu.org/licenses/>.
package watcher package watch
import ( import (
"errors" "errors"
@@ -29,14 +29,14 @@ const (
BackFillCompleteFlag BackFillCompleteFlag
) )
// Subscription holds the information for an individual client subscription to the super node // Subscription holds the information for an individual client subscription to the watcher
type Subscription struct { type Subscription struct {
ID rpc.ID ID rpc.ID
PayloadChan chan<- SubscriptionPayload PayloadChan chan<- SubscriptionPayload
QuitChan chan<- bool QuitChan chan<- bool
} }
// SubscriptionPayload is the struct for a super node stream payload // SubscriptionPayload is the struct for a watcher data subscription payload
// It carries data of a type specific to the chain being supported/queried and an error message // It carries data of a type specific to the chain being supported/queried and an error message
type SubscriptionPayload struct { type SubscriptionPayload struct {
Data []byte `json:"data"` // e.g. for Ethereum rlp serialized eth.StreamPayload Data []byte `json:"data"` // e.g. for Ethereum rlp serialized eth.StreamPayload
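On the consumer side, a decoding sketch for a received payload, assuming an Ethereum subscription; the eth.StreamPayload type name is taken from the comment above and may differ in the actual package.

package client

import (
	"github.com/ethereum/go-ethereum/rlp"
	log "github.com/sirupsen/logrus"

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch"
)

func handle(p watch.SubscriptionPayload) {
	if p.Err != "" { // server-side errors travel in-band
		log.Error(p.Err)
		return
	}
	if p.Flag == watch.BackFillCompleteFlag {
		log.Info("historical backfill finished")
		return
	}
	var data eth.StreamPayload // chain-specific type, per the comment above
	if err := rlp.DecodeBytes(p.Data, &data); err != nil {
		log.Error(err)
		return
	}
	log.Infof("decoded payload for height %d", p.Height)
}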
Some files were not shown because too many files have changed in this diff.