From 449d23757ec4901eab983933c73c3f9d8409fecf Mon Sep 17 00:00:00 2001 From: Ian Norden Date: Mon, 29 Jun 2020 16:56:25 -0500 Subject: [PATCH 1/6] pkg rename --- cmd/streamEthSubscribe.go | 2 +- cmd/watch.go | 2 +- documentation/apis.md | 4 ++-- pkg/resync/service.go | 2 +- pkg/streamer/super_node_streamer.go | 2 +- pkg/{watcher => watch}/api.go | 0 pkg/{watcher => watch}/backfiller.go | 0 pkg/{watcher => watch}/backfiller_test.go | 2 +- pkg/{watcher => watch}/config.go | 0 pkg/{watcher => watch}/constructors.go | 0 pkg/{watcher => watch}/helpers.go | 0 pkg/{watcher => watch}/service.go | 0 pkg/{watcher => watch}/service_test.go | 2 +- pkg/{watcher => watch}/subscription.go | 0 pkg/{watcher => watch}/super_node_suite_test.go | 0 15 files changed, 8 insertions(+), 8 deletions(-) rename pkg/{watcher => watch}/api.go (100%) rename pkg/{watcher => watch}/backfiller.go (100%) rename pkg/{watcher => watch}/backfiller_test.go (99%) rename pkg/{watcher => watch}/config.go (100%) rename pkg/{watcher => watch}/constructors.go (100%) rename pkg/{watcher => watch}/helpers.go (100%) rename pkg/{watcher => watch}/service.go (100%) rename pkg/{watcher => watch}/service_test.go (97%) rename pkg/{watcher => watch}/subscription.go (100%) rename pkg/{watcher => watch}/super_node_suite_test.go (100%) diff --git a/cmd/streamEthSubscribe.go b/cmd/streamEthSubscribe.go index 74d5b275..4f615902 100644 --- a/cmd/streamEthSubscribe.go +++ b/cmd/streamEthSubscribe.go @@ -32,7 +32,7 @@ import ( "github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/streamer" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watcher" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch" ) // streamEthSubscriptionCmd represents the streamEthSubscription command diff --git a/cmd/watch.go b/cmd/watch.go index b5a8e3a2..f03b32dc 100644 --- a/cmd/watch.go +++ b/cmd/watch.go @@ -27,7 +27,7 @@ import ( 
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watcher" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch" v "github.com/vulcanize/ipfs-blockchain-watcher/version" ) diff --git a/documentation/apis.md b/documentation/apis.md index c098eccc..ab329875 100644 --- a/documentation/apis.md +++ b/documentation/apis.md @@ -22,7 +22,7 @@ All of their data can then be queried with standard [GraphQL](https://graphql.or ### RPC Subscription Interface -A direct, real-time subscription to the data being processed by ipfs-blockchain-watcher can be established over WS or IPC through the [Stream](../pkg/watcher/api.go#L53) RPC method. +A direct, real-time subscription to the data being processed by ipfs-blockchain-watcher can be established over WS or IPC through the [Stream](../pkg/watch/api.go#L53) RPC method. This method is not chain-specific and each chain-type supports it, it is accessed under the "vdb" namespace rather than a chain-specific namespace. An interface for subscribing to this endpoint is provided [here](../pkg/streamer/super_node_streamer.go). 
@@ -43,7 +43,7 @@ An example of how to subscribe to a real-time Ethereum data feed from ipfs-block "github.com/vulcanize/ipfs-blockchain-watcher/pkg/client" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/streamer" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watcher" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch" ) config, _ := eth.NewEthSubscriptionConfig() diff --git a/pkg/resync/service.go b/pkg/resync/service.go index 8e328cff..f14c856a 100644 --- a/pkg/resync/service.go +++ b/pkg/resync/service.go @@ -22,7 +22,7 @@ import ( "github.com/sirupsen/logrus" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watcher" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch" "github.com/vulcanize/ipfs-blockchain-watcher/utils" ) diff --git a/pkg/streamer/super_node_streamer.go b/pkg/streamer/super_node_streamer.go index 66f5968e..94af4539 100644 --- a/pkg/streamer/super_node_streamer.go +++ b/pkg/streamer/super_node_streamer.go @@ -21,7 +21,7 @@ import ( "github.com/ethereum/go-ethereum/rpc" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watcher" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch" ) // SuperNodeStreamer is the underlying struct for the shared.SuperNodeStreamer interface diff --git a/pkg/watcher/api.go b/pkg/watch/api.go similarity index 100% rename from pkg/watcher/api.go rename to pkg/watch/api.go diff --git a/pkg/watcher/backfiller.go b/pkg/watch/backfiller.go similarity index 100% rename from pkg/watcher/backfiller.go rename to pkg/watch/backfiller.go diff --git a/pkg/watcher/backfiller_test.go b/pkg/watch/backfiller_test.go similarity index 99% rename from pkg/watcher/backfiller_test.go rename to pkg/watch/backfiller_test.go index 8d6d4353..af337d93 100644 --- a/pkg/watcher/backfiller_test.go +++ b/pkg/watch/backfiller_test.go @@ -27,7 +27,7 @@ 
import ( "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" mocks2 "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared/mocks" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watcher" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch" ) var _ = Describe("BackFiller", func() { diff --git a/pkg/watcher/config.go b/pkg/watch/config.go similarity index 100% rename from pkg/watcher/config.go rename to pkg/watch/config.go diff --git a/pkg/watcher/constructors.go b/pkg/watch/constructors.go similarity index 100% rename from pkg/watcher/constructors.go rename to pkg/watch/constructors.go diff --git a/pkg/watcher/helpers.go b/pkg/watch/helpers.go similarity index 100% rename from pkg/watcher/helpers.go rename to pkg/watch/helpers.go diff --git a/pkg/watcher/service.go b/pkg/watch/service.go similarity index 100% rename from pkg/watcher/service.go rename to pkg/watch/service.go diff --git a/pkg/watcher/service_test.go b/pkg/watch/service_test.go similarity index 97% rename from pkg/watcher/service_test.go rename to pkg/watch/service_test.go index 6c40cd8f..f731cd51 100644 --- a/pkg/watcher/service_test.go +++ b/pkg/watch/service_test.go @@ -27,7 +27,7 @@ import ( "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" mocks2 "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared/mocks" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watcher" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch" ) var _ = Describe("Service", func() { diff --git a/pkg/watcher/subscription.go b/pkg/watch/subscription.go similarity index 100% rename from pkg/watcher/subscription.go rename to pkg/watch/subscription.go diff --git a/pkg/watcher/super_node_suite_test.go b/pkg/watch/super_node_suite_test.go similarity index 100% rename from pkg/watcher/super_node_suite_test.go rename to pkg/watch/super_node_suite_test.go From 
e2bcc06f8af2ab5993255ffad48274ea06a52b2a Mon Sep 17 00:00:00 2001 From: Ian Norden Date: Mon, 29 Jun 2020 19:16:52 -0500 Subject: [PATCH 2/6] major refactor pt 3 --- README.md | 2 +- cmd/resync.go | 6 +- cmd/streamEthSubscribe.go | 33 ++- cmd/watch.go | 192 +++++++++--------- dockerfiles/migrations/startup_script.sh | 2 +- dockerfiles/super_node/docker-compose.yml | 8 +- dockerfiles/super_node/entrypoint.sh | 4 +- dockerfiles/super_node/startup_script.sh | 2 +- documentation/apis.md | 32 +-- documentation/architecture.md | 22 +- environments/superNodeBTC.toml | 2 +- environments/superNodeETH.toml | 2 +- environments/superNodeSubscription.toml | 14 +- pkg/btc/btc_suite_test.go | 4 +- pkg/btc/cid_retriever.go | 2 +- pkg/btc/subscription_config.go | 30 +-- pkg/btc/test_helpers.go | 2 +- .../constructors.go => builders/builders.go} | 2 +- .../client.go} | 27 +-- pkg/client/rpc_client.go | 93 --------- pkg/core/eth_client.go | 36 ---- pkg/eth/api.go | 6 +- pkg/eth/backend.go | 2 +- pkg/eth/cid_retriever.go | 2 +- pkg/eth/eth_suite_test.go | 4 +- pkg/eth/mocks/batch_client.go | 4 +- pkg/eth/subscription_config.go | 48 ++--- pkg/eth/test_helpers.go | 2 +- pkg/historical/config.go | 149 ++++++++++++++ .../historical_suite_test.go} | 24 ++- .../backfiller.go => historical/service.go} | 34 ++-- .../service_test.go} | 24 +-- pkg/{core/node_info.go => node/node.go} | 35 +--- pkg/postgres/postgres.go | 8 +- pkg/postgres/postgres_test.go | 6 +- pkg/resync/config.go | 8 +- pkg/resync/service.go | 18 +- pkg/{config/client.go => shared/constants.go} | 9 +- pkg/shared/env.go | 12 +- pkg/shared/functions.go | 6 +- pkg/shared/test_helpers.go | 6 +- pkg/watch/api.go | 56 ++--- pkg/watch/config.go | 117 ++--------- pkg/watch/helpers.go | 2 +- pkg/watch/service.go | 85 ++++---- pkg/watch/service_test.go | 4 +- pkg/watch/subscription.go | 6 +- ...node_suite_test.go => watch_suite_test.go} | 2 +- utils/utils.go | 7 +- 49 files changed, 557 insertions(+), 646 deletions(-) rename 
pkg/{watch/constructors.go => builders/builders.go} (99%) rename pkg/{streamer/super_node_streamer.go => client/client.go} (50%) delete mode 100644 pkg/client/rpc_client.go delete mode 100644 pkg/core/eth_client.go create mode 100644 pkg/historical/config.go rename pkg/{core/rpc_client.go => historical/historical_suite_test.go} (61%) rename pkg/{watch/backfiller.go => historical/service.go} (85%) rename pkg/{watch/backfiller_test.go => historical/service_test.go} (91%) rename pkg/{core/node_info.go => node/node.go} (65%) rename pkg/{config/client.go => shared/constants.go} (88%) rename pkg/watch/{super_node_suite_test.go => watch_suite_test.go} (97%) diff --git a/README.md b/README.md index 884ed591..844e44fc 100644 --- a/README.md +++ b/README.md @@ -192,7 +192,7 @@ This set of parameters needs to be set no matter the chain type. path = "~/.ipfs" # $IPFS_PATH mode = "postgres" # $IPFS_MODE -[superNode] +[watcher] chain = "bitcoin" # $SUPERNODE_CHAIN server = true # $SUPERNODE_SERVER ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH diff --git a/cmd/resync.go b/cmd/resync.go index 0b2cb616..3f0df616 100644 --- a/cmd/resync.go +++ b/cmd/resync.go @@ -30,7 +30,7 @@ import ( var resyncCmd = &cobra.Command{ Use: "resync", Short: "Resync historical data", - Long: `Use this command to fill in sections of missing data in the super node`, + Long: `Use this command to fill in sections of missing data in the ipfs-blockchain-watcher database`, Run: func(cmd *cobra.Command, args []string) { subCommand = cmd.CalledAs() logWithCommand = *log.WithField("SubCommand", subCommand) @@ -40,8 +40,8 @@ var resyncCmd = &cobra.Command{ func rsyncCmdCommand() { logWithCommand.Infof("running vdb version: %s", v.VersionWithMeta) - logWithCommand.Debug("loading super node configuration variables") - rConfig, err := resync.NewReSyncConfig() + logWithCommand.Debug("loading resync configuration variables") + rConfig, err := resync.NewConfig() if err != nil { logWithCommand.Fatal(err) 
} diff --git a/cmd/streamEthSubscribe.go b/cmd/streamEthSubscribe.go index 4f615902..fb7f6f43 100644 --- a/cmd/streamEthSubscribe.go +++ b/cmd/streamEthSubscribe.go @@ -29,18 +29,16 @@ import ( "github.com/spf13/viper" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/client" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/streamer" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch" + w "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch" ) // streamEthSubscriptionCmd represents the streamEthSubscription command var streamEthSubscriptionCmd = &cobra.Command{ Use: "streamEthSubscription", - Short: "This command is used to subscribe to the super node eth stream with the provided filters", - Long: `This command is for demo and testing purposes and is used to subscribe to the super node with the provided subscription configuration parameters. -It does not do anything with the data streamed from the super node other than unpack it and print it out for demonstration purposes.`, + Short: "This command is used to subscribe to the eth ipfs watcher data stream with the provided filters", + Long: `This command is for demo and testing purposes and is used to subscribe to the watcher with the provided subscription configuration parameters. 
+It does not do anything with the data streamed from the watcher other than unpack it and print it out for demonstration purposes.`, Run: func(cmd *cobra.Command, args []string) { subCommand = cmd.CalledAs() logWithCommand = *log.WithField("SubCommand", subCommand) @@ -60,18 +58,21 @@ func streamEthSubscription() { } // Create a new rpc client and a subscription streamer with that client - rpcClient := getRPCClient() - str := streamer.NewSuperNodeStreamer(rpcClient) + rpcClient, err := getRPCClient() + if err != nil { + logWithCommand.Fatal(err) + } + subClient := client.NewClient(rpcClient) // Buffered channel for reading subscription payloads - payloadChan := make(chan watcher.SubscriptionPayload, 20000) + payloadChan := make(chan w.SubscriptionPayload, 20000) - // Subscribe to the super node service with the given config/filter parameters + // Subscribe to the watcher service with the given config/filter parameters rlpParams, err := rlp.EncodeToBytes(ethSubConfig) if err != nil { logWithCommand.Fatal(err) } - sub, err := str.Stream(payloadChan, rlpParams) + sub, err := subClient.Stream(payloadChan, rlpParams) if err != nil { logWithCommand.Fatal(err) } @@ -167,14 +168,10 @@ func streamEthSubscription() { } } -func getRPCClient() core.RPCClient { - vulcPath := viper.GetString("superNode.ethSubscription.wsPath") +func getRPCClient() (*rpc.Client, error) { + vulcPath := viper.GetString("watcher.ethSubscription.wsPath") if vulcPath == "" { vulcPath = "ws://127.0.0.1:8080" // default to and try the default ws url if no path is provided } - rawRPCClient, err := rpc.Dial(vulcPath) - if err != nil { - logWithCommand.Fatal(err) - } - return client.NewRPCClient(rawRPCClient, vulcPath) + return rpc.Dial(vulcPath) } diff --git a/cmd/watch.go b/cmd/watch.go index f03b32dc..942c58a5 100644 --- a/cmd/watch.go +++ b/cmd/watch.go @@ -18,22 +18,23 @@ package cmd import ( "os" "os/signal" - "sync" + s "sync" "github.com/ethereum/go-ethereum/rpc" log "github.com/sirupsen/logrus" 
"github.com/spf13/cobra" "github.com/spf13/viper" + h "github.com/vulcanize/ipfs-blockchain-watcher/pkg/historical" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch" + w "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch" v "github.com/vulcanize/ipfs-blockchain-watcher/version" ) -// superNodeCmd represents the superNode command -var superNodeCmd = &cobra.Command{ - Use: "superNode", +// watchCmd represents the watch command +var watchCmd = &cobra.Command{ + Use: "watch", Short: "sync chain data into PG-IPFS", Long: `This command configures a VulcanizeDB ipfs-blockchain-watcher. @@ -49,146 +50,155 @@ and fill in gaps in the data Run: func(cmd *cobra.Command, args []string) { subCommand = cmd.CalledAs() logWithCommand = *log.WithField("SubCommand", subCommand) - superNode() + watch() }, } -func superNode() { +func watch() { logWithCommand.Infof("running vdb version: %s", v.VersionWithMeta) - logWithCommand.Debug("loading super node configuration variables") - superNodeConfig, err := watcher.NewSuperNodeConfig() + + var forwardPayloadChan chan shared.ConvertedData + wg := new(s.WaitGroup) + logWithCommand.Debug("loading watcher configuration variables") + watcherConfig, err := w.NewConfig() if err != nil { logWithCommand.Fatal(err) } - logWithCommand.Infof("super node config: %+v", superNodeConfig) - if superNodeConfig.IPFSMode == shared.LocalInterface { + logWithCommand.Infof("watcher config: %+v", watcherConfig) + if watcherConfig.IPFSMode == shared.LocalInterface { if err := ipfs.InitIPFSPlugins(); err != nil { logWithCommand.Fatal(err) } } - wg := &sync.WaitGroup{} - logWithCommand.Debug("initializing new super node service") - superNode, err := watcher.NewSuperNode(superNodeConfig) + logWithCommand.Debug("initializing new watcher service") + watcher, err := w.NewWatcher(watcherConfig) if err != nil { logWithCommand.Fatal(err) } - var 
forwardPayloadChan chan shared.ConvertedData - if superNodeConfig.Serve { - logWithCommand.Info("starting up super node servers") - forwardPayloadChan = make(chan shared.ConvertedData, watcher.PayloadChanBufferSize) - superNode.Serve(wg, forwardPayloadChan) - if err := startServers(superNode, superNodeConfig); err != nil { + + if watcherConfig.Serve { + logWithCommand.Info("starting up watcher servers") + forwardPayloadChan = make(chan shared.ConvertedData, w.PayloadChanBufferSize) + watcher.Serve(wg, forwardPayloadChan) + if err := startServers(watcher, watcherConfig); err != nil { logWithCommand.Fatal(err) } } - if superNodeConfig.Sync { - logWithCommand.Info("starting up super node sync process") - if err := superNode.Sync(wg, forwardPayloadChan); err != nil { + + if watcherConfig.Sync { + logWithCommand.Info("starting up watcher sync process") + if err := watcher.Sync(wg, forwardPayloadChan); err != nil { logWithCommand.Fatal(err) } } - var backFiller watcher.BackFillInterface - if superNodeConfig.BackFill { - logWithCommand.Debug("initializing new super node backfill service") - backFiller, err = watcher.NewBackFillService(superNodeConfig, forwardPayloadChan) + + var backFiller h.BackFillInterface + if watcherConfig.Historical { + historicalConfig, err := h.NewConfig() if err != nil { logWithCommand.Fatal(err) } - logWithCommand.Info("starting up super node backfill process") + logWithCommand.Debug("initializing new historical backfill service") + backFiller, err = h.NewBackFillService(historicalConfig, forwardPayloadChan) + if err != nil { + logWithCommand.Fatal(err) + } + logWithCommand.Info("starting up watcher backfill process") backFiller.BackFill(wg) } + shutdown := make(chan os.Signal) signal.Notify(shutdown, os.Interrupt) <-shutdown - if superNodeConfig.BackFill { + if watcherConfig.Historical { backFiller.Stop() } - superNode.Stop() + watcher.Stop() wg.Wait() } -func startServers(superNode watcher.SuperNode, settings *watcher.Config) error { +func 
startServers(watcher w.Watcher, settings *w.Config) error { logWithCommand.Debug("starting up IPC server") - _, _, err := rpc.StartIPCEndpoint(settings.IPCEndpoint, superNode.APIs()) + _, _, err := rpc.StartIPCEndpoint(settings.IPCEndpoint, watcher.APIs()) if err != nil { return err } logWithCommand.Debug("starting up WS server") - _, _, err = rpc.StartWSEndpoint(settings.WSEndpoint, superNode.APIs(), []string{"vdb"}, nil, true) + _, _, err = rpc.StartWSEndpoint(settings.WSEndpoint, watcher.APIs(), []string{"vdb"}, nil, true) if err != nil { return err } logWithCommand.Debug("starting up HTTP server") - _, _, err = rpc.StartHTTPEndpoint(settings.HTTPEndpoint, superNode.APIs(), []string{settings.Chain.API()}, nil, nil, rpc.HTTPTimeouts{}) + _, _, err = rpc.StartHTTPEndpoint(settings.HTTPEndpoint, watcher.APIs(), []string{settings.Chain.API()}, nil, nil, rpc.HTTPTimeouts{}) return err } func init() { - rootCmd.AddCommand(superNodeCmd) + rootCmd.AddCommand(watchCmd) // flags for all config variables - superNodeCmd.PersistentFlags().String("ipfs-path", "", "ipfs repository path") + watchCmd.PersistentFlags().String("ipfs-path", "", "ipfs repository path") - superNodeCmd.PersistentFlags().String("supernode-chain", "", "which chain to support, options are currently Ethereum or Bitcoin.") - superNodeCmd.PersistentFlags().Bool("supernode-server", false, "turn vdb server on or off") - superNodeCmd.PersistentFlags().String("supernode-ws-path", "", "vdb server ws path") - superNodeCmd.PersistentFlags().String("supernode-http-path", "", "vdb server http path") - superNodeCmd.PersistentFlags().String("supernode-ipc-path", "", "vdb server ipc path") - superNodeCmd.PersistentFlags().Bool("supernode-sync", false, "turn vdb sync on or off") - superNodeCmd.PersistentFlags().Int("supernode-workers", 0, "how many worker goroutines to publish and index data") - superNodeCmd.PersistentFlags().Bool("supernode-back-fill", false, "turn vdb backfill on or off") - 
superNodeCmd.PersistentFlags().Int("supernode-frequency", 0, "how often (in seconds) the backfill process checks for gaps") - superNodeCmd.PersistentFlags().Int("supernode-batch-size", 0, "data fetching batch size") - superNodeCmd.PersistentFlags().Int("supernode-batch-number", 0, "how many goroutines to fetch data concurrently") - superNodeCmd.PersistentFlags().Int("supernode-validation-level", 0, "backfill will resync any data below this level") - superNodeCmd.PersistentFlags().Int("supernode-timeout", 0, "timeout used for backfill http requests") + watchCmd.PersistentFlags().String("watcher-chain", "", "which chain to support, options are currently Ethereum or Bitcoin.") + watchCmd.PersistentFlags().Bool("watcher-server", false, "turn vdb server on or off") + watchCmd.PersistentFlags().String("watcher-ws-path", "", "vdb server ws path") + watchCmd.PersistentFlags().String("watcher-http-path", "", "vdb server http path") + watchCmd.PersistentFlags().String("watcher-ipc-path", "", "vdb server ipc path") + watchCmd.PersistentFlags().Bool("watcher-sync", false, "turn vdb sync on or off") + watchCmd.PersistentFlags().Int("watcher-workers", 0, "how many worker goroutines to publish and index data") + watchCmd.PersistentFlags().Bool("watcher-back-fill", false, "turn vdb backfill on or off") + watchCmd.PersistentFlags().Int("watcher-frequency", 0, "how often (in seconds) the backfill process checks for gaps") + watchCmd.PersistentFlags().Int("watcher-batch-size", 0, "data fetching batch size") + watchCmd.PersistentFlags().Int("watcher-batch-number", 0, "how many goroutines to fetch data concurrently") + watchCmd.PersistentFlags().Int("watcher-validation-level", 0, "backfill will resync any data below this level") + watchCmd.PersistentFlags().Int("watcher-timeout", 0, "timeout used for backfill http requests") - superNodeCmd.PersistentFlags().String("btc-ws-path", "", "ws url for bitcoin node") - superNodeCmd.PersistentFlags().String("btc-http-path", "", "http url for 
bitcoin node") - superNodeCmd.PersistentFlags().String("btc-password", "", "password for btc node") - superNodeCmd.PersistentFlags().String("btc-username", "", "username for btc node") - superNodeCmd.PersistentFlags().String("btc-node-id", "", "btc node id") - superNodeCmd.PersistentFlags().String("btc-client-name", "", "btc client name") - superNodeCmd.PersistentFlags().String("btc-genesis-block", "", "btc genesis block hash") - superNodeCmd.PersistentFlags().String("btc-network-id", "", "btc network id") + watchCmd.PersistentFlags().String("btc-ws-path", "", "ws url for bitcoin node") + watchCmd.PersistentFlags().String("btc-http-path", "", "http url for bitcoin node") + watchCmd.PersistentFlags().String("btc-password", "", "password for btc node") + watchCmd.PersistentFlags().String("btc-username", "", "username for btc node") + watchCmd.PersistentFlags().String("btc-node-id", "", "btc node id") + watchCmd.PersistentFlags().String("btc-client-name", "", "btc client name") + watchCmd.PersistentFlags().String("btc-genesis-block", "", "btc genesis block hash") + watchCmd.PersistentFlags().String("btc-network-id", "", "btc network id") - superNodeCmd.PersistentFlags().String("eth-ws-path", "", "ws url for ethereum node") - superNodeCmd.PersistentFlags().String("eth-http-path", "", "http url for ethereum node") - superNodeCmd.PersistentFlags().String("eth-node-id", "", "eth node id") - superNodeCmd.PersistentFlags().String("eth-client-name", "", "eth client name") - superNodeCmd.PersistentFlags().String("eth-genesis-block", "", "eth genesis block hash") - superNodeCmd.PersistentFlags().String("eth-network-id", "", "eth network id") + watchCmd.PersistentFlags().String("eth-ws-path", "", "ws url for ethereum node") + watchCmd.PersistentFlags().String("eth-http-path", "", "http url for ethereum node") + watchCmd.PersistentFlags().String("eth-node-id", "", "eth node id") + watchCmd.PersistentFlags().String("eth-client-name", "", "eth client name") + 
watchCmd.PersistentFlags().String("eth-genesis-block", "", "eth genesis block hash") + watchCmd.PersistentFlags().String("eth-network-id", "", "eth network id") // and their bindings - viper.BindPFlag("ipfs.path", superNodeCmd.PersistentFlags().Lookup("ipfs-path")) + viper.BindPFlag("ipfs.path", watchCmd.PersistentFlags().Lookup("ipfs-path")) - viper.BindPFlag("superNode.chain", superNodeCmd.PersistentFlags().Lookup("supernode-chain")) - viper.BindPFlag("superNode.server", superNodeCmd.PersistentFlags().Lookup("supernode-server")) - viper.BindPFlag("superNode.wsPath", superNodeCmd.PersistentFlags().Lookup("supernode-ws-path")) - viper.BindPFlag("superNode.httpPath", superNodeCmd.PersistentFlags().Lookup("supernode-http-path")) - viper.BindPFlag("superNode.ipcPath", superNodeCmd.PersistentFlags().Lookup("supernode-ipc-path")) - viper.BindPFlag("superNode.sync", superNodeCmd.PersistentFlags().Lookup("supernode-sync")) - viper.BindPFlag("superNode.workers", superNodeCmd.PersistentFlags().Lookup("supernode-workers")) - viper.BindPFlag("superNode.backFill", superNodeCmd.PersistentFlags().Lookup("supernode-back-fill")) - viper.BindPFlag("superNode.frequency", superNodeCmd.PersistentFlags().Lookup("supernode-frequency")) - viper.BindPFlag("superNode.batchSize", superNodeCmd.PersistentFlags().Lookup("supernode-batch-size")) - viper.BindPFlag("superNode.batchNumber", superNodeCmd.PersistentFlags().Lookup("supernode-batch-number")) - viper.BindPFlag("superNode.validationLevel", superNodeCmd.PersistentFlags().Lookup("supernode-validation-level")) - viper.BindPFlag("superNode.timeout", superNodeCmd.PersistentFlags().Lookup("supernode-timeout")) + viper.BindPFlag("watcher.chain", watchCmd.PersistentFlags().Lookup("watcher-chain")) + viper.BindPFlag("watcher.server", watchCmd.PersistentFlags().Lookup("watcher-server")) + viper.BindPFlag("watcher.wsPath", watchCmd.PersistentFlags().Lookup("watcher-ws-path")) + viper.BindPFlag("watcher.httpPath", 
watchCmd.PersistentFlags().Lookup("watcher-http-path")) + viper.BindPFlag("watcher.ipcPath", watchCmd.PersistentFlags().Lookup("watcher-ipc-path")) + viper.BindPFlag("watcher.sync", watchCmd.PersistentFlags().Lookup("watcher-sync")) + viper.BindPFlag("watcher.workers", watchCmd.PersistentFlags().Lookup("watcher-workers")) + viper.BindPFlag("watcher.backFill", watchCmd.PersistentFlags().Lookup("watcher-back-fill")) + viper.BindPFlag("watcher.frequency", watchCmd.PersistentFlags().Lookup("watcher-frequency")) + viper.BindPFlag("watcher.batchSize", watchCmd.PersistentFlags().Lookup("watcher-batch-size")) + viper.BindPFlag("watcher.batchNumber", watchCmd.PersistentFlags().Lookup("watcher-batch-number")) + viper.BindPFlag("watcher.validationLevel", watchCmd.PersistentFlags().Lookup("watcher-validation-level")) + viper.BindPFlag("watcher.timeout", watchCmd.PersistentFlags().Lookup("watcher-timeout")) - viper.BindPFlag("bitcoin.wsPath", superNodeCmd.PersistentFlags().Lookup("btc-ws-path")) - viper.BindPFlag("bitcoin.httpPath", superNodeCmd.PersistentFlags().Lookup("btc-http-path")) - viper.BindPFlag("bitcoin.pass", superNodeCmd.PersistentFlags().Lookup("btc-password")) - viper.BindPFlag("bitcoin.user", superNodeCmd.PersistentFlags().Lookup("btc-username")) - viper.BindPFlag("bitcoin.nodeID", superNodeCmd.PersistentFlags().Lookup("btc-node-id")) - viper.BindPFlag("bitcoin.clientName", superNodeCmd.PersistentFlags().Lookup("btc-client-name")) - viper.BindPFlag("bitcoin.genesisBlock", superNodeCmd.PersistentFlags().Lookup("btc-genesis-block")) - viper.BindPFlag("bitcoin.networkID", superNodeCmd.PersistentFlags().Lookup("btc-network-id")) + viper.BindPFlag("bitcoin.wsPath", watchCmd.PersistentFlags().Lookup("btc-ws-path")) + viper.BindPFlag("bitcoin.httpPath", watchCmd.PersistentFlags().Lookup("btc-http-path")) + viper.BindPFlag("bitcoin.pass", watchCmd.PersistentFlags().Lookup("btc-password")) + viper.BindPFlag("bitcoin.user", 
watchCmd.PersistentFlags().Lookup("btc-username")) + viper.BindPFlag("bitcoin.nodeID", watchCmd.PersistentFlags().Lookup("btc-node-id")) + viper.BindPFlag("bitcoin.clientName", watchCmd.PersistentFlags().Lookup("btc-client-name")) + viper.BindPFlag("bitcoin.genesisBlock", watchCmd.PersistentFlags().Lookup("btc-genesis-block")) + viper.BindPFlag("bitcoin.networkID", watchCmd.PersistentFlags().Lookup("btc-network-id")) - viper.BindPFlag("ethereum.wsPath", superNodeCmd.PersistentFlags().Lookup("eth-ws-path")) - viper.BindPFlag("ethereum.httpPath", superNodeCmd.PersistentFlags().Lookup("eth-http-path")) - viper.BindPFlag("ethereum.nodeID", superNodeCmd.PersistentFlags().Lookup("eth-node-id")) - viper.BindPFlag("ethereum.clientName", superNodeCmd.PersistentFlags().Lookup("eth-client-name")) - viper.BindPFlag("ethereum.genesisBlock", superNodeCmd.PersistentFlags().Lookup("eth-genesis-block")) - viper.BindPFlag("ethereum.networkID", superNodeCmd.PersistentFlags().Lookup("eth-network-id")) + viper.BindPFlag("ethereum.wsPath", watchCmd.PersistentFlags().Lookup("eth-ws-path")) + viper.BindPFlag("ethereum.httpPath", watchCmd.PersistentFlags().Lookup("eth-http-path")) + viper.BindPFlag("ethereum.nodeID", watchCmd.PersistentFlags().Lookup("eth-node-id")) + viper.BindPFlag("ethereum.clientName", watchCmd.PersistentFlags().Lookup("eth-client-name")) + viper.BindPFlag("ethereum.genesisBlock", watchCmd.PersistentFlags().Lookup("eth-genesis-block")) + viper.BindPFlag("ethereum.networkID", watchCmd.PersistentFlags().Lookup("eth-network-id")) } diff --git a/dockerfiles/migrations/startup_script.sh b/dockerfiles/migrations/startup_script.sh index ca5c5159..ddd45182 100755 --- a/dockerfiles/migrations/startup_script.sh +++ b/dockerfiles/migrations/startup_script.sh @@ -1,5 +1,5 @@ #!/bin/sh -# Runs the db migrations and starts the super node services +# Runs the db migrations and starts the watcher services # Exit if the variable tests fail set -e diff --git 
a/dockerfiles/super_node/docker-compose.yml b/dockerfiles/super_node/docker-compose.yml index 4487be47..0011928e 100644 --- a/dockerfiles/super_node/docker-compose.yml +++ b/dockerfiles/super_node/docker-compose.yml @@ -26,9 +26,9 @@ services: dockerfile: ./dockerfiles/super_node/Dockerfile args: USER: "vdbm" - CONFIG_FILE: ./environments/superNodeBTC.toml + CONFIG_FILE: ./environments/watcherBTC.toml environment: - VDB_COMMAND: "superNode" + VDB_COMMAND: "watcher" DATABASE_NAME: "vulcanize_public" DATABASE_HOSTNAME: "db" DATABASE_PORT: 5432 @@ -49,9 +49,9 @@ services: dockerfile: ./dockerfiles/super_node/Dockerfile args: USER: "vdbm" - CONFIG_FILE: ./environments/superNodeETH.toml + CONFIG_FILE: ./environments/watcherETH.toml environment: - VDB_COMMAND: "superNode" + VDB_COMMAND: "watcher" DATABASE_NAME: "vulcanize_public" DATABASE_HOSTNAME: "db" DATABASE_PORT: 5432 diff --git a/dockerfiles/super_node/entrypoint.sh b/dockerfiles/super_node/entrypoint.sh index a5f9ae7b..1af5a9cb 100755 --- a/dockerfiles/super_node/entrypoint.sh +++ b/dockerfiles/super_node/entrypoint.sh @@ -1,5 +1,5 @@ #!/bin/sh -# Runs the db migrations and starts the super node services +# Runs the db migrations and starts the watcher services # Exit if the variable tests fail set -e @@ -14,7 +14,7 @@ set +x #test $DATABASE_PASSWORD #test $IPFS_INIT #test $IPFS_PATH -VDB_COMMAND=${VDB_COMMAND:-superNode} +VDB_COMMAND=${VDB_COMMAND:-watch} set +e # Construct the connection string for postgres diff --git a/dockerfiles/super_node/startup_script.sh b/dockerfiles/super_node/startup_script.sh index 0f07ea0b..e30c5069 100755 --- a/dockerfiles/super_node/startup_script.sh +++ b/dockerfiles/super_node/startup_script.sh @@ -1,5 +1,5 @@ #!/bin/sh -# Runs the db migrations and starts the super node services +# Runs the db migrations and starts the watcher services # Exit if the variable tests fail set -e diff --git a/documentation/apis.md b/documentation/apis.md index ab329875..7fdebe09 100644 --- 
a/documentation/apis.md +++ b/documentation/apis.md @@ -24,7 +24,7 @@ All of their data can then be queried with standard [GraphQL](https://graphql.or ### RPC Subscription Interface A direct, real-time subscription to the data being processed by ipfs-blockchain-watcher can be established over WS or IPC through the [Stream](../pkg/watch/api.go#L53) RPC method. This method is not chain-specific and each chain-type supports it, it is accessed under the "vdb" namespace rather than a chain-specific namespace. An interface for -subscribing to this endpoint is provided [here](../pkg/streamer/super_node_streamer.go). +subscribing to this endpoint is provided [here](../pkg/client/client.go). When subscribing to this endpoint, the subscriber provides a set of RLP-encoded subscription parameters. These parameters will be chain-specific, and are used by ipfs-blockchain-watcher to filter and return a requested subset of chain data to the subscriber. (e.g. [BTC](../pkg/btc/subscription_config.go), [ETH](../../pkg/eth/subscription_config.go)). 
@@ -48,7 +48,7 @@ An example of how to subscribe to a real-time Ethereum data feed from ipfs-block config, _ := eth.NewEthSubscriptionConfig() rlpConfig, _ := rlp.EncodeToBytes(config) - vulcPath := viper.GetString("superNode.ethSubscription.path") + vulcPath := viper.GetString("watcher.ethSubscription.path") rawRPCClient, _ := rpc.Dial(vulcPath) rpcClient := client.NewRPCClient(rawRPCClient, vulcPath) stream := streamer.NewSuperNodeStreamer(rpcClient) @@ -67,32 +67,32 @@ An example of how to subscribe to a real-time Ethereum data feed from ipfs-block The .toml file being used to fill the Ethereum subscription config would look something like this: ```toml -[superNode] - [superNode.ethSubscription] +[watcher] + [watcher.ethSubscription] historicalData = false historicalDataOnly = false startingBlock = 0 endingBlock = 0 wsPath = "ws://127.0.0.1:8080" - [superNode.ethSubscription.headerFilter] + [watcher.ethSubscription.headerFilter] off = false uncles = false - [superNode.ethSubscription.txFilter] + [watcher.ethSubscription.txFilter] off = false src = [] dst = [] - [superNode.ethSubscription.receiptFilter] + [watcher.ethSubscription.receiptFilter] off = false contracts = [] topic0s = [] topic1s = [] topic2s = [] topic3s = [] - [superNode.ethSubscription.stateFilter] + [watcher.ethSubscription.stateFilter] off = false addresses = [] intermediateNodes = false - [superNode.ethSubscription.storageFilter] + [watcher.ethSubscription.storageFilter] off = true addresses = [] storageKeys = [] @@ -131,9 +131,9 @@ in `src` and `dst`, respectively. - Setting `off` to true tells ipfs-blockchain-watcher to not send any receipts to the subscriber - `topic0s` is a string array which can be filled with event topics we want to filter for, if it has any topics then ipfs-blockchain-watcher will only send receipts that contain logs which have that topic0. 
-- `contracts` is a string array which can be filled with contract addresses we want to filter for, if it contains any contract addresses the super node will +- `contracts` is a string array which can be filled with contract addresses we want to filter for, if it contains any contract addresses the watcher will only send receipts that correspond to one of those contracts. -- `matchTrxs` is a bool which when set to true any receipts that correspond to filtered for transactions will be sent by the super node, regardless of whether or not the receipt satisfies the `topics` or `contracts` filters. +- `matchTrxs` is a bool; when set to true, any receipts that correspond to filtered-for transactions will be sent by the watcher, regardless of whether or not the receipt satisfies the `topics` or `contracts` filters. `ethSubscription.stateFilter` has three sub-options: `off`, `addresses`, and `intermediateNodes`. @@ -170,7 +170,7 @@ An example of how to subscribe to a real-time Bitcoin data feed from ipfs-blockc config, _ := btc.NewBtcSubscriptionConfig() rlpConfig, _ := rlp.EncodeToBytes(config) - vulcPath := viper.GetString("superNode.btcSubscription.path") + vulcPath := viper.GetString("watcher.btcSubscription.path") rawRPCClient, _ := rpc.Dial(vulcPath) rpcClient := client.NewRPCClient(rawRPCClient, vulcPath) stream := streamer.NewSuperNodeStreamer(rpcClient) @@ -189,16 +189,16 @@ An example of how to subscribe to a real-time Bitcoin data feed from ipfs-blockc The .toml file being used to fill the Bitcoin subscription config would look something like this: ```toml -[superNode] - [superNode.btcSubscription] +[watcher] + [watcher.btcSubscription] historicalData = false historicalDataOnly = false startingBlock = 0 endingBlock = 0 wsPath = "ws://127.0.0.1:8080" - [superNode.btcSubscription.headerFilter] + [watcher.btcSubscription.headerFilter] off = false - [superNode.btcSubscription.txFilter] + [watcher.btcSubscription.txFilter] off = false segwit = false witnessHashes 
= [] diff --git a/documentation/architecture.md b/documentation/architecture.md index e943e971..86f33c6e 100644 --- a/documentation/architecture.md +++ b/documentation/architecture.md @@ -8,17 +8,17 @@ 1. [IPFS Considerations](#ipfs-considerations) ## Processes -ipfs-blockchain-watcher is a [service](../pkg/super_node/service.go#L61) comprised of the following interfaces: +ipfs-blockchain-watcher is a [service](../pkg/watch/service.go#L61) comprised of the following interfaces: -* [Payload Fetcher](../pkg/super_node/shared/interfaces.go#L29): Fetches raw chain data from a half-duplex endpoint (HTTP/IPC), used for historical data fetching. ([BTC](../../pkg/super_node/btc/payload_fetcher.go), [ETH](../../pkg/super_node/eth/payload_fetcher.go)). -* [Payload Streamer](../pkg/super_node/shared/interfaces.go#L24): Streams raw chain data from a full-duplex endpoint (WebSocket/IPC), used for syncing data at the head of the chain in real-time. ([BTC](../../pkg/super_node/btc/http_streamer.go), [ETH](../../pkg/super_node/eth/streamer.go)). -* [Payload Converter](../pkg/super_node/shared/interfaces.go#L34): Converters raw chain data to an intermediary form prepared for IPFS publishing. ([BTC](../../pkg/super_node/btc/converter.go), [ETH](../../pkg/super_node/eth/converter.go)). -* [IPLD Publisher](../pkg/super_node/shared/interfaces.go#L39): Publishes the converted data to IPFS, returning their CIDs and associated metadata for indexing. ([BTC](../../pkg/super_node/btc/publisher.go), [ETH](../../pkg/super_node/eth/publisher.go)). -* [CID Indexer](../pkg/super_node/shared/interfaces.go#L44): Indexes CIDs in Postgres with their associated metadata. This metadata is chain specific and selected based on utility. ([BTC](../../pkg/super_node/btc/indexer.go), [ETH](../../pkg/super_node/eth/indexer.go)). 
-* [CID Retriever](../pkg/super_node/shared/interfaces.go#L54): Retrieves CIDs from Postgres by searching against their associated metadata, is used to lookup data to serve API requests/subscriptions. ([BTC](../../pkg/super_node/btc/retriever.go), [ETH](../../pkg/super_node/eth/retriever.go)). -* [IPLD Fetcher](../pkg/super_node/shared/interfaces.go#L62): Fetches the IPLDs needed to service API requests/subscriptions from IPFS using retrieved CIDS; can route through a IPFS block-exchange to search for objects that are not directly available. ([BTC](../../pkg/super_node/btc/ipld_fetcher.go), [ETH](../../pkg/super_node/eth/ipld_fetcher.go)) -* [Response Filterer](../pkg/super_node/shared/interfaces.go#L49): Filters converted data payloads served to API subscriptions; filters according to the subscriber provided parameters. ([BTC](../../pkg/super_node/btc/filterer.go), [ETH](../../pkg/super_node/eth/filterer.go)). -* [API](https://github.com/ethereum/go-ethereum/blob/master/rpc/types.go#L31): Expose RPC methods for clients to interface with the data. Chain-specific APIs should aim to recapitulate as much of the native API as possible. ([VDB](../../pkg/super_node/api.go), [ETH](../../pkg/super_node/eth/api.go)). +* [Payload Fetcher](../pkg/shared/interfaces.go#L29): Fetches raw chain data from a half-duplex endpoint (HTTP/IPC), used for historical data fetching. ([BTC](../../pkg/btc/payload_fetcher.go), [ETH](../../pkg/eth/payload_fetcher.go)). +* [Payload Streamer](../pkg/shared/interfaces.go#L24): Streams raw chain data from a full-duplex endpoint (WebSocket/IPC), used for syncing data at the head of the chain in real-time. ([BTC](../../pkg/btc/http_streamer.go), [ETH](../../pkg/eth/streamer.go)). +* [Payload Converter](../pkg/shared/interfaces.go#L34): Converts raw chain data to an intermediary form prepared for IPFS publishing. ([BTC](../../pkg/btc/converter.go), [ETH](../../pkg/eth/converter.go)). 
+* [IPLD Publisher](../pkg/shared/interfaces.go#L39): Publishes the converted data to IPFS, returning their CIDs and associated metadata for indexing. ([BTC](../../pkg/btc/publisher.go), [ETH](../../pkg/eth/publisher.go)). +* [CID Indexer](../pkg/shared/interfaces.go#L44): Indexes CIDs in Postgres with their associated metadata. This metadata is chain specific and selected based on utility. ([BTC](../../pkg/btc/indexer.go), [ETH](../../pkg/eth/indexer.go)). +* [CID Retriever](../pkg/shared/interfaces.go#L54): Retrieves CIDs from Postgres by searching against their associated metadata, is used to lookup data to serve API requests/subscriptions. ([BTC](../../pkg/btc/retriever.go), [ETH](../../pkg/eth/retriever.go)). +* [IPLD Fetcher](../pkg/shared/interfaces.go#L62): Fetches the IPLDs needed to service API requests/subscriptions from IPFS using retrieved CIDs; can route through an IPFS block-exchange to search for objects that are not directly available. ([BTC](../../pkg/btc/ipld_fetcher.go), [ETH](../../pkg/eth/ipld_fetcher.go)) +* [Response Filterer](../pkg/shared/interfaces.go#L49): Filters converted data payloads served to API subscriptions; filters according to the subscriber provided parameters. ([BTC](../../pkg/btc/filterer.go), [ETH](../../pkg/eth/filterer.go)). +* [API](https://github.com/ethereum/go-ethereum/blob/master/rpc/types.go#L31): Expose RPC methods for clients to interface with the data. Chain-specific APIs should aim to recapitulate as much of the native API as possible. ([VDB](../../pkg/api.go), [ETH](../../pkg/eth/api.go)). Appropriating the service for a new chain is done by creating underlying types to satisfy these interfaces for @@ -56,7 +56,7 @@ This set of parameters needs to be set no matter the chain type. 
path = "~/.ipfs" # $IPFS_PATH mode = "direct" # $IPFS_MODE -[superNode] +[watcher] chain = "bitcoin" # $SUPERNODE_CHAIN server = true # $SUPERNODE_SERVER ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH diff --git a/environments/superNodeBTC.toml b/environments/superNodeBTC.toml index 73178b98..00e1ec30 100644 --- a/environments/superNodeBTC.toml +++ b/environments/superNodeBTC.toml @@ -20,7 +20,7 @@ clearOldCache = false # $RESYNC_CLEAR_OLD_CACHE resetValidation = true # $RESYNC_RESET_VALIDATION -[superNode] +[watcher] chain = "bitcoin" # $SUPERNODE_CHAIN server = true # $SUPERNODE_SERVER ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH diff --git a/environments/superNodeETH.toml b/environments/superNodeETH.toml index 997ba92e..11bcc709 100644 --- a/environments/superNodeETH.toml +++ b/environments/superNodeETH.toml @@ -21,7 +21,7 @@ clearOldCache = true # $RESYNC_CLEAR_OLD_CACHE resetValidation = true # $RESYNC_RESET_VALIDATION -[superNode] +[watcher] chain = "ethereum" # $SUPERNODE_CHAIN server = true # $SUPERNODE_SERVER ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH diff --git a/environments/superNodeSubscription.toml b/environments/superNodeSubscription.toml index 79f21775..14a82379 100644 --- a/environments/superNodeSubscription.toml +++ b/environments/superNodeSubscription.toml @@ -1,29 +1,29 @@ -[superNode] - [superNode.ethSubscription] +[watcher] + [watcher.ethSubscription] historicalData = false historicalDataOnly = false startingBlock = 0 endingBlock = 0 wsPath = "ws://127.0.0.1:8080" - [superNode.ethSubscription.headerFilter] + [watcher.ethSubscription.headerFilter] off = false uncles = false - [superNode.ethSubscription.txFilter] + [watcher.ethSubscription.txFilter] off = false src = [] dst = [] - [superNode.ethSubscription.receiptFilter] + [watcher.ethSubscription.receiptFilter] off = false contracts = [] topic0s = [] topic1s = [] topic2s = [] topic3s = [] - [superNode.ethSubscription.stateFilter] + 
[watcher.ethSubscription.stateFilter] off = false addresses = [] intermediateNodes = false - [superNode.ethSubscription.storageFilter] + [watcher.ethSubscription.storageFilter] off = true addresses = [] storageKeys = [] diff --git a/pkg/btc/btc_suite_test.go b/pkg/btc/btc_suite_test.go index ba716427..76720ce7 100644 --- a/pkg/btc/btc_suite_test.go +++ b/pkg/btc/btc_suite_test.go @@ -25,9 +25,9 @@ import ( "github.com/sirupsen/logrus" ) -func TestBTCSuperNode(t *testing.T) { +func TestBTCWatcher(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Super Node BTC Suite Test") + RunSpecs(t, "BTC IPFS Watcher Suite Test") } var _ = BeforeSuite(func() { diff --git a/pkg/btc/cid_retriever.go b/pkg/btc/cid_retriever.go index 5a322ea1..b52836e2 100644 --- a/pkg/btc/cid_retriever.go +++ b/pkg/btc/cid_retriever.go @@ -169,7 +169,7 @@ func (bcr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID // RetrieveGapsInData is used to find the the block numbers at which we are missing data in the db func (bcr *CIDRetriever) RetrieveGapsInData(validationLevel int) ([]shared.Gap, error) { - log.Info("searching for gaps in the btc super node database") + log.Info("searching for gaps in the btc ipfs watcher database") startingBlock, err := bcr.RetrieveFirstBlockNumber() if err != nil { return nil, fmt.Errorf("btc CIDRetriever RetrieveFirstBlockNumber error: %v", err) diff --git a/pkg/btc/subscription_config.go b/pkg/btc/subscription_config.go index 0e0c2ab7..49c18d94 100644 --- a/pkg/btc/subscription_config.go +++ b/pkg/btc/subscription_config.go @@ -25,7 +25,7 @@ import ( "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" ) -// SubscriptionSettings config is used by a subscriber to specify what bitcoin data to stream from the super node +// SubscriptionSettings config is used by a subscriber to specify what bitcoin data to stream from the watcher type SubscriptionSettings struct { BackFill bool BackFillOnly bool @@ -55,36 +55,36 @@ type TxFilter struct { 
func NewBtcSubscriptionConfig() (*SubscriptionSettings, error) { sc := new(SubscriptionSettings) // Below default to false, which means we do not backfill by default - sc.BackFill = viper.GetBool("superNode.btcSubscription.historicalData") - sc.BackFillOnly = viper.GetBool("superNode.btcSubscription.historicalDataOnly") + sc.BackFill = viper.GetBool("watcher.btcSubscription.historicalData") + sc.BackFillOnly = viper.GetBool("watcher.btcSubscription.historicalDataOnly") // Below default to 0 // 0 start means we start at the beginning and 0 end means we continue indefinitely - sc.Start = big.NewInt(viper.GetInt64("superNode.btcSubscription.startingBlock")) - sc.End = big.NewInt(viper.GetInt64("superNode.btcSubscription.endingBlock")) + sc.Start = big.NewInt(viper.GetInt64("watcher.btcSubscription.startingBlock")) + sc.End = big.NewInt(viper.GetInt64("watcher.btcSubscription.endingBlock")) // Below default to false, which means we get all headers by default sc.HeaderFilter = HeaderFilter{ - Off: viper.GetBool("superNode.btcSubscription.headerFilter.off"), + Off: viper.GetBool("watcher.btcSubscription.headerFilter.off"), } // Below defaults to false and two slices of length 0 // Which means we get all transactions by default - pksc := viper.Get("superNode.btcSubscription.txFilter.pkScriptClass") + pksc := viper.Get("watcher.btcSubscription.txFilter.pkScriptClass") pkScriptClasses, ok := pksc.([]uint8) if !ok { - return nil, errors.New("superNode.btcSubscription.txFilter.pkScriptClass needs to be an array of uint8s") + return nil, errors.New("watcher.btcSubscription.txFilter.pkScriptClass needs to be an array of uint8s") } - is := viper.Get("superNode.btcSubscription.txFilter.indexes") + is := viper.Get("watcher.btcSubscription.txFilter.indexes") indexes, ok := is.([]int64) if !ok { - return nil, errors.New("superNode.btcSubscription.txFilter.indexes needs to be an array of int64s") + return nil, errors.New("watcher.btcSubscription.txFilter.indexes needs to be an array 
of int64s") } sc.TxFilter = TxFilter{ - Off: viper.GetBool("superNode.btcSubscription.txFilter.off"), - Segwit: viper.GetBool("superNode.btcSubscription.txFilter.segwit"), - WitnessHashes: viper.GetStringSlice("superNode.btcSubscription.txFilter.witnessHashes"), + Off: viper.GetBool("watcher.btcSubscription.txFilter.off"), + Segwit: viper.GetBool("watcher.btcSubscription.txFilter.segwit"), + WitnessHashes: viper.GetStringSlice("watcher.btcSubscription.txFilter.witnessHashes"), PkScriptClasses: pkScriptClasses, Indexes: indexes, - MultiSig: viper.GetBool("superNode.btcSubscription.txFilter.multiSig"), - Addresses: viper.GetStringSlice("superNode.btcSubscription.txFilter.addresses"), + MultiSig: viper.GetBool("watcher.btcSubscription.txFilter.multiSig"), + Addresses: viper.GetStringSlice("watcher.btcSubscription.txFilter.addresses"), } return sc, nil } diff --git a/pkg/btc/test_helpers.go b/pkg/btc/test_helpers.go index 44b786c9..09a36093 100644 --- a/pkg/btc/test_helpers.go +++ b/pkg/btc/test_helpers.go @@ -22,7 +22,7 @@ import ( "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" ) -// TearDownDB is used to tear down the super node dbs after tests +// TearDownDB is used to tear down the watcher dbs after tests func TearDownDB(db *postgres.DB) { tx, err := db.Beginx() Expect(err).NotTo(HaveOccurred()) diff --git a/pkg/watch/constructors.go b/pkg/builders/builders.go similarity index 99% rename from pkg/watch/constructors.go rename to pkg/builders/builders.go index f4785cf2..51aa3432 100644 --- a/pkg/watch/constructors.go +++ b/pkg/builders/builders.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
-package watcher +package builders import ( "fmt" diff --git a/pkg/streamer/super_node_streamer.go b/pkg/client/client.go similarity index 50% rename from pkg/streamer/super_node_streamer.go rename to pkg/client/client.go index 94af4539..5f365525 100644 --- a/pkg/streamer/super_node_streamer.go +++ b/pkg/client/client.go @@ -14,29 +14,30 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -// Streamer is used by watchers to stream eth data from a vulcanizedb super node -package streamer +// Client is used by watchers to stream chain IPLD data from a vulcanizedb ipfs-blockchain-watcher +package client import ( + "context" + "github.com/ethereum/go-ethereum/rpc" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch" ) -// SuperNodeStreamer is the underlying struct for the shared.SuperNodeStreamer interface -type SuperNodeStreamer struct { - Client core.RPCClient +// Client is used to subscribe to the ipfs-blockchain-watcher ipld data stream +type Client struct { + c *rpc.Client } -// NewSuperNodeStreamer creates a pointer to a new SuperNodeStreamer which satisfies the ISuperNodeStreamer interface -func NewSuperNodeStreamer(client core.RPCClient) *SuperNodeStreamer { - return &SuperNodeStreamer{ - Client: client, +// NewClient creates a new Client +func NewClient(c *rpc.Client) *Client { + return &Client{ + c: c, } } -// Stream is the main loop for subscribing to data from a vulcanizedb super node -func (sds *SuperNodeStreamer) Stream(payloadChan chan watcher.SubscriptionPayload, rlpParams []byte) (*rpc.ClientSubscription, error) { - return sds.Client.Subscribe("vdb", payloadChan, "stream", rlpParams) +// Stream is the main loop for subscribing to iplds from an ipfs-blockchain-watcher server +func (c *Client) Stream(payloadChan chan watch.SubscriptionPayload, rlpParams []byte) (*rpc.ClientSubscription, error) { + return 
c.c.Subscribe(context.Background(), "vdb", payloadChan, "stream", rlpParams) } diff --git a/pkg/client/rpc_client.go b/pkg/client/rpc_client.go deleted file mode 100644 index f4aa889f..00000000 --- a/pkg/client/rpc_client.go +++ /dev/null @@ -1,93 +0,0 @@ -// VulcanizeDB -// Copyright © 2019 Vulcanize - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. - -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package client - -import ( - "context" - "errors" - "reflect" - - "github.com/ethereum/go-ethereum/rpc" -) - -// RPCClient is a wrapper around the geth RPC client -type RPCClient struct { - client *rpc.Client - ipcPath string -} - -// BatchElem is a struct to hold the elements of a BatchCall -type BatchElem struct { - Method string - Args []interface{} - Result interface{} - Error error -} - -// NewRPCClient creates a new RpcClient -func NewRPCClient(client *rpc.Client, ipcPath string) RPCClient { - return RPCClient{ - client: client, - ipcPath: ipcPath, - } -} - -// CallContext makes an rpc method call with the provided context and arguments -func (client RPCClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { - //If an empty interface (or other nil object) is passed to CallContext, when the JSONRPC message is created the params will - //be interpreted as [null]. This seems to work fine for most of the ethereum clients (which presumably ignore a null parameter. 
- //Ganache however does not ignore it, and throws an 'Incorrect number of arguments' error. - if args == nil { - return client.client.CallContext(ctx, result, method) - } - return client.client.CallContext(ctx, result, method, args...) -} - -func (client RPCClient) IpcPath() string { - return client.ipcPath -} - -func (client RPCClient) SupportedModules() (map[string]string, error) { - return client.client.SupportedModules() -} - -func (client RPCClient) BatchCall(batch []BatchElem) error { - var rpcBatch []rpc.BatchElem - for _, batchElem := range batch { - var newBatchElem = rpc.BatchElem{ - Result: batchElem.Result, - Method: batchElem.Method, - Args: batchElem.Args, - Error: batchElem.Error, - } - rpcBatch = append(rpcBatch, newBatchElem) - } - return client.client.BatchCall(rpcBatch) -} - -// Subscribe subscribes to an rpc "namespace_subscribe" subscription with the given channel -// The first argument needs to be the method we wish to invoke -func (client RPCClient) Subscribe(namespace string, payloadChan interface{}, args ...interface{}) (*rpc.ClientSubscription, error) { - chanVal := reflect.ValueOf(payloadChan) - if chanVal.Kind() != reflect.Chan || chanVal.Type().ChanDir()&reflect.SendDir == 0 { - return nil, errors.New("second argument to Subscribe must be a writable channel") - } - if chanVal.IsNil() { - return nil, errors.New("channel given to Subscribe must not be nil") - } - return client.client.Subscribe(context.Background(), namespace, payloadChan, args...) -} diff --git a/pkg/core/eth_client.go b/pkg/core/eth_client.go deleted file mode 100644 index d2aa2779..00000000 --- a/pkg/core/eth_client.go +++ /dev/null @@ -1,36 +0,0 @@ -// VulcanizeDB -// Copyright © 2019 Vulcanize - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. - -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package core - -import ( - "context" - "math/big" - - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" -) - -type EthClient interface { - BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) - CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) - FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) - HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) - TransactionSender(ctx context.Context, tx *types.Transaction, block common.Hash, index uint) (common.Address, error) - TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) - BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) -} diff --git a/pkg/eth/api.go b/pkg/eth/api.go index 0111b137..6c9c40bd 100644 --- a/pkg/eth/api.go +++ b/pkg/eth/api.go @@ -29,10 +29,10 @@ import ( "github.com/ethereum/go-ethereum/rpc" ) -// APIName is the namespace for the super node's eth api +// APIName is the namespace for the watcher's eth api const APIName = "eth" -// APIVersion is the version of the super node's eth api +// APIVersion is the version of the watcher's eth api const APIVersion = "0.0.1" type PublicEthAPI struct { @@ -181,7 +181,7 @@ func (pea *PublicEthAPI) GetBlockByHash(ctx context.Context, hash common.Hash, f } // GetTransactionByHash returns the transaction for the given hash -// SuperNode cannot currently handle pending/tx_pool txs +// eth ipfs-blockchain-watcher cannot currently 
handle pending/tx_pool txs func (pea *PublicEthAPI) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) { // Try to return an already finalized transaction tx, blockHash, blockNumber, index, err := pea.B.GetTransaction(ctx, hash) diff --git a/pkg/eth/backend.go b/pkg/eth/backend.go index 61213cee..2128a47d 100644 --- a/pkg/eth/backend.go +++ b/pkg/eth/backend.go @@ -158,7 +158,7 @@ func (b *Backend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log } // BlockByNumber returns the requested canonical block. -// Since the SuperNode can contain forked blocks, it is recommended to fetch BlockByHash as +// Since the ipfs-blockchain-watcher database can contain forked blocks, it is recommended to fetch BlockByHash as // fetching by number can return non-deterministic results (returns the first block found at that height) func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber) (*types.Block, error) { var err error diff --git a/pkg/eth/cid_retriever.go b/pkg/eth/cid_retriever.go index c5ca07e1..367b9ead 100644 --- a/pkg/eth/cid_retriever.go +++ b/pkg/eth/cid_retriever.go @@ -443,7 +443,7 @@ func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageF // RetrieveGapsInData is used to find the the block numbers at which we are missing data in the db // it finds the union of heights where no data exists and where the times_validated is lower than the validation level func (ecr *CIDRetriever) RetrieveGapsInData(validationLevel int) ([]shared.Gap, error) { - log.Info("searching for gaps in the eth super node database") + log.Info("searching for gaps in the eth ipfs watcher database") startingBlock, err := ecr.RetrieveFirstBlockNumber() if err != nil { return nil, fmt.Errorf("eth CIDRetriever RetrieveFirstBlockNumber error: %v", err) diff --git a/pkg/eth/eth_suite_test.go b/pkg/eth/eth_suite_test.go index a2831e54..ab4f73df 100644 --- a/pkg/eth/eth_suite_test.go +++ 
b/pkg/eth/eth_suite_test.go @@ -25,9 +25,9 @@ import ( "github.com/sirupsen/logrus" ) -func TestETHSuperNode(t *testing.T) { +func TestETHWatcher(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Super Node ETH Suite Test") + RunSpecs(t, "ETH IPFS Watcher Suite Test") } var _ = BeforeSuite(func() { diff --git a/pkg/eth/mocks/batch_client.go b/pkg/eth/mocks/batch_client.go index ccb16d3f..a4b02729 100644 --- a/pkg/eth/mocks/batch_client.go +++ b/pkg/eth/mocks/batch_client.go @@ -22,9 +22,7 @@ import ( "errors" "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/statediff" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/client" ) // BackFillerClient is a mock client for use in backfiller tests @@ -46,7 +44,7 @@ func (mc *BackFillerClient) SetReturnDiffAt(height uint64, diffPayload statediff } // BatchCall mockClient method to simulate batch call to geth -func (mc *BackFillerClient) BatchCall(batch []client.BatchElem) error { +func (mc *BackFillerClient) BatchCall(batch []rpc.BatchElem) error { if mc.MappedStateDiffAt == nil { return errors.New("mockclient needs to be initialized with statediff payloads and errors") } diff --git a/pkg/eth/subscription_config.go b/pkg/eth/subscription_config.go index 9c86d5d7..b56585ee 100644 --- a/pkg/eth/subscription_config.go +++ b/pkg/eth/subscription_config.go @@ -24,7 +24,7 @@ import ( "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" ) -// SubscriptionSettings config is used by a subscriber to specify what eth data to stream from the super node +// SubscriptionSettings config is used by a subscriber to specify what eth data to stream from the watcher type SubscriptionSettings struct { BackFill bool BackFillOnly bool @@ -78,50 +78,50 @@ type StorageFilter struct { func NewEthSubscriptionConfig() (*SubscriptionSettings, error) { sc := new(SubscriptionSettings) // Below default to false, which means we do not backfill by default - sc.BackFill = 
viper.GetBool("superNode.ethSubscription.historicalData") - sc.BackFillOnly = viper.GetBool("superNode.ethSubscription.historicalDataOnly") + sc.BackFill = viper.GetBool("watcher.ethSubscription.historicalData") + sc.BackFillOnly = viper.GetBool("watcher.ethSubscription.historicalDataOnly") // Below default to 0 // 0 start means we start at the beginning and 0 end means we continue indefinitely - sc.Start = big.NewInt(viper.GetInt64("superNode.ethSubscription.startingBlock")) - sc.End = big.NewInt(viper.GetInt64("superNode.ethSubscription.endingBlock")) + sc.Start = big.NewInt(viper.GetInt64("watcher.ethSubscription.startingBlock")) + sc.End = big.NewInt(viper.GetInt64("watcher.ethSubscription.endingBlock")) // Below default to false, which means we get all headers and no uncles by default sc.HeaderFilter = HeaderFilter{ - Off: viper.GetBool("superNode.ethSubscription.headerFilter.off"), - Uncles: viper.GetBool("superNode.ethSubscription.headerFilter.uncles"), + Off: viper.GetBool("watcher.ethSubscription.headerFilter.off"), + Uncles: viper.GetBool("watcher.ethSubscription.headerFilter.uncles"), } // Below defaults to false and two slices of length 0 // Which means we get all transactions by default sc.TxFilter = TxFilter{ - Off: viper.GetBool("superNode.ethSubscription.txFilter.off"), - Src: viper.GetStringSlice("superNode.ethSubscription.txFilter.src"), - Dst: viper.GetStringSlice("superNode.ethSubscription.txFilter.dst"), + Off: viper.GetBool("watcher.ethSubscription.txFilter.off"), + Src: viper.GetStringSlice("watcher.ethSubscription.txFilter.src"), + Dst: viper.GetStringSlice("watcher.ethSubscription.txFilter.dst"), } // By default all of the topic slices will be empty => match on any/all topics topics := make([][]string, 4) - topics[0] = viper.GetStringSlice("superNode.ethSubscription.receiptFilter.topic0s") - topics[1] = viper.GetStringSlice("superNode.ethSubscription.receiptFilter.topic1s") - topics[2] = 
viper.GetStringSlice("superNode.ethSubscription.receiptFilter.topic2s") - topics[3] = viper.GetStringSlice("superNode.ethSubscription.receiptFilter.topic3s") + topics[0] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic0s") + topics[1] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic1s") + topics[2] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic2s") + topics[3] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic3s") sc.ReceiptFilter = ReceiptFilter{ - Off: viper.GetBool("superNode.ethSubscription.receiptFilter.off"), - MatchTxs: viper.GetBool("superNode.ethSubscription.receiptFilter.matchTxs"), - LogAddresses: viper.GetStringSlice("superNode.ethSubscription.receiptFilter.contracts"), + Off: viper.GetBool("watcher.ethSubscription.receiptFilter.off"), + MatchTxs: viper.GetBool("watcher.ethSubscription.receiptFilter.matchTxs"), + LogAddresses: viper.GetStringSlice("watcher.ethSubscription.receiptFilter.contracts"), Topics: topics, } // Below defaults to two false, and a slice of length 0 // Which means we get all state leafs by default, but no intermediate nodes sc.StateFilter = StateFilter{ - Off: viper.GetBool("superNode.ethSubscription.stateFilter.off"), - IntermediateNodes: viper.GetBool("superNode.ethSubscription.stateFilter.intermediateNodes"), - Addresses: viper.GetStringSlice("superNode.ethSubscription.stateFilter.addresses"), + Off: viper.GetBool("watcher.ethSubscription.stateFilter.off"), + IntermediateNodes: viper.GetBool("watcher.ethSubscription.stateFilter.intermediateNodes"), + Addresses: viper.GetStringSlice("watcher.ethSubscription.stateFilter.addresses"), } // Below defaults to two false, and two slices of length 0 // Which means we get all storage leafs by default, but no intermediate nodes sc.StorageFilter = StorageFilter{ - Off: viper.GetBool("superNode.ethSubscription.storageFilter.off"), - IntermediateNodes: 
viper.GetBool("superNode.ethSubscription.storageFilter.intermediateNodes"), - Addresses: viper.GetStringSlice("superNode.ethSubscription.storageFilter.addresses"), - StorageKeys: viper.GetStringSlice("superNode.ethSubscription.storageFilter.storageKeys"), + Off: viper.GetBool("watcher.ethSubscription.storageFilter.off"), + IntermediateNodes: viper.GetBool("watcher.ethSubscription.storageFilter.intermediateNodes"), + Addresses: viper.GetStringSlice("watcher.ethSubscription.storageFilter.addresses"), + StorageKeys: viper.GetStringSlice("watcher.ethSubscription.storageFilter.storageKeys"), } return sc, nil } diff --git a/pkg/eth/test_helpers.go b/pkg/eth/test_helpers.go index 200bfcd3..5d241203 100644 --- a/pkg/eth/test_helpers.go +++ b/pkg/eth/test_helpers.go @@ -22,7 +22,7 @@ import ( "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" ) -// TearDownDB is used to tear down the super node dbs after tests +// TearDownDB is used to tear down the watcher dbs after tests func TearDownDB(db *postgres.DB) { tx, err := db.Beginx() Expect(err).NotTo(HaveOccurred()) diff --git a/pkg/historical/config.go b/pkg/historical/config.go new file mode 100644 index 00000000..f102da2b --- /dev/null +++ b/pkg/historical/config.go @@ -0,0 +1,149 @@ +// VulcanizeDB +// Copyright © 2019 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package historical + +import ( + "fmt" + "time" + + "github.com/spf13/viper" + + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/config" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" + "github.com/vulcanize/ipfs-blockchain-watcher/utils" +) + +// Env variables +const ( + SUPERNODE_CHAIN = "SUPERNODE_CHAIN" + SUPERNODE_FREQUENCY = "SUPERNODE_FREQUENCY" + SUPERNODE_BATCH_SIZE = "SUPERNODE_BATCH_SIZE" + SUPERNODE_BATCH_NUMBER = "SUPERNODE_BATCH_NUMBER" + SUPERNODE_VALIDATION_LEVEL = "SUPERNODE_VALIDATION_LEVEL" + + BACKFILL_MAX_IDLE_CONNECTIONS = "BACKFILL_MAX_IDLE_CONNECTIONS" + BACKFILL_MAX_OPEN_CONNECTIONS = "BACKFILL_MAX_OPEN_CONNECTIONS" + BACKFILL_MAX_CONN_LIFETIME = "BACKFILL_MAX_CONN_LIFETIME" +) + +// Config struct +type Config struct { + Chain shared.ChainType + IPFSPath string + IPFSMode shared.IPFSMode + DBConfig config.Database + + DB *postgres.DB + HTTPClient interface{} + Frequency time.Duration + BatchSize uint64 + BatchNumber uint64 + ValidationLevel int + Timeout time.Duration // HTTP connection timeout in seconds + NodeInfo node.Node +} + +// NewConfig is used to initialize a historical config from a .toml file +func NewConfig() (*Config, error) { + c := new(Config) + var err error + + viper.BindEnv("superNode.chain", SUPERNODE_CHAIN) + chain := viper.GetString("superNode.chain") + c.Chain, err = shared.NewChainType(chain) + if err != nil { + return nil, err + } + + c.IPFSMode, err = shared.GetIPFSMode() + if err != nil { + return nil, err + } + if c.IPFSMode == shared.LocalInterface || c.IPFSMode == shared.RemoteClient { + c.IPFSPath, err = shared.GetIPFSPath() + if err != nil { + return nil, err + } + } + + c.DBConfig.Init() + + if err := c.init(); err != nil { + return nil, err + } + + return c, nil +} + +func (c *Config) init() error { + var err error + + viper.BindEnv("ethereum.httpPath", 
shared.ETH_HTTP_PATH) + viper.BindEnv("bitcoin.httpPath", shared.BTC_HTTP_PATH) + viper.BindEnv("superNode.frequency", SUPERNODE_FREQUENCY) + viper.BindEnv("superNode.batchSize", SUPERNODE_BATCH_SIZE) + viper.BindEnv("superNode.batchNumber", SUPERNODE_BATCH_NUMBER) + viper.BindEnv("superNode.validationLevel", SUPERNODE_VALIDATION_LEVEL) + viper.BindEnv("superNode.timeout", shared.HTTP_TIMEOUT) + + timeout := viper.GetInt("superNode.timeout") + if timeout < 15 { + timeout = 15 + } + c.Timeout = time.Second * time.Duration(timeout) + + switch c.Chain { + case shared.Ethereum: + ethHTTP := viper.GetString("ethereum.httpPath") + c.NodeInfo, c.HTTPClient, err = shared.GetEthNodeAndClient(fmt.Sprintf("http://%s", ethHTTP)) + if err != nil { + return err + } + case shared.Bitcoin: + btcHTTP := viper.GetString("bitcoin.httpPath") + c.NodeInfo, c.HTTPClient = shared.GetBtcNodeAndClient(btcHTTP) + } + + freq := viper.GetInt("superNode.frequency") + var frequency time.Duration + if freq <= 0 { + frequency = time.Second * 30 + } else { + frequency = time.Second * time.Duration(freq) + } + c.Frequency = frequency + c.BatchSize = uint64(viper.GetInt64("superNode.batchSize")) + c.BatchNumber = uint64(viper.GetInt64("superNode.batchNumber")) + c.ValidationLevel = viper.GetInt("superNode.validationLevel") + + dbConn := overrideDBConnConfig(c.DBConfig) + db := utils.LoadPostgres(dbConn, c.NodeInfo) + c.DB = &db + return nil +} + +func overrideDBConnConfig(con config.Database) config.Database { + viper.BindEnv("database.backFill.maxIdle", BACKFILL_MAX_IDLE_CONNECTIONS) + viper.BindEnv("database.backFill.maxOpen", BACKFILL_MAX_OPEN_CONNECTIONS) + viper.BindEnv("database.backFill.maxLifetime", BACKFILL_MAX_CONN_LIFETIME) + con.MaxIdle = viper.GetInt("database.backFill.maxIdle") + con.MaxOpen = viper.GetInt("database.backFill.maxOpen") + con.MaxLifetime = viper.GetInt("database.backFill.maxLifetime") + return con +} diff --git a/pkg/core/rpc_client.go 
b/pkg/historical/historical_suite_test.go similarity index 61% rename from pkg/core/rpc_client.go rename to pkg/historical/historical_suite_test.go index 2d962633..835abd05 100644 --- a/pkg/core/rpc_client.go +++ b/pkg/historical/historical_suite_test.go @@ -14,20 +14,22 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -package core +package historical_test import ( - "context" + "io/ioutil" + "testing" - "github.com/ethereum/go-ethereum/rpc" - - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/client" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/sirupsen/logrus" ) -type RPCClient interface { - CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error - BatchCall(batch []client.BatchElem) error - IpcPath() string - SupportedModules() (map[string]string, error) - Subscribe(namespace string, payloadChan interface{}, args ...interface{}) (*rpc.ClientSubscription, error) +func TestIPFSWatcher(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "IPFS Watcher Historical Suite Test") } + +var _ = BeforeSuite(func() { + logrus.SetOutput(ioutil.Discard) +}) diff --git a/pkg/watch/backfiller.go b/pkg/historical/service.go similarity index 85% rename from pkg/watch/backfiller.go rename to pkg/historical/service.go index 5f676c3e..81494d5d 100644 --- a/pkg/watch/backfiller.go +++ b/pkg/historical/service.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
-package watcher +package historical import ( "sync" @@ -22,23 +22,19 @@ import ( log "github.com/sirupsen/logrus" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/builders" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/utils" ) -const ( - DefaultMaxBatchSize uint64 = 100 - DefaultMaxBatchNumber int64 = 50 -) - -// BackFillInterface for filling in gaps in the super node +// BackFillInterface for filling in gaps in the ipfs-blockchain-watcher db type BackFillInterface interface { - // Method for the super node to periodically check for and fill in gaps in its data using an archival node + // Method for the watcher to periodically check for and fill in gaps in its data using an archival node BackFill(wg *sync.WaitGroup) Stop() error } -// BackFillService for filling in gaps in the super node +// BackFillService for filling in gaps in the watcher type BackFillService struct { // Interface for converting payloads into IPLD object payloads Converter shared.PayloadConverter @@ -68,33 +64,33 @@ type BackFillService struct { // NewBackFillService returns a new BackFillInterface func NewBackFillService(settings *Config, screenAndServeChan chan shared.ConvertedData) (BackFillInterface, error) { - publisher, err := NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.BackFillDBConn, settings.IPFSMode) + publisher, err := builders.NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.DB, settings.IPFSMode) if err != nil { return nil, err } - indexer, err := NewCIDIndexer(settings.Chain, settings.BackFillDBConn, settings.IPFSMode) + indexer, err := builders.NewCIDIndexer(settings.Chain, settings.DB, settings.IPFSMode) if err != nil { return nil, err } - converter, err := NewPayloadConverter(settings.Chain) + converter, err := builders.NewPayloadConverter(settings.Chain) if err != nil { return nil, err } - retriever, err := NewCIDRetriever(settings.Chain, settings.BackFillDBConn) + retriever, err := 
builders.NewCIDRetriever(settings.Chain, settings.DB) if err != nil { return nil, err } - fetcher, err := NewPaylaodFetcher(settings.Chain, settings.HTTPClient, settings.Timeout) + fetcher, err := builders.NewPaylaodFetcher(settings.Chain, settings.HTTPClient, settings.Timeout) if err != nil { return nil, err } batchSize := settings.BatchSize if batchSize == 0 { - batchSize = DefaultMaxBatchSize + batchSize = shared.DefaultMaxBatchSize } batchNumber := int64(settings.BatchNumber) if batchNumber == 0 { - batchNumber = DefaultMaxBatchNumber + batchNumber = shared.DefaultMaxBatchNumber } return &BackFillService{ Indexer: indexer, @@ -112,7 +108,7 @@ func NewBackFillService(settings *Config, screenAndServeChan chan shared.Convert }, nil } -// BackFill periodically checks for and fills in gaps in the super node db +// BackFill periodically checks for and fills in gaps in the watcher db func (bfs *BackFillService) BackFill(wg *sync.WaitGroup) { ticker := time.NewTicker(bfs.GapCheckFrequency) go func() { @@ -126,7 +122,7 @@ func (bfs *BackFillService) BackFill(wg *sync.WaitGroup) { case <-ticker.C: gaps, err := bfs.Retriever.RetrieveGapsInData(bfs.validationLevel) if err != nil { - log.Errorf("%s super node db backFill RetrieveGapsInData error: %v", bfs.chain.String(), err) + log.Errorf("%s watcher db backFill RetrieveGapsInData error: %v", bfs.chain.String(), err) continue } // spin up worker goroutines for this search pass @@ -140,7 +136,7 @@ func (bfs *BackFillService) BackFill(wg *sync.WaitGroup) { log.Infof("backFilling %s data from %d to %d", bfs.chain.String(), gap.Start, gap.Stop) blockRangeBins, err := utils.GetBlockHeightBins(gap.Start, gap.Stop, bfs.BatchSize) if err != nil { - log.Errorf("%s super node db backFill GetBlockHeightBins error: %v", bfs.chain.String(), err) + log.Errorf("%s watcher db backFill GetBlockHeightBins error: %v", bfs.chain.String(), err) continue } for _, heights := range blockRangeBins { diff --git a/pkg/watch/backfiller_test.go 
b/pkg/historical/service_test.go similarity index 91% rename from pkg/watch/backfiller_test.go rename to pkg/historical/service_test.go index af337d93..2e3ef81b 100644 --- a/pkg/watch/backfiller_test.go +++ b/pkg/historical/service_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -package watcher_test +package historical_test import ( "sync" @@ -25,14 +25,14 @@ import ( "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/historical" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" mocks2 "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared/mocks" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch" ) var _ = Describe("BackFiller", func() { Describe("FillGaps", func() { - It("Periodically checks for and fills in gaps in the super node's data", func() { + It("Periodically checks for and fills in gaps in the watcher's data", func() { mockCidRepo := &mocks.CIDIndexer{ ReturnErr: nil, } @@ -59,15 +59,15 @@ var _ = Describe("BackFiller", func() { }, } quitChan := make(chan bool, 1) - backfiller := &watcher.BackFillService{ + backfiller := &historical.BackFillService{ Indexer: mockCidRepo, Publisher: mockPublisher, Converter: mockConverter, Fetcher: mockFetcher, Retriever: mockRetriever, GapCheckFrequency: time.Second * 2, - BatchSize: watcher.DefaultMaxBatchSize, - BatchNumber: watcher.DefaultMaxBatchNumber, + BatchSize: shared.DefaultMaxBatchSize, + BatchNumber: shared.DefaultMaxBatchNumber, QuitChan: quitChan, } wg := &sync.WaitGroup{} @@ -114,15 +114,15 @@ var _ = Describe("BackFiller", func() { }, } quitChan := make(chan bool, 1) - backfiller := &watcher.BackFillService{ + backfiller := &historical.BackFillService{ Indexer: mockCidRepo, Publisher: mockPublisher, Converter: mockConverter, Fetcher: mockFetcher, Retriever: mockRetriever, 
GapCheckFrequency: time.Second * 2, - BatchSize: watcher.DefaultMaxBatchSize, - BatchNumber: watcher.DefaultMaxBatchNumber, + BatchSize: shared.DefaultMaxBatchSize, + BatchNumber: shared.DefaultMaxBatchNumber, QuitChan: quitChan, } wg := &sync.WaitGroup{} @@ -168,15 +168,15 @@ var _ = Describe("BackFiller", func() { }, } quitChan := make(chan bool, 1) - backfiller := &watcher.BackFillService{ + backfiller := &historical.BackFillService{ Indexer: mockCidRepo, Publisher: mockPublisher, Converter: mockConverter, Fetcher: mockFetcher, Retriever: mockRetriever, GapCheckFrequency: time.Second * 2, - BatchSize: watcher.DefaultMaxBatchSize, - BatchNumber: watcher.DefaultMaxBatchNumber, + BatchSize: shared.DefaultMaxBatchSize, + BatchNumber: shared.DefaultMaxBatchNumber, QuitChan: quitChan, } wg := &sync.WaitGroup{} diff --git a/pkg/core/node_info.go b/pkg/node/node.go similarity index 65% rename from pkg/core/node_info.go rename to pkg/node/node.go index 245bd532..7faa881f 100644 --- a/pkg/core/node_info.go +++ b/pkg/node/node.go @@ -14,24 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
-package core - -import ( - "fmt" -) - -type NodeType int - -const ( - GETH NodeType = iota - PARITY - INFURA - GANACHE -) - -const ( - KOVAN_NETWORK_ID = 42 -) +package node type Node struct { GenesisBlock string @@ -39,19 +22,3 @@ type Node struct { ID string ClientName string } - -type ParityNodeInfo struct { - Track string - ParityVersion `json:"version"` - Hash string -} - -func (pn ParityNodeInfo) String() string { - return fmt.Sprintf("Parity/v%d.%d.%d/", pn.Major, pn.Minor, pn.Patch) -} - -type ParityVersion struct { - Major int - Minor int - Patch int -} diff --git a/pkg/postgres/postgres.go b/pkg/postgres/postgres.go index df638ab1..1e257568 100644 --- a/pkg/postgres/postgres.go +++ b/pkg/postgres/postgres.go @@ -22,16 +22,16 @@ import ( "github.com/jmoiron/sqlx" _ "github.com/lib/pq" //postgres driver "github.com/vulcanize/ipfs-blockchain-watcher/pkg/config" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node" ) type DB struct { *sqlx.DB - Node core.Node + Node node.Node NodeID int64 } -func NewDB(databaseConfig config.Database, node core.Node) (*DB, error) { +func NewDB(databaseConfig config.Database, node node.Node) (*DB, error) { connectString := config.DbConnectionString(databaseConfig) db, connectErr := sqlx.Connect("postgres", connectString) if connectErr != nil { @@ -55,7 +55,7 @@ func NewDB(databaseConfig config.Database, node core.Node) (*DB, error) { return &pg, nil } -func (db *DB) CreateNode(node *core.Node) error { +func (db *DB) CreateNode(node *node.Node) error { var nodeID int64 err := db.QueryRow( `INSERT INTO nodes (genesis_block, network_id, node_id, client_name) diff --git a/pkg/postgres/postgres_test.go b/pkg/postgres/postgres_test.go index 7d0fac0d..f4faa090 100644 --- a/pkg/postgres/postgres_test.go +++ b/pkg/postgres/postgres_test.go @@ -28,7 +28,7 @@ import ( . 
"github.com/onsi/gomega" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/config" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" "github.com/vulcanize/ipfs-blockchain-watcher/test_config" ) @@ -84,7 +84,7 @@ var _ = Describe("Postgres DB", func() { It("throws error when can't connect to the database", func() { invalidDatabase := config.Database{} - node := core.Node{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"} + node := node.Node{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"} _, err := postgres.NewDB(invalidDatabase, node) @@ -94,7 +94,7 @@ var _ = Describe("Postgres DB", func() { It("throws error when can't create node", func() { badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100)) - node := core.Node{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"} + node := node.Node{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"} _, err := postgres.NewDB(test_config.DBConfig, node) diff --git a/pkg/resync/config.go b/pkg/resync/config.go index a6e91496..1451b2a6 100644 --- a/pkg/resync/config.go +++ b/pkg/resync/config.go @@ -23,7 +23,7 @@ import ( "github.com/spf13/viper" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/config" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/utils" @@ -55,15 +55,15 @@ type Config struct { IPFSMode shared.IPFSMode HTTPClient interface{} // Note this client is expected to support the retrieval of the specified data type(s) - NodeInfo core.Node // Info for the associated node + NodeInfo node.Node // Info for the associated node Ranges [][2]uint64 // The block height ranges to resync 
BatchSize uint64 // BatchSize for the resync http calls (client has to support batch sizing) Timeout time.Duration // HTTP connection timeout in seconds BatchNumber uint64 } -// NewReSyncConfig fills and returns a resync config from toml parameters -func NewReSyncConfig() (*Config, error) { +// NewConfig fills and returns a resync config from toml parameters +func NewConfig() (*Config, error) { c := new(Config) var err error diff --git a/pkg/resync/service.go b/pkg/resync/service.go index f14c856a..34bd1bf0 100644 --- a/pkg/resync/service.go +++ b/pkg/resync/service.go @@ -21,8 +21,8 @@ import ( "github.com/sirupsen/logrus" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/builders" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch" "github.com/vulcanize/ipfs-blockchain-watcher/utils" ) @@ -63,37 +63,37 @@ type Service struct { // NewResyncService creates and returns a resync service from the provided settings func NewResyncService(settings *Config) (Resync, error) { - publisher, err := watcher.NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.DB, settings.IPFSMode) + publisher, err := builders.NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.DB, settings.IPFSMode) if err != nil { return nil, err } - indexer, err := watcher.NewCIDIndexer(settings.Chain, settings.DB, settings.IPFSMode) + indexer, err := builders.NewCIDIndexer(settings.Chain, settings.DB, settings.IPFSMode) if err != nil { return nil, err } - converter, err := watcher.NewPayloadConverter(settings.Chain) + converter, err := builders.NewPayloadConverter(settings.Chain) if err != nil { return nil, err } - retriever, err := watcher.NewCIDRetriever(settings.Chain, settings.DB) + retriever, err := builders.NewCIDRetriever(settings.Chain, settings.DB) if err != nil { return nil, err } - fetcher, err := watcher.NewPaylaodFetcher(settings.Chain, settings.HTTPClient, settings.Timeout) + fetcher, err := 
builders.NewPaylaodFetcher(settings.Chain, settings.HTTPClient, settings.Timeout) if err != nil { return nil, err } - cleaner, err := watcher.NewCleaner(settings.Chain, settings.DB) + cleaner, err := builders.NewCleaner(settings.Chain, settings.DB) if err != nil { return nil, err } batchSize := settings.BatchSize if batchSize == 0 { - batchSize = watcher.DefaultMaxBatchSize + batchSize = shared.DefaultMaxBatchSize } batchNumber := int64(settings.BatchNumber) if batchNumber == 0 { - batchNumber = watcher.DefaultMaxBatchNumber + batchNumber = shared.DefaultMaxBatchNumber } return &Service{ Indexer: indexer, diff --git a/pkg/config/client.go b/pkg/shared/constants.go similarity index 88% rename from pkg/config/client.go rename to pkg/shared/constants.go index ea426d5d..3dc2994c 100644 --- a/pkg/config/client.go +++ b/pkg/shared/constants.go @@ -14,8 +14,9 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -package config +package shared -type Client struct { - IPCPath string -} +const ( + DefaultMaxBatchSize uint64 = 100 + DefaultMaxBatchNumber int64 = 50 +) diff --git a/pkg/shared/env.go b/pkg/shared/env.go index 70cdf497..58191613 100644 --- a/pkg/shared/env.go +++ b/pkg/shared/env.go @@ -24,7 +24,7 @@ import ( "github.com/btcsuite/btcd/rpcclient" "github.com/spf13/viper" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node" ) // Env variables @@ -51,7 +51,7 @@ const ( ) // GetEthNodeAndClient returns eth node info and client from path url -func GetEthNodeAndClient(path string) (core.Node, *rpc.Client, error) { +func GetEthNodeAndClient(path string) (node.Node, *rpc.Client, error) { viper.BindEnv("ethereum.nodeID", ETH_NODE_ID) viper.BindEnv("ethereum.clientName", ETH_CLIENT_NAME) viper.BindEnv("ethereum.genesisBlock", ETH_GENESIS_BLOCK) @@ -59,9 +59,9 @@ func GetEthNodeAndClient(path string) (core.Node, *rpc.Client, error) { rpcClient, 
err := rpc.Dial(path) if err != nil { - return core.Node{}, nil, err + return node.Node{}, nil, err } - return core.Node{ + return node.Node{ ID: viper.GetString("ethereum.nodeID"), ClientName: viper.GetString("ethereum.clientName"), GenesisBlock: viper.GetString("ethereum.genesisBlock"), @@ -94,7 +94,7 @@ func GetIPFSMode() (IPFSMode, error) { } // GetBtcNodeAndClient returns btc node info from path url -func GetBtcNodeAndClient(path string) (core.Node, *rpcclient.ConnConfig) { +func GetBtcNodeAndClient(path string) (node.Node, *rpcclient.ConnConfig) { viper.BindEnv("bitcoin.nodeID", BTC_NODE_ID) viper.BindEnv("bitcoin.clientName", BTC_CLIENT_NAME) viper.BindEnv("bitcoin.genesisBlock", BTC_GENESIS_BLOCK) @@ -103,7 +103,7 @@ func GetBtcNodeAndClient(path string) (core.Node, *rpcclient.ConnConfig) { viper.BindEnv("bitcoin.user", BTC_NODE_USER) // For bitcoin we load in node info from the config because there is no RPC endpoint to retrieve this from the node - return core.Node{ + return node.Node{ ID: viper.GetString("bitcoin.nodeID"), ClientName: viper.GetString("bitcoin.clientName"), GenesisBlock: viper.GetString("bitcoin.genesisBlock"), diff --git a/pkg/shared/functions.go b/pkg/shared/functions.go index 5eecad2e..9116116c 100644 --- a/pkg/shared/functions.go +++ b/pkg/shared/functions.go @@ -19,11 +19,8 @@ package shared import ( "bytes" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld" - - "github.com/ipfs/go-cid" - "github.com/ethereum/go-ethereum/common" + "github.com/ipfs/go-cid" "github.com/ipfs/go-ipfs-blockstore" "github.com/ipfs/go-ipfs-ds-help" node "github.com/ipfs/go-ipld-format" @@ -31,6 +28,7 @@ import ( "github.com/sirupsen/logrus" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld" ) // ListContainsString used to check if a list of strings contains a particular string diff --git a/pkg/shared/test_helpers.go b/pkg/shared/test_helpers.go index da4a5070..8767e3b9 100644 
--- a/pkg/shared/test_helpers.go +++ b/pkg/shared/test_helpers.go @@ -18,15 +18,15 @@ package shared import ( "github.com/vulcanize/ipfs-blockchain-watcher/pkg/config" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" ) -// SetupDB is use to setup a db for super node tests +// SetupDB is use to setup a db for watcher tests func SetupDB() (*postgres.DB, error) { return postgres.NewDB(config.Database{ Hostname: "localhost", Name: "vulcanize_testing", Port: 5432, - }, core.Node{}) + }, node.Node{}) } diff --git a/pkg/watch/api.go b/pkg/watch/api.go index e7231889..44d23ef4 100644 --- a/pkg/watch/api.go +++ b/pkg/watch/api.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -package watcher +package watch import ( "context" @@ -25,8 +25,8 @@ import ( log "github.com/sirupsen/logrus" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" v "github.com/vulcanize/ipfs-blockchain-watcher/version" ) @@ -37,22 +37,22 @@ const APIName = "vdb" // APIVersion is the version of the state diffing service API const APIVersion = "0.0.1" -// PublicSuperNodeAPI is the public api for the super node -type PublicSuperNodeAPI struct { - sn SuperNode +// PublicWatcherAPI is the public api for the watcher +type PublicWatcherAPI struct { + w Watcher } -// NewPublicSuperNodeAPI creates a new PublicSuperNodeAPI with the provided underlying SyncPublishScreenAndServe process -func NewPublicSuperNodeAPI(superNodeInterface SuperNode) *PublicSuperNodeAPI { - return &PublicSuperNodeAPI{ - sn: superNodeInterface, +// NewPublicWatcherAPI creates a new PublicWatcherAPI 
with the provided underlying Watcher process +func NewPublicWatcherAPI(w Watcher) *PublicWatcherAPI { + return &PublicWatcherAPI{ + w: w, } } -// Stream is the public method to setup a subscription that fires off super node payloads as they are processed -func (api *PublicSuperNodeAPI) Stream(ctx context.Context, rlpParams []byte) (*rpc.Subscription, error) { +// Stream is the public method to setup a subscription that fires off IPLD payloads as they are processed +func (api *PublicWatcherAPI) Stream(ctx context.Context, rlpParams []byte) (*rpc.Subscription, error) { var params shared.SubscriptionSettings - switch api.sn.Chain() { + switch api.w.Chain() { case shared.Ethereum: var ethParams eth.SubscriptionSettings if err := rlp.DecodeBytes(rlpParams, ðParams); err != nil { @@ -81,22 +81,22 @@ func (api *PublicSuperNodeAPI) Stream(ctx context.Context, rlpParams []byte) (*r // subscribe to events from the SyncPublishScreenAndServe service payloadChannel := make(chan SubscriptionPayload, PayloadChanBufferSize) quitChan := make(chan bool, 1) - go api.sn.Subscribe(rpcSub.ID, payloadChannel, quitChan, params) + go api.w.Subscribe(rpcSub.ID, payloadChannel, quitChan, params) // loop and await payloads and relay them to the subscriber using notifier for { select { case packet := <-payloadChannel: if err := notifier.Notify(rpcSub.ID, packet); err != nil { - log.Error("Failed to send super node packet", "err", err) - api.sn.Unsubscribe(rpcSub.ID) + log.Error("Failed to send watcher data packet", "err", err) + api.w.Unsubscribe(rpcSub.ID) return } case <-rpcSub.Err(): - api.sn.Unsubscribe(rpcSub.ID) + api.w.Unsubscribe(rpcSub.ID) return case <-quitChan: - // don't need to unsubscribe to super node, the service does so before sending the quit signal this way + // don't need to unsubscribe from the watcher, the service does so before sending the quit signal this way return } } @@ -105,21 +105,21 @@ func (api *PublicSuperNodeAPI) Stream(ctx context.Context, rlpParams []byte) 
(*r return rpcSub, nil } -// Node is a public rpc method to allow transformers to fetch the node info for the super node -// NOTE: this is the node info for the node that the super node is syncing from, not the node info for the super node itself -func (api *PublicSuperNodeAPI) Node() *core.Node { - return api.sn.Node() +// Node is a public rpc method to allow transformers to fetch the node info for the watcher +// NOTE: this is the node info for the node that the watcher is syncing from, not the node info for the watcher itself +func (api *PublicWatcherAPI) Node() *node.Node { + return api.w.Node() } -// Chain returns the chain type that this super node instance supports -func (api *PublicSuperNodeAPI) Chain() shared.ChainType { - return api.sn.Chain() +// Chain returns the chain type that this watcher instance supports +func (api *PublicWatcherAPI) Chain() shared.ChainType { + return api.w.Chain() } -// Struct for holding super node meta data +// Struct for holding watcher meta data type InfoAPI struct{} -// NewPublicSuperNodeAPI creates a new PublicSuperNodeAPI with the provided underlying SyncPublishScreenAndServe process +// NewInfoAPI creates a new InfoAPI func NewInfoAPI() *InfoAPI { return &InfoAPI{} } @@ -131,7 +131,7 @@ func (iapi *InfoAPI) Modules() map[string]string { } } -// NodeInfo gathers and returns a collection of metadata for the super node +// NodeInfo gathers and returns a collection of metadata for the watcher func (iapi *InfoAPI) NodeInfo() *p2p.NodeInfo { return &p2p.NodeInfo{ // TODO: formalize this @@ -140,7 +140,7 @@ func (iapi *InfoAPI) NodeInfo() *p2p.NodeInfo { } } -// Version returns the version of the super node +// Version returns the version of the watcher func (iapi *InfoAPI) Version() string { return v.VersionWithMeta } diff --git a/pkg/watch/config.go b/pkg/watch/config.go index 306643aa..aeb7df85 100644 --- a/pkg/watch/config.go +++ b/pkg/watch/config.go @@ -14,18 +14,17 @@ // You should have received a copy of the GNU Affero 
General Public License // along with this program. If not, see . -package watcher +package watch import ( "fmt" "os" "path/filepath" - "time" "github.com/spf13/viper" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/config" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" "github.com/vulcanize/ipfs-blockchain-watcher/utils" @@ -33,27 +32,19 @@ import ( // Env variables const ( - SUPERNODE_CHAIN = "SUPERNODE_CHAIN" - SUPERNODE_SYNC = "SUPERNODE_SYNC" - SUPERNODE_WORKERS = "SUPERNODE_WORKERS" - SUPERNODE_SERVER = "SUPERNODE_SERVER" - SUPERNODE_WS_PATH = "SUPERNODE_WS_PATH" - SUPERNODE_IPC_PATH = "SUPERNODE_IPC_PATH" - SUPERNODE_HTTP_PATH = "SUPERNODE_HTTP_PATH" - SUPERNODE_BACKFILL = "SUPERNODE_BACKFILL" - SUPERNODE_FREQUENCY = "SUPERNODE_FREQUENCY" - SUPERNODE_BATCH_SIZE = "SUPERNODE_BATCH_SIZE" - SUPERNODE_BATCH_NUMBER = "SUPERNODE_BATCH_NUMBER" - SUPERNODE_VALIDATION_LEVEL = "SUPERNODE_VALIDATION_LEVEL" + SUPERNODE_CHAIN = "SUPERNODE_CHAIN" + SUPERNODE_SYNC = "SUPERNODE_SYNC" + SUPERNODE_WORKERS = "SUPERNODE_WORKERS" + SUPERNODE_SERVER = "SUPERNODE_SERVER" + SUPERNODE_WS_PATH = "SUPERNODE_WS_PATH" + SUPERNODE_IPC_PATH = "SUPERNODE_IPC_PATH" + SUPERNODE_HTTP_PATH = "SUPERNODE_HTTP_PATH" + SUPERNODE_BACKFILL = "SUPERNODE_BACKFILL" SYNC_MAX_IDLE_CONNECTIONS = "SYNC_MAX_IDLE_CONNECTIONS" SYNC_MAX_OPEN_CONNECTIONS = "SYNC_MAX_OPEN_CONNECTIONS" SYNC_MAX_CONN_LIFETIME = "SYNC_MAX_CONN_LIFETIME" - BACKFILL_MAX_IDLE_CONNECTIONS = "BACKFILL_MAX_IDLE_CONNECTIONS" - BACKFILL_MAX_OPEN_CONNECTIONS = "BACKFILL_MAX_OPEN_CONNECTIONS" - BACKFILL_MAX_CONN_LIFETIME = "BACKFILL_MAX_CONN_LIFETIME" - SERVER_MAX_IDLE_CONNECTIONS = "SERVER_MAX_IDLE_CONNECTIONS" SERVER_MAX_OPEN_CONNECTIONS = "SERVER_MAX_OPEN_CONNECTIONS" SERVER_MAX_CONN_LIFETIME = "SERVER_MAX_CONN_LIFETIME" @@ -61,7 +52,6 @@ const ( // 
Config struct type Config struct { - // Ubiquitous fields Chain shared.ChainType IPFSPath string IPFSMode shared.IPFSMode @@ -77,21 +67,14 @@ type Config struct { SyncDBConn *postgres.DB Workers int WSClient interface{} - NodeInfo core.Node - // Backfiller params - BackFill bool - BackFillDBConn *postgres.DB - HTTPClient interface{} - Frequency time.Duration - BatchSize uint64 - BatchNumber uint64 - ValidationLevel int - Timeout time.Duration // HTTP connection timeout in seconds + NodeInfo node.Node + // Historical switch + Historical bool } -// NewSuperNodeConfig is used to initialize a SuperNode config from a .toml file -// Separate chain supernode instances need to be ran with separate ipfs path in order to avoid lock contention on the ipfs repository lockfile -func NewSuperNodeConfig() (*Config, error) { +// NewConfig is used to initialize a watcher config from a .toml file +// Separate chain watcher instances need to be ran with separate ipfs path in order to avoid lock contention on the ipfs repository lockfile +func NewConfig() (*Config, error) { c := new(Config) var err error @@ -106,6 +89,7 @@ func NewSuperNodeConfig() (*Config, error) { viper.BindEnv("superNode.httpPath", SUPERNODE_HTTP_PATH) viper.BindEnv("superNode.backFill", SUPERNODE_BACKFILL) + c.Historical = viper.GetBool("superNode.backFill") chain := viper.GetString("superNode.chain") c.Chain, err = shared.NewChainType(chain) if err != nil { @@ -174,70 +158,14 @@ func NewSuperNodeConfig() (*Config, error) { c.ServeDBConn = &serveDB } - c.BackFill = viper.GetBool("superNode.backFill") - if c.BackFill { - if err := c.BackFillFields(); err != nil { - return nil, err - } - } - return c, nil } -// BackFillFields is used to fill in the BackFill fields of the config -func (c *Config) BackFillFields() error { - var err error - - viper.BindEnv("ethereum.httpPath", shared.ETH_HTTP_PATH) - viper.BindEnv("bitcoin.httpPath", shared.BTC_HTTP_PATH) - viper.BindEnv("superNode.frequency", SUPERNODE_FREQUENCY) - 
viper.BindEnv("superNode.batchSize", SUPERNODE_BATCH_SIZE) - viper.BindEnv("superNode.batchNumber", SUPERNODE_BATCH_NUMBER) - viper.BindEnv("superNode.validationLevel", SUPERNODE_VALIDATION_LEVEL) - viper.BindEnv("superNode.timeout", shared.HTTP_TIMEOUT) - - timeout := viper.GetInt("superNode.timeout") - if timeout < 15 { - timeout = 15 - } - c.Timeout = time.Second * time.Duration(timeout) - - switch c.Chain { - case shared.Ethereum: - ethHTTP := viper.GetString("ethereum.httpPath") - c.NodeInfo, c.HTTPClient, err = shared.GetEthNodeAndClient(fmt.Sprintf("http://%s", ethHTTP)) - if err != nil { - return err - } - case shared.Bitcoin: - btcHTTP := viper.GetString("bitcoin.httpPath") - c.NodeInfo, c.HTTPClient = shared.GetBtcNodeAndClient(btcHTTP) - } - - freq := viper.GetInt("superNode.frequency") - var frequency time.Duration - if freq <= 0 { - frequency = time.Second * 30 - } else { - frequency = time.Second * time.Duration(freq) - } - c.Frequency = frequency - c.BatchSize = uint64(viper.GetInt64("superNode.batchSize")) - c.BatchNumber = uint64(viper.GetInt64("superNode.batchNumber")) - c.ValidationLevel = viper.GetInt("superNode.validationLevel") - - backFillDBConn := overrideDBConnConfig(c.DBConfig, BackFill) - backFillDB := utils.LoadPostgres(backFillDBConn, c.NodeInfo) - c.BackFillDBConn = &backFillDB - return nil -} - type mode string var ( - Sync mode = "sync" - BackFill mode = "backFill" - Serve mode = "serve" + Sync mode = "sync" + Serve mode = "serve" ) func overrideDBConnConfig(con config.Database, m mode) config.Database { @@ -249,13 +177,6 @@ func overrideDBConnConfig(con config.Database, m mode) config.Database { con.MaxIdle = viper.GetInt("database.sync.maxIdle") con.MaxOpen = viper.GetInt("database.sync.maxOpen") con.MaxLifetime = viper.GetInt("database.sync.maxLifetime") - case BackFill: - viper.BindEnv("database.backFill.maxIdle", BACKFILL_MAX_IDLE_CONNECTIONS) - viper.BindEnv("database.backFill.maxOpen", BACKFILL_MAX_OPEN_CONNECTIONS) - 
viper.BindEnv("database.backFill.maxLifetime", BACKFILL_MAX_CONN_LIFETIME) - con.MaxIdle = viper.GetInt("database.backFill.maxIdle") - con.MaxOpen = viper.GetInt("database.backFill.maxOpen") - con.MaxLifetime = viper.GetInt("database.backFill.maxLifetime") case Serve: viper.BindEnv("database.server.maxIdle", SERVER_MAX_IDLE_CONNECTIONS) viper.BindEnv("database.server.maxOpen", SERVER_MAX_OPEN_CONNECTIONS) diff --git a/pkg/watch/helpers.go b/pkg/watch/helpers.go index b6b9af81..1e13c18a 100644 --- a/pkg/watch/helpers.go +++ b/pkg/watch/helpers.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -package watcher +package watch import log "github.com/sirupsen/logrus" diff --git a/pkg/watch/service.go b/pkg/watch/service.go index 2ce6a6ac..922ff6fe 100644 --- a/pkg/watch/service.go +++ b/pkg/watch/service.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
-package watcher +package watch import ( "fmt" @@ -22,13 +22,14 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/node" + ethnode "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" log "github.com/sirupsen/logrus" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/builders" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" ) @@ -37,12 +38,12 @@ const ( PayloadChanBufferSize = 2000 ) -// SuperNode is the top level interface for streaming, converting to IPLDs, publishing, +// Watcher is the top level interface for streaming, converting to IPLDs, publishing, // and indexing all chain data; screening this data; and serving it up to subscribed clients // This service is compatible with the Ethereum service interface (node.Service) -type SuperNode interface { +type Watcher interface { // APIs(), Protocols(), Start() and Stop() - node.Service + ethnode.Service // Data processing event loop Sync(wg *sync.WaitGroup, forwardPayloadChan chan<- shared.ConvertedData) error // Pub-Sub handling event loop @@ -52,12 +53,12 @@ type SuperNode interface { // Method to unsubscribe from the service Unsubscribe(id rpc.ID) // Method to access the node info for the service - Node() *core.Node + Node() *node.Node // Method to access chain type Chain() shared.ChainType } -// Service is the underlying struct for the super node +// Service is the underlying struct for the watcher type Service struct { // Used to sync access to the Subscriptions sync.Mutex @@ -83,8 +84,8 @@ type Service struct { Subscriptions map[common.Hash]map[rpc.ID]Subscription // A mapping of subscription params hash to the corresponding subscription params 
SubscriptionTypes map[common.Hash]shared.SubscriptionSettings - // Info for the Geth node that this super node is working with - NodeInfo *core.Node + // Info for the Geth node that this watcher is working with + NodeInfo *node.Node // Number of publishAndIndex workers WorkerPoolSize int // chain type for this service @@ -97,40 +98,40 @@ type Service struct { serveWg *sync.WaitGroup } -// NewSuperNode creates a new super_node.Interface using an underlying super_node.Service struct -func NewSuperNode(settings *Config) (SuperNode, error) { +// NewWatcher creates a new Watcher using an underlying Service struct +func NewWatcher(settings *Config) (Watcher, error) { sn := new(Service) var err error // If we are syncing, initialize the needed interfaces if settings.Sync { - sn.Streamer, sn.PayloadChan, err = NewPayloadStreamer(settings.Chain, settings.WSClient) + sn.Streamer, sn.PayloadChan, err = builders.NewPayloadStreamer(settings.Chain, settings.WSClient) if err != nil { return nil, err } - sn.Converter, err = NewPayloadConverter(settings.Chain) + sn.Converter, err = builders.NewPayloadConverter(settings.Chain) if err != nil { return nil, err } - sn.Publisher, err = NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.SyncDBConn, settings.IPFSMode) + sn.Publisher, err = builders.NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.SyncDBConn, settings.IPFSMode) if err != nil { return nil, err } - sn.Indexer, err = NewCIDIndexer(settings.Chain, settings.SyncDBConn, settings.IPFSMode) + sn.Indexer, err = builders.NewCIDIndexer(settings.Chain, settings.SyncDBConn, settings.IPFSMode) if err != nil { return nil, err } - sn.Filterer, err = NewResponseFilterer(settings.Chain) + sn.Filterer, err = builders.NewResponseFilterer(settings.Chain) if err != nil { return nil, err } } // If we are serving, initialize the needed interfaces if settings.Serve { - sn.Retriever, err = NewCIDRetriever(settings.Chain, settings.ServeDBConn) + sn.Retriever, err = 
builders.NewCIDRetriever(settings.Chain, settings.ServeDBConn) if err != nil { return nil, err } - sn.IPLDFetcher, err = NewIPLDFetcher(settings.Chain, settings.IPFSPath, settings.ServeDBConn, settings.IPFSMode) + sn.IPLDFetcher, err = builders.NewIPLDFetcher(settings.Chain, settings.IPFSPath, settings.ServeDBConn, settings.IPFSMode) if err != nil { return nil, err } @@ -151,14 +152,14 @@ func (sap *Service) Protocols() []p2p.Protocol { return []p2p.Protocol{} } -// APIs returns the RPC descriptors the super node service offers +// APIs returns the RPC descriptors the watcher service offers func (sap *Service) APIs() []rpc.API { ifnoAPI := NewInfoAPI() apis := []rpc.API{ { Namespace: APIName, Version: APIVersion, - Service: NewPublicSuperNodeAPI(sap), + Service: NewPublicWatcherAPI(sap), Public: true, }, { @@ -180,7 +181,7 @@ func (sap *Service) APIs() []rpc.API { Public: true, }, } - chainAPI, err := NewPublicAPI(sap.chain, sap.db, sap.ipfsPath) + chainAPI, err := builders.NewPublicAPI(sap.chain, sap.db, sap.ipfsPath) if err != nil { log.Error(err) return apis @@ -211,7 +212,7 @@ func (sap *Service) Sync(wg *sync.WaitGroup, screenAndServePayload chan<- shared case payload := <-sap.PayloadChan: ipldPayload, err := sap.Converter.Convert(payload) if err != nil { - log.Errorf("super node conversion error for chain %s: %v", sap.chain.String(), err) + log.Errorf("watcher conversion error for chain %s: %v", sap.chain.String(), err) continue } log.Infof("%s data streamed at head height %d", sap.chain.String(), ipldPayload.Height()) @@ -229,7 +230,7 @@ func (sap *Service) Sync(wg *sync.WaitGroup, screenAndServePayload chan<- shared publishAndIndexPayload <- ipldPayload } case err := <-sub.Err(): - log.Errorf("super node subscription error for chain %s: %v", sap.chain.String(), err) + log.Errorf("watcher subscription error for chain %s: %v", sap.chain.String(), err) case <-sap.QuitChan: log.Infof("quiting %s Sync process", sap.chain.String()) return @@ -248,18 +249,18 @@ 
func (sap *Service) publishAndIndex(wg *sync.WaitGroup, id int, publishAndIndexP for { select { case payload := <-publishAndIndexPayload: - log.Debugf("%s super node publishAndIndex worker %d publishing data streamed at head height %d", sap.chain.String(), id, payload.Height()) + log.Debugf("%s watcher publishAndIndex worker %d publishing data streamed at head height %d", sap.chain.String(), id, payload.Height()) cidPayload, err := sap.Publisher.Publish(payload) if err != nil { - log.Errorf("%s super node publishAndIndex worker %d publishing error: %v", sap.chain.String(), id, err) + log.Errorf("%s watcher publishAndIndex worker %d publishing error: %v", sap.chain.String(), id, err) continue } - log.Debugf("%s super node publishAndIndex worker %d indexing data streamed at head height %d", sap.chain.String(), id, payload.Height()) + log.Debugf("%s watcher publishAndIndex worker %d indexing data streamed at head height %d", sap.chain.String(), id, payload.Height()) if err := sap.Indexer.Index(cidPayload); err != nil { - log.Errorf("%s super node publishAndIndex worker %d indexing error: %v", sap.chain.String(), id, err) + log.Errorf("%s watcher publishAndIndex worker %d indexing error: %v", sap.chain.String(), id, err) } case <-sap.QuitChan: - log.Infof("%s super node publishAndIndex worker %d shutting down", sap.chain.String(), id) + log.Infof("%s watcher publishAndIndex worker %d shutting down", sap.chain.String(), id) return } } @@ -298,7 +299,7 @@ func (sap *Service) filterAndServe(payload shared.ConvertedData) { // Retrieve the subscription parameters for this subscription type subConfig, ok := sap.SubscriptionTypes[ty] if !ok { - log.Errorf("super node %s subscription configuration for subscription type %s not available", sap.chain.String(), ty.Hex()) + log.Errorf("watcher %s subscription configuration for subscription type %s not available", sap.chain.String(), ty.Hex()) sap.closeType(ty) continue } @@ -310,19 +311,19 @@ func (sap *Service) 
filterAndServe(payload shared.ConvertedData) { } response, err := sap.Filterer.Filter(subConfig, payload) if err != nil { - log.Errorf("super node filtering error for chain %s: %v", sap.chain.String(), err) + log.Errorf("watcher filtering error for chain %s: %v", sap.chain.String(), err) sap.closeType(ty) continue } responseRLP, err := rlp.EncodeToBytes(response) if err != nil { - log.Errorf("super node rlp encoding error for chain %s: %v", sap.chain.String(), err) + log.Errorf("watcher rlp encoding error for chain %s: %v", sap.chain.String(), err) continue } for id, sub := range subs { select { case sub.PayloadChan <- SubscriptionPayload{Data: responseRLP, Err: "", Flag: EmptyFlag, Height: response.Height()}: - log.Debugf("sending super node %s payload to subscription %s", sap.chain.String(), id) + log.Debugf("sending watcher %s payload to subscription %s", sap.chain.String(), id) default: log.Infof("unable to send %s payload to subscription %s; channel has no receiver", sap.chain.String(), id) } @@ -368,7 +369,7 @@ func (sap *Service) Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitCha // Otherwise we only filter new data as it is streamed in from the state diffing geth node if params.HistoricalData() || params.HistoricalDataOnly() { if err := sap.sendHistoricalData(subscription, id, params); err != nil { - sendNonBlockingErr(subscription, fmt.Errorf("%s super node subscriber backfill error: %v", sap.chain.String(), err)) + sendNonBlockingErr(subscription, fmt.Errorf("%s watcher subscriber backfill error: %v", sap.chain.String(), err)) sendNonBlockingQuit(subscription) return } @@ -404,13 +405,13 @@ func (sap *Service) sendHistoricalData(sub Subscription, id rpc.ID, params share for i := startingBlock; i <= endingBlock; i++ { select { case <-sap.QuitChan: - log.Infof("%s super node historical data feed to subscription %s closed", sap.chain.String(), id) + log.Infof("%s watcher historical data feed to subscription %s closed", sap.chain.String(), id) 
return default: } cidWrappers, empty, err := sap.Retriever.Retrieve(params, i) if err != nil { - sendNonBlockingErr(sub, fmt.Errorf(" %s super node CID Retrieval error at block %d\r%s", sap.chain.String(), i, err.Error())) + sendNonBlockingErr(sub, fmt.Errorf(" %s watcher CID Retrieval error at block %d\r%s", sap.chain.String(), i, err.Error())) continue } if empty { @@ -419,7 +420,7 @@ func (sap *Service) sendHistoricalData(sub Subscription, id rpc.ID, params share for _, cids := range cidWrappers { response, err := sap.IPLDFetcher.Fetch(cids) if err != nil { - sendNonBlockingErr(sub, fmt.Errorf("%s super node IPLD Fetching error at block %d\r%s", sap.chain.String(), i, err.Error())) + sendNonBlockingErr(sub, fmt.Errorf("%s watcher IPLD Fetching error at block %d\r%s", sap.chain.String(), i, err.Error())) continue } responseRLP, err := rlp.EncodeToBytes(response) @@ -429,7 +430,7 @@ func (sap *Service) sendHistoricalData(sub Subscription, id rpc.ID, params share } select { case sub.PayloadChan <- SubscriptionPayload{Data: responseRLP, Err: "", Flag: EmptyFlag, Height: response.Height()}: - log.Debugf("sending super node historical data payload to %s subscription %s", sap.chain.String(), id) + log.Debugf("sending watcher historical data payload to %s subscription %s", sap.chain.String(), id) default: log.Infof("unable to send backFill payload to %s subscription %s; channel has no receiver", sap.chain.String(), id) } @@ -448,7 +449,7 @@ func (sap *Service) sendHistoricalData(sub Subscription, id rpc.ID, params share // Unsubscribe is used by the API to remotely unsubscribe to the StateDiffingService loop func (sap *Service) Unsubscribe(id rpc.ID) { - log.Infof("Unsubscribing %s from the %s super node service", id, sap.chain.String()) + log.Infof("Unsubscribing %s from the %s watcher service", id, sap.chain.String()) sap.Lock() for ty := range sap.Subscriptions { delete(sap.Subscriptions[ty], id) @@ -464,7 +465,7 @@ func (sap *Service) Unsubscribe(id rpc.ID) { // 
Start is used to begin the service // This is mostly just to satisfy the node.Service interface func (sap *Service) Start(*p2p.Server) error { - log.Infof("Starting %s super node service", sap.chain.String()) + log.Infof("Starting %s watcher service", sap.chain.String()) wg := new(sync.WaitGroup) payloadChan := make(chan shared.ConvertedData, PayloadChanBufferSize) if err := sap.Sync(wg, payloadChan); err != nil { @@ -477,7 +478,7 @@ func (sap *Service) Start(*p2p.Server) error { // Stop is used to close down the service // This is mostly just to satisfy the node.Service interface func (sap *Service) Stop() error { - log.Infof("Stopping %s super node service", sap.chain.String()) + log.Infof("Stopping %s watcher service", sap.chain.String()) sap.Lock() close(sap.QuitChan) sap.close() @@ -486,7 +487,7 @@ func (sap *Service) Stop() error { } // Node returns the node info for this service -func (sap *Service) Node() *core.Node { +func (sap *Service) Node() *node.Node { return sap.NodeInfo } diff --git a/pkg/watch/service_test.go b/pkg/watch/service_test.go index f731cd51..b94b5a8a 100644 --- a/pkg/watch/service_test.go +++ b/pkg/watch/service_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -package watcher_test +package watch_test import ( "sync" @@ -54,7 +54,7 @@ var _ = Describe("Service", func() { ReturnIPLDPayload: mocks.MockConvertedPayload, ReturnErr: nil, } - processor := &watcher.Service{ + processor := &watch.Service{ Indexer: mockCidIndexer, Publisher: mockPublisher, Streamer: mockStreamer, diff --git a/pkg/watch/subscription.go b/pkg/watch/subscription.go index 029b6fd4..1b3474c7 100644 --- a/pkg/watch/subscription.go +++ b/pkg/watch/subscription.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
-package watcher +package watch import ( "errors" @@ -29,14 +29,14 @@ const ( BackFillCompleteFlag ) -// Subscription holds the information for an individual client subscription to the super node +// Subscription holds the information for an individual client subscription to the watcher type Subscription struct { ID rpc.ID PayloadChan chan<- SubscriptionPayload QuitChan chan<- bool } -// SubscriptionPayload is the struct for a super node stream payload +// SubscriptionPayload is the struct for a watcher data subscription payload // It carries data of a type specific to the chain being supported/queried and an error message type SubscriptionPayload struct { Data []byte `json:"data"` // e.g. for Ethereum rlp serialized eth.StreamPayload diff --git a/pkg/watch/super_node_suite_test.go b/pkg/watch/watch_suite_test.go similarity index 97% rename from pkg/watch/super_node_suite_test.go rename to pkg/watch/watch_suite_test.go index f09135b9..821ae69b 100644 --- a/pkg/watch/super_node_suite_test.go +++ b/pkg/watch/watch_suite_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
-package watcher_test +package watch_test import ( "io/ioutil" diff --git a/utils/utils.go b/utils/utils.go index 37c8e039..73e70708 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -22,12 +22,12 @@ import ( "github.com/sirupsen/logrus" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/config" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/core" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" ) -func LoadPostgres(database config.Database, node core.Node) postgres.DB { +func LoadPostgres(database config.Database, node node.Node) postgres.DB { db, err := postgres.NewDB(database, node) if err != nil { logrus.Fatal("Error loading postgres: ", err) @@ -45,8 +45,7 @@ func GetBlockHeightBins(startingBlock, endingBlock, batchSize uint64) ([][]uint6 } length := endingBlock - startingBlock + 1 numberOfBins := length / batchSize - remainder := length % batchSize - if remainder != 0 { + if length%batchSize != 0 { numberOfBins++ } blockRangeBins := make([][]uint64, numberOfBins) From 0ab55ef9d81e2e5b3302f4d158c7fadff13ec361 Mon Sep 17 00:00:00 2001 From: Ian Norden Date: Wed, 1 Jul 2020 13:44:04 -0500 Subject: [PATCH 3/6] distinguish between differential state/storage nodes and eventual ones --- cmd/root.go | 29 ++---------------------- db/migrations/00025_eth_add_diff_row.sql | 13 +++++++++++ db/schema.sql | 6 +++-- pkg/config/config_test.go | 6 ----- pkg/eth/indexer.go | 18 +++++++-------- pkg/eth/models.go | 3 +++ 6 files changed, 31 insertions(+), 44 deletions(-) create mode 100644 db/migrations/00025_eth_add_diff_row.sql diff --git a/cmd/root.go b/cmd/root.go index 36a09eb3..942ab5f8 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -20,42 +20,31 @@ import ( "fmt" "os" "strings" - "time" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" - - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/config" ) var ( 
cfgFile string - databaseConfig config.Database - ipc string subCommand string logWithCommand log.Entry ) -const ( - pollingInterval = 7 * time.Second - validationWindow = 15 -) - var rootCmd = &cobra.Command{ - Use: "vulcanizedb", + Use: "ipfs-blockchain-watcher", PersistentPreRun: initFuncs, } func Execute() { - log.Info("----- Starting vDB -----") + log.Info("----- Starting IPFS blockchain watcher -----") if err := rootCmd.Execute(); err != nil { log.Fatal(err) } } func initFuncs(cmd *cobra.Command, args []string) { - setViperConfigs() logfile := viper.GetString("logfile") if logfile != "" { file, err := os.OpenFile(logfile, @@ -75,18 +64,6 @@ func initFuncs(cmd *cobra.Command, args []string) { } } -func setViperConfigs() { - ipc = viper.GetString("client.ipcpath") - databaseConfig = config.Database{ - Name: viper.GetString("database.name"), - Hostname: viper.GetString("database.hostname"), - Port: viper.GetInt("database.port"), - User: viper.GetString("database.user"), - Password: viper.GetString("database.password"), - } - viper.Set("database.config", databaseConfig) -} - func logLevel() error { lvl, err := log.ParseLevel(viper.GetString("log.level")) if err != nil { @@ -102,7 +79,6 @@ func logLevel() error { func init() { cobra.OnInitialize(initConfig) - // When searching for env variables, replace dots in config keys with underscores viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) viper.AutomaticEnv() @@ -122,7 +98,6 @@ func init() { viper.BindPFlag("database.hostname", rootCmd.PersistentFlags().Lookup("database-hostname")) viper.BindPFlag("database.user", rootCmd.PersistentFlags().Lookup("database-user")) viper.BindPFlag("database.password", rootCmd.PersistentFlags().Lookup("database-password")) - viper.BindPFlag("client.ipcPath", rootCmd.PersistentFlags().Lookup("client-ipcPath")) viper.BindPFlag("log.level", rootCmd.PersistentFlags().Lookup("log-level")) } diff --git a/db/migrations/00025_eth_add_diff_row.sql 
b/db/migrations/00025_eth_add_diff_row.sql new file mode 100644 index 00000000..fa2c5640 --- /dev/null +++ b/db/migrations/00025_eth_add_diff_row.sql @@ -0,0 +1,13 @@ +-- +goose Up +ALTER TABLE eth.state_cids +ADD COLUMN diff BOOLEAN NOT NULL DEFAULT FALSE; + +ALTER TABLE eth.storage_cids +ADD COLUMN diff BOOLEAN NOT NULL DEFAULT FALSE; + +-- +goose Down +ALTER TABLE eth.state_cids +DROP COLUMN diff; + +ALTER TABLE eth.storage_cids +DROP COLUMN diff; \ No newline at end of file diff --git a/db/schema.sql b/db/schema.sql index a13e13a1..3520e20b 100644 --- a/db/schema.sql +++ b/db/schema.sql @@ -413,7 +413,8 @@ CREATE TABLE eth.state_cids ( state_leaf_key character varying(66), cid text NOT NULL, state_path bytea, - node_type integer + node_type integer, + diff boolean DEFAULT false NOT NULL ); @@ -447,7 +448,8 @@ CREATE TABLE eth.storage_cids ( storage_leaf_key character varying(66), cid text NOT NULL, storage_path bytea, - node_type integer + node_type integer, + diff boolean DEFAULT false NOT NULL ); diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index c9e368d0..9fccc554 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -29,17 +29,12 @@ var vulcanizeConfig = []byte(` name = "dbname" hostname = "localhost" port = 5432 - -[client] -ipcPath = "IPCPATH/geth.ipc" `) var _ = Describe("Loading the config", func() { - It("reads the private config using the environment", func() { viper.SetConfigName("config") viper.AddConfigPath("$GOPATH/src/github.com/vulcanize/ipfs-blockchain-watcher/environments/") - Expect(viper.Get("client.ipcpath")).To(BeNil()) testConfig := viper.New() testConfig.SetConfigType("toml") @@ -48,7 +43,6 @@ var _ = Describe("Loading the config", func() { Expect(testConfig.Get("database.hostname")).To(Equal("localhost")) Expect(testConfig.Get("database.name")).To(Equal("dbname")) Expect(testConfig.Get("database.port")).To(Equal(int64(5432))) - 
Expect(testConfig.Get("client.ipcpath")).To(Equal("IPCPATH/geth.ipc")) }) }) diff --git a/pkg/eth/indexer.go b/pkg/eth/indexer.go index 299a7573..48f57373 100644 --- a/pkg/eth/indexer.go +++ b/pkg/eth/indexer.go @@ -149,10 +149,10 @@ func (in *CIDIndexer) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *CIDPayload, if stateCID.StateKey != nullHash.String() { stateKey = stateCID.StateKey } - err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type) VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type) = ($2, $3, $5) + err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff) VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff) = ($2, $3, $5, $6) RETURNING id`, - headerID, stateKey, stateCID.CID, stateCID.Path, stateCID.NodeType).Scan(&stateID) + headerID, stateKey, stateCID.CID, stateCID.Path, stateCID.NodeType, true).Scan(&stateID) if err != nil { return err } @@ -180,10 +180,10 @@ func (in *CIDIndexer) indexStateCID(tx *sqlx.Tx, stateNode StateNodeModel, heade if stateNode.StateKey != nullHash.String() { stateKey = stateNode.StateKey } - err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type) VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type) = ($2, $3, $5) + err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff) VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff) = ($2, $3, $5, $6) RETURNING id`, - headerID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType).Scan(&stateID) + headerID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true).Scan(&stateID) return stateID, err } @@ -199,8 
+199,8 @@ func (in *CIDIndexer) indexStorageCID(tx *sqlx.Tx, storageCID StorageNodeModel, if storageCID.StorageKey != nullHash.String() { storageKey = storageCID.StorageKey } - _, err := tx.Exec(`INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type) VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type) = ($2, $3, $5)`, - stateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType) + _, err := tx.Exec(`INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff) VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff) = ($2, $3, $5, $6)`, + stateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, true) return err } diff --git a/pkg/eth/models.go b/pkg/eth/models.go index 5f73bfe1..e9b860ec 100644 --- a/pkg/eth/models.go +++ b/pkg/eth/models.go @@ -80,6 +80,7 @@ type StateNodeModel struct { StateKey string `db:"state_leaf_key"` NodeType int `db:"node_type"` CID string `db:"cid"` + Diff bool `db:"diff"` } // StorageNodeModel is the db model for eth.storage_cids @@ -90,6 +91,7 @@ type StorageNodeModel struct { StorageKey string `db:"storage_leaf_key"` NodeType int `db:"node_type"` CID string `db:"cid"` + Diff bool `db:"diff"` } // StorageNodeWithStateKeyModel is a db model for eth.storage_cids + eth.state_cids.state_key @@ -101,6 +103,7 @@ type StorageNodeWithStateKeyModel struct { StorageKey string `db:"storage_leaf_key"` NodeType int `db:"node_type"` CID string `db:"cid"` + Diff bool `db:"diff"` } // StateAccountModel is a db model for an eth state account (decoded value of state leaf node) From 18299c76e22b1a700a689f7e66e0b789931d1bb1 Mon Sep 17 00:00:00 2001 From: Ian Norden Date: Mon, 13 Jul 2020 10:39:51 -0500 Subject: [PATCH 4/6] refactor migrations for new db instance, add mh_key rows --- ...sql => 00001_create_ipfs_blocks_table.sql} | 
0 ...table.sql => 00002_create_nodes_table.sql} | 0 ...schema.sql => 00003_create_eth_schema.sql} | 0 ...=> 00004_create_eth_header_cids_table.sql} | 8 + ... => 00005_create_eth_uncle_cids_table.sql} | 1 + ...006_create_eth_transaction_cids_table.sql} | 1 + ...> 00007_create_eth_receipt_cids_table.sql} | 6 +- .../00007_create_eth_state_cids_table.sql | 12 - .../00008_create_eth_state_cids_table.sql | 15 ++ .../00008_create_eth_storage_cids_table.sql | 12 - .../00009_create_eth_storage_cids_table.sql | 15 ++ ... 00010_create_eth_state_accouts_table.sql} | 0 .../00011_create_btc_header_cids_table.sql | 15 -- ...schema.sql => 00011_create_btc_schema.sql} | 0 .../00012_create_btc_header_cids_table.sql | 17 ++ ...013_create_btc_transaction_cids_table.sql} | 1 + ... => 00014_create_btc_tx_outputs_table.sql} | 0 ...l => 00015_create_btc_tx_inputs_table.sql} | 0 .../00015_create_eth_queued_data_table.sql | 9 - .../00016_create_btc_queued_data_table.sql | 9 - ...=> 00016_create_postgraphile_comments.sql} | 2 - db/migrations/00018_update_state_cids.sql | 37 --- db/migrations/00019_update_storage_cids.sql | 37 --- db/migrations/00020_update_header_cids.sql | 37 --- db/migrations/00022_rename_to_leaf_key.sql | 13 -- db/migrations/00023_update_receipt_cids.sql | 22 -- db/migrations/00024_add_times_validated.sql | 13 -- db/migrations/00025_eth_add_diff_row.sql | 13 -- db/schema.sql | 212 +++++++----------- 29 files changed, 144 insertions(+), 363 deletions(-) rename db/migrations/{00009_create_ipfs_blocks_table.sql => 00001_create_ipfs_blocks_table.sql} (100%) rename db/migrations/{00001_create_nodes_table.sql => 00002_create_nodes_table.sql} (100%) rename db/migrations/{00002_create_eth_schema.sql => 00003_create_eth_schema.sql} (100%) rename db/migrations/{00003_create_eth_header_cids_table.sql => 00004_create_eth_header_cids_table.sql} (53%) rename db/migrations/{00004_create_eth_uncle_cids_table.sql => 00005_create_eth_uncle_cids_table.sql} (79%) rename 
db/migrations/{00005_create_eth_transaction_cids_table.sql => 00006_create_eth_transaction_cids_table.sql} (81%) rename db/migrations/{00006_create_eth_receipt_cids_table.sql => 00007_create_eth_receipt_cids_table.sql} (64%) delete mode 100644 db/migrations/00007_create_eth_state_cids_table.sql create mode 100644 db/migrations/00008_create_eth_state_cids_table.sql delete mode 100644 db/migrations/00008_create_eth_storage_cids_table.sql create mode 100644 db/migrations/00009_create_eth_storage_cids_table.sql rename db/migrations/{00021_create_eth_state_accouts_table.sql => 00010_create_eth_state_accouts_table.sql} (100%) delete mode 100644 db/migrations/00011_create_btc_header_cids_table.sql rename db/migrations/{00010_create_btc_schema.sql => 00011_create_btc_schema.sql} (100%) create mode 100644 db/migrations/00012_create_btc_header_cids_table.sql rename db/migrations/{00012_create_btc_transaction_cids_table.sql => 00013_create_btc_transaction_cids_table.sql} (78%) rename db/migrations/{00013_create_btc_tx_outputs_table.sql => 00014_create_btc_tx_outputs_table.sql} (100%) rename db/migrations/{00014_create_btc_tx_inputs_table.sql => 00015_create_btc_tx_inputs_table.sql} (100%) delete mode 100644 db/migrations/00015_create_eth_queued_data_table.sql delete mode 100644 db/migrations/00016_create_btc_queued_data_table.sql rename db/migrations/{00017_create_postgraphile_comments.sql => 00016_create_postgraphile_comments.sql} (81%) delete mode 100644 db/migrations/00018_update_state_cids.sql delete mode 100644 db/migrations/00019_update_storage_cids.sql delete mode 100644 db/migrations/00020_update_header_cids.sql delete mode 100644 db/migrations/00022_rename_to_leaf_key.sql delete mode 100644 db/migrations/00023_update_receipt_cids.sql delete mode 100644 db/migrations/00024_add_times_validated.sql delete mode 100644 db/migrations/00025_eth_add_diff_row.sql diff --git a/db/migrations/00009_create_ipfs_blocks_table.sql b/db/migrations/00001_create_ipfs_blocks_table.sql 
similarity index 100% rename from db/migrations/00009_create_ipfs_blocks_table.sql rename to db/migrations/00001_create_ipfs_blocks_table.sql diff --git a/db/migrations/00001_create_nodes_table.sql b/db/migrations/00002_create_nodes_table.sql similarity index 100% rename from db/migrations/00001_create_nodes_table.sql rename to db/migrations/00002_create_nodes_table.sql diff --git a/db/migrations/00002_create_eth_schema.sql b/db/migrations/00003_create_eth_schema.sql similarity index 100% rename from db/migrations/00002_create_eth_schema.sql rename to db/migrations/00003_create_eth_schema.sql diff --git a/db/migrations/00003_create_eth_header_cids_table.sql b/db/migrations/00004_create_eth_header_cids_table.sql similarity index 53% rename from db/migrations/00003_create_eth_header_cids_table.sql rename to db/migrations/00004_create_eth_header_cids_table.sql index c689ec40..339eb427 100644 --- a/db/migrations/00003_create_eth_header_cids_table.sql +++ b/db/migrations/00004_create_eth_header_cids_table.sql @@ -5,9 +5,17 @@ CREATE TABLE eth.header_cids ( block_hash VARCHAR(66) NOT NULL, parent_hash VARCHAR(66) NOT NULL, cid TEXT NOT NULL, + mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, td NUMERIC NOT NULL, node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE, reward NUMERIC NOT NULL, + state_root VARCHAR(66) NOT NULL, + tx_root VARCHAR(66) NOT NULL, + receipt_root VARCHAR(66) NOT NULL, + uncle_root VARCHAR(66) NOT NULL, + bloom BYTEA NOT NULL, + timestamp NUMERIC NOT NULL, + times_validated INTEGER NOT NULL DEFAULT 1, UNIQUE (block_number, block_hash) ); diff --git a/db/migrations/00004_create_eth_uncle_cids_table.sql b/db/migrations/00005_create_eth_uncle_cids_table.sql similarity index 79% rename from db/migrations/00004_create_eth_uncle_cids_table.sql rename to db/migrations/00005_create_eth_uncle_cids_table.sql index 8e372e4c..c46cafb9 100644 --- 
a/db/migrations/00004_create_eth_uncle_cids_table.sql +++ b/db/migrations/00005_create_eth_uncle_cids_table.sql @@ -5,6 +5,7 @@ CREATE TABLE eth.uncle_cids ( block_hash VARCHAR(66) NOT NULL, parent_hash VARCHAR(66) NOT NULL, cid TEXT NOT NULL, + mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, reward NUMERIC NOT NULL, UNIQUE (header_id, block_hash) ); diff --git a/db/migrations/00005_create_eth_transaction_cids_table.sql b/db/migrations/00006_create_eth_transaction_cids_table.sql similarity index 81% rename from db/migrations/00005_create_eth_transaction_cids_table.sql rename to db/migrations/00006_create_eth_transaction_cids_table.sql index e3750ced..cbbef782 100644 --- a/db/migrations/00005_create_eth_transaction_cids_table.sql +++ b/db/migrations/00006_create_eth_transaction_cids_table.sql @@ -5,6 +5,7 @@ CREATE TABLE eth.transaction_cids ( tx_hash VARCHAR(66) NOT NULL, index INTEGER NOT NULL, cid TEXT NOT NULL, + mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, dst VARCHAR(66) NOT NULL, src VARCHAR(66) NOT NULL, UNIQUE (header_id, tx_hash) diff --git a/db/migrations/00006_create_eth_receipt_cids_table.sql b/db/migrations/00007_create_eth_receipt_cids_table.sql similarity index 64% rename from db/migrations/00006_create_eth_receipt_cids_table.sql rename to db/migrations/00007_create_eth_receipt_cids_table.sql index a8f77de0..5d4ae0cc 100644 --- a/db/migrations/00006_create_eth_receipt_cids_table.sql +++ b/db/migrations/00007_create_eth_receipt_cids_table.sql @@ -3,11 +3,15 @@ CREATE TABLE eth.receipt_cids ( id SERIAL PRIMARY KEY, tx_id INTEGER NOT NULL REFERENCES eth.transaction_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, cid TEXT NOT NULL, + mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, contract VARCHAR(66), + contract_hash VARCHAR(66), topic0s VARCHAR(66)[], topic1s VARCHAR(66)[], topic2s 
VARCHAR(66)[], - topic3s VARCHAR(66)[] + topic3s VARCHAR(66)[], + log_contracts VARCHAR(66)[], + UNIQUE (tx_id) ); -- +goose Down diff --git a/db/migrations/00007_create_eth_state_cids_table.sql b/db/migrations/00007_create_eth_state_cids_table.sql deleted file mode 100644 index 6f0170a5..00000000 --- a/db/migrations/00007_create_eth_state_cids_table.sql +++ /dev/null @@ -1,12 +0,0 @@ --- +goose Up -CREATE TABLE eth.state_cids ( - id SERIAL PRIMARY KEY, - header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, - state_key VARCHAR(66) NOT NULL, - leaf BOOLEAN NOT NULL, - cid TEXT NOT NULL, - UNIQUE (header_id, state_key) -); - --- +goose Down -DROP TABLE eth.state_cids; \ No newline at end of file diff --git a/db/migrations/00008_create_eth_state_cids_table.sql b/db/migrations/00008_create_eth_state_cids_table.sql new file mode 100644 index 00000000..e0bf6e57 --- /dev/null +++ b/db/migrations/00008_create_eth_state_cids_table.sql @@ -0,0 +1,15 @@ +-- +goose Up +CREATE TABLE eth.state_cids ( + id SERIAL PRIMARY KEY, + header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, + state_leaf_key VARCHAR(66), + cid TEXT NOT NULL, + mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, + state_path BYTEA, + node_type INTEGER, + diff BOOLEAN NOT NULL DEFAULT FALSE, + UNIQUE (header_id, state_path) +); + +-- +goose Down +DROP TABLE eth.state_cids; \ No newline at end of file diff --git a/db/migrations/00008_create_eth_storage_cids_table.sql b/db/migrations/00008_create_eth_storage_cids_table.sql deleted file mode 100644 index 070eda45..00000000 --- a/db/migrations/00008_create_eth_storage_cids_table.sql +++ /dev/null @@ -1,12 +0,0 @@ --- +goose Up -CREATE TABLE eth.storage_cids ( - id SERIAL PRIMARY KEY, - state_id INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, - storage_key 
VARCHAR(66) NOT NULL, - leaf BOOLEAN NOT NULL, - cid TEXT NOT NULL, - UNIQUE (state_id, storage_key) -); - --- +goose Down -DROP TABLE eth.storage_cids; \ No newline at end of file diff --git a/db/migrations/00009_create_eth_storage_cids_table.sql b/db/migrations/00009_create_eth_storage_cids_table.sql new file mode 100644 index 00000000..944d39ed --- /dev/null +++ b/db/migrations/00009_create_eth_storage_cids_table.sql @@ -0,0 +1,15 @@ +-- +goose Up +CREATE TABLE eth.storage_cids ( + id SERIAL PRIMARY KEY, + state_id INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, + storage_leaf_key VARCHAR(66), + cid TEXT NOT NULL, + mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, + storage_path BYTEA, + node_type INTEGER NOT NULL, + diff BOOLEAN NOT NULL DEFAULT FALSE, + UNIQUE (state_id, storage_path) +); + +-- +goose Down +DROP TABLE eth.storage_cids; \ No newline at end of file diff --git a/db/migrations/00021_create_eth_state_accouts_table.sql b/db/migrations/00010_create_eth_state_accouts_table.sql similarity index 100% rename from db/migrations/00021_create_eth_state_accouts_table.sql rename to db/migrations/00010_create_eth_state_accouts_table.sql diff --git a/db/migrations/00011_create_btc_header_cids_table.sql b/db/migrations/00011_create_btc_header_cids_table.sql deleted file mode 100644 index 04a6f65e..00000000 --- a/db/migrations/00011_create_btc_header_cids_table.sql +++ /dev/null @@ -1,15 +0,0 @@ --- +goose Up -CREATE TABLE btc.header_cids ( - id SERIAL PRIMARY KEY, - block_number BIGINT NOT NULL, - block_hash VARCHAR(66) NOT NULL, - parent_hash VARCHAR(66) NOT NULL, - cid TEXT NOT NULL, - timestamp NUMERIC NOT NULL, - bits BIGINT NOT NULL, - node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE, - UNIQUE (block_number, block_hash) -); - --- +goose Down -DROP TABLE btc.header_cids; \ No newline at end of file diff --git 
a/db/migrations/00010_create_btc_schema.sql b/db/migrations/00011_create_btc_schema.sql similarity index 100% rename from db/migrations/00010_create_btc_schema.sql rename to db/migrations/00011_create_btc_schema.sql diff --git a/db/migrations/00012_create_btc_header_cids_table.sql b/db/migrations/00012_create_btc_header_cids_table.sql new file mode 100644 index 00000000..fcdb075d --- /dev/null +++ b/db/migrations/00012_create_btc_header_cids_table.sql @@ -0,0 +1,17 @@ +-- +goose Up +CREATE TABLE btc.header_cids ( + id SERIAL PRIMARY KEY, + block_number BIGINT NOT NULL, + block_hash VARCHAR(66) NOT NULL, + parent_hash VARCHAR(66) NOT NULL, + cid TEXT NOT NULL, + mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, + timestamp NUMERIC NOT NULL, + bits BIGINT NOT NULL, + node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE, + times_validated INTEGER NOT NULL DEFAULT 1, + UNIQUE (block_number, block_hash) +); + +-- +goose Down +DROP TABLE btc.header_cids; \ No newline at end of file diff --git a/db/migrations/00012_create_btc_transaction_cids_table.sql b/db/migrations/00013_create_btc_transaction_cids_table.sql similarity index 78% rename from db/migrations/00012_create_btc_transaction_cids_table.sql rename to db/migrations/00013_create_btc_transaction_cids_table.sql index 2648c6ef..aabf8af9 100644 --- a/db/migrations/00012_create_btc_transaction_cids_table.sql +++ b/db/migrations/00013_create_btc_transaction_cids_table.sql @@ -5,6 +5,7 @@ CREATE TABLE btc.transaction_cids ( index INTEGER NOT NULL, tx_hash VARCHAR(66) NOT NULL UNIQUE, cid TEXT NOT NULL, + mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, segwit BOOL NOT NULL, witness_hash VARCHAR(66) ); diff --git a/db/migrations/00013_create_btc_tx_outputs_table.sql b/db/migrations/00014_create_btc_tx_outputs_table.sql similarity index 100% rename from db/migrations/00013_create_btc_tx_outputs_table.sql 
rename to db/migrations/00014_create_btc_tx_outputs_table.sql diff --git a/db/migrations/00014_create_btc_tx_inputs_table.sql b/db/migrations/00015_create_btc_tx_inputs_table.sql similarity index 100% rename from db/migrations/00014_create_btc_tx_inputs_table.sql rename to db/migrations/00015_create_btc_tx_inputs_table.sql diff --git a/db/migrations/00015_create_eth_queued_data_table.sql b/db/migrations/00015_create_eth_queued_data_table.sql deleted file mode 100644 index 87f58ad8..00000000 --- a/db/migrations/00015_create_eth_queued_data_table.sql +++ /dev/null @@ -1,9 +0,0 @@ --- +goose Up -CREATE TABLE eth.queue_data ( - id SERIAL PRIMARY KEY, - data BYTEA NOT NULL, - height BIGINT UNIQUE NOT NULL -); - --- +goose Down -DROP TABLE eth.queue_data; \ No newline at end of file diff --git a/db/migrations/00016_create_btc_queued_data_table.sql b/db/migrations/00016_create_btc_queued_data_table.sql deleted file mode 100644 index c1344f86..00000000 --- a/db/migrations/00016_create_btc_queued_data_table.sql +++ /dev/null @@ -1,9 +0,0 @@ --- +goose Up -CREATE TABLE btc.queue_data ( - id SERIAL PRIMARY KEY, - data BYTEA NOT NULL, - height BIGINT UNIQUE NOT NULL -); - --- +goose Down -DROP TABLE btc.queue_data; \ No newline at end of file diff --git a/db/migrations/00017_create_postgraphile_comments.sql b/db/migrations/00016_create_postgraphile_comments.sql similarity index 81% rename from db/migrations/00017_create_postgraphile_comments.sql rename to db/migrations/00016_create_postgraphile_comments.sql index 6d7668ba..c426efd1 100644 --- a/db/migrations/00017_create_postgraphile_comments.sql +++ b/db/migrations/00016_create_postgraphile_comments.sql @@ -2,10 +2,8 @@ COMMENT ON TABLE public.nodes IS E'@name NodeInfo'; COMMENT ON TABLE btc.header_cids IS E'@name BtcHeaderCids'; COMMENT ON TABLE btc.transaction_cids IS E'@name BtcTransactionCids'; -COMMENT ON TABLE btc.queue_data IS E'@name BtcQueueData'; COMMENT ON TABLE eth.transaction_cids IS E'@name EthTransactionCids'; 
COMMENT ON TABLE eth.header_cids IS E'@name EthHeaderCids'; -COMMENT ON TABLE eth.queue_data IS E'@name EthQueueData'; COMMENT ON COLUMN public.nodes.node_id IS E'@name ChainNodeID'; COMMENT ON COLUMN eth.header_cids.node_id IS E'@name EthNodeID'; COMMENT ON COLUMN btc.header_cids.node_id IS E'@name BtcNodeID'; \ No newline at end of file diff --git a/db/migrations/00018_update_state_cids.sql b/db/migrations/00018_update_state_cids.sql deleted file mode 100644 index daea294b..00000000 --- a/db/migrations/00018_update_state_cids.sql +++ /dev/null @@ -1,37 +0,0 @@ --- +goose Up -ALTER TABLE eth.state_cids -ADD COLUMN state_path BYTEA; - -ALTER TABLE eth.state_cids -DROP COLUMN leaf; - -ALTER TABLE eth.state_cids -ADD COLUMN node_type INTEGER; - -ALTER TABLE eth.state_cids -ALTER COLUMN state_key DROP NOT NULL; - -ALTER TABLE eth.state_cids -DROP CONSTRAINT state_cids_header_id_state_key_key; - -ALTER TABLE eth.state_cids -ADD CONSTRAINT state_cids_header_id_state_path_key UNIQUE (header_id, state_path); - --- +goose Down -ALTER TABLE eth.state_cids -ADD CONSTRAINT state_cids_header_id_state_key_key UNIQUE (header_id, state_key); - -ALTER TABLE eth.state_cids -DROP CONSTRAINT state_cids_header_id_state_path_key; - -ALTER TABLE eth.state_cids -ALTER COLUMN state_key SET NOT NULL; - -ALTER TABLE eth.state_cids -DROP COLUMN node_type; - -ALTER TABLE eth.state_cids -ADD COLUMN leaf BOOLEAN NOT NULL; - -ALTER TABLE eth.state_cids -DROP COLUMN state_path; \ No newline at end of file diff --git a/db/migrations/00019_update_storage_cids.sql b/db/migrations/00019_update_storage_cids.sql deleted file mode 100644 index 385f2468..00000000 --- a/db/migrations/00019_update_storage_cids.sql +++ /dev/null @@ -1,37 +0,0 @@ --- +goose Up -ALTER TABLE eth.storage_cids -ADD COLUMN storage_path BYTEA; - -ALTER TABLE eth.storage_cids -DROP COLUMN leaf; - -ALTER TABLE eth.storage_cids -ADD COLUMN node_type INTEGER; - -ALTER TABLE eth.storage_cids -ALTER COLUMN storage_key DROP NOT NULL; - 
-ALTER TABLE eth.storage_cids -DROP CONSTRAINT storage_cids_state_id_storage_key_key; - -ALTER TABLE eth.storage_cids -ADD CONSTRAINT storage_cids_state_id_storage_path_key UNIQUE (state_id, storage_path); - --- +goose Down -ALTER TABLE eth.storage_cids -DROP CONSTRAINT storage_cids_state_id_storage_path_key; - -ALTER TABLE eth.storage_cids -ADD CONSTRAINT storage_cids_state_id_storage_key_key UNIQUE (state_id, storage_key); - -ALTER TABLE eth.storage_cids -ALTER COLUMN storage_key SET NOT NULL; - -ALTER TABLE eth.storage_cids -DROP COLUMN node_type; - -ALTER TABLE eth.storage_cids -ADD COLUMN leaf BOOLEAN NOT NULL; - -ALTER TABLE eth.storage_cids -DROP COLUMN storage_path; \ No newline at end of file diff --git a/db/migrations/00020_update_header_cids.sql b/db/migrations/00020_update_header_cids.sql deleted file mode 100644 index 1c69c11c..00000000 --- a/db/migrations/00020_update_header_cids.sql +++ /dev/null @@ -1,37 +0,0 @@ --- +goose Up -ALTER TABLE eth.header_cids -ADD COLUMN state_root VARCHAR(66); - -ALTER TABLE eth.header_cids -ADD COLUMN tx_root VARCHAR(66); - -ALTER TABLE eth.header_cids -ADD COLUMN receipt_root VARCHAR(66); - -ALTER TABLE eth.header_cids -ADD COLUMN uncle_root VARCHAR(66); - -ALTER TABLE eth.header_cids -ADD COLUMN bloom BYTEA; - -ALTER TABLE eth.header_cids -ADD COLUMN timestamp NUMERIC; - --- +goose Down -ALTER TABLE eth.header_cids -DROP COLUMN timestamp; - -ALTER TABLE eth.header_cids -DROP COLUMN bloom; - -ALTER TABLE eth.header_cids -DROP COLUMN uncle_root; - -ALTER TABLE eth.header_cids -DROP COLUMN receipt_root; - -ALTER TABLE eth.header_cids -DROP COLUMN tx_root; - -ALTER TABLE eth.header_cids -DROP COLUMN state_root; \ No newline at end of file diff --git a/db/migrations/00022_rename_to_leaf_key.sql b/db/migrations/00022_rename_to_leaf_key.sql deleted file mode 100644 index 0bcf28bf..00000000 --- a/db/migrations/00022_rename_to_leaf_key.sql +++ /dev/null @@ -1,13 +0,0 @@ --- +goose Up -ALTER TABLE eth.state_cids -RENAME COLUMN 
state_key TO state_leaf_key; - -ALTER TABLE eth.storage_cids -RENAME COLUMN storage_key TO storage_leaf_key; - --- +goose Down -ALTER TABLE eth.storage_cids -RENAME COLUMN storage_leaf_key TO storage_key; - -ALTER TABLE eth.state_cids -RENAME COLUMN state_leaf_key TO state_key; \ No newline at end of file diff --git a/db/migrations/00023_update_receipt_cids.sql b/db/migrations/00023_update_receipt_cids.sql deleted file mode 100644 index 15ec931e..00000000 --- a/db/migrations/00023_update_receipt_cids.sql +++ /dev/null @@ -1,22 +0,0 @@ --- +goose Up -ALTER TABLE eth.receipt_cids -ADD COLUMN log_contracts VARCHAR(66)[]; - -ALTER TABLE eth.receipt_cids -ADD COLUMN contract_hash VARCHAR(66); - -WITH uniques AS (SELECT DISTINCT ON (tx_id) * FROM eth.receipt_cids) -DELETE FROM eth.receipt_cids WHERE receipt_cids.id NOT IN (SELECT id FROM uniques); - -ALTER TABLE eth.receipt_cids -ADD CONSTRAINT receipt_cids_tx_id_key UNIQUE (tx_id); - --- +goose Down -ALTER TABLE eth.receipt_cids -DROP CONSTRAINT receipt_cids_tx_id_key; - -ALTER TABLE eth.receipt_cids -DROP COLUMN contract_hash; - -ALTER TABLE eth.receipt_cids -DROP COLUMN log_contracts; \ No newline at end of file diff --git a/db/migrations/00024_add_times_validated.sql b/db/migrations/00024_add_times_validated.sql deleted file mode 100644 index eb8cbd27..00000000 --- a/db/migrations/00024_add_times_validated.sql +++ /dev/null @@ -1,13 +0,0 @@ --- +goose Up -ALTER TABLE eth.header_cids -ADD COLUMN times_validated INTEGER NOT NULL DEFAULT 1; - -ALTER TABLE btc.header_cids -ADD COLUMN times_validated INTEGER NOT NULL DEFAULT 1; - --- +goose Down -ALTER TABLE btc.header_cids -DROP COLUMN times_validated; - -ALTER TABLE eth.header_cids -DROP COLUMN times_validated; \ No newline at end of file diff --git a/db/migrations/00025_eth_add_diff_row.sql b/db/migrations/00025_eth_add_diff_row.sql deleted file mode 100644 index fa2c5640..00000000 --- a/db/migrations/00025_eth_add_diff_row.sql +++ /dev/null @@ -1,13 +0,0 @@ --- +goose 
Up -ALTER TABLE eth.state_cids -ADD COLUMN diff BOOLEAN NOT NULL DEFAULT FALSE; - -ALTER TABLE eth.storage_cids -ADD COLUMN diff BOOLEAN NOT NULL DEFAULT FALSE; - --- +goose Down -ALTER TABLE eth.state_cids -DROP COLUMN diff; - -ALTER TABLE eth.storage_cids -DROP COLUMN diff; \ No newline at end of file diff --git a/db/schema.sql b/db/schema.sql index 3520e20b..caa1bb67 100644 --- a/db/schema.sql +++ b/db/schema.sql @@ -44,6 +44,7 @@ CREATE TABLE btc.header_cids ( block_hash character varying(66) NOT NULL, parent_hash character varying(66) NOT NULL, cid text NOT NULL, + mh_key text NOT NULL, "timestamp" numeric NOT NULL, bits bigint NOT NULL, node_id integer NOT NULL, @@ -85,44 +86,6 @@ CREATE SEQUENCE btc.header_cids_id_seq ALTER SEQUENCE btc.header_cids_id_seq OWNED BY btc.header_cids.id; --- --- Name: queue_data; Type: TABLE; Schema: btc; Owner: - --- - -CREATE TABLE btc.queue_data ( - id integer NOT NULL, - data bytea NOT NULL, - height bigint NOT NULL -); - - --- --- Name: TABLE queue_data; Type: COMMENT; Schema: btc; Owner: - --- - -COMMENT ON TABLE btc.queue_data IS '@name BtcQueueData'; - - --- --- Name: queue_data_id_seq; Type: SEQUENCE; Schema: btc; Owner: - --- - -CREATE SEQUENCE btc.queue_data_id_seq - AS integer - START WITH 1 - INCREMENT BY 1 - NO MINVALUE - NO MAXVALUE - CACHE 1; - - --- --- Name: queue_data_id_seq; Type: SEQUENCE OWNED BY; Schema: btc; Owner: - --- - -ALTER SEQUENCE btc.queue_data_id_seq OWNED BY btc.queue_data.id; - - -- -- Name: transaction_cids; Type: TABLE; Schema: btc; Owner: - -- @@ -133,6 +96,7 @@ CREATE TABLE btc.transaction_cids ( index integer NOT NULL, tx_hash character varying(66) NOT NULL, cid text NOT NULL, + mh_key text NOT NULL, segwit boolean NOT NULL, witness_hash character varying(66) ); @@ -246,15 +210,16 @@ CREATE TABLE eth.header_cids ( block_hash character varying(66) NOT NULL, parent_hash character varying(66) NOT NULL, cid text NOT NULL, + mh_key text NOT NULL, td numeric NOT NULL, node_id integer NOT NULL, 
reward numeric NOT NULL, - state_root character varying(66), - tx_root character varying(66), - receipt_root character varying(66), - uncle_root character varying(66), - bloom bytea, - "timestamp" numeric, + state_root character varying(66) NOT NULL, + tx_root character varying(66) NOT NULL, + receipt_root character varying(66) NOT NULL, + uncle_root character varying(66) NOT NULL, + bloom bytea NOT NULL, + "timestamp" numeric NOT NULL, times_validated integer DEFAULT 1 NOT NULL ); @@ -293,44 +258,6 @@ CREATE SEQUENCE eth.header_cids_id_seq ALTER SEQUENCE eth.header_cids_id_seq OWNED BY eth.header_cids.id; --- --- Name: queue_data; Type: TABLE; Schema: eth; Owner: - --- - -CREATE TABLE eth.queue_data ( - id integer NOT NULL, - data bytea NOT NULL, - height bigint NOT NULL -); - - --- --- Name: TABLE queue_data; Type: COMMENT; Schema: eth; Owner: - --- - -COMMENT ON TABLE eth.queue_data IS '@name EthQueueData'; - - --- --- Name: queue_data_id_seq; Type: SEQUENCE; Schema: eth; Owner: - --- - -CREATE SEQUENCE eth.queue_data_id_seq - AS integer - START WITH 1 - INCREMENT BY 1 - NO MINVALUE - NO MAXVALUE - CACHE 1; - - --- --- Name: queue_data_id_seq; Type: SEQUENCE OWNED BY; Schema: eth; Owner: - --- - -ALTER SEQUENCE eth.queue_data_id_seq OWNED BY eth.queue_data.id; - - -- -- Name: receipt_cids; Type: TABLE; Schema: eth; Owner: - -- @@ -339,13 +266,14 @@ CREATE TABLE eth.receipt_cids ( id integer NOT NULL, tx_id integer NOT NULL, cid text NOT NULL, + mh_key text NOT NULL, contract character varying(66), + contract_hash character varying(66), topic0s character varying(66)[], topic1s character varying(66)[], topic2s character varying(66)[], topic3s character varying(66)[], - log_contracts character varying(66)[], - contract_hash character varying(66) + log_contracts character varying(66)[] ); @@ -412,6 +340,7 @@ CREATE TABLE eth.state_cids ( header_id integer NOT NULL, state_leaf_key character varying(66), cid text NOT NULL, + mh_key text NOT NULL, state_path bytea, 
node_type integer, diff boolean DEFAULT false NOT NULL @@ -447,8 +376,9 @@ CREATE TABLE eth.storage_cids ( state_id integer NOT NULL, storage_leaf_key character varying(66), cid text NOT NULL, + mh_key text NOT NULL, storage_path bytea, - node_type integer, + node_type integer NOT NULL, diff boolean DEFAULT false NOT NULL ); @@ -483,6 +413,7 @@ CREATE TABLE eth.transaction_cids ( tx_hash character varying(66) NOT NULL, index integer NOT NULL, cid text NOT NULL, + mh_key text NOT NULL, dst character varying(66) NOT NULL, src character varying(66) NOT NULL ); @@ -525,6 +456,7 @@ CREATE TABLE eth.uncle_cids ( block_hash character varying(66) NOT NULL, parent_hash character varying(66) NOT NULL, cid text NOT NULL, + mh_key text NOT NULL, reward numeric NOT NULL ); @@ -645,13 +577,6 @@ ALTER SEQUENCE public.nodes_id_seq OWNED BY public.nodes.id; ALTER TABLE ONLY btc.header_cids ALTER COLUMN id SET DEFAULT nextval('btc.header_cids_id_seq'::regclass); --- --- Name: queue_data id; Type: DEFAULT; Schema: btc; Owner: - --- - -ALTER TABLE ONLY btc.queue_data ALTER COLUMN id SET DEFAULT nextval('btc.queue_data_id_seq'::regclass); - - -- -- Name: transaction_cids id; Type: DEFAULT; Schema: btc; Owner: - -- @@ -680,13 +605,6 @@ ALTER TABLE ONLY btc.tx_outputs ALTER COLUMN id SET DEFAULT nextval('btc.tx_outp ALTER TABLE ONLY eth.header_cids ALTER COLUMN id SET DEFAULT nextval('eth.header_cids_id_seq'::regclass); --- --- Name: queue_data id; Type: DEFAULT; Schema: eth; Owner: - --- - -ALTER TABLE ONLY eth.queue_data ALTER COLUMN id SET DEFAULT nextval('eth.queue_data_id_seq'::regclass); - - -- -- Name: receipt_cids id; Type: DEFAULT; Schema: eth; Owner: - -- @@ -759,22 +677,6 @@ ALTER TABLE ONLY btc.header_cids ADD CONSTRAINT header_cids_pkey PRIMARY KEY (id); --- --- Name: queue_data queue_data_height_key; Type: CONSTRAINT; Schema: btc; Owner: - --- - -ALTER TABLE ONLY btc.queue_data - ADD CONSTRAINT queue_data_height_key UNIQUE (height); - - --- --- Name: queue_data 
queue_data_pkey; Type: CONSTRAINT; Schema: btc; Owner: - --- - -ALTER TABLE ONLY btc.queue_data - ADD CONSTRAINT queue_data_pkey PRIMARY KEY (id); - - -- -- Name: transaction_cids transaction_cids_pkey; Type: CONSTRAINT; Schema: btc; Owner: - -- @@ -839,22 +741,6 @@ ALTER TABLE ONLY eth.header_cids ADD CONSTRAINT header_cids_pkey PRIMARY KEY (id); --- --- Name: queue_data queue_data_height_key; Type: CONSTRAINT; Schema: eth; Owner: - --- - -ALTER TABLE ONLY eth.queue_data - ADD CONSTRAINT queue_data_height_key UNIQUE (height); - - --- --- Name: queue_data queue_data_pkey; Type: CONSTRAINT; Schema: eth; Owner: - --- - -ALTER TABLE ONLY eth.queue_data - ADD CONSTRAINT queue_data_pkey PRIMARY KEY (id); - - -- -- Name: receipt_cids receipt_cids_pkey; Type: CONSTRAINT; Schema: eth; Owner: - -- @@ -983,6 +869,14 @@ ALTER TABLE ONLY public.nodes ADD CONSTRAINT nodes_pkey PRIMARY KEY (id); +-- +-- Name: header_cids header_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: btc; Owner: - +-- + +ALTER TABLE ONLY btc.header_cids + ADD CONSTRAINT header_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED; + + -- -- Name: header_cids header_cids_node_id_fkey; Type: FK CONSTRAINT; Schema: btc; Owner: - -- @@ -999,6 +893,14 @@ ALTER TABLE ONLY btc.transaction_cids ADD CONSTRAINT transaction_cids_header_id_fkey FOREIGN KEY (header_id) REFERENCES btc.header_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED; +-- +-- Name: transaction_cids transaction_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: btc; Owner: - +-- + +ALTER TABLE ONLY btc.transaction_cids + ADD CONSTRAINT transaction_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED; + + -- -- Name: tx_inputs tx_inputs_tx_id_fkey; Type: FK CONSTRAINT; Schema: btc; Owner: - -- @@ -1015,6 +917,14 @@ ALTER TABLE ONLY btc.tx_outputs ADD CONSTRAINT tx_outputs_tx_id_fkey FOREIGN KEY (tx_id) REFERENCES 
btc.transaction_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED; +-- +-- Name: header_cids header_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: - +-- + +ALTER TABLE ONLY eth.header_cids + ADD CONSTRAINT header_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED; + + -- -- Name: header_cids header_cids_node_id_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: - -- @@ -1023,6 +933,14 @@ ALTER TABLE ONLY eth.header_cids ADD CONSTRAINT header_cids_node_id_fkey FOREIGN KEY (node_id) REFERENCES public.nodes(id) ON DELETE CASCADE; +-- +-- Name: receipt_cids receipt_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: - +-- + +ALTER TABLE ONLY eth.receipt_cids + ADD CONSTRAINT receipt_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED; + + -- -- Name: receipt_cids receipt_cids_tx_id_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: - -- @@ -1047,6 +965,22 @@ ALTER TABLE ONLY eth.state_cids ADD CONSTRAINT state_cids_header_id_fkey FOREIGN KEY (header_id) REFERENCES eth.header_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED; +-- +-- Name: state_cids state_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: - +-- + +ALTER TABLE ONLY eth.state_cids + ADD CONSTRAINT state_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED; + + +-- +-- Name: storage_cids storage_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: - +-- + +ALTER TABLE ONLY eth.storage_cids + ADD CONSTRAINT storage_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED; + + -- -- Name: storage_cids storage_cids_state_id_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: - -- @@ -1063,6 +997,14 @@ ALTER TABLE ONLY eth.transaction_cids ADD CONSTRAINT transaction_cids_header_id_fkey FOREIGN KEY (header_id) REFERENCES 
eth.header_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED; +-- +-- Name: transaction_cids transaction_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: - +-- + +ALTER TABLE ONLY eth.transaction_cids + ADD CONSTRAINT transaction_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED; + + -- -- Name: uncle_cids uncle_cids_header_id_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: - -- @@ -1071,6 +1013,14 @@ ALTER TABLE ONLY eth.uncle_cids ADD CONSTRAINT uncle_cids_header_id_fkey FOREIGN KEY (header_id) REFERENCES eth.header_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED; +-- +-- Name: uncle_cids uncle_cids_mh_key_fkey; Type: FK CONSTRAINT; Schema: eth; Owner: - +-- + +ALTER TABLE ONLY eth.uncle_cids + ADD CONSTRAINT uncle_cids_mh_key_fkey FOREIGN KEY (mh_key) REFERENCES public.blocks(key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED; + + -- -- PostgreSQL database dump complete -- From 77490e8b4b507332d38c0ab89ab152884cf2c892 Mon Sep 17 00:00:00 2001 From: Ian Norden Date: Tue, 4 Aug 2020 22:26:11 -0500 Subject: [PATCH 5/6] add diff field to db constraint --- db/migrations/00008_create_eth_state_cids_table.sql | 2 +- db/migrations/00009_create_eth_storage_cids_table.sql | 2 +- db/schema.sql | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/db/migrations/00008_create_eth_state_cids_table.sql b/db/migrations/00008_create_eth_state_cids_table.sql index e0bf6e57..4bfa8228 100644 --- a/db/migrations/00008_create_eth_state_cids_table.sql +++ b/db/migrations/00008_create_eth_state_cids_table.sql @@ -8,7 +8,7 @@ CREATE TABLE eth.state_cids ( state_path BYTEA, node_type INTEGER, diff BOOLEAN NOT NULL DEFAULT FALSE, - UNIQUE (header_id, state_path) + UNIQUE (header_id, state_path, diff) ); -- +goose Down diff --git a/db/migrations/00009_create_eth_storage_cids_table.sql b/db/migrations/00009_create_eth_storage_cids_table.sql index 944d39ed..f19bc62e 100644 --- 
a/db/migrations/00009_create_eth_storage_cids_table.sql +++ b/db/migrations/00009_create_eth_storage_cids_table.sql @@ -8,7 +8,7 @@ CREATE TABLE eth.storage_cids ( storage_path BYTEA, node_type INTEGER NOT NULL, diff BOOLEAN NOT NULL DEFAULT FALSE, - UNIQUE (state_id, storage_path) + UNIQUE (state_id, storage_path, diff) ); -- +goose Down diff --git a/db/schema.sql b/db/schema.sql index caa1bb67..a3b55a44 100644 --- a/db/schema.sql +++ b/db/schema.sql @@ -774,11 +774,11 @@ ALTER TABLE ONLY eth.state_accounts -- --- Name: state_cids state_cids_header_id_state_path_key; Type: CONSTRAINT; Schema: eth; Owner: - +-- Name: state_cids state_cids_header_id_state_path_diff_key; Type: CONSTRAINT; Schema: eth; Owner: - -- ALTER TABLE ONLY eth.state_cids - ADD CONSTRAINT state_cids_header_id_state_path_key UNIQUE (header_id, state_path); + ADD CONSTRAINT state_cids_header_id_state_path_diff_key UNIQUE (header_id, state_path, diff); -- @@ -798,11 +798,11 @@ ALTER TABLE ONLY eth.storage_cids -- --- Name: storage_cids storage_cids_state_id_storage_path_key; Type: CONSTRAINT; Schema: eth; Owner: - +-- Name: storage_cids storage_cids_state_id_storage_path_diff_key; Type: CONSTRAINT; Schema: eth; Owner: - -- ALTER TABLE ONLY eth.storage_cids - ADD CONSTRAINT storage_cids_state_id_storage_path_key UNIQUE (state_id, storage_path); + ADD CONSTRAINT storage_cids_state_id_storage_path_diff_key UNIQUE (state_id, storage_path, diff); -- From 77b7bcc94ca5e1175a7097bf84d3853dbe8ec5de Mon Sep 17 00:00:00 2001 From: Ian Norden Date: Tue, 4 Aug 2020 22:34:49 -0500 Subject: [PATCH 6/6] adjust everything to work with mh fks --- pkg/btc/cid_retriever.go | 2 +- pkg/btc/cleaner.go | 4 +- pkg/btc/cleaner_test.go | 58 +++++--- pkg/btc/indexer.go | 16 +- pkg/btc/indexer_test.go | 19 ++- pkg/btc/ipld_pg_fetcher.go | 4 +- pkg/btc/mocks/test_data.go | 69 +++------ pkg/btc/models.go | 3 + pkg/btc/publish_and_indexer.go | 2 + pkg/btc/publisher.go | 4 + pkg/btc/publisher_test.go | 8 +- pkg/eth/api_test.go | 
2 +- pkg/eth/backend.go | 6 +- pkg/eth/cid_retriever.go | 20 +-- pkg/eth/cid_retriever_test.go | 262 ++++++++++++++++++--------------- pkg/eth/cleaner.go | 12 +- pkg/eth/cleaner_test.go | 123 ++++++++++------ pkg/eth/converter.go | 6 +- pkg/eth/indexer.go | 50 +++---- pkg/eth/indexer_test.go | 11 ++ pkg/eth/ipld_pg_fetcher.go | 12 +- pkg/eth/mocks/test_data.go | 38 ++++- pkg/eth/models.go | 7 + pkg/eth/publish_and_indexer.go | 8 + pkg/eth/publisher.go | 6 + pkg/shared/functions.go | 57 +++---- pkg/shared/test_helpers.go | 54 +++++++ 27 files changed, 501 insertions(+), 362 deletions(-) diff --git a/pkg/btc/cid_retriever.go b/pkg/btc/cid_retriever.go index b52836e2..78acd489 100644 --- a/pkg/btc/cid_retriever.go +++ b/pkg/btc/cid_retriever.go @@ -130,7 +130,7 @@ func (bcr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID results := make([]TxModel, 0) id := 1 pgStr := fmt.Sprintf(`SELECT transaction_cids.id, transaction_cids.header_id, - transaction_cids.tx_hash, transaction_cids.cid, + transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.mh_key, transaction_cids.segwit, transaction_cids.witness_hash, transaction_cids.index FROM btc.transaction_cids, btc.header_cids, btc.tx_inputs, btc.tx_outputs WHERE transaction_cids.header_id = header_cids.id diff --git a/pkg/btc/cleaner.go b/pkg/btc/cleaner.go index a9f83b2d..0b9d0167 100644 --- a/pkg/btc/cleaner.go +++ b/pkg/btc/cleaner.go @@ -160,7 +160,7 @@ func (c *Cleaner) cleanFull(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanTransactionIPLDs(tx *sqlx.Tx, rng [2]uint64) error { pgStr := `DELETE FROM public.blocks A USING btc.transaction_cids B, btc.header_cids C - WHERE A.key = B.cid + WHERE A.key = B.mh_key AND B.header_id = C.id AND C.block_number BETWEEN $1 AND $2` _, err := tx.Exec(pgStr, rng[0], rng[1]) @@ -179,7 +179,7 @@ func (c *Cleaner) cleanTransactionMetaData(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanHeaderIPLDs(tx *sqlx.Tx, rng [2]uint64) error { 
pgStr := `DELETE FROM public.blocks A USING btc.header_cids B - WHERE A.key = B.cid + WHERE A.key = B.mh_key AND B.block_number BETWEEN $1 AND $2` _, err := tx.Exec(pgStr, rng[0], rng[1]) return err diff --git a/pkg/btc/cleaner_test.go b/pkg/btc/cleaner_test.go index d468367e..c8e43320 100644 --- a/pkg/btc/cleaner_test.go +++ b/pkg/btc/cleaner_test.go @@ -33,25 +33,30 @@ var ( // header variables blockHash1 = crypto.Keccak256Hash([]byte{00, 02}) blocKNumber1 = big.NewInt(0) - headerCid1 = "mockHeader1CID" + headerCid1 = shared.TestCID([]byte("mockHeader1CID")) + headerMhKey1 = shared.MultihashKeyFromCID(headerCid1) parentHash = crypto.Keccak256Hash([]byte{00, 01}) headerModel1 = btc.HeaderModel{ BlockHash: blockHash1.String(), BlockNumber: blocKNumber1.String(), ParentHash: parentHash.String(), - CID: headerCid1, + CID: headerCid1.String(), + MhKey: headerMhKey1, } // tx variables - tx1CID = "mockTx1CID" - tx2CID = "mockTx2CID" + tx1CID = shared.TestCID([]byte("mockTx1CID")) + tx1MhKey = shared.MultihashKeyFromCID(tx1CID) + tx2CID = shared.TestCID([]byte("mockTx2CID")) + tx2MhKey = shared.MultihashKeyFromCID(tx2CID) tx1Hash = crypto.Keccak256Hash([]byte{01, 01}) tx2Hash = crypto.Keccak256Hash([]byte{01, 02}) opHash = crypto.Keccak256Hash([]byte{02, 01}) txModels1 = []btc.TxModelWithInsAndOuts{ { Index: 0, - CID: tx1CID, + CID: tx1CID.String(), + MhKey: tx1MhKey, TxHash: tx1Hash.String(), SegWit: true, TxInputs: []btc.TxInput{ @@ -75,7 +80,8 @@ var ( }, { Index: 1, - CID: tx2CID, + CID: tx2CID.String(), + MhKey: tx2MhKey, TxHash: tx2Hash.String(), SegWit: true, }, @@ -89,21 +95,25 @@ var ( // header variables blockHash2 = crypto.Keccak256Hash([]byte{00, 03}) blocKNumber2 = big.NewInt(1) - headerCid2 = "mockHeaderCID2" + headerCid2 = shared.TestCID([]byte("mockHeaderCID2")) + headerMhKey2 = shared.MultihashKeyFromCID(headerCid2) headerModel2 = btc.HeaderModel{ BlockNumber: blocKNumber2.String(), BlockHash: blockHash2.String(), ParentHash: blockHash1.String(), - CID: 
headerCid2, + CID: headerCid2.String(), + MhKey: headerMhKey2, } // tx variables - tx3CID = "mockTx3CID" + tx3CID = shared.TestCID([]byte("mockTx3CID")) + tx3MhKey = shared.MultihashKeyFromCID(tx3CID) tx3Hash = crypto.Keccak256Hash([]byte{01, 03}) txModels2 = []btc.TxModelWithInsAndOuts{ { Index: 0, - CID: tx3CID, + CID: tx3CID.String(), + MhKey: tx3MhKey, TxHash: tx3Hash.String(), SegWit: true, }, @@ -112,13 +122,13 @@ var ( HeaderCID: headerModel2, TransactionCIDs: txModels2, } - rngs = [][2]uint64{{0, 1}} - cids = []string{ - headerCid1, - headerCid2, - tx1CID, - tx2CID, - tx3CID, + rngs = [][2]uint64{{0, 1}} + mhKeys = []string{ + headerMhKey1, + headerMhKey2, + tx1MhKey, + tx2MhKey, + tx3MhKey, } mockData = []byte{'\x01'} ) @@ -139,16 +149,15 @@ var _ = Describe("Cleaner", func() { Describe("Clean", func() { BeforeEach(func() { + for _, key := range mhKeys { + _, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, key, mockData) + Expect(err).ToNot(HaveOccurred()) + } err := repo.Index(mockCIDPayload1) Expect(err).ToNot(HaveOccurred()) err = repo.Index(mockCIDPayload2) Expect(err).ToNot(HaveOccurred()) - for _, cid := range cids { - _, err = db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, cid, mockData) - Expect(err).ToNot(HaveOccurred()) - } - tx, err := db.Beginx() Expect(err).ToNot(HaveOccurred()) var startingIPFSBlocksCount int @@ -286,6 +295,11 @@ var _ = Describe("Cleaner", func() { Describe("ResetValidation", func() { BeforeEach(func() { + for _, key := range mhKeys { + _, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, key, mockData) + Expect(err).ToNot(HaveOccurred()) + } + err := repo.Index(mockCIDPayload1) Expect(err).ToNot(HaveOccurred()) err = repo.Index(mockCIDPayload2) diff --git a/pkg/btc/indexer.go b/pkg/btc/indexer.go index 4f6f1f5d..bd9af179 100644 --- a/pkg/btc/indexer.go +++ b/pkg/btc/indexer.go @@ -74,11 +74,11 @@ func (in *CIDIndexer) Index(cids shared.CIDsForIndexing) error { 
func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel) (int64, error) { var headerID int64 - err := tx.QueryRowx(`INSERT INTO btc.header_cids (block_number, block_hash, parent_hash, cid, timestamp, bits, node_id, times_validated) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8) - ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, timestamp, bits, node_id, times_validated) = ($3, $4, $5, $6, $7, btc.header_cids.times_validated + 1) + err := tx.QueryRowx(`INSERT INTO btc.header_cids (block_number, block_hash, parent_hash, cid, timestamp, bits, node_id, mh_key, times_validated) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) + ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, timestamp, bits, node_id, mh_key, times_validated) = ($3, $4, $5, $6, $7, $8, btc.header_cids.times_validated + 1) RETURNING id`, - header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.Timestamp, header.Bits, in.db.NodeID, 1).Scan(&headerID) + header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.Timestamp, header.Bits, in.db.NodeID, header.MhKey, 1).Scan(&headerID) return headerID, err } @@ -107,11 +107,11 @@ func (in *CIDIndexer) indexTransactionCIDs(tx *sqlx.Tx, transactions []TxModelWi func (in *CIDIndexer) indexTransactionCID(tx *sqlx.Tx, transaction TxModelWithInsAndOuts, headerID int64) (int64, error) { var txID int64 - err := tx.QueryRowx(`INSERT INTO btc.transaction_cids (header_id, tx_hash, index, cid, segwit, witness_hash) - VALUES ($1, $2, $3, $4, $5, $6) - ON CONFLICT (tx_hash) DO UPDATE SET (header_id, index, cid, segwit, witness_hash) = ($1, $3, $4, $5, $6) + err := tx.QueryRowx(`INSERT INTO btc.transaction_cids (header_id, tx_hash, index, cid, segwit, witness_hash, mh_key) + VALUES ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT (tx_hash) DO UPDATE SET (header_id, index, cid, segwit, witness_hash, mh_key) = ($1, $3, $4, $5, $6, $7) RETURNING id`, - headerID, transaction.TxHash, 
transaction.Index, transaction.CID, transaction.SegWit, transaction.WitnessHash).Scan(&txID) + headerID, transaction.TxHash, transaction.Index, transaction.CID, transaction.SegWit, transaction.WitnessHash, transaction.MhKey).Scan(&txID) return txID, err } diff --git a/pkg/btc/indexer_test.go b/pkg/btc/indexer_test.go index 5484150f..8b4dea11 100644 --- a/pkg/btc/indexer_test.go +++ b/pkg/btc/indexer_test.go @@ -28,14 +28,20 @@ import ( var _ = Describe("Indexer", func() { var ( - db *postgres.DB - err error - repo *btc.CIDIndexer + db *postgres.DB + err error + repo *btc.CIDIndexer + mockData = []byte{1, 2, 3} ) BeforeEach(func() { db, err = shared.SetupDB() Expect(err).ToNot(HaveOccurred()) repo = btc.NewCIDIndexer(db) + // need entries in the public.blocks with the mhkeys or the FK constraint will fail + shared.PublishMockIPLD(db, mocks.MockHeaderMhKey, mockData) + shared.PublishMockIPLD(db, mocks.MockTrxMhKey1, mockData) + shared.PublishMockIPLD(db, mocks.MockTrxMhKey2, mockData) + shared.PublishMockIPLD(db, mocks.MockTrxMhKey3, mockData) }) AfterEach(func() { btc.TearDownDB(db) @@ -43,6 +49,7 @@ var _ = Describe("Indexer", func() { Describe("Index", func() { It("Indexes CIDs and related metadata into vulcanizedb", func() { + err = repo.Index(&mocks.MockCIDPayload) Expect(err).ToNot(HaveOccurred()) pgStr := `SELECT * FROM btc.header_cids @@ -72,13 +79,13 @@ var _ = Describe("Indexer", func() { Expect(tx.WitnessHash).To(Equal("")) switch tx.Index { case 0: - Expect(tx.CID).To(Equal("mockTrxCID1")) + Expect(tx.CID).To(Equal(mocks.MockTrxCID1.String())) Expect(tx.TxHash).To(Equal(mocks.MockBlock.Transactions[0].TxHash().String())) case 1: - Expect(tx.CID).To(Equal("mockTrxCID2")) + Expect(tx.CID).To(Equal(mocks.MockTrxCID2.String())) Expect(tx.TxHash).To(Equal(mocks.MockBlock.Transactions[1].TxHash().String())) case 2: - Expect(tx.CID).To(Equal("mockTrxCID3")) + Expect(tx.CID).To(Equal(mocks.MockTrxCID3.String())) 
Expect(tx.TxHash).To(Equal(mocks.MockBlock.Transactions[2].TxHash().String())) } } diff --git a/pkg/btc/ipld_pg_fetcher.go b/pkg/btc/ipld_pg_fetcher.go index af9ab34a..cd673c75 100644 --- a/pkg/btc/ipld_pg_fetcher.go +++ b/pkg/btc/ipld_pg_fetcher.go @@ -79,7 +79,7 @@ func (f *IPLDPGFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error) // FetchHeaders fetches headers func (f *IPLDPGFetcher) FetchHeader(tx *sqlx.Tx, c HeaderModel) (ipfs.BlockModel, error) { log.Debug("fetching header ipld") - headerBytes, err := shared.FetchIPLD(tx, c.CID) + headerBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey) if err != nil { return ipfs.BlockModel{}, err } @@ -94,7 +94,7 @@ func (f *IPLDPGFetcher) FetchTrxs(tx *sqlx.Tx, cids []TxModel) ([]ipfs.BlockMode log.Debug("fetching transaction iplds") trxIPLDs := make([]ipfs.BlockModel, len(cids)) for i, c := range cids { - trxBytes, err := shared.FetchIPLD(tx, c.CID) + trxBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey) if err != nil { return nil, err } diff --git a/pkg/btc/mocks/test_data.go b/pkg/btc/mocks/test_data.go index 663a8b35..7fc347ea 100644 --- a/pkg/btc/mocks/test_data.go +++ b/pkg/btc/mocks/test_data.go @@ -25,11 +25,19 @@ import ( "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" ) var ( + MockHeaderCID = shared.TestCID([]byte("MockHeaderCID")) + MockTrxCID1 = shared.TestCID([]byte("MockTrxCID1")) + MockTrxCID2 = shared.TestCID([]byte("MockTrxCID2")) + MockTrxCID3 = shared.TestCID([]byte("MockTrxCID3")) + MockHeaderMhKey = shared.MultihashKeyFromCID(MockHeaderCID) + MockTrxMhKey1 = shared.MultihashKeyFromCID(MockTrxCID1) + MockTrxMhKey2 = shared.MultihashKeyFromCID(MockTrxCID2) + MockTrxMhKey3 = shared.MultihashKeyFromCID(MockTrxCID3) MockBlockHeight int64 = 1337 MockBlock = wire.MsgBlock{ Header: wire.BlockHeader{ @@ -479,7 +487,8 @@ var ( } 
MockTxsMetaDataPostPublish = []btc.TxModelWithInsAndOuts{ { - CID: "mockTrxCID1", + CID: MockTrxCID1.String(), + MhKey: MockTrxMhKey1, TxHash: MockBlock.Transactions[0].TxHash().String(), Index: 0, SegWit: MockBlock.Transactions[0].HasWitness(), @@ -517,7 +526,8 @@ var ( }, }, { - CID: "mockTrxCID2", + CID: MockTrxCID2.String(), + MhKey: MockTrxMhKey2, TxHash: MockBlock.Transactions[1].TxHash().String(), Index: 1, SegWit: MockBlock.Transactions[1].HasWitness(), @@ -594,7 +604,8 @@ var ( }, }, { - CID: "mockTrxCID3", + CID: MockTrxCID3.String(), + MhKey: MockTrxMhKey3, TxHash: MockBlock.Transactions[2].TxHash().String(), Index: 2, SegWit: MockBlock.Transactions[2].HasWitness(), @@ -671,7 +682,8 @@ var ( }, } MockHeaderMetaData = btc.HeaderModel{ - CID: "mockHeaderCID", + CID: MockHeaderCID.String(), + MhKey: MockHeaderMhKey, ParentHash: MockBlock.Header.PrevBlock.String(), BlockNumber: strconv.Itoa(int(MockBlockHeight)), BlockHash: MockBlock.Header.BlockHash().String(), @@ -686,53 +698,6 @@ var ( HeaderCID: MockHeaderMetaData, TransactionCIDs: MockTxsMetaDataPostPublish, } - DummyCIDPayloadForFKReference = btc.CIDPayload{ - HeaderCID: btc.HeaderModel{ - CID: "dummyHeader", - ParentHash: "", - BlockHash: "", - BlockNumber: "1336", - Bits: 1, - Timestamp: 1000000000, - }, - TransactionCIDs: []btc.TxModelWithInsAndOuts{ - { - TxHash: "87a157f3fd88ac7907c05fc55e271dc4acdc5605d187d646604ca8c0e9382e03", - CID: "dummyTx1", - Index: 0, - TxOutputs: []btc.TxOutput{ - { - Index: 0, - RequiredSigs: 0, - Value: 0, - PkScript: []byte{}, - ScriptClass: 0, - }, - }, - }, - { - TxHash: "cf4e2978d0611ce46592e02d7e7daf8627a316ab69759a9f3df109a7f2bf3ec3", - CID: "dummyTx2", - Index: 1, - TxOutputs: []btc.TxOutput{ - { - Index: 0, - RequiredSigs: 0, - Value: 0, - PkScript: []byte{}, - ScriptClass: 0, - }, - { - Index: 1, - RequiredSigs: 0, - Value: 0, - PkScript: []byte{}, - ScriptClass: 0, - }, - }, - }, - }, - } ) func stringSliceFromAddresses(addrs []btcutil.Address) []string { diff 
--git a/pkg/btc/models.go b/pkg/btc/models.go index 1317a1bc..c2bbb81c 100644 --- a/pkg/btc/models.go +++ b/pkg/btc/models.go @@ -25,6 +25,7 @@ type HeaderModel struct { BlockHash string `db:"block_hash"` ParentHash string `db:"parent_hash"` CID string `db:"cid"` + MhKey string `db:"mh_key"` Timestamp int64 `db:"timestamp"` Bits uint32 `db:"bits"` NodeID int64 `db:"node_id"` @@ -38,6 +39,7 @@ type TxModel struct { Index int64 `db:"index"` TxHash string `db:"tx_hash"` CID string `db:"cid"` + MhKey string `db:"mh_key"` SegWit bool `db:"segwit"` WitnessHash string `db:"witness_hash"` } @@ -49,6 +51,7 @@ type TxModelWithInsAndOuts struct { Index int64 `db:"index"` TxHash string `db:"tx_hash"` CID string `db:"cid"` + MhKey string `db:"mh_key"` SegWit bool `db:"segwit"` WitnessHash string `db:"witness_hash"` TxInputs []TxInput diff --git a/pkg/btc/publish_and_indexer.go b/pkg/btc/publish_and_indexer.go index 799434ae..dd5a72cb 100644 --- a/pkg/btc/publish_and_indexer.go +++ b/pkg/btc/publish_and_indexer.go @@ -80,6 +80,7 @@ func (pub *IPLDPublisherAndIndexer) Publish(payload shared.ConvertedData) (share } header := HeaderModel{ CID: headerNode.Cid().String(), + MhKey: shared.MultihashKeyFromCID(headerNode.Cid()), ParentHash: ipldPayload.Header.PrevBlock.String(), BlockNumber: strconv.Itoa(int(ipldPayload.BlockPayload.BlockHeight)), BlockHash: ipldPayload.Header.BlockHash().String(), @@ -98,6 +99,7 @@ func (pub *IPLDPublisherAndIndexer) Publish(payload shared.ConvertedData) (share } txModel := ipldPayload.TxMetaData[i] txModel.CID = txNode.Cid().String() + txModel.MhKey = shared.MultihashKeyFromCID(txNode.Cid()) txID, err := pub.indexer.indexTransactionCID(tx, txModel, headerID) if err != nil { return nil, err diff --git a/pkg/btc/publisher.go b/pkg/btc/publisher.go index 992f81ae..5c6c4d07 100644 --- a/pkg/btc/publisher.go +++ b/pkg/btc/publisher.go @@ -62,8 +62,10 @@ func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForI if err != nil { return 
nil, err } + mhKey, _ := shared.MultihashKeyFromCIDString(headerCid) header := HeaderModel{ CID: headerCid, + MhKey: mhKey, ParentHash: ipldPayload.Header.PrevBlock.String(), BlockNumber: strconv.Itoa(int(ipldPayload.BlockPayload.BlockHeight)), BlockHash: ipldPayload.Header.BlockHash().String(), @@ -97,8 +99,10 @@ func (pub *IPLDPublisher) publishTransactions(transactions []*ipld.BtcTx, txTrie if err != nil { return nil, err } + mhKey, _ := shared.MultihashKeyFromCIDString(cid) txCids[i] = TxModelWithInsAndOuts{ CID: cid, + MhKey: mhKey, Index: trxMeta[i].Index, TxHash: trxMeta[i].TxHash, SegWit: trxMeta[i].SegWit, diff --git a/pkg/btc/publisher_test.go b/pkg/btc/publisher_test.go index 2fdace5e..ef809f3a 100644 --- a/pkg/btc/publisher_test.go +++ b/pkg/btc/publisher_test.go @@ -57,12 +57,12 @@ var _ = Describe("Publisher", func() { Expect(err).ToNot(HaveOccurred()) tx3Bytes := by.Bytes() mockHeaderDagPutter.CIDsToReturn = map[common.Hash]string{ - common.BytesToHash(headerBytes): "mockHeaderCID", + common.BytesToHash(headerBytes): mocks.MockHeaderCID.String(), } mockTrxDagPutter.CIDsToReturn = map[common.Hash]string{ - common.BytesToHash(tx1Bytes): "mockTrxCID1", - common.BytesToHash(tx2Bytes): "mockTrxCID2", - common.BytesToHash(tx3Bytes): "mockTrxCID3", + common.BytesToHash(tx1Bytes): mocks.MockTrxCID1.String(), + common.BytesToHash(tx2Bytes): mocks.MockTrxCID2.String(), + common.BytesToHash(tx3Bytes): mocks.MockTrxCID3.String(), } publisher := btc.IPLDPublisher{ HeaderPutter: mockHeaderDagPutter, diff --git a/pkg/eth/api_test.go b/pkg/eth/api_test.go index fb7d3d53..edf4ec40 100644 --- a/pkg/eth/api_test.go +++ b/pkg/eth/api_test.go @@ -124,7 +124,7 @@ var _ = Describe("API", func() { }) Describe("GetTransactionByHash", func() { - It("Retrieves the head block number", func() { + It("Retrieves a transaction by hash", func() { hash := mocks.MockTransactions[0].Hash() tx, err := api.GetTransactionByHash(context.Background(), hash) Expect(err).ToNot(HaveOccurred()) 
diff --git a/pkg/eth/backend.go b/pkg/eth/backend.go index 2128a47d..81ad8382 100644 --- a/pkg/eth/backend.go +++ b/pkg/eth/backend.go @@ -326,12 +326,12 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo // GetTransaction retrieves a tx by hash // It also returns the blockhash, blocknumber, and tx index associated with the transaction func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { - pgStr := `SELECT transaction_cids.cid, transaction_cids.index, header_cids.block_hash, header_cids.block_number + pgStr := `SELECT transaction_cids.mh_key, transaction_cids.index, header_cids.block_hash, header_cids.block_number FROM eth.transaction_cids, eth.header_cids WHERE transaction_cids.header_id = header_cids.id AND transaction_cids.tx_hash = $1` var txCIDWithHeaderInfo struct { - CID string `db:"cid"` + MhKey string `db:"mh_key"` Index int64 `db:"index"` BlockHash string `db:"block_hash"` BlockNumber int64 `db:"block_number"` @@ -356,7 +356,7 @@ func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*type } }() - txIPLD, err := b.Fetcher.FetchTrxs(tx, []TxModel{{CID: txCIDWithHeaderInfo.CID}}) + txIPLD, err := b.Fetcher.FetchTrxs(tx, []TxModel{{MhKey: txCIDWithHeaderInfo.MhKey}}) if err != nil { return nil, common.Hash{}, 0, 0, err } diff --git a/pkg/eth/cid_retriever.go b/pkg/eth/cid_retriever.go index 367b9ead..ad93d3c2 100644 --- a/pkg/eth/cid_retriever.go +++ b/pkg/eth/cid_retriever.go @@ -186,7 +186,7 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID results := make([]TxModel, 0) id := 1 pgStr := fmt.Sprintf(`SELECT transaction_cids.id, transaction_cids.header_id, - transaction_cids.tx_hash, transaction_cids.cid, + transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.mh_key, transaction_cids.dst, transaction_cids.src, transaction_cids.index FROM eth.transaction_cids INNER JOIN 
eth.header_cids ON (transaction_cids.header_id = header_cids.id) WHERE header_cids.id = $%d`, id) @@ -210,8 +210,8 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID int64, trxIds []int64) ([]ReceiptModel, error) { log.Debug("retrieving receipt cids for header id ", headerID) args := make([]interface{}, 0, 4) - pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.contract, - receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s, + pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key, + receipt_cids.contract, receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s, receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids WHERE receipt_cids.tx_id = transaction_cids.id @@ -290,8 +290,8 @@ func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter Receip func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, trxIds []int64) ([]ReceiptModel, error) { log.Debug("retrieving receipt cids for block ", blockNumber) args := make([]interface{}, 0, 5) - pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.contract, - receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s, + pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key, + receipt_cids.contract, receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s, receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids WHERE receipt_cids.tx_id = transaction_cids.id @@ -387,7 +387,7 @@ func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, 
log.Debug("retrieving state cids for header id ", headerID) args := make([]interface{}, 0, 2) pgStr := `SELECT state_cids.id, state_cids.header_id, - state_cids.state_leaf_key, state_cids.node_type, state_cids.cid, state_cids.state_path + state_cids.state_leaf_key, state_cids.node_type, state_cids.cid, state_cids.mh_key, state_cids.state_path FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id) WHERE header_cids.id = $1` args = append(args, headerID) @@ -411,8 +411,8 @@ func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID int64) ([]StorageNodeWithStateKeyModel, error) { log.Debug("retrieving storage cids for header id ", headerID) args := make([]interface{}, 0, 3) - pgStr := `SELECT storage_cids.id, storage_cids.state_id, storage_cids.storage_leaf_key, - storage_cids.node_type, storage_cids.cid, storage_cids.storage_path, state_cids.state_leaf_key + pgStr := `SELECT storage_cids.id, storage_cids.state_id, storage_cids.storage_leaf_key, storage_cids.node_type, + storage_cids.cid, storage_cids.mh_key, storage_cids.storage_path, state_cids.state_leaf_key FROM eth.storage_cids, eth.state_cids, eth.header_cids WHERE storage_cids.state_id = state_cids.id AND state_cids.header_id = header_cids.id @@ -607,8 +607,8 @@ func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ( // RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txIDs []int64) ([]ReceiptModel, error) { log.Debugf("retrieving receipt cids for tx ids %v", txIDs) - pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.contract, - receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s, + pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key, + 
receipt_cids.contract, receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s, receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts FROM eth.receipt_cids, eth.transaction_cids WHERE tx_id = ANY($1::INTEGER[]) diff --git a/pkg/eth/cid_retriever_test.go b/pkg/eth/cid_retriever_test.go index 5a355bd9..668d763d 100644 --- a/pkg/eth/cid_retriever_test.go +++ b/pkg/eth/cid_retriever_test.go @@ -19,6 +19,8 @@ package eth_test import ( "math/big" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/common" . "github.com/onsi/ginkgo" @@ -209,14 +211,14 @@ var ( var _ = Describe("Retriever", func() { var ( db *postgres.DB - repo *eth2.CIDIndexer + repo *eth2.IPLDPublisherAndIndexer retriever *eth2.CIDRetriever ) BeforeEach(func() { var err error db, err = shared.SetupDB() Expect(err).ToNot(HaveOccurred()) - repo = eth2.NewCIDIndexer(db) + repo = eth2.NewIPLDPublisherAndIndexer(db) retriever = eth2.NewCIDRetriever(db) }) AfterEach(func() { @@ -225,7 +227,7 @@ var _ = Describe("Retriever", func() { Describe("Retrieve", func() { BeforeEach(func() { - err := repo.Index(mocks.MockCIDPayload) + _, err := repo.Publish(mocks.MockConvertedPayload) Expect(err).ToNot(HaveOccurred()) }) It("Retrieves all CIDs for the given blocknumber when provided an open filter", func() { @@ -395,6 +397,7 @@ var _ = Describe("Retriever", func() { NodeType: 2, StateKey: common.BytesToHash(mocks.AccountLeafKey).Hex(), CID: mocks.State2CID.String(), + MhKey: mocks.State2MhKey, Path: []byte{'\x0c'}, })) @@ -405,8 +408,12 @@ var _ = Describe("Retriever", func() { }) Describe("RetrieveFirstBlockNumber", func() { + It("Throws an error if there are no blocks in the database", func() { + _, err := retriever.RetrieveFirstBlockNumber() + Expect(err).To(HaveOccurred()) + }) It("Gets the number of the first block that has data in the database", func() { - err := repo.Index(mocks.MockCIDPayload) + _, err := repo.Publish(mocks.MockConvertedPayload) 
Expect(err).ToNot(HaveOccurred()) num, err := retriever.RetrieveFirstBlockNumber() Expect(err).ToNot(HaveOccurred()) @@ -414,9 +421,9 @@ var _ = Describe("Retriever", func() { }) It("Gets the number of the first block that has data in the database", func() { - payload := *mocks.MockCIDPayload - payload.HeaderCID.BlockNumber = "1010101" - err := repo.Index(&payload) + payload := mocks.MockConvertedPayload + payload.Block = newMockBlock(1010101) + _, err := repo.Publish(payload) Expect(err).ToNot(HaveOccurred()) num, err := retriever.RetrieveFirstBlockNumber() Expect(err).ToNot(HaveOccurred()) @@ -424,13 +431,13 @@ var _ = Describe("Retriever", func() { }) It("Gets the number of the first block that has data in the database", func() { - payload1 := *mocks.MockCIDPayload - payload1.HeaderCID.BlockNumber = "1010101" + payload1 := mocks.MockConvertedPayload + payload1.Block = newMockBlock(1010101) payload2 := payload1 - payload2.HeaderCID.BlockNumber = "5" - err := repo.Index(&payload1) + payload2.Block = newMockBlock(5) + _, err := repo.Publish(payload1) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload2) + _, err = repo.Publish(payload2) Expect(err).ToNot(HaveOccurred()) num, err := retriever.RetrieveFirstBlockNumber() Expect(err).ToNot(HaveOccurred()) @@ -439,8 +446,12 @@ var _ = Describe("Retriever", func() { }) Describe("RetrieveLastBlockNumber", func() { + It("Throws an error if there are no blocks in the database", func() { + _, err := retriever.RetrieveLastBlockNumber() + Expect(err).To(HaveOccurred()) + }) It("Gets the number of the latest block that has data in the database", func() { - err := repo.Index(mocks.MockCIDPayload) + _, err := repo.Publish(mocks.MockConvertedPayload) Expect(err).ToNot(HaveOccurred()) num, err := retriever.RetrieveLastBlockNumber() Expect(err).ToNot(HaveOccurred()) @@ -448,9 +459,9 @@ var _ = Describe("Retriever", func() { }) It("Gets the number of the latest block that has data in the database", func() { - payload := 
*mocks.MockCIDPayload - payload.HeaderCID.BlockNumber = "1010101" - err := repo.Index(&payload) + payload := mocks.MockConvertedPayload + payload.Block = newMockBlock(1010101) + _, err := repo.Publish(payload) Expect(err).ToNot(HaveOccurred()) num, err := retriever.RetrieveLastBlockNumber() Expect(err).ToNot(HaveOccurred()) @@ -458,13 +469,13 @@ var _ = Describe("Retriever", func() { }) It("Gets the number of the latest block that has data in the database", func() { - payload1 := *mocks.MockCIDPayload - payload1.HeaderCID.BlockNumber = "1010101" + payload1 := mocks.MockConvertedPayload + payload1.Block = newMockBlock(1010101) payload2 := payload1 - payload2.HeaderCID.BlockNumber = "5" - err := repo.Index(&payload1) + payload2.Block = newMockBlock(5) + _, err := repo.Publish(payload1) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload2) + _, err = repo.Publish(payload2) Expect(err).ToNot(HaveOccurred()) num, err := retriever.RetrieveLastBlockNumber() Expect(err).ToNot(HaveOccurred()) @@ -474,21 +485,20 @@ var _ = Describe("Retriever", func() { Describe("RetrieveGapsInData", func() { It("Doesn't return gaps if there are none", func() { - payload0 := *mocks.MockCIDPayload - payload0.HeaderCID.BlockNumber = "0" - payload1 := *mocks.MockCIDPayload - payload1.HeaderCID.BlockNumber = "1" + payload0 := mocks.MockConvertedPayload + payload0.Block = newMockBlock(0) + payload1 := mocks.MockConvertedPayload payload2 := payload1 - payload2.HeaderCID.BlockNumber = "2" + payload2.Block = newMockBlock(2) payload3 := payload2 - payload3.HeaderCID.BlockNumber = "3" - err := repo.Index(&payload0) + payload3.Block = newMockBlock(3) + _, err := repo.Publish(payload0) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload1) + _, err = repo.Publish(payload1) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload2) + _, err = repo.Publish(payload2) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload3) + _, err = repo.Publish(payload3) 
Expect(err).ToNot(HaveOccurred()) gaps, err := retriever.RetrieveGapsInData(1) Expect(err).ToNot(HaveOccurred()) @@ -496,9 +506,9 @@ var _ = Describe("Retriever", func() { }) It("Returns the gap from 0 to the earliest block", func() { - payload := *mocks.MockCIDPayload - payload.HeaderCID.BlockNumber = "5" - err := repo.Index(&payload) + payload := mocks.MockConvertedPayload + payload.Block = newMockBlock(5) + _, err := repo.Publish(payload) Expect(err).ToNot(HaveOccurred()) gaps, err := retriever.RetrieveGapsInData(1) Expect(err).ToNot(HaveOccurred()) @@ -508,17 +518,16 @@ var _ = Describe("Retriever", func() { }) It("Can handle single block gaps", func() { - payload0 := *mocks.MockCIDPayload - payload0.HeaderCID.BlockNumber = "0" - payload1 := *mocks.MockCIDPayload - payload1.HeaderCID.BlockNumber = "1" + payload0 := mocks.MockConvertedPayload + payload0.Block = newMockBlock(0) + payload1 := mocks.MockConvertedPayload payload3 := payload1 - payload3.HeaderCID.BlockNumber = "3" - err := repo.Index(&payload0) + payload3.Block = newMockBlock(3) + _, err := repo.Publish(payload0) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload1) + _, err = repo.Publish(payload1) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload3) + _, err = repo.Publish(payload3) Expect(err).ToNot(HaveOccurred()) gaps, err := retriever.RetrieveGapsInData(1) Expect(err).ToNot(HaveOccurred()) @@ -528,13 +537,13 @@ var _ = Describe("Retriever", func() { }) It("Finds gap between two entries", func() { - payload1 := *mocks.MockCIDPayload - payload1.HeaderCID.BlockNumber = "1010101" + payload1 := mocks.MockConvertedPayload + payload1.Block = newMockBlock(1010101) payload2 := payload1 - payload2.HeaderCID.BlockNumber = "0" - err := repo.Index(&payload1) + payload2.Block = newMockBlock(0) + _, err := repo.Publish(payload1) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload2) + _, err = repo.Publish(payload2) Expect(err).ToNot(HaveOccurred()) gaps, err := 
retriever.RetrieveGapsInData(1) Expect(err).ToNot(HaveOccurred()) @@ -544,49 +553,50 @@ var _ = Describe("Retriever", func() { }) It("Finds gaps between multiple entries", func() { - payload := *mocks.MockCIDPayload - payload.HeaderCID.BlockNumber = "1010101" - payload1 := payload - payload1.HeaderCID.BlockNumber = "1" - payload2 := payload1 - payload2.HeaderCID.BlockNumber = "5" - payload3 := payload2 - payload3.HeaderCID.BlockNumber = "100" - payload4 := payload3 - payload4.HeaderCID.BlockNumber = "101" - payload5 := payload4 - payload5.HeaderCID.BlockNumber = "102" - payload6 := payload4 - payload6.HeaderCID.BlockNumber = "103" - payload7 := payload4 - payload7.HeaderCID.BlockNumber = "104" - payload8 := payload4 - payload8.HeaderCID.BlockNumber = "105" - payload9 := payload4 - payload9.HeaderCID.BlockNumber = "106" - payload10 := payload5 - payload10.HeaderCID.BlockNumber = "1000" - err := repo.Index(&payload) + payload1 := mocks.MockConvertedPayload + payload1.Block = newMockBlock(1010101) + payload2 := mocks.MockConvertedPayload + payload2.Block = newMockBlock(1) + payload3 := mocks.MockConvertedPayload + payload3.Block = newMockBlock(5) + payload4 := mocks.MockConvertedPayload + payload4.Block = newMockBlock(100) + payload5 := mocks.MockConvertedPayload + payload5.Block = newMockBlock(101) + payload6 := mocks.MockConvertedPayload + payload6.Block = newMockBlock(102) + payload7 := mocks.MockConvertedPayload + payload7.Block = newMockBlock(103) + payload8 := mocks.MockConvertedPayload + payload8.Block = newMockBlock(104) + payload9 := mocks.MockConvertedPayload + payload9.Block = newMockBlock(105) + payload10 := mocks.MockConvertedPayload + payload10.Block = newMockBlock(106) + payload11 := mocks.MockConvertedPayload + payload11.Block = newMockBlock(1000) + + _, err := repo.Publish(payload1) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload1) + _, err = repo.Publish(payload2) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload2) + _, err 
= repo.Publish(payload3) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload3) + _, err = repo.Publish(payload4) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload4) + _, err = repo.Publish(payload5) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload5) + _, err = repo.Publish(payload6) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload6) + _, err = repo.Publish(payload7) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload7) + _, err = repo.Publish(payload8) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload8) + _, err = repo.Publish(payload9) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload9) + _, err = repo.Publish(payload10) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload10) + _, err = repo.Publish(payload11) Expect(err).ToNot(HaveOccurred()) gaps, err := retriever.RetrieveGapsInData(1) @@ -600,61 +610,63 @@ var _ = Describe("Retriever", func() { }) It("Finds validation level gaps", func() { - payload := *mocks.MockCIDPayload - payload.HeaderCID.BlockNumber = "1010101" - payload1 := payload - payload1.HeaderCID.BlockNumber = "1" - payload2 := payload1 - payload2.HeaderCID.BlockNumber = "5" - payload3 := payload2 - payload3.HeaderCID.BlockNumber = "100" - payload4 := payload3 - payload4.HeaderCID.BlockNumber = "101" - payload5 := payload4 - payload5.HeaderCID.BlockNumber = "102" - payload6 := payload4 - payload6.HeaderCID.BlockNumber = "103" - payload7 := payload4 - payload7.HeaderCID.BlockNumber = "104" - payload8 := payload4 - payload8.HeaderCID.BlockNumber = "105" - payload9 := payload4 - payload9.HeaderCID.BlockNumber = "106" - payload10 := payload4 - payload10.HeaderCID.BlockNumber = "107" - payload11 := payload4 - payload11.HeaderCID.BlockNumber = "108" - payload12 := payload4 - payload12.HeaderCID.BlockNumber = "109" - payload13 := payload5 - payload13.HeaderCID.BlockNumber = "1000" - err := repo.Index(&payload) + + payload1 := mocks.MockConvertedPayload + 
payload1.Block = newMockBlock(1010101) + payload2 := mocks.MockConvertedPayload + payload2.Block = newMockBlock(1) + payload3 := mocks.MockConvertedPayload + payload3.Block = newMockBlock(5) + payload4 := mocks.MockConvertedPayload + payload4.Block = newMockBlock(100) + payload5 := mocks.MockConvertedPayload + payload5.Block = newMockBlock(101) + payload6 := mocks.MockConvertedPayload + payload6.Block = newMockBlock(102) + payload7 := mocks.MockConvertedPayload + payload7.Block = newMockBlock(103) + payload8 := mocks.MockConvertedPayload + payload8.Block = newMockBlock(104) + payload9 := mocks.MockConvertedPayload + payload9.Block = newMockBlock(105) + payload10 := mocks.MockConvertedPayload + payload10.Block = newMockBlock(106) + payload11 := mocks.MockConvertedPayload + payload11.Block = newMockBlock(107) + payload12 := mocks.MockConvertedPayload + payload12.Block = newMockBlock(108) + payload13 := mocks.MockConvertedPayload + payload13.Block = newMockBlock(109) + payload14 := mocks.MockConvertedPayload + payload14.Block = newMockBlock(1000) + + _, err := repo.Publish(payload1) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload1) + _, err = repo.Publish(payload2) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload2) + _, err = repo.Publish(payload3) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload3) + _, err = repo.Publish(payload4) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload4) + _, err = repo.Publish(payload5) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload5) + _, err = repo.Publish(payload6) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload6) + _, err = repo.Publish(payload7) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload7) + _, err = repo.Publish(payload8) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload8) + _, err = repo.Publish(payload9) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload9) + _, err = repo.Publish(payload10) 
Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload10) + _, err = repo.Publish(payload11) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload11) + _, err = repo.Publish(payload12) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload12) + _, err = repo.Publish(payload13) Expect(err).ToNot(HaveOccurred()) - err = repo.Index(&payload13) + _, err = repo.Publish(payload14) Expect(err).ToNot(HaveOccurred()) cleaner := eth.NewCleaner(db) @@ -675,3 +687,9 @@ var _ = Describe("Retriever", func() { }) }) }) + +func newMockBlock(blockNumber uint64) *types.Block { + header := mocks.MockHeader + header.Number.SetUint64(blockNumber) + return types.NewBlock(&mocks.MockHeader, mocks.MockTransactions, nil, mocks.MockReceipts) +} diff --git a/pkg/eth/cleaner.go b/pkg/eth/cleaner.go index e8686037..3b0bdf82 100644 --- a/pkg/eth/cleaner.go +++ b/pkg/eth/cleaner.go @@ -243,7 +243,7 @@ func (c *Cleaner) cleanFull(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanStorageIPLDs(tx *sqlx.Tx, rng [2]uint64) error { pgStr := `DELETE FROM public.blocks A USING eth.storage_cids B, eth.state_cids C, eth.header_cids D - WHERE A.key = B.cid + WHERE A.key = B.mh_key AND B.state_id = C.id AND C.header_id = D.id AND D.block_number BETWEEN $1 AND $2` @@ -264,7 +264,7 @@ func (c *Cleaner) cleanStorageMetaData(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanStateIPLDs(tx *sqlx.Tx, rng [2]uint64) error { pgStr := `DELETE FROM public.blocks A USING eth.state_cids B, eth.header_cids C - WHERE A.key = B.cid + WHERE A.key = B.mh_key AND B.header_id = C.id AND C.block_number BETWEEN $1 AND $2` _, err := tx.Exec(pgStr, rng[0], rng[1]) @@ -283,7 +283,7 @@ func (c *Cleaner) cleanStateMetaData(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanReceiptIPLDs(tx *sqlx.Tx, rng [2]uint64) error { pgStr := `DELETE FROM public.blocks A USING eth.receipt_cids B, eth.transaction_cids C, eth.header_cids D - WHERE A.key = B.cid + WHERE A.key = B.mh_key AND B.tx_id = 
C.id AND C.header_id = D.id AND D.block_number BETWEEN $1 AND $2` @@ -304,7 +304,7 @@ func (c *Cleaner) cleanReceiptMetaData(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanTransactionIPLDs(tx *sqlx.Tx, rng [2]uint64) error { pgStr := `DELETE FROM public.blocks A USING eth.transaction_cids B, eth.header_cids C - WHERE A.key = B.cid + WHERE A.key = B.mh_key AND B.header_id = C.id AND C.block_number BETWEEN $1 AND $2` _, err := tx.Exec(pgStr, rng[0], rng[1]) @@ -323,7 +323,7 @@ func (c *Cleaner) cleanTransactionMetaData(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanUncleIPLDs(tx *sqlx.Tx, rng [2]uint64) error { pgStr := `DELETE FROM public.blocks A USING eth.uncle_cids B, eth.header_cids C - WHERE A.key = B.cid + WHERE A.key = B.mh_key AND B.header_id = C.id AND C.block_number BETWEEN $1 AND $2` _, err := tx.Exec(pgStr, rng[0], rng[1]) @@ -342,7 +342,7 @@ func (c *Cleaner) cleanUncleMetaData(tx *sqlx.Tx, rng [2]uint64) error { func (c *Cleaner) cleanHeaderIPLDs(tx *sqlx.Tx, rng [2]uint64) error { pgStr := `DELETE FROM public.blocks A USING eth.header_cids B - WHERE A.key = B.cid + WHERE A.key = B.mh_key AND B.block_number BETWEEN $1 AND $2` _, err := tx.Exec(pgStr, rng[0], rng[1]) return err diff --git a/pkg/eth/cleaner_test.go b/pkg/eth/cleaner_test.go index 80beb9b2..14900bbc 100644 --- a/pkg/eth/cleaner_test.go +++ b/pkg/eth/cleaner_test.go @@ -34,47 +34,55 @@ var ( // header variables blockHash1 = crypto.Keccak256Hash([]byte{00, 02}) blocKNumber1 = big.NewInt(0) - headerCID1 = "mockHeader1CID" + headerCID1 = shared.TestCID([]byte("mockHeader1CID")) + headerMhKey1 = shared.MultihashKeyFromCID(headerCID1) parentHash = crypto.Keccak256Hash([]byte{00, 01}) totalDifficulty = "50000000000000000000" reward = "5000000000000000000" headerModel = eth.HeaderModel{ BlockHash: blockHash1.String(), BlockNumber: blocKNumber1.String(), - CID: headerCID1, + CID: headerCID1.String(), + MhKey: headerMhKey1, ParentHash: parentHash.String(), TotalDifficulty: 
totalDifficulty, Reward: reward, } // tx variables - tx1CID = "mockTx1CID" - tx2CID = "mockTx2CID" + tx1CID = shared.TestCID([]byte("mockTx1CID")) + tx1MhKey = shared.MultihashKeyFromCID(tx1CID) + tx2CID = shared.TestCID([]byte("mockTx2CID")) + tx2MhKey = shared.MultihashKeyFromCID(tx2CID) tx1Hash = crypto.Keccak256Hash([]byte{01, 01}) tx2Hash = crypto.Keccak256Hash([]byte{01, 02}) txSrc = common.HexToAddress("0x010a") txDst = common.HexToAddress("0x020a") txModels1 = []eth.TxModel{ { - CID: tx1CID, + CID: tx1CID.String(), + MhKey: tx1MhKey, TxHash: tx1Hash.String(), Index: 0, }, { - CID: tx2CID, + CID: tx2CID.String(), + MhKey: tx2MhKey, TxHash: tx2Hash.String(), Index: 1, }, } // uncle variables - uncleCID = "mockUncle1CID" + uncleCID = shared.TestCID([]byte("mockUncle1CID")) + uncleMhKey = shared.MultihashKeyFromCID(uncleCID) uncleHash = crypto.Keccak256Hash([]byte{02, 02}) uncleParentHash = crypto.Keccak256Hash([]byte{02, 01}) uncleReward = "1000000000000000000" uncleModels1 = []eth.UncleModel{ { - CID: uncleCID, + CID: uncleCID.String(), + MhKey: uncleMhKey, Reward: uncleReward, BlockHash: uncleHash.String(), ParentHash: uncleParentHash.String(), @@ -82,37 +90,45 @@ var ( } // receipt variables - rct1CID = "mockRct1CID" - rct2CID = "mockRct2CID" + rct1CID = shared.TestCID([]byte("mockRct1CID")) + rct1MhKey = shared.MultihashKeyFromCID(rct1CID) + rct2CID = shared.TestCID([]byte("mockRct2CID")) + rct2MhKey = shared.MultihashKeyFromCID(rct2CID) rct1Contract = common.Address{} rct2Contract = common.HexToAddress("0x010c") receiptModels1 = map[common.Hash]eth.ReceiptModel{ tx1Hash: { - CID: rct1CID, + CID: rct1CID.String(), + MhKey: rct1MhKey, ContractHash: crypto.Keccak256Hash(rct1Contract.Bytes()).String(), }, tx2Hash: { - CID: rct2CID, + CID: rct2CID.String(), + MhKey: rct2MhKey, ContractHash: crypto.Keccak256Hash(rct2Contract.Bytes()).String(), }, } // state variables - state1CID1 = "mockState1CID1" + state1CID1 = shared.TestCID([]byte("mockState1CID1")) + 
state1MhKey1 = shared.MultihashKeyFromCID(state1CID1) state1Path = []byte{'\x01'} state1Key = crypto.Keccak256Hash(txSrc.Bytes()) - state2CID1 = "mockState2CID1" + state2CID1 = shared.TestCID([]byte("mockState2CID1")) + state2MhKey1 = shared.MultihashKeyFromCID(state2CID1) state2Path = []byte{'\x02'} state2Key = crypto.Keccak256Hash(txDst.Bytes()) stateModels1 = []eth.StateNodeModel{ { - CID: state1CID1, + CID: state1CID1.String(), + MhKey: state1MhKey1, Path: state1Path, NodeType: 2, StateKey: state1Key.String(), }, { - CID: state2CID1, + CID: state2CID1.String(), + MhKey: state2MhKey1, Path: state2Path, NodeType: 2, StateKey: state2Key.String(), @@ -120,13 +136,15 @@ var ( } // storage variables - storageCID = "mockStorageCID1" + storageCID = shared.TestCID([]byte("mockStorageCID1")) + storageMhKey = shared.MultihashKeyFromCID(storageCID) storagePath = []byte{'\x01'} storageKey = crypto.Keccak256Hash(common.Hex2Bytes("0x0000000000000000000000000000000000000000000000000000000000000000")) storageModels1 = map[string][]eth.StorageNodeModel{ common.Bytes2Hex(state1Path): { { - CID: storageCID, + CID: storageCID.String(), + MhKey: storageMhKey, StorageKey: storageKey.String(), Path: storagePath, NodeType: 2, @@ -146,39 +164,47 @@ var ( // header variables blockHash2 = crypto.Keccak256Hash([]byte{00, 03}) blocKNumber2 = big.NewInt(1) - headerCID2 = "mockHeaderCID2" + headerCID2 = shared.TestCID([]byte("mockHeaderCID2")) + headerMhKey2 = shared.MultihashKeyFromCID(headerCID2) headerModel2 = eth.HeaderModel{ BlockHash: blockHash2.String(), BlockNumber: blocKNumber2.String(), - CID: headerCID2, + CID: headerCID2.String(), + MhKey: headerMhKey2, ParentHash: blockHash1.String(), TotalDifficulty: totalDifficulty, Reward: reward, } // tx variables - tx3CID = "mockTx3CID" + tx3CID = shared.TestCID([]byte("mockTx3CID")) + tx3MhKey = shared.MultihashKeyFromCID(tx3CID) tx3Hash = crypto.Keccak256Hash([]byte{01, 03}) txModels2 = []eth.TxModel{ { - CID: tx3CID, + CID: 
tx3CID.String(), + MhKey: tx3MhKey, TxHash: tx3Hash.String(), Index: 0, }, } // receipt variables - rct3CID = "mockRct3CID" + rct3CID = shared.TestCID([]byte("mockRct3CID")) + rct3MhKey = shared.MultihashKeyFromCID(rct3CID) receiptModels2 = map[common.Hash]eth.ReceiptModel{ tx3Hash: { - CID: rct3CID, + CID: rct3CID.String(), + MhKey: rct3MhKey, ContractHash: crypto.Keccak256Hash(rct1Contract.Bytes()).String(), }, } // state variables - state1CID2 = "mockState1CID2" + state1CID2 = shared.TestCID([]byte("mockState1CID2")) + state1MhKey2 = shared.MultihashKeyFromCID(state1CID2) stateModels2 = []eth.StateNodeModel{ { - CID: state1CID2, + CID: state1CID2.String(), + MhKey: state1MhKey2, Path: state1Path, NodeType: 2, StateKey: state1Key.String(), @@ -190,21 +216,21 @@ var ( ReceiptCIDs: receiptModels2, StateNodeCIDs: stateModels2, } - rngs = [][2]uint64{{0, 1}} - cids = []string{ - headerCID1, - headerCID2, - uncleCID, - tx1CID, - tx2CID, - tx3CID, - rct1CID, - rct2CID, - rct3CID, - state1CID1, - state2CID1, - state1CID2, - storageCID, + rngs = [][2]uint64{{0, 1}} + mhKeys = []string{ + headerMhKey1, + headerMhKey2, + uncleMhKey, + tx1MhKey, + tx2MhKey, + tx3MhKey, + rct1MhKey, + rct2MhKey, + rct3MhKey, + state1MhKey1, + state2MhKey1, + state1MhKey2, + storageMhKey, } mockData = []byte{'\x01'} ) @@ -224,16 +250,16 @@ var _ = Describe("Cleaner", func() { }) Describe("Clean", func() { BeforeEach(func() { + for _, key := range mhKeys { + _, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, key, mockData) + Expect(err).ToNot(HaveOccurred()) + } + err := repo.Index(mockCIDPayload1) Expect(err).ToNot(HaveOccurred()) err = repo.Index(mockCIDPayload2) Expect(err).ToNot(HaveOccurred()) - for _, cid := range cids { - _, err = db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, cid, mockData) - Expect(err).ToNot(HaveOccurred()) - } - tx, err := db.Beginx() Expect(err).ToNot(HaveOccurred()) @@ -613,6 +639,11 @@ var _ = Describe("Cleaner", func() { 
Describe("ResetValidation", func() { BeforeEach(func() { + for _, key := range mhKeys { + _, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, key, mockData) + Expect(err).ToNot(HaveOccurred()) + } + err := repo.Index(mockCIDPayload1) Expect(err).ToNot(HaveOccurred()) err = repo.Index(mockCIDPayload2) diff --git a/pkg/eth/converter.go b/pkg/eth/converter.go index 4fb7bea6..1d6d2d31 100644 --- a/pkg/eth/converter.go +++ b/pkg/eth/converter.go @@ -72,8 +72,8 @@ func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.Convert return nil, err } txMeta := TxModel{ - Dst: shared.HandleNullAddrPointer(trx.To()), - Src: shared.HandleNullAddr(from), + Dst: shared.HandleZeroAddrPointer(trx.To()), + Src: shared.HandleZeroAddr(from), TxHash: trx.Hash().String(), Index: int64(i), } @@ -106,7 +106,7 @@ func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.Convert logContracts = append(logContracts, addr) } // This is the contract address if this receipt is for a contract creation tx - contract := shared.HandleNullAddr(receipt.ContractAddress) + contract := shared.HandleZeroAddr(receipt.ContractAddress) var contractHash string if contract != "" { contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String() diff --git a/pkg/eth/indexer.go b/pkg/eth/indexer.go index 48f57373..4285c9fa 100644 --- a/pkg/eth/indexer.go +++ b/pkg/eth/indexer.go @@ -90,29 +90,29 @@ func (in *CIDIndexer) Index(cids shared.CIDsForIndexing) error { func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel) (int64, error) { var headerID int64 - err := tx.QueryRowx(`INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, times_validated) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14) - ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, 
tx_root, receipt_root, uncle_root, bloom, timestamp, times_validated) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, eth.header_cids.times_validated + 1) + err := tx.QueryRowx(`INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) + ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1) RETURNING id`, header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, in.db.NodeID, header.Reward, header.StateRoot, header.TxRoot, - header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, 1).Scan(&headerID) + header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, header.MhKey, 1).Scan(&headerID) return headerID, err } func (in *CIDIndexer) indexUncleCID(tx *sqlx.Tx, uncle UncleModel, headerID int64) error { - _, err := tx.Exec(`INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward) VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward) = ($3, $4, $5)`, - uncle.BlockHash, headerID, uncle.ParentHash, uncle.CID, uncle.Reward) + _, err := tx.Exec(`INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward, mh_key) = ($3, $4, $5, $6)`, + uncle.BlockHash, headerID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey) return err } func (in *CIDIndexer) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error { for _, trxCidMeta := range payload.TransactionCIDs { var 
txID int64 - err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index) VALUES ($1, $2, $3, $4, $5, $6) - ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index) = ($3, $4, $5, $6) + err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key) = ($3, $4, $5, $6, $7) RETURNING id`, - headerID, trxCidMeta.TxHash, trxCidMeta.CID, trxCidMeta.Dst, trxCidMeta.Src, trxCidMeta.Index).Scan(&txID) + headerID, trxCidMeta.TxHash, trxCidMeta.CID, trxCidMeta.Dst, trxCidMeta.Src, trxCidMeta.Index, trxCidMeta.MhKey).Scan(&txID) if err != nil { return err } @@ -128,17 +128,17 @@ func (in *CIDIndexer) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *CIDPa func (in *CIDIndexer) indexTransactionCID(tx *sqlx.Tx, transaction TxModel, headerID int64) (int64, error) { var txID int64 - err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index) VALUES ($1, $2, $3, $4, $5, $6) - ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index) = ($3, $4, $5, $6) + err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key) = ($3, $4, $5, $6, $7) RETURNING id`, - headerID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index).Scan(&txID) + headerID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, transaction.MhKey).Scan(&txID) return txID, err } func (in *CIDIndexer) indexReceiptCID(tx *sqlx.Tx, cidMeta ReceiptModel, txID int64) error { - _, err := tx.Exec(`INSERT INTO eth.receipt_cids (tx_id, cid, contract, contract_hash, topic0s, topic1s, topic2s, topic3s, log_contracts) VALUES ($1, $2, $3, $4, $5, $6, 
$7, $8, $9) - ON CONFLICT (tx_id) DO UPDATE SET (cid, contract, contract_hash, topic0s, topic1s, topic2s, topic3s, log_contracts) = ($2, $3, $4, $5, $6, $7, $8, $9)`, - txID, cidMeta.CID, cidMeta.Contract, cidMeta.ContractHash, cidMeta.Topic0s, cidMeta.Topic1s, cidMeta.Topic2s, cidMeta.Topic3s, cidMeta.LogContracts) + _, err := tx.Exec(`INSERT INTO eth.receipt_cids (tx_id, cid, contract, contract_hash, topic0s, topic1s, topic2s, topic3s, log_contracts, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) + ON CONFLICT (tx_id) DO UPDATE SET (cid, contract, contract_hash, topic0s, topic1s, topic2s, topic3s, log_contracts, mh_key) = ($2, $3, $4, $5, $6, $7, $8, $9, $10)`, + txID, cidMeta.CID, cidMeta.Contract, cidMeta.ContractHash, cidMeta.Topic0s, cidMeta.Topic1s, cidMeta.Topic2s, cidMeta.Topic3s, cidMeta.LogContracts, cidMeta.MhKey) return err } @@ -149,10 +149,10 @@ func (in *CIDIndexer) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *CIDPayload, if stateCID.StateKey != nullHash.String() { stateKey = stateCID.StateKey } - err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff) VALUES ($1, $2, $3, $4, $5, $6) - ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff) = ($2, $3, $5, $6) + err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT (header_id, state_path, diff) DO UPDATE SET (state_leaf_key, cid, node_type, mh_key) = ($2, $3, $5, $7) RETURNING id`, - headerID, stateKey, stateCID.CID, stateCID.Path, stateCID.NodeType, true).Scan(&stateID) + headerID, stateKey, stateCID.CID, stateCID.Path, stateCID.NodeType, true, stateCID.MhKey).Scan(&stateID) if err != nil { return err } @@ -180,10 +180,10 @@ func (in *CIDIndexer) indexStateCID(tx *sqlx.Tx, stateNode StateNodeModel, heade if stateNode.StateKey != nullHash.String() { stateKey = stateNode.StateKey } - err := 
tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff) VALUES ($1, $2, $3, $4, $5, $6) - ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff) = ($2, $3, $5, $6) + err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT (header_id, state_path, diff) DO UPDATE SET (state_leaf_key, cid, node_type, mh_key) = ($2, $3, $5, $7) RETURNING id`, - headerID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true).Scan(&stateID) + headerID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey).Scan(&stateID) return stateID, err } @@ -199,8 +199,8 @@ func (in *CIDIndexer) indexStorageCID(tx *sqlx.Tx, storageCID StorageNodeModel, if storageCID.StorageKey != nullHash.String() { storageKey = storageCID.StorageKey } - _, err := tx.Exec(`INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff) VALUES ($1, $2, $3, $4, $5, $6) - ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff) = ($2, $3, $5, $6)`, - stateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, true) + _, err := tx.Exec(`INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT (state_id, storage_path, diff) DO UPDATE SET (storage_leaf_key, cid, node_type, mh_key) = ($2, $3, $5, $7)`, + stateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, true, storageCID.MhKey) return err } diff --git a/pkg/eth/indexer_test.go b/pkg/eth/indexer_test.go index 03169249..a00c958a 100644 --- a/pkg/eth/indexer_test.go +++ b/pkg/eth/indexer_test.go @@ -37,6 +37,17 @@ var _ = Describe("Indexer", func() { db, err = shared.SetupDB() Expect(err).ToNot(HaveOccurred()) repo = eth.NewCIDIndexer(db) + // need 
entries in the public.blocks with the mhkeys or the FK constraint will fail + shared.PublishMockIPLD(db, mocks.HeaderMhKey, mockData) + shared.PublishMockIPLD(db, mocks.Trx1MhKey, mockData) + shared.PublishMockIPLD(db, mocks.Trx2MhKey, mockData) + shared.PublishMockIPLD(db, mocks.Trx3MhKey, mockData) + shared.PublishMockIPLD(db, mocks.Rct1MhKey, mockData) + shared.PublishMockIPLD(db, mocks.Rct2MhKey, mockData) + shared.PublishMockIPLD(db, mocks.Rct3MhKey, mockData) + shared.PublishMockIPLD(db, mocks.State1MhKey, mockData) + shared.PublishMockIPLD(db, mocks.State2MhKey, mockData) + shared.PublishMockIPLD(db, mocks.StorageMhKey, mockData) }) AfterEach(func() { eth.TearDownDB(db) diff --git a/pkg/eth/ipld_pg_fetcher.go b/pkg/eth/ipld_pg_fetcher.go index d344d9c1..bbfe6b3d 100644 --- a/pkg/eth/ipld_pg_fetcher.go +++ b/pkg/eth/ipld_pg_fetcher.go @@ -102,7 +102,7 @@ func (f *IPLDPGFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error) // FetchHeaders fetches headers func (f *IPLDPGFetcher) FetchHeader(tx *sqlx.Tx, c HeaderModel) (ipfs.BlockModel, error) { log.Debug("fetching header ipld") - headerBytes, err := shared.FetchIPLD(tx, c.CID) + headerBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey) if err != nil { return ipfs.BlockModel{}, err } @@ -117,7 +117,7 @@ func (f *IPLDPGFetcher) FetchUncles(tx *sqlx.Tx, cids []UncleModel) ([]ipfs.Bloc log.Debug("fetching uncle iplds") uncleIPLDs := make([]ipfs.BlockModel, len(cids)) for i, c := range cids { - uncleBytes, err := shared.FetchIPLD(tx, c.CID) + uncleBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey) if err != nil { return nil, err } @@ -134,7 +134,7 @@ func (f *IPLDPGFetcher) FetchTrxs(tx *sqlx.Tx, cids []TxModel) ([]ipfs.BlockMode log.Debug("fetching transaction iplds") trxIPLDs := make([]ipfs.BlockModel, len(cids)) for i, c := range cids { - txBytes, err := shared.FetchIPLD(tx, c.CID) + txBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey) if err != nil { return nil, err } @@ -151,7 +151,7 @@ func (f 
*IPLDPGFetcher) FetchRcts(tx *sqlx.Tx, cids []ReceiptModel) ([]ipfs.Bloc log.Debug("fetching receipt iplds") rctIPLDs := make([]ipfs.BlockModel, len(cids)) for i, c := range cids { - rctBytes, err := shared.FetchIPLD(tx, c.CID) + rctBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey) if err != nil { return nil, err } @@ -171,7 +171,7 @@ func (f *IPLDPGFetcher) FetchState(tx *sqlx.Tx, cids []StateNodeModel) ([]StateN if stateNode.CID == "" { continue } - stateBytes, err := shared.FetchIPLD(tx, stateNode.CID) + stateBytes, err := shared.FetchIPLDByMhKey(tx, stateNode.MhKey) if err != nil { return nil, err } @@ -196,7 +196,7 @@ func (f *IPLDPGFetcher) FetchStorage(tx *sqlx.Tx, cids []StorageNodeWithStateKey if storageNode.CID == "" || storageNode.StateKey == "" { continue } - storageBytes, err := shared.FetchIPLD(tx, storageNode.CID) + storageBytes, err := shared.FetchIPLDByMhKey(tx, storageNode.MhKey) if err != nil { return nil, err } diff --git a/pkg/eth/mocks/test_data.go b/pkg/eth/mocks/test_data.go index ca925421..fb5a32fe 100644 --- a/pkg/eth/mocks/test_data.go +++ b/pkg/eth/mocks/test_data.go @@ -22,6 +22,8 @@ import ( "crypto/rand" "math/big" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -76,18 +78,29 @@ var ( Data: []byte{}, } HeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256) + HeaderMhKey = shared.MultihashKeyFromCID(HeaderCID) Trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(0), multihash.KECCAK_256) + Trx1MhKey = shared.MultihashKeyFromCID(Trx1CID) Trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(1), multihash.KECCAK_256) + Trx2MhKey = shared.MultihashKeyFromCID(Trx2CID) Trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(2), multihash.KECCAK_256) + Trx3MhKey = shared.MultihashKeyFromCID(Trx3CID) Rct1CID, _ = 
ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(0), multihash.KECCAK_256) + Rct1MhKey = shared.MultihashKeyFromCID(Rct1CID) Rct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(1), multihash.KECCAK_256) + Rct2MhKey = shared.MultihashKeyFromCID(Rct2CID) Rct3CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(2), multihash.KECCAK_256) + Rct3MhKey = shared.MultihashKeyFromCID(Rct3CID) State1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, ContractLeafNode, multihash.KECCAK_256) + State1MhKey = shared.MultihashKeyFromCID(State1CID) State2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, AccountLeafNode, multihash.KECCAK_256) + State2MhKey = shared.MultihashKeyFromCID(State2CID) StorageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageLeafNode, multihash.KECCAK_256) + StorageMhKey = shared.MultihashKeyFromCID(StorageCID) MockTrxMeta = []eth.TxModel{ { CID: "", // This is empty until we go to publish to ipfs + MhKey: "", Src: SenderAddr.Hex(), Dst: Address.String(), Index: 0, @@ -95,6 +108,7 @@ var ( }, { CID: "", + MhKey: "", Src: SenderAddr.Hex(), Dst: AnotherAddress.String(), Index: 1, @@ -102,6 +116,7 @@ var ( }, { CID: "", + MhKey: "", Src: SenderAddr.Hex(), Dst: "", Index: 2, @@ -111,6 +126,7 @@ var ( MockTrxMetaPostPublsh = []eth.TxModel{ { CID: Trx1CID.String(), // This is empty until we go to publish to ipfs + MhKey: Trx1MhKey, Src: SenderAddr.Hex(), Dst: Address.String(), Index: 0, @@ -118,6 +134,7 @@ var ( }, { CID: Trx2CID.String(), + MhKey: Trx2MhKey, Src: SenderAddr.Hex(), Dst: AnotherAddress.String(), Index: 1, @@ -125,6 +142,7 @@ var ( }, { CID: Trx3CID.String(), + MhKey: Trx3MhKey, Src: SenderAddr.Hex(), Dst: "", Index: 2, @@ -133,7 +151,8 @@ var ( } MockRctMeta = []eth.ReceiptModel{ { - CID: "", + CID: "", + MhKey: "", Topic0s: []string{ mockTopic11.String(), }, @@ -147,7 +166,8 @@ var ( }, }, { - CID: "", + CID: "", + MhKey: "", Topic0s: []string{ mockTopic21.String(), }, @@ -162,6 +182,7 @@ var ( }, { CID: 
"", + MhKey: "", Contract: ContractAddress.String(), ContractHash: ContractHash, LogContracts: []string{}, @@ -169,7 +190,8 @@ var ( } MockRctMetaPostPublish = []eth.ReceiptModel{ { - CID: Rct1CID.String(), + CID: Rct1CID.String(), + MhKey: Rct1MhKey, Topic0s: []string{ mockTopic11.String(), }, @@ -183,7 +205,8 @@ var ( }, }, { - CID: Rct2CID.String(), + CID: Rct2CID.String(), + MhKey: Rct2MhKey, Topic0s: []string{ mockTopic21.String(), }, @@ -198,6 +221,7 @@ var ( }, { CID: Rct3CID.String(), + MhKey: Rct3MhKey, Contract: ContractAddress.String(), ContractHash: ContractHash, LogContracts: []string{}, @@ -296,12 +320,14 @@ var ( MockStateMetaPostPublish = []eth.StateNodeModel{ { CID: State1CID.String(), + MhKey: State1MhKey, Path: []byte{'\x06'}, NodeType: 2, StateKey: common.BytesToHash(ContractLeafKey).Hex(), }, { CID: State2CID.String(), + MhKey: State2MhKey, Path: []byte{'\x0c'}, NodeType: 2, StateKey: common.BytesToHash(AccountLeafKey).Hex(), @@ -341,6 +367,7 @@ var ( BlockHash: MockBlock.Hash().String(), BlockNumber: MockBlock.Number().String(), CID: HeaderCID.String(), + MhKey: HeaderMhKey, ParentHash: MockBlock.ParentHash().String(), TotalDifficulty: MockBlock.Difficulty().String(), Reward: "5000000000000000000", @@ -363,6 +390,7 @@ var ( contractPath: { { CID: StorageCID.String(), + MhKey: StorageMhKey, Path: []byte{}, StorageKey: common.BytesToHash(StorageLeafKey).Hex(), NodeType: 2, @@ -392,6 +420,7 @@ var ( BlockHash: MockBlock.Hash().String(), ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000", CID: HeaderCID.String(), + MhKey: HeaderMhKey, TotalDifficulty: MockBlock.Difficulty().String(), Reward: "5000000000000000000", StateRoot: MockBlock.Root().String(), @@ -410,6 +439,7 @@ var ( { Path: []byte{}, CID: StorageCID.String(), + MhKey: StorageMhKey, NodeType: 2, StateKey: common.BytesToHash(ContractLeafKey).Hex(), StorageKey: common.BytesToHash(StorageLeafKey).Hex(), diff --git a/pkg/eth/models.go b/pkg/eth/models.go index 
e9b860ec..6c44a801 100644 --- a/pkg/eth/models.go +++ b/pkg/eth/models.go @@ -25,6 +25,7 @@ type HeaderModel struct { BlockHash string `db:"block_hash"` ParentHash string `db:"parent_hash"` CID string `db:"cid"` + MhKey string `db:"mh_key"` TotalDifficulty string `db:"td"` NodeID int64 `db:"node_id"` Reward string `db:"reward"` @@ -44,6 +45,7 @@ type UncleModel struct { BlockHash string `db:"block_hash"` ParentHash string `db:"parent_hash"` CID string `db:"cid"` + MhKey string `db:"mh_key"` Reward string `db:"reward"` } @@ -54,6 +56,7 @@ type TxModel struct { Index int64 `db:"index"` TxHash string `db:"tx_hash"` CID string `db:"cid"` + MhKey string `db:"mh_key"` Dst string `db:"dst"` Src string `db:"src"` } @@ -63,6 +66,7 @@ type ReceiptModel struct { ID int64 `db:"id"` TxID int64 `db:"tx_id"` CID string `db:"cid"` + MhKey string `db:"mh_key"` Contract string `db:"contract"` ContractHash string `db:"contract_hash"` LogContracts pq.StringArray `db:"log_contracts"` @@ -80,6 +84,7 @@ type StateNodeModel struct { StateKey string `db:"state_leaf_key"` NodeType int `db:"node_type"` CID string `db:"cid"` + MhKey string `db:"mh_key"` Diff bool `db:"diff"` } @@ -91,6 +96,7 @@ type StorageNodeModel struct { StorageKey string `db:"storage_leaf_key"` NodeType int `db:"node_type"` CID string `db:"cid"` + MhKey string `db:"mh_key"` Diff bool `db:"diff"` } @@ -103,6 +109,7 @@ type StorageNodeWithStateKeyModel struct { StorageKey string `db:"storage_leaf_key"` NodeType int `db:"node_type"` CID string `db:"cid"` + MhKey string `db:"mh_key"` Diff bool `db:"diff"` } diff --git a/pkg/eth/publish_and_indexer.go b/pkg/eth/publish_and_indexer.go index f352440e..eceaf59d 100644 --- a/pkg/eth/publish_and_indexer.go +++ b/pkg/eth/publish_and_indexer.go @@ -92,6 +92,7 @@ func (pub *IPLDPublisherAndIndexer) Publish(payload shared.ConvertedData) (share reward := CalcEthBlockReward(ipldPayload.Block.Header(), ipldPayload.Block.Uncles(), ipldPayload.Block.Transactions(), ipldPayload.Receipts) 
header := HeaderModel{ CID: headerNode.Cid().String(), + MhKey: shared.MultihashKeyFromCID(headerNode.Cid()), ParentHash: ipldPayload.Block.ParentHash().String(), BlockNumber: ipldPayload.Block.Number().String(), BlockHash: ipldPayload.Block.Hash().String(), @@ -117,6 +118,7 @@ func (pub *IPLDPublisherAndIndexer) Publish(payload shared.ConvertedData) (share uncleReward := CalcUncleMinerReward(ipldPayload.Block.Number().Int64(), uncleNode.Number.Int64()) uncle := UncleModel{ CID: uncleNode.Cid().String(), + MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()), ParentHash: uncleNode.ParentHash.String(), BlockHash: uncleNode.Hash().String(), Reward: uncleReward.String(), @@ -137,12 +139,14 @@ func (pub *IPLDPublisherAndIndexer) Publish(payload shared.ConvertedData) (share } txModel := ipldPayload.TxMetaData[i] txModel.CID = txNode.Cid().String() + txModel.MhKey = shared.MultihashKeyFromCID(txNode.Cid()) txID, err := pub.indexer.indexTransactionCID(tx, txModel, headerID) if err != nil { return nil, err } rctModel := ipldPayload.ReceiptMetaData[i] rctModel.CID = rctNode.Cid().String() + rctModel.MhKey = shared.MultihashKeyFromCID(rctNode.Cid()) if err := pub.indexer.indexReceiptCID(tx, rctModel, txID); err != nil { return nil, err } @@ -162,10 +166,12 @@ func (pub *IPLDPublisherAndIndexer) publishAndIndexStateAndStorage(tx *sqlx.Tx, if err != nil { return err } + mhKey, _ := shared.MultihashKeyFromCIDString(stateCIDStr) stateModel := StateNodeModel{ Path: stateNode.Path, StateKey: stateNode.LeafKey.String(), CID: stateCIDStr, + MhKey: mhKey, NodeType: ResolveFromNodeType(stateNode.Type), } stateID, err := pub.indexer.indexStateCID(tx, stateModel, headerID) @@ -199,10 +205,12 @@ func (pub *IPLDPublisherAndIndexer) publishAndIndexStateAndStorage(tx *sqlx.Tx, if err != nil { return err } + mhKey, _ := shared.MultihashKeyFromCIDString(storageCIDStr) storageModel := StorageNodeModel{ Path: storageNode.Path, StorageKey: storageNode.LeafKey.Hex(), CID: storageCIDStr, + MhKey: 
mhKey, NodeType: ResolveFromNodeType(storageNode.Type), } if err := pub.indexer.indexStorageCID(tx, storageModel, stateID); err != nil { diff --git a/pkg/eth/publisher.go b/pkg/eth/publisher.go index ecc1a6ac..89169270 100644 --- a/pkg/eth/publisher.go +++ b/pkg/eth/publisher.go @@ -79,6 +79,7 @@ func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForI reward := CalcEthBlockReward(ipldPayload.Block.Header(), ipldPayload.Block.Uncles(), ipldPayload.Block.Transactions(), ipldPayload.Receipts) header := HeaderModel{ CID: headerCid, + MhKey: shared.MultihashKeyFromCID(headerNode.Cid()), ParentHash: ipldPayload.Block.ParentHash().String(), BlockNumber: ipldPayload.Block.Number().String(), BlockHash: ipldPayload.Block.Hash().String(), @@ -102,6 +103,7 @@ func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForI uncleReward := CalcUncleMinerReward(ipldPayload.Block.Number().Int64(), uncle.Number.Int64()) uncleCids[i] = UncleModel{ CID: uncleCid, + MhKey: shared.MultihashKeyFromCID(uncle.Cid()), ParentHash: uncle.ParentHash.String(), BlockHash: uncle.Hash().String(), Reward: uncleReward.String(), @@ -162,6 +164,7 @@ func (pub *IPLDPublisher) publishTransactions(transactions []*ipld.EthTx, txTrie } trxCids[i] = TxModel{ CID: cid, + MhKey: shared.MultihashKeyFromCID(tx.Cid()), Index: trxMeta[i].Index, TxHash: trxMeta[i].TxHash, Src: trxMeta[i].Src, @@ -186,6 +189,7 @@ func (pub *IPLDPublisher) publishReceipts(receipts []*ipld.EthReceipt, receiptTr } rctCids[rct.TxHash] = ReceiptModel{ CID: cid, + MhKey: shared.MultihashKeyFromCID(rct.Cid()), Contract: receiptMeta[i].Contract, ContractHash: receiptMeta[i].ContractHash, Topic0s: receiptMeta[i].Topic0s, @@ -220,6 +224,7 @@ func (pub *IPLDPublisher) publishStateNodes(stateNodes []TrieNode) ([]StateNodeM Path: stateNode.Path, StateKey: stateNode.LeafKey.String(), CID: cid, + MhKey: shared.MultihashKeyFromCID(node.Cid()), NodeType: ResolveFromNodeType(stateNode.Type), }) // If we have 
a leaf, decode the account to extract additional metadata for indexing @@ -266,6 +271,7 @@ func (pub *IPLDPublisher) publishStorageNodes(storageNodes map[string][]TrieNode Path: storageNode.Path, StorageKey: storageNode.LeafKey.Hex(), CID: cid, + MhKey: shared.MultihashKeyFromCID(node.Cid()), NodeType: ResolveFromNodeType(storageNode.Type), }) } diff --git a/pkg/shared/functions.go b/pkg/shared/functions.go index 9116116c..9b364ec6 100644 --- a/pkg/shared/functions.go +++ b/pkg/shared/functions.go @@ -17,8 +17,6 @@ package shared import ( - "bytes" - "github.com/ethereum/go-ethereum/common" "github.com/ipfs/go-cid" "github.com/ipfs/go-ipfs-blockstore" @@ -26,51 +24,19 @@ import ( node "github.com/ipfs/go-ipld-format" "github.com/jmoiron/sqlx" "github.com/sirupsen/logrus" - - "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld" ) -// ListContainsString used to check if a list of strings contains a particular string -func ListContainsString(sss []string, s string) bool { - for _, str := range sss { - if s == str { - return true - } - } - return false -} - -// IPLDsContainBytes used to check if a list of strings contains a particular string -func IPLDsContainBytes(iplds []ipfs.BlockModel, b []byte) bool { - for _, ipld := range iplds { - if bytes.Equal(ipld.Data, b) { - return true - } - } - return false -} - -// ListContainsGap used to check if a list of Gaps contains a particular Gap -func ListContainsGap(gapList []Gap, gap Gap) bool { - for _, listGap := range gapList { - if listGap == gap { - return true - } - } - return false -} - -// HandleNullAddrPointer will return an emtpy string for a nil address pointer -func HandleNullAddrPointer(to *common.Address) string { +// HandleZeroAddrPointer will return an empty string for a nil address pointer +func HandleZeroAddrPointer(to *common.Address) string { if to == nil { return "" } return to.Hex() } -// HandleNullAddr will return an empty string for a a
null address -func HandleNullAddr(to common.Address) string { +// HandleZeroAddr will return an empty string for a 0 value address +func HandleZeroAddr(to common.Address) string { if to.Hex() == "0x0000000000000000000000000000000000000000" { return "" } @@ -93,7 +59,7 @@ func PublishIPLD(tx *sqlx.Tx, i node.Node) error { return err } -// FetchIPLD is used to retrieve an ipld from Postgres blockstore with the provided tx +// FetchIPLD is used to retrieve an ipld from Postgres blockstore with the provided tx and cid string func FetchIPLD(tx *sqlx.Tx, cid string) ([]byte, error) { mhKey, err := MultihashKeyFromCIDString(cid) if err != nil { @@ -104,6 +70,19 @@ func FetchIPLD(tx *sqlx.Tx, cid string) ([]byte, error) { return block, tx.Get(&block, pgStr, mhKey) } +// FetchIPLDByMhKey is used to retrieve an ipld from Postgres blockstore with the provided tx and mhkey string +func FetchIPLDByMhKey(tx *sqlx.Tx, mhKey string) ([]byte, error) { + pgStr := `SELECT data FROM public.blocks WHERE key = $1` + var block []byte + return block, tx.Get(&block, pgStr, mhKey) +} + +// MultihashKeyFromCID converts a cid into a blockstore-prefixed multihash db key string +func MultihashKeyFromCID(c cid.Cid) string { + dbKey := dshelp.MultihashToDsKey(c.Hash()) + return blockstore.BlockPrefix.String() + dbKey.String() +} + // MultihashKeyFromCIDString converts a cid string into a blockstore-prefixed multihash db key string func MultihashKeyFromCIDString(c string) (string, error) { dc, err := cid.Decode(c) diff --git a/pkg/shared/test_helpers.go b/pkg/shared/test_helpers.go index 8767e3b9..09291d2f 100644 --- a/pkg/shared/test_helpers.go +++ b/pkg/shared/test_helpers.go @@ -17,7 +17,13 @@ package shared import ( + "bytes" + + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/config" + "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs" "github.com/vulcanize/ipfs-blockchain-watcher/pkg/node" 
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres" ) @@ -30,3 +36,51 @@ func SetupDB() (*postgres.DB, error) { Port: 5432, }, node.Node{}) } + +// ListContainsString used to check if a list of strings contains a particular string +func ListContainsString(sss []string, s string) bool { + for _, str := range sss { + if s == str { + return true + } + } + return false +} + +// IPLDsContainBytes used to check if a list of IPLD blocks contains particular bytes +func IPLDsContainBytes(iplds []ipfs.BlockModel, b []byte) bool { + for _, ipld := range iplds { + if bytes.Equal(ipld.Data, b) { + return true + } + } + return false +} + +// ListContainsGap used to check if a list of Gaps contains a particular Gap +func ListContainsGap(gapList []Gap, gap Gap) bool { + for _, listGap := range gapList { + if listGap == gap { + return true + } + } + return false +} + +// TestCID creates a basic CID for testing purposes +func TestCID(b []byte) cid.Cid { + pref := cid.Prefix{ + Version: 1, + Codec: cid.Raw, + MhType: multihash.KECCAK_256, + MhLength: -1, + } + c, _ := pref.Sum(b) + return c +} + +// PublishMockIPLD writes a mhkey-data pair to the public.blocks table so that test data can FK reference the mhkey +func PublishMockIPLD(db *postgres.DB, mhKey string, mockData []byte) error { + _, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`, mhKey, mockData) + return err +}